FFmpeg
adpcm.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 #include "avcodec.h"
38 #include "get_bits.h"
39 #include "bytestream.h"
40 #include "adpcm.h"
41 #include "adpcm_data.h"
42 #include "internal.h"
43 
44 /**
45  * @file
46  * ADPCM decoders
47  * Features and limitations:
48  *
49  * Reference documents:
50  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
51  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
52  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
53  * http://openquicktime.sourceforge.net/
54  * XAnim sources (xa_codec.c) http://xanim.polter.net/
55  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
56  * SoX source code http://sox.sourceforge.net/
57  *
58  * CD-ROM XA:
59  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
60  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
61  * readstr http://www.geocities.co.jp/Playtown/2004/
62  */
63 
/* CD-ROM XA ADPCM prediction filter coefficient pairs {f0, f1}, selected by
 * the per-group filter index; applied in xa_decode() as
 * (s_1*f0 + s_2*f1 + 32) >> 6 */
static const int8_t xa_adpcm_table[5][2] = {
    {   0,   0 },
    {  60,   0 },
    { 115, -52 },
    {  98, -55 },
    { 122, -60 }
};
72 
/* Table used by the EA ADPCM family of decoders, laid out in rows of four
 * (presumably prediction coefficients and shift adjustments — its users are
 * outside this part of the file; verify against the EA decode paths). */
static const int16_t ea_adpcm_table[] = {
    0,  240,  460,  392,
    0,    0, -208, -220,
    0,    1,    3,    4,
    7,    8,   10,   11,
    0,   -1,   -3,   -4
};
80 
// Step-index adjustment tables for SWF ADPCM, one per coded bit width
// (2..5 bits, selected by nb_bits - 2 in adpcm_swf_decode()).
// Padded to zero where the table size is less than 16.
static const int8_t swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};
88 
/* Step-index adjustments for Zork ADPCM, indexed by bits 4-6 of the coded
 * byte (see adpcm_zork_expand_nibble()). */
static const int8_t zork_index_table[8] = {
    -1, -1, -1, 1, 4, 7, 10, 12,
};
92 
/* Step-index adjustments for IMA MTF ADPCM, indexed by the full 4-bit
 * nibble (see adpcm_ima_mtf_expand_nibble()); symmetric about the middle
 * of the nibble range. */
static const int8_t mtf_index_table[16] = {
     8,  6,  4,  2, -1, -1, -1, -1,
    -1, -1, -1, -1,  2,  4,  6,  8,
};
97 
98 /* end of tables */
99 
100 typedef struct ADPCMDecodeContext {
102  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
105 
107 {
108  ADPCMDecodeContext *c = avctx->priv_data;
109  unsigned int min_channels = 1;
110  unsigned int max_channels = 2;
111 
112  switch(avctx->codec->id) {
114  max_channels = 1;
115  break;
118  min_channels = 2;
119  break;
126  max_channels = 6;
127  break;
129  min_channels = 2;
130  max_channels = 8;
131  if (avctx->channels & 1) {
132  avpriv_request_sample(avctx, "channel count %d\n", avctx->channels);
133  return AVERROR_PATCHWELCOME;
134  }
135  break;
137  max_channels = 8;
138  break;
142  max_channels = 14;
143  break;
144  }
145  if (avctx->channels < min_channels || avctx->channels > max_channels) {
146  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
147  return AVERROR(EINVAL);
148  }
149 
150  switch(avctx->codec->id) {
152  c->status[0].step = c->status[1].step = 511;
153  break;
155  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
156  return AVERROR_INVALIDDATA;
157  break;
159  if (avctx->extradata && avctx->extradata_size >= 8) {
160  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
161  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
162  }
163  break;
165  if (avctx->extradata) {
166  if (avctx->extradata_size >= 28) {
167  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
168  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
169  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
170  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
171  } else if (avctx->extradata_size >= 16) {
172  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 0), 18);
173  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 4), 0, 88);
174  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 8), 18);
175  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 12), 0, 88);
176  }
177  }
178  break;
180  if (avctx->extradata && avctx->extradata_size >= 2)
181  c->vqa_version = AV_RL16(avctx->extradata);
182  break;
184  if (avctx->bits_per_coded_sample != 4)
185  return AVERROR_INVALIDDATA;
186  break;
188  if (avctx->bits_per_coded_sample != 8)
189  return AVERROR_INVALIDDATA;
190  break;
191  default:
192  break;
193  }
194 
195  switch (avctx->codec->id) {
215  break;
217  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
219  break;
221  avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
223  break;
224  default:
225  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
226  }
227 
228  return 0;
229 }
230 
/**
 * Expand one 4-bit AGM ADPCM nibble.
 * Bit 3 of the nibble is the sign (set = subtract), bits 0-2 the magnitude.
 * The predictor is clamped to [-32767, 32767]; the step size adapts
 * multiplicatively based on the magnitude, with magnitude 6 as a special
 * case that doubles the step and returns immediately.
 */
static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
{
    int delta, pred, step, add;

    pred = c->predictor;
    delta = nibble & 7;
    step = c->step;
    add = (delta * 2 + 1) * step;
    if (add < 0)
        add = add + 7; /* bias so the >> 3 below rounds a negative value toward zero */

    if ((nibble & 8) == 0)
        pred = av_clip(pred + (add >> 3), -32767, 32767);
    else
        pred = av_clip(pred - (add >> 3), -32767, 32767);

    /* multiplicative step adaptation; the product is scaled by >> 6 below */
    switch (delta) {
    case 7:
        step *= 0x99;
        break;
    case 6:
        /* special case: double the step (within [127, 24576]) and return early */
        c->step = av_clip(c->step * 2, 127, 24576);
        c->predictor = pred;
        return pred;
    case 5:
        step *= 0x66;
        break;
    case 4:
        step *= 0x4d;
        break;
    default:
        step *= 0x39;
        break;
    }

    if (step < 0)
        step += 0x3f; /* bias so the >> 6 below rounds a negative step toward zero */

    c->step = step >> 6;
    c->step = av_clip(c->step, 127, 24576);
    c->predictor = pred;
    return pred;
}
274 
275 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
276 {
277  int step_index;
278  int predictor;
279  int sign, delta, diff, step;
280 
281  step = ff_adpcm_step_table[c->step_index];
282  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
283  step_index = av_clip(step_index, 0, 88);
284 
285  sign = nibble & 8;
286  delta = nibble & 7;
287  /* perform direct multiplication instead of series of jumps proposed by
288  * the reference ADPCM implementation since modern CPUs can do the mults
289  * quickly enough */
290  diff = ((2 * delta + 1) * step) >> shift;
291  predictor = c->predictor;
292  if (sign) predictor -= diff;
293  else predictor += diff;
294 
295  c->predictor = av_clip_int16(predictor);
296  c->step_index = step_index;
297 
298  return (int16_t)c->predictor;
299 }
300 
301 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
302 {
303  int step_index;
304  int predictor;
305  int sign, delta, diff, step;
306 
307  step = ff_adpcm_step_table[c->step_index];
308  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
309  step_index = av_clip(step_index, 0, 88);
310 
311  sign = nibble & 8;
312  delta = nibble & 7;
313  diff = (delta * step) >> shift;
314  predictor = c->predictor;
315  if (sign) predictor -= diff;
316  else predictor += diff;
317 
318  c->predictor = av_clip_int16(predictor);
319  c->step_index = step_index;
320 
321  return (int16_t)c->predictor;
322 }
323 
324 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
325 {
326  int step_index, step, delta, predictor;
327 
328  step = ff_adpcm_step_table[c->step_index];
329 
330  delta = step * (2 * nibble - 15);
331  predictor = c->predictor + delta;
332 
333  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
334  c->predictor = av_clip_int16(predictor >> 4);
335  c->step_index = av_clip(step_index, 0, 88);
336 
337  return (int16_t)c->predictor;
338 }
339 
340 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
341 {
342  int step_index;
343  int predictor;
344  int step;
345 
346  nibble = sign_extend(nibble & 0xF, 4);
347 
349  step_index = c->step_index + ff_adpcm_ima_cunning_index_table[abs(nibble)];
350  step_index = av_clip(step_index, 0, 60);
351 
352  predictor = c->predictor + step * nibble;
353 
354  c->predictor = av_clip_int16(predictor);
355  c->step_index = step_index;
356 
357  return c->predictor;
358 }
359 
361 {
362  int nibble, step_index, predictor, sign, delta, diff, step, shift;
363 
364  shift = bps - 1;
365  nibble = get_bits_le(gb, bps),
366  step = ff_adpcm_step_table[c->step_index];
367  step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
368  step_index = av_clip(step_index, 0, 88);
369 
370  sign = nibble & (1 << shift);
371  delta = av_mod_uintp2(nibble, shift);
372  diff = ((2 * delta + 1) * step) >> shift;
373  predictor = c->predictor;
374  if (sign) predictor -= diff;
375  else predictor += diff;
376 
377  c->predictor = av_clip_int16(predictor);
378  c->step_index = step_index;
379 
380  return (int16_t)c->predictor;
381 }
382 
383 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
384 {
385  int step_index;
386  int predictor;
387  int diff, step;
388 
389  step = ff_adpcm_step_table[c->step_index];
390  step_index = c->step_index + ff_adpcm_index_table[nibble];
391  step_index = av_clip(step_index, 0, 88);
392 
393  diff = step >> 3;
394  if (nibble & 4) diff += step;
395  if (nibble & 2) diff += step >> 1;
396  if (nibble & 1) diff += step >> 2;
397 
398  if (nibble & 8)
399  predictor = c->predictor - diff;
400  else
401  predictor = c->predictor + diff;
402 
403  c->predictor = av_clip_int16(predictor);
404  c->step_index = step_index;
405 
406  return c->predictor;
407 }
408 
409 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
410 {
411  int predictor;
412 
413  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
414  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
415 
416  c->sample2 = c->sample1;
417  c->sample1 = av_clip_int16(predictor);
418  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
419  if (c->idelta < 16) c->idelta = 16;
420  if (c->idelta > INT_MAX/768) {
421  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
422  c->idelta = INT_MAX/768;
423  }
424 
425  return c->sample1;
426 }
427 
428 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
429 {
430  int step_index, predictor, sign, delta, diff, step;
431 
433  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
434  step_index = av_clip(step_index, 0, 48);
435 
436  sign = nibble & 8;
437  delta = nibble & 7;
438  diff = ((2 * delta + 1) * step) >> 3;
439  predictor = c->predictor;
440  if (sign) predictor -= diff;
441  else predictor += diff;
442 
443  c->predictor = av_clip_intp2(predictor, 11);
444  c->step_index = step_index;
445 
446  return c->predictor * 16;
447 }
448 
449 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
450 {
451  int sign, delta, diff;
452  int new_step;
453 
454  sign = nibble & 8;
455  delta = nibble & 7;
456  /* perform direct multiplication instead of series of jumps proposed by
457  * the reference ADPCM implementation since modern CPUs can do the mults
458  * quickly enough */
459  diff = ((2 * delta + 1) * c->step) >> 3;
460  /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
461  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
462  c->predictor = av_clip_int16(c->predictor);
463  /* calculate new step and clamp it to range 511..32767 */
464  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
465  c->step = av_clip(new_step, 511, 32767);
466 
467  return (int16_t)c->predictor;
468 }
469 
470 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
471 {
472  int sign, delta, diff;
473 
474  sign = nibble & (1<<(size-1));
475  delta = nibble & ((1<<(size-1))-1);
476  diff = delta << (7 + c->step + shift);
477 
478  /* clamp result */
479  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
480 
481  /* calculate new step */
482  if (delta >= (2*size - 3) && c->step < 3)
483  c->step++;
484  else if (delta == 0 && c->step > 0)
485  c->step--;
486 
487  return (int16_t) c->predictor;
488 }
489 
491 {
492  if(!c->step) {
493  c->predictor = 0;
494  c->step = 127;
495  }
496 
497  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
498  c->predictor = av_clip_int16(c->predictor);
499  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
500  c->step = av_clip(c->step, 127, 24576);
501  return c->predictor;
502 }
503 
504 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
505 {
506  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
507  c->predictor = av_clip_int16(c->predictor);
508  c->step += ff_adpcm_index_table[nibble];
509  c->step = av_clip_uintp2(c->step, 5);
510  return c->predictor;
511 }
512 
513 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
514 {
515  int16_t index = c->step_index;
516  uint32_t lookup_sample = ff_adpcm_step_table[index];
517  int32_t sample = 0;
518 
519  if (nibble & 0x40)
520  sample += lookup_sample;
521  if (nibble & 0x20)
522  sample += lookup_sample >> 1;
523  if (nibble & 0x10)
524  sample += lookup_sample >> 2;
525  if (nibble & 0x08)
526  sample += lookup_sample >> 3;
527  if (nibble & 0x04)
528  sample += lookup_sample >> 4;
529  if (nibble & 0x02)
530  sample += lookup_sample >> 5;
531  if (nibble & 0x01)
532  sample += lookup_sample >> 6;
533  if (nibble & 0x80)
534  sample = -sample;
535 
536  sample += c->predictor;
537  sample = av_clip_int16(sample);
538 
539  index += zork_index_table[(nibble >> 4) & 7];
540  index = av_clip(index, 0, 88);
541 
542  c->predictor = sample;
543  c->step_index = index;
544 
545  return sample;
546 }
547 
548 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
550  ADPCMChannelStatus *right, int channels, int sample_offset)
551 {
552  int i, j;
553  int shift,filter,f0,f1;
554  int s_1,s_2;
555  int d,s,t;
556 
557  out0 += sample_offset;
558  if (channels == 1)
559  out1 = out0 + 28;
560  else
561  out1 += sample_offset;
562 
563  for(i=0;i<4;i++) {
564  shift = 12 - (in[4+i*2] & 15);
565  filter = in[4+i*2] >> 4;
566  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
567  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
568  filter=0;
569  }
570  if (shift < 0) {
571  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
572  shift = 0;
573  }
574  f0 = xa_adpcm_table[filter][0];
575  f1 = xa_adpcm_table[filter][1];
576 
577  s_1 = left->sample1;
578  s_2 = left->sample2;
579 
580  for(j=0;j<28;j++) {
581  d = in[16+i+j*4];
582 
583  t = sign_extend(d, 4);
584  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
585  s_2 = s_1;
586  s_1 = av_clip_int16(s);
587  out0[j] = s_1;
588  }
589 
590  if (channels == 2) {
591  left->sample1 = s_1;
592  left->sample2 = s_2;
593  s_1 = right->sample1;
594  s_2 = right->sample2;
595  }
596 
597  shift = 12 - (in[5+i*2] & 15);
598  filter = in[5+i*2] >> 4;
599  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
600  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
601  filter=0;
602  }
603  if (shift < 0) {
604  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
605  shift = 0;
606  }
607 
608  f0 = xa_adpcm_table[filter][0];
609  f1 = xa_adpcm_table[filter][1];
610 
611  for(j=0;j<28;j++) {
612  d = in[16+i+j*4];
613 
614  t = sign_extend(d >> 4, 4);
615  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
616  s_2 = s_1;
617  s_1 = av_clip_int16(s);
618  out1[j] = s_1;
619  }
620 
621  if (channels == 2) {
622  right->sample1 = s_1;
623  right->sample2 = s_2;
624  } else {
625  left->sample1 = s_1;
626  left->sample2 = s_2;
627  }
628 
629  out0 += 28 * (3 - channels);
630  out1 += 28 * (3 - channels);
631  }
632 
633  return 0;
634 }
635 
636 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
637 {
638  ADPCMDecodeContext *c = avctx->priv_data;
639  GetBitContext gb;
640  const int8_t *table;
641  int k0, signmask, nb_bits, count;
642  int size = buf_size*8;
643  int i;
644 
645  init_get_bits(&gb, buf, size);
646 
647  //read bits & initial values
648  nb_bits = get_bits(&gb, 2)+2;
649  table = swf_index_tables[nb_bits-2];
650  k0 = 1 << (nb_bits-2);
651  signmask = 1 << (nb_bits-1);
652 
653  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
654  for (i = 0; i < avctx->channels; i++) {
655  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
656  c->status[i].step_index = get_bits(&gb, 6);
657  }
658 
659  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
660  int i;
661 
662  for (i = 0; i < avctx->channels; i++) {
663  // similar to IMA adpcm
664  int delta = get_bits(&gb, nb_bits);
666  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
667  int k = k0;
668 
669  do {
670  if (delta & k)
671  vpdiff += step;
672  step >>= 1;
673  k >>= 1;
674  } while(k);
675  vpdiff += step;
676 
677  if (delta & signmask)
678  c->status[i].predictor -= vpdiff;
679  else
680  c->status[i].predictor += vpdiff;
681 
682  c->status[i].step_index += table[delta & (~signmask)];
683 
684  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
685  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
686 
687  *samples++ = c->status[i].predictor;
688  }
689  }
690  }
691 }
692 
693 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
694 {
695  int sample = sign_extend(nibble, 4) * (1 << shift);
696 
697  if (flag)
698  sample += (8 * cs->sample1) - (4 * cs->sample2);
699  else
700  sample += 4 * cs->sample1;
701 
702  sample = av_clip_int16(sample >> 2);
703 
704  cs->sample2 = cs->sample1;
705  cs->sample1 = sample;
706 
707  return sample;
708 }
709 
710 /**
711  * Get the number of samples (per channel) that will be decoded from the packet.
712  * In one case, this is actually the maximum number of samples possible to
713  * decode with the given buf_size.
714  *
715  * @param[out] coded_samples set to the number of samples as coded in the
716  * packet, or 0 if the codec does not encode the
717  * number of samples in each frame.
718  * @param[out] approx_nb_samples set to non-zero if the number of samples
719  * returned is an approximation.
720  */
722  int buf_size, int *coded_samples, int *approx_nb_samples)
723 {
724  ADPCMDecodeContext *s = avctx->priv_data;
725  int nb_samples = 0;
726  int ch = avctx->channels;
727  int has_coded_samples = 0;
728  int header_size;
729 
730  *coded_samples = 0;
731  *approx_nb_samples = 0;
732 
733  if(ch <= 0)
734  return 0;
735 
736  switch (avctx->codec->id) {
737  /* constant, only check buf_size */
739  if (buf_size < 76 * ch)
740  return 0;
741  nb_samples = 128;
742  break;
744  if (buf_size < 34 * ch)
745  return 0;
746  nb_samples = 64;
747  break;
749  if (buf_size < 17 * ch)
750  return 0;
751  nb_samples = 32;
752  break;
753  /* simple 4-bit adpcm */
766  nb_samples = buf_size * 2 / ch;
767  break;
768  }
769  if (nb_samples)
770  return nb_samples;
771 
772  /* simple 4-bit adpcm, with header */
773  header_size = 0;
774  switch (avctx->codec->id) {
779  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
780  case AV_CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break;
781  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
782  }
783  if (header_size > 0)
784  return (buf_size - header_size) * 2 / ch;
785 
786  /* more complex formats */
787  switch (avctx->codec->id) {
789  has_coded_samples = 1;
790  *coded_samples = bytestream2_get_le32(gb);
791  *coded_samples -= *coded_samples % 28;
792  nb_samples = (buf_size - 12) / 30 * 28;
793  break;
795  has_coded_samples = 1;
796  *coded_samples = bytestream2_get_le32(gb);
797  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
798  break;
800  nb_samples = (buf_size - ch) / ch * 2;
801  break;
805  /* maximum number of samples */
806  /* has internal offsets and a per-frame switch to signal raw 16-bit */
807  has_coded_samples = 1;
808  switch (avctx->codec->id) {
810  header_size = 4 + 9 * ch;
811  *coded_samples = bytestream2_get_le32(gb);
812  break;
814  header_size = 4 + 5 * ch;
815  *coded_samples = bytestream2_get_le32(gb);
816  break;
818  header_size = 4 + 5 * ch;
819  *coded_samples = bytestream2_get_be32(gb);
820  break;
821  }
822  *coded_samples -= *coded_samples % 28;
823  nb_samples = (buf_size - header_size) * 2 / ch;
824  nb_samples -= nb_samples % 28;
825  *approx_nb_samples = 1;
826  break;
828  if (avctx->block_align > 0)
829  buf_size = FFMIN(buf_size, avctx->block_align);
830  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
831  break;
833  if (avctx->block_align > 0)
834  buf_size = FFMIN(buf_size, avctx->block_align);
835  if (buf_size < 4 * ch)
836  return AVERROR_INVALIDDATA;
837  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
838  break;
840  if (avctx->block_align > 0)
841  buf_size = FFMIN(buf_size, avctx->block_align);
842  nb_samples = (buf_size - 4 * ch) * 2 / ch;
843  break;
845  {
846  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
847  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
848  if (avctx->block_align > 0)
849  buf_size = FFMIN(buf_size, avctx->block_align);
850  if (buf_size < 4 * ch)
851  return AVERROR_INVALIDDATA;
852  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
853  break;
854  }
856  if (avctx->block_align > 0)
857  buf_size = FFMIN(buf_size, avctx->block_align);
858  nb_samples = (buf_size - 6 * ch) * 2 / ch;
859  break;
861  if (avctx->block_align > 0)
862  buf_size = FFMIN(buf_size, avctx->block_align);
863  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
864  break;
868  {
869  int samples_per_byte;
870  switch (avctx->codec->id) {
871  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
872  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
873  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
874  }
875  if (!s->status[0].step_index) {
876  if (buf_size < ch)
877  return AVERROR_INVALIDDATA;
878  nb_samples++;
879  buf_size -= ch;
880  }
881  nb_samples += buf_size * samples_per_byte / ch;
882  break;
883  }
885  {
886  int buf_bits = buf_size * 8 - 2;
887  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
888  int block_hdr_size = 22 * ch;
889  int block_size = block_hdr_size + nbits * ch * 4095;
890  int nblocks = buf_bits / block_size;
891  int bits_left = buf_bits - nblocks * block_size;
892  nb_samples = nblocks * 4096;
893  if (bits_left >= block_hdr_size)
894  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
895  break;
896  }
899  if (avctx->extradata) {
900  nb_samples = buf_size * 14 / (8 * ch);
901  break;
902  }
903  has_coded_samples = 1;
904  bytestream2_skip(gb, 4); // channel size
905  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
906  bytestream2_get_le32(gb) :
907  bytestream2_get_be32(gb);
908  buf_size -= 8 + 36 * ch;
909  buf_size /= ch;
910  nb_samples = buf_size / 8 * 14;
911  if (buf_size % 8 > 1)
912  nb_samples += (buf_size % 8 - 1) * 2;
913  *approx_nb_samples = 1;
914  break;
916  nb_samples = buf_size / (9 * ch) * 16;
917  break;
919  nb_samples = (buf_size / 128) * 224 / ch;
920  break;
923  nb_samples = buf_size / (16 * ch) * 28;
924  break;
926  nb_samples = buf_size / ch;
927  break;
928  }
929 
930  /* validate coded sample count */
931  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
932  return AVERROR_INVALIDDATA;
933 
934  return nb_samples;
935 }
936 
937 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
938  int *got_frame_ptr, AVPacket *avpkt)
939 {
940  AVFrame *frame = data;
941  const uint8_t *buf = avpkt->data;
942  int buf_size = avpkt->size;
943  ADPCMDecodeContext *c = avctx->priv_data;
944  ADPCMChannelStatus *cs;
945  int n, m, channel, i;
946  int16_t *samples;
947  int16_t **samples_p;
948  int st; /* stereo */
949  int count1, count2;
950  int nb_samples, coded_samples, approx_nb_samples, ret;
951  GetByteContext gb;
952 
953  bytestream2_init(&gb, buf, buf_size);
954  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
955  if (nb_samples <= 0) {
956  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
957  return AVERROR_INVALIDDATA;
958  }
959 
960  /* get output buffer */
961  frame->nb_samples = nb_samples;
962  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
963  return ret;
964  samples = (int16_t *)frame->data[0];
965  samples_p = (int16_t **)frame->extended_data;
966 
967  /* use coded_samples when applicable */
968  /* it is always <= nb_samples, so the output buffer will be large enough */
969  if (coded_samples) {
970  if (!approx_nb_samples && coded_samples != nb_samples)
971  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
972  frame->nb_samples = nb_samples = coded_samples;
973  }
974 
975  st = avctx->channels == 2 ? 1 : 0;
976 
977  switch(avctx->codec->id) {
979  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
980  Channel data is interleaved per-chunk. */
981  for (channel = 0; channel < avctx->channels; channel++) {
982  int predictor;
983  int step_index;
984  cs = &(c->status[channel]);
985  /* (pppppp) (piiiiiii) */
986 
987  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
988  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
989  step_index = predictor & 0x7F;
990  predictor &= ~0x7F;
991 
992  if (cs->step_index == step_index) {
993  int diff = predictor - cs->predictor;
994  if (diff < 0)
995  diff = - diff;
996  if (diff > 0x7f)
997  goto update;
998  } else {
999  update:
1000  cs->step_index = step_index;
1001  cs->predictor = predictor;
1002  }
1003 
1004  if (cs->step_index > 88u){
1005  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1006  channel, cs->step_index);
1007  return AVERROR_INVALIDDATA;
1008  }
1009 
1010  samples = samples_p[channel];
1011 
1012  for (m = 0; m < 64; m += 2) {
1013  int byte = bytestream2_get_byteu(&gb);
1014  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1015  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1016  }
1017  }
1018  break;
1020  for(i=0; i<avctx->channels; i++){
1021  cs = &(c->status[i]);
1022  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1023 
1024  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1025  if (cs->step_index > 88u){
1026  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1027  i, cs->step_index);
1028  return AVERROR_INVALIDDATA;
1029  }
1030  }
1031 
1032  if (avctx->bits_per_coded_sample != 4) {
1033  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1034  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1036  GetBitContext g;
1037 
1038  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1039  for (i = 0; i < avctx->channels; i++) {
1040  int j;
1041 
1042  cs = &c->status[i];
1043  samples = &samples_p[i][1 + n * samples_per_block];
1044  for (j = 0; j < block_size; j++) {
1045  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
1046  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
1047  }
1048  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1049  if (ret < 0)
1050  return ret;
1051  for (m = 0; m < samples_per_block; m++) {
1052  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
1053  avctx->bits_per_coded_sample);
1054  }
1055  }
1056  }
1057  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
1058  } else {
1059  for (n = 0; n < (nb_samples - 1) / 8; n++) {
1060  for (i = 0; i < avctx->channels; i++) {
1061  cs = &c->status[i];
1062  samples = &samples_p[i][1 + n * 8];
1063  for (m = 0; m < 8; m += 2) {
1064  int v = bytestream2_get_byteu(&gb);
1065  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1066  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1067  }
1068  }
1069  }
1070  }
1071  break;
1072  case AV_CODEC_ID_ADPCM_4XM:
1073  for (i = 0; i < avctx->channels; i++)
1074  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1075 
1076  for (i = 0; i < avctx->channels; i++) {
1077  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1078  if (c->status[i].step_index > 88u) {
1079  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1080  i, c->status[i].step_index);
1081  return AVERROR_INVALIDDATA;
1082  }
1083  }
1084 
1085  for (i = 0; i < avctx->channels; i++) {
1086  samples = (int16_t *)frame->data[i];
1087  cs = &c->status[i];
1088  for (n = nb_samples >> 1; n > 0; n--) {
1089  int v = bytestream2_get_byteu(&gb);
1090  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1091  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1092  }
1093  }
1094  break;
1095  case AV_CODEC_ID_ADPCM_AGM:
1096  for (i = 0; i < avctx->channels; i++)
1097  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1098  for (i = 0; i < avctx->channels; i++)
1099  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1100 
1101  for (n = 0; n < nb_samples >> (1 - st); n++) {
1102  int v = bytestream2_get_byteu(&gb);
1103  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1104  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1105  }
1106  break;
1107  case AV_CODEC_ID_ADPCM_MS:
1108  {
1109  int block_predictor;
1110 
1111  if (avctx->channels > 2) {
1112  for (channel = 0; channel < avctx->channels; channel++) {
1113  samples = samples_p[channel];
1114  block_predictor = bytestream2_get_byteu(&gb);
1115  if (block_predictor > 6) {
1116  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1117  channel, block_predictor);
1118  return AVERROR_INVALIDDATA;
1119  }
1120  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1121  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1122  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1123  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1124  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1125  *samples++ = c->status[channel].sample2;
1126  *samples++ = c->status[channel].sample1;
1127  for(n = (nb_samples - 2) >> 1; n > 0; n--) {
1128  int byte = bytestream2_get_byteu(&gb);
1129  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1130  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1131  }
1132  }
1133  } else {
1134  block_predictor = bytestream2_get_byteu(&gb);
1135  if (block_predictor > 6) {
1136  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1137  block_predictor);
1138  return AVERROR_INVALIDDATA;
1139  }
1140  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1141  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1142  if (st) {
1143  block_predictor = bytestream2_get_byteu(&gb);
1144  if (block_predictor > 6) {
1145  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1146  block_predictor);
1147  return AVERROR_INVALIDDATA;
1148  }
1149  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1150  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1151  }
1152  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1153  if (st){
1154  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1155  }
1156 
1157  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1158  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1159  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1160  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1161 
1162  *samples++ = c->status[0].sample2;
1163  if (st) *samples++ = c->status[1].sample2;
1164  *samples++ = c->status[0].sample1;
1165  if (st) *samples++ = c->status[1].sample1;
1166  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1167  int byte = bytestream2_get_byteu(&gb);
1168  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1169  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1170  }
1171  }
1172  break;
1173  }
1175  for (channel = 0; channel < avctx->channels; channel+=2) {
1176  bytestream2_skipu(&gb, 4);
1177  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1178  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1179  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1180  bytestream2_skipu(&gb, 2);
1181  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1182  bytestream2_skipu(&gb, 2);
1183  for (n = 0; n < nb_samples; n+=2) {
1184  int v = bytestream2_get_byteu(&gb);
1185  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1186  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1187  }
1188  for (n = 0; n < nb_samples; n+=2) {
1189  int v = bytestream2_get_byteu(&gb);
1190  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1191  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1192  }
1193  }
1194  break;
1196  for (channel = 0; channel < avctx->channels; channel++) {
1197  cs = &c->status[channel];
1198  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1199  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1200  if (cs->step_index > 88u){
1201  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1202  channel, cs->step_index);
1203  return AVERROR_INVALIDDATA;
1204  }
1205  }
1206  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1207  int v = bytestream2_get_byteu(&gb);
1208  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1209  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1210  }
1211  break;
1213  {
1214  int last_byte = 0;
1215  int nibble;
1216  int decode_top_nibble_next = 0;
1217  int diff_channel;
1218  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1219 
1220  bytestream2_skipu(&gb, 10);
1221  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1222  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1223  c->status[0].step_index = bytestream2_get_byteu(&gb);
1224  c->status[1].step_index = bytestream2_get_byteu(&gb);
1225  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1226  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1227  c->status[0].step_index, c->status[1].step_index);
1228  return AVERROR_INVALIDDATA;
1229  }
1230  /* sign extend the predictors */
1231  diff_channel = c->status[1].predictor;
1232 
1233  /* DK3 ADPCM support macro */
1234 #define DK3_GET_NEXT_NIBBLE() \
1235  if (decode_top_nibble_next) { \
1236  nibble = last_byte >> 4; \
1237  decode_top_nibble_next = 0; \
1238  } else { \
1239  last_byte = bytestream2_get_byteu(&gb); \
1240  nibble = last_byte & 0x0F; \
1241  decode_top_nibble_next = 1; \
1242  }
1243 
1244  while (samples < samples_end) {
1245 
1246  /* for this algorithm, c->status[0] is the sum channel and
1247  * c->status[1] is the diff channel */
1248 
1249  /* process the first predictor of the sum channel */
1251  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1252 
1253  /* process the diff channel predictor */
1255  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1256 
1257  /* process the first pair of stereo PCM samples */
1258  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1259  *samples++ = c->status[0].predictor + c->status[1].predictor;
1260  *samples++ = c->status[0].predictor - c->status[1].predictor;
1261 
1262  /* process the second predictor of the sum channel */
1264  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1265 
1266  /* process the second pair of stereo PCM samples */
1267  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1268  *samples++ = c->status[0].predictor + c->status[1].predictor;
1269  *samples++ = c->status[0].predictor - c->status[1].predictor;
1270  }
1271 
1272  if ((bytestream2_tell(&gb) & 1))
1273  bytestream2_skip(&gb, 1);
1274  break;
1275  }
1277  for (channel = 0; channel < avctx->channels; channel++) {
1278  cs = &c->status[channel];
1279  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1280  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1281  if (cs->step_index > 88u){
1282  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1283  channel, cs->step_index);
1284  return AVERROR_INVALIDDATA;
1285  }
1286  }
1287 
1288  for (n = nb_samples >> (1 - st); n > 0; n--) {
1289  int v1, v2;
1290  int v = bytestream2_get_byteu(&gb);
1291  /* nibbles are swapped for mono */
1292  if (st) {
1293  v1 = v >> 4;
1294  v2 = v & 0x0F;
1295  } else {
1296  v2 = v >> 4;
1297  v1 = v & 0x0F;
1298  }
1299  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1300  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1301  }
1302  break;
1304  for (channel = 0; channel < avctx->channels; channel++) {
1305  cs = &c->status[channel];
1306  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1307  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1308  if (cs->step_index > 88u){
1309  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1310  channel, cs->step_index);
1311  return AVERROR_INVALIDDATA;
1312  }
1313  }
1314 
1315  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1316  for (channel = 0; channel < avctx->channels; channel++) {
1317  samples = samples_p[channel] + 256 * subframe;
1318  for (n = 0; n < 256; n += 2) {
1319  int v = bytestream2_get_byteu(&gb);
1320  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1321  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1322  }
1323  }
1324  }
1325  break;
1327  for (channel = 0; channel < avctx->channels; channel++) {
1328  cs = &c->status[channel];
1329  samples = samples_p[channel];
1330  bytestream2_skip(&gb, 4);
1331  for (n = 0; n < nb_samples; n += 2) {
1332  int v = bytestream2_get_byteu(&gb);
1333  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1334  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1335  }
1336  }
1337  break;
1339  for (n = nb_samples >> (1 - st); n > 0; n--) {
1340  int v = bytestream2_get_byteu(&gb);
1341  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1342  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1343  }
1344  break;
1346  for (n = nb_samples >> (1 - st); n > 0; n--) {
1347  int v = bytestream2_get_byteu(&gb);
1348  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1349  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1350  }
1351  break;
1353  for (n = nb_samples / 2; n > 0; n--) {
1354  for (channel = 0; channel < avctx->channels; channel++) {
1355  int v = bytestream2_get_byteu(&gb);
1356  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1357  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1358  }
1359  samples += avctx->channels;
1360  }
1361  break;
1363  for (n = nb_samples / 2; n > 0; n--) {
1364  for (channel = 0; channel < avctx->channels; channel++) {
1365  int v = bytestream2_get_byteu(&gb);
1366  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1367  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1368  }
1369  samples += avctx->channels;
1370  }
1371  break;
1373  for (n = 0; n < nb_samples / 2; n++) {
1374  int v = bytestream2_get_byteu(&gb);
1375  *samples++ = adpcm_ima_cunning_expand_nibble(&c->status[0], v & 0x0F);
1376  *samples++ = adpcm_ima_cunning_expand_nibble(&c->status[0], v >> 4);
1377  }
1378  break;
1380  for (n = nb_samples >> (1 - st); n > 0; n--) {
1381  int v = bytestream2_get_byteu(&gb);
1382  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1383  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1384  }
1385  break;
1387  for (channel = 0; channel < avctx->channels; channel++) {
1388  cs = &c->status[channel];
1389  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1390  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1391  if (cs->step_index > 88u){
1392  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1393  channel, cs->step_index);
1394  return AVERROR_INVALIDDATA;
1395  }
1396  }
1397  for (n = 0; n < nb_samples / 2; n++) {
1398  int byte[2];
1399 
1400  byte[0] = bytestream2_get_byteu(&gb);
1401  if (st)
1402  byte[1] = bytestream2_get_byteu(&gb);
1403  for(channel = 0; channel < avctx->channels; channel++) {
1404  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1405  }
1406  for(channel = 0; channel < avctx->channels; channel++) {
1407  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1408  }
1409  }
1410  break;
1412  if (c->vqa_version == 3) {
1413  for (channel = 0; channel < avctx->channels; channel++) {
1414  int16_t *smp = samples_p[channel];
1415 
1416  for (n = nb_samples / 2; n > 0; n--) {
1417  int v = bytestream2_get_byteu(&gb);
1418  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1419  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1420  }
1421  }
1422  } else {
1423  for (n = nb_samples / 2; n > 0; n--) {
1424  for (channel = 0; channel < avctx->channels; channel++) {
1425  int v = bytestream2_get_byteu(&gb);
1426  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1427  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1428  }
1429  samples += avctx->channels;
1430  }
1431  }
1432  bytestream2_seek(&gb, 0, SEEK_END);
1433  break;
1434  case AV_CODEC_ID_ADPCM_XA:
1435  {
1436  int16_t *out0 = samples_p[0];
1437  int16_t *out1 = samples_p[1];
1438  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1439  int sample_offset = 0;
1440  int bytes_remaining;
1441  while (bytestream2_get_bytes_left(&gb) >= 128) {
1442  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1443  &c->status[0], &c->status[1],
1444  avctx->channels, sample_offset)) < 0)
1445  return ret;
1446  bytestream2_skipu(&gb, 128);
1447  sample_offset += samples_per_block;
1448  }
1449  /* Less than a full block of data left, e.g. when reading from
1450  * 2324 byte per sector XA; the remainder is padding */
1451  bytes_remaining = bytestream2_get_bytes_left(&gb);
1452  if (bytes_remaining > 0) {
1453  bytestream2_skip(&gb, bytes_remaining);
1454  }
1455  break;
1456  }
1458  for (i=0; i<=st; i++) {
1459  c->status[i].step_index = bytestream2_get_le32u(&gb);
1460  if (c->status[i].step_index > 88u) {
1461  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1462  i, c->status[i].step_index);
1463  return AVERROR_INVALIDDATA;
1464  }
1465  }
1466  for (i=0; i<=st; i++) {
1467  c->status[i].predictor = bytestream2_get_le32u(&gb);
1468  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1469  return AVERROR_INVALIDDATA;
1470  }
1471 
1472  for (n = nb_samples >> (1 - st); n > 0; n--) {
1473  int byte = bytestream2_get_byteu(&gb);
1474  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1475  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1476  }
1477  break;
1479  for (n = nb_samples >> (1 - st); n > 0; n--) {
1480  int byte = bytestream2_get_byteu(&gb);
1481  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1482  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1483  }
1484  break;
1485  case AV_CODEC_ID_ADPCM_EA:
1486  {
1487  int previous_left_sample, previous_right_sample;
1488  int current_left_sample, current_right_sample;
1489  int next_left_sample, next_right_sample;
1490  int coeff1l, coeff2l, coeff1r, coeff2r;
1491  int shift_left, shift_right;
1492 
1493  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1494  each coding 28 stereo samples. */
1495 
1496  if(avctx->channels != 2)
1497  return AVERROR_INVALIDDATA;
1498 
1499  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1500  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1501  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1502  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1503 
1504  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1505  int byte = bytestream2_get_byteu(&gb);
1506  coeff1l = ea_adpcm_table[ byte >> 4 ];
1507  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1508  coeff1r = ea_adpcm_table[ byte & 0x0F];
1509  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1510 
1511  byte = bytestream2_get_byteu(&gb);
1512  shift_left = 20 - (byte >> 4);
1513  shift_right = 20 - (byte & 0x0F);
1514 
1515  for (count2 = 0; count2 < 28; count2++) {
1516  byte = bytestream2_get_byteu(&gb);
1517  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1518  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1519 
1520  next_left_sample = (next_left_sample +
1521  (current_left_sample * coeff1l) +
1522  (previous_left_sample * coeff2l) + 0x80) >> 8;
1523  next_right_sample = (next_right_sample +
1524  (current_right_sample * coeff1r) +
1525  (previous_right_sample * coeff2r) + 0x80) >> 8;
1526 
1527  previous_left_sample = current_left_sample;
1528  current_left_sample = av_clip_int16(next_left_sample);
1529  previous_right_sample = current_right_sample;
1530  current_right_sample = av_clip_int16(next_right_sample);
1531  *samples++ = current_left_sample;
1532  *samples++ = current_right_sample;
1533  }
1534  }
1535 
1536  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1537 
1538  break;
1539  }
1541  {
1542  int coeff[2][2], shift[2];
1543 
1544  for(channel = 0; channel < avctx->channels; channel++) {
1545  int byte = bytestream2_get_byteu(&gb);
1546  for (i=0; i<2; i++)
1547  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1548  shift[channel] = 20 - (byte & 0x0F);
1549  }
1550  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1551  int byte[2];
1552 
1553  byte[0] = bytestream2_get_byteu(&gb);
1554  if (st) byte[1] = bytestream2_get_byteu(&gb);
1555  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1556  for(channel = 0; channel < avctx->channels; channel++) {
1557  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1558  sample = (sample +
1559  c->status[channel].sample1 * coeff[channel][0] +
1560  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1562  c->status[channel].sample1 = av_clip_int16(sample);
1563  *samples++ = c->status[channel].sample1;
1564  }
1565  }
1566  }
1567  bytestream2_seek(&gb, 0, SEEK_END);
1568  break;
1569  }
1572  case AV_CODEC_ID_ADPCM_EA_R3: {
1573  /* channel numbering
1574  2chan: 0=fl, 1=fr
1575  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1576  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1577  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1578  int previous_sample, current_sample, next_sample;
1579  int coeff1, coeff2;
1580  int shift;
1581  unsigned int channel;
1582  uint16_t *samplesC;
1583  int count = 0;
1584  int offsets[6];
1585 
1586  for (channel=0; channel<avctx->channels; channel++)
1587  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1588  bytestream2_get_le32(&gb)) +
1589  (avctx->channels + 1) * 4;
1590 
1591  for (channel=0; channel<avctx->channels; channel++) {
1592  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1593  samplesC = samples_p[channel];
1594 
1595  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1596  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1597  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1598  } else {
1599  current_sample = c->status[channel].predictor;
1600  previous_sample = c->status[channel].prev_sample;
1601  }
1602 
1603  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1604  int byte = bytestream2_get_byte(&gb);
1605  if (byte == 0xEE) { /* only seen in R2 and R3 */
1606  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1607  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1608 
1609  for (count2=0; count2<28; count2++)
1610  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1611  } else {
1612  coeff1 = ea_adpcm_table[ byte >> 4 ];
1613  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1614  shift = 20 - (byte & 0x0F);
1615 
1616  for (count2=0; count2<28; count2++) {
1617  if (count2 & 1)
1618  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1619  else {
1620  byte = bytestream2_get_byte(&gb);
1621  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1622  }
1623 
1624  next_sample += (current_sample * coeff1) +
1625  (previous_sample * coeff2);
1626  next_sample = av_clip_int16(next_sample >> 8);
1627 
1628  previous_sample = current_sample;
1629  current_sample = next_sample;
1630  *samplesC++ = current_sample;
1631  }
1632  }
1633  }
1634  if (!count) {
1635  count = count1;
1636  } else if (count != count1) {
1637  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1638  count = FFMAX(count, count1);
1639  }
1640 
1641  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1642  c->status[channel].predictor = current_sample;
1643  c->status[channel].prev_sample = previous_sample;
1644  }
1645  }
1646 
1647  frame->nb_samples = count * 28;
1648  bytestream2_seek(&gb, 0, SEEK_END);
1649  break;
1650  }
1652  for (channel=0; channel<avctx->channels; channel++) {
1653  int coeff[2][4], shift[4];
1654  int16_t *s = samples_p[channel];
1655  for (n = 0; n < 4; n++, s += 32) {
1656  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1657  for (i=0; i<2; i++)
1658  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1659  s[0] = val & ~0x0F;
1660 
1661  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1662  shift[n] = 20 - (val & 0x0F);
1663  s[1] = val & ~0x0F;
1664  }
1665 
1666  for (m=2; m<32; m+=2) {
1667  s = &samples_p[channel][m];
1668  for (n = 0; n < 4; n++, s += 32) {
1669  int level, pred;
1670  int byte = bytestream2_get_byteu(&gb);
1671 
1672  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1673  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1674  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1675 
1676  level = sign_extend(byte, 4) * (1 << shift[n]);
1677  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1678  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1679  }
1680  }
1681  }
1682  break;
1684  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1685  c->status[0].step_index = bytestream2_get_byteu(&gb);
1686  bytestream2_skipu(&gb, 5);
1687  if (c->status[0].step_index > 88u) {
1688  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1689  c->status[0].step_index);
1690  return AVERROR_INVALIDDATA;
1691  }
1692 
1693  for (n = nb_samples >> (1 - st); n > 0; n--) {
1694  int v = bytestream2_get_byteu(&gb);
1695 
1696  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1697  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1698  }
1699  break;
1701  for (i = 0; i < avctx->channels; i++) {
1702  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1703  c->status[i].step_index = bytestream2_get_byteu(&gb);
1704  bytestream2_skipu(&gb, 1);
1705  if (c->status[i].step_index > 88u) {
1706  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1707  c->status[i].step_index);
1708  return AVERROR_INVALIDDATA;
1709  }
1710  }
1711 
1712  for (n = nb_samples >> (1 - st); n > 0; n--) {
1713  int v = bytestream2_get_byteu(&gb);
1714 
1715  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1716  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1717  }
1718  break;
1719  case AV_CODEC_ID_ADPCM_CT:
1720  for (n = nb_samples >> (1 - st); n > 0; n--) {
1721  int v = bytestream2_get_byteu(&gb);
1722  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1723  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1724  }
1725  break;
1729  if (!c->status[0].step_index) {
1730  /* the first byte is a raw sample */
1731  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1732  if (st)
1733  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1734  c->status[0].step_index = 1;
1735  nb_samples--;
1736  }
1737  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1738  for (n = nb_samples >> (1 - st); n > 0; n--) {
1739  int byte = bytestream2_get_byteu(&gb);
1740  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1741  byte >> 4, 4, 0);
1742  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1743  byte & 0x0F, 4, 0);
1744  }
1745  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1746  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1747  int byte = bytestream2_get_byteu(&gb);
1748  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1749  byte >> 5 , 3, 0);
1750  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1751  (byte >> 2) & 0x07, 3, 0);
1752  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1753  byte & 0x03, 2, 0);
1754  }
1755  } else {
1756  for (n = nb_samples >> (2 - st); n > 0; n--) {
1757  int byte = bytestream2_get_byteu(&gb);
1758  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1759  byte >> 6 , 2, 2);
1760  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1761  (byte >> 4) & 0x03, 2, 2);
1762  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1763  (byte >> 2) & 0x03, 2, 2);
1764  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1765  byte & 0x03, 2, 2);
1766  }
1767  }
1768  break;
1769  case AV_CODEC_ID_ADPCM_SWF:
1770  adpcm_swf_decode(avctx, buf, buf_size, samples);
1771  bytestream2_seek(&gb, 0, SEEK_END);
1772  break;
1774  for (n = nb_samples >> (1 - st); n > 0; n--) {
1775  int v = bytestream2_get_byteu(&gb);
1776  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1777  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1778  }
1779  break;
1781  if (!c->has_status) {
1782  for (channel = 0; channel < avctx->channels; channel++)
1783  c->status[channel].step = 0;
1784  c->has_status = 1;
1785  }
1786  for (channel = 0; channel < avctx->channels; channel++) {
1787  samples = samples_p[channel];
1788  for (n = nb_samples >> 1; n > 0; n--) {
1789  int v = bytestream2_get_byteu(&gb);
1790  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1791  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1792  }
1793  }
1794  break;
1795  case AV_CODEC_ID_ADPCM_AFC:
1796  {
1797  int samples_per_block;
1798  int blocks;
1799 
1800  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1801  samples_per_block = avctx->extradata[0] / 16;
1802  blocks = nb_samples / avctx->extradata[0];
1803  } else {
1804  samples_per_block = nb_samples / 16;
1805  blocks = 1;
1806  }
1807 
1808  for (m = 0; m < blocks; m++) {
1809  for (channel = 0; channel < avctx->channels; channel++) {
1810  int prev1 = c->status[channel].sample1;
1811  int prev2 = c->status[channel].sample2;
1812 
1813  samples = samples_p[channel] + m * 16;
1814  /* Read in every sample for this channel. */
1815  for (i = 0; i < samples_per_block; i++) {
1816  int byte = bytestream2_get_byteu(&gb);
1817  int scale = 1 << (byte >> 4);
1818  int index = byte & 0xf;
1819  int factor1 = ff_adpcm_afc_coeffs[0][index];
1820  int factor2 = ff_adpcm_afc_coeffs[1][index];
1821 
1822  /* Decode 16 samples. */
1823  for (n = 0; n < 16; n++) {
1824  int32_t sampledat;
1825 
1826  if (n & 1) {
1827  sampledat = sign_extend(byte, 4);
1828  } else {
1829  byte = bytestream2_get_byteu(&gb);
1830  sampledat = sign_extend(byte >> 4, 4);
1831  }
1832 
1833  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1834  sampledat * scale;
1835  *samples = av_clip_int16(sampledat);
1836  prev2 = prev1;
1837  prev1 = *samples++;
1838  }
1839  }
1840 
1841  c->status[channel].sample1 = prev1;
1842  c->status[channel].sample2 = prev2;
1843  }
1844  }
1845  bytestream2_seek(&gb, 0, SEEK_END);
1846  break;
1847  }
1848  case AV_CODEC_ID_ADPCM_THP:
1850  {
1851  int table[14][16];
1852  int ch;
1853 
1854 #define THP_GET16(g) \
1855  sign_extend( \
1856  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1857  bytestream2_get_le16u(&(g)) : \
1858  bytestream2_get_be16u(&(g)), 16)
1859 
1860  if (avctx->extradata) {
1862  if (avctx->extradata_size < 32 * avctx->channels) {
1863  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1864  return AVERROR_INVALIDDATA;
1865  }
1866 
1867  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1868  for (i = 0; i < avctx->channels; i++)
1869  for (n = 0; n < 16; n++)
1870  table[i][n] = THP_GET16(tb);
1871  } else {
1872  for (i = 0; i < avctx->channels; i++)
1873  for (n = 0; n < 16; n++)
1874  table[i][n] = THP_GET16(gb);
1875 
1876  if (!c->has_status) {
1877  /* Initialize the previous sample. */
1878  for (i = 0; i < avctx->channels; i++) {
1879  c->status[i].sample1 = THP_GET16(gb);
1880  c->status[i].sample2 = THP_GET16(gb);
1881  }
1882  c->has_status = 1;
1883  } else {
1884  bytestream2_skip(&gb, avctx->channels * 4);
1885  }
1886  }
1887 
1888  for (ch = 0; ch < avctx->channels; ch++) {
1889  samples = samples_p[ch];
1890 
1891  /* Read in every sample for this channel. */
1892  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1893  int byte = bytestream2_get_byteu(&gb);
1894  int index = (byte >> 4) & 7;
1895  unsigned int exp = byte & 0x0F;
1896  int64_t factor1 = table[ch][index * 2];
1897  int64_t factor2 = table[ch][index * 2 + 1];
1898 
1899  /* Decode 14 samples. */
1900  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1901  int32_t sampledat;
1902 
1903  if (n & 1) {
1904  sampledat = sign_extend(byte, 4);
1905  } else {
1906  byte = bytestream2_get_byteu(&gb);
1907  sampledat = sign_extend(byte >> 4, 4);
1908  }
1909 
1910  sampledat = ((c->status[ch].sample1 * factor1
1911  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1912  *samples = av_clip_int16(sampledat);
1913  c->status[ch].sample2 = c->status[ch].sample1;
1914  c->status[ch].sample1 = *samples++;
1915  }
1916  }
1917  }
1918  break;
1919  }
1920  case AV_CODEC_ID_ADPCM_DTK:
1921  for (channel = 0; channel < avctx->channels; channel++) {
1922  samples = samples_p[channel];
1923 
1924  /* Read in every sample for this channel. */
1925  for (i = 0; i < nb_samples / 28; i++) {
1926  int byte, header;
1927  if (channel)
1928  bytestream2_skipu(&gb, 1);
1929  header = bytestream2_get_byteu(&gb);
1930  bytestream2_skipu(&gb, 3 - channel);
1931 
1932  /* Decode 28 samples. */
1933  for (n = 0; n < 28; n++) {
1934  int32_t sampledat, prev;
1935 
1936  switch (header >> 4) {
1937  case 1:
1938  prev = (c->status[channel].sample1 * 0x3c);
1939  break;
1940  case 2:
1941  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1942  break;
1943  case 3:
1944  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1945  break;
1946  default:
1947  prev = 0;
1948  }
1949 
1950  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1951 
1952  byte = bytestream2_get_byteu(&gb);
1953  if (!channel)
1954  sampledat = sign_extend(byte, 4);
1955  else
1956  sampledat = sign_extend(byte >> 4, 4);
1957 
1958  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1959  *samples++ = av_clip_int16(sampledat >> 6);
1961  c->status[channel].sample1 = sampledat;
1962  }
1963  }
1964  if (!channel)
1965  bytestream2_seek(&gb, 0, SEEK_SET);
1966  }
1967  break;
1968  case AV_CODEC_ID_ADPCM_PSX:
1969  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * avctx->channels); block++) {
1970  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * avctx->channels) / (16 * avctx->channels);
1971  for (channel = 0; channel < avctx->channels; channel++) {
1972  samples = samples_p[channel] + block * nb_samples_per_block;
1973 
1974  /* Read in every sample for this channel. */
1975  for (i = 0; i < nb_samples_per_block / 28; i++) {
1976  int filter, shift, flag, byte;
1977 
1978  filter = bytestream2_get_byteu(&gb);
1979  shift = filter & 0xf;
1980  filter = filter >> 4;
1981  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
1982  return AVERROR_INVALIDDATA;
1983  flag = bytestream2_get_byteu(&gb);
1984 
1985  /* Decode 28 samples. */
1986  for (n = 0; n < 28; n++) {
1987  int sample = 0, scale;
1988 
1989  if (flag < 0x07) {
1990  if (n & 1) {
1991  scale = sign_extend(byte >> 4, 4);
1992  } else {
1993  byte = bytestream2_get_byteu(&gb);
1994  scale = sign_extend(byte, 4);
1995  }
1996 
1997  scale = scale * (1 << 12);
1998  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
1999  }
2000  *samples++ = av_clip_int16(sample);
2002  c->status[channel].sample1 = sample;
2003  }
2004  }
2005  }
2006  }
2007  break;
2009  /*
2010  * The format of each block:
2011  * uint8_t left_control;
2012  * uint4_t left_samples[nb_samples];
2013  * ---- and if stereo ----
2014  * uint8_t right_control;
2015  * uint4_t right_samples[nb_samples];
2016  *
2017  * Format of the control byte:
2018  * MSB [SSSSRDRR] LSB
2019  * S = (Shift Amount - 2)
2020  * D = Decoder flag.
2021  * R = Reserved
2022  *
2023  * Each block relies on the previous two samples of each channel.
2024  * They should be 0 initially.
2025  */
2026  for (channel = 0; channel < avctx->channels; channel++) {
2027  int control, shift;
2028 
2029  samples = samples_p[channel];
2030  cs = c->status + channel;
2031 
2032  /* Get the control byte and decode the samples, 2 at a time. */
2033  control = bytestream2_get_byteu(&gb);
2034  shift = (control >> 4) + 2;
2035 
2036  for (n = 0; n < nb_samples / 2; n++) {
2037  int sample = bytestream2_get_byteu(&gb);
2038  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2039  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2040  }
2041  }
2042  break;
2044  if (!c->has_status) {
2045  for (channel = 0; channel < avctx->channels; channel++) {
2046  c->status[channel].predictor = 0;
2047  c->status[channel].step_index = 0;
2048  }
2049  c->has_status = 1;
2050  }
2051  for (n = 0; n < nb_samples * avctx->channels; n++) {
2052  int v = bytestream2_get_byteu(&gb);
2053  *samples++ = adpcm_zork_expand_nibble(&c->status[n % avctx->channels], v);
2054  }
2055  break;
2057  for (n = nb_samples / 2; n > 0; n--) {
2058  for (channel = 0; channel < avctx->channels; channel++) {
2059  int v = bytestream2_get_byteu(&gb);
2060  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2061  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2062  }
2063  samples += avctx->channels;
2064  }
2065  break;
2066  default:
2067  av_assert0(0); // unsupported codec_id should not happen
2068  }
2069 
2070  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2071  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2072  return AVERROR_INVALIDDATA;
2073  }
2074 
2075  *got_frame_ptr = 1;
2076 
2077  if (avpkt->size < bytestream2_tell(&gb)) {
2078  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2079  return avpkt->size;
2080  }
2081 
2082  return bytestream2_tell(&gb);
2083 }
2084 
2085 static void adpcm_flush(AVCodecContext *avctx)
2086 {
2087  ADPCMDecodeContext *c = avctx->priv_data;
2088  c->has_status = 0;
2089 }
2090 
2091 
2099 
/**
 * Define an AVCodec decoder entry for one ADPCM variant.
 *
 * @param id_          AVCodecID of the variant
 * @param sample_fmts_ terminated list of supported output sample formats
 *                     (one of the sample_fmts_* arrays defined above)
 * @param name_        short codec name; also forms the symbol
 *                     ff_<name_>_decoder
 * @param long_name_   human-readable description (stripped in small builds)
 *
 * All variants share the same init/decode/flush callbacks and the
 * ADPCMDecodeContext private data; they differ only in codec id and
 * supported sample formats.
 */
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
AVCodec ff_ ## name_ ## _decoder = { \
    .name           = #name_, \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_), \
    .type           = AVMEDIA_TYPE_AUDIO, \
    .id             = id_, \
    .priv_data_size = sizeof(ADPCMDecodeContext), \
    .init           = adpcm_decode_init, \
    .decode         = adpcm_decode_frame, \
    .flush          = adpcm_flush, \
    .capabilities   = AV_CODEC_CAP_DR1, \
    .sample_fmts    = sample_fmts_, \
}
2113 
/* Registration table: one ADPCM_DECODER() expansion per supported variant.
 * Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM,         sample_fmts_s16p, adpcm_4xm,         "ADPCM 4X Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC,         sample_fmts_s16p, adpcm_afc,         "ADPCM Nintendo Gamecube AFC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM,         sample_fmts_s16,  adpcm_agm,         "ADPCM AmuseGraphics Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA,        sample_fmts_s16p, adpcm_aica,        "ADPCM Yamaha AICA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ARGO,        sample_fmts_s16p, adpcm_argo,        "ADPCM Argonaut Games");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT,          sample_fmts_s16,  adpcm_ct,          "ADPCM Creative Technology");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK,         sample_fmts_s16p, adpcm_dtk,         "ADPCM Nintendo Gamecube DTK");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA,          sample_fmts_s16,  adpcm_ea,          "ADPCM Electronic Arts");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16,  adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1,       sample_fmts_s16p, adpcm_ea_r1,       "ADPCM Electronic Arts R1");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2,       sample_fmts_s16p, adpcm_ea_r2,       "ADPCM Electronic Arts R2");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3,       sample_fmts_s16p, adpcm_ea_r3,       "ADPCM Electronic Arts R3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS,      sample_fmts_s16p, adpcm_ea_xas,      "ADPCM Electronic Arts XAS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV,     sample_fmts_s16,  adpcm_ima_amv,     "ADPCM IMA AMV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC,     sample_fmts_s16,  adpcm_ima_apc,     "ADPCM IMA CRYO APC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APM,     sample_fmts_s16,  adpcm_ima_apm,     "ADPCM IMA Ubisoft APM");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_CUNNING, sample_fmts_s16,  adpcm_ima_cunning, "ADPCM IMA Cunning Developments");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4,    sample_fmts_s16,  adpcm_ima_dat4,    "ADPCM IMA Eurocom DAT4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3,     sample_fmts_s16,  adpcm_ima_dk3,     "ADPCM IMA Duck DK3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4,     sample_fmts_s16,  adpcm_ima_dk4,     "ADPCM IMA Duck DK4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16,  adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16,  adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS,     sample_fmts_s16,  adpcm_ima_iss,     "ADPCM IMA Funcom ISS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MOFLEX,  sample_fmts_s16p, adpcm_ima_moflex,  "ADPCM IMA MobiClip MOFLEX");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MTF,     sample_fmts_s16,  adpcm_ima_mtf,     "ADPCM IMA Capcom's MT Framework");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI,     sample_fmts_s16,  adpcm_ima_oki,     "ADPCM IMA Dialogic OKI");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT,      sample_fmts_s16p, adpcm_ima_qt,      "ADPCM IMA QuickTime");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD,     sample_fmts_s16,  adpcm_ima_rad,     "ADPCM IMA Radical");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SSI,     sample_fmts_s16,  adpcm_ima_ssi,     "ADPCM IMA Simon & Schuster Interactive");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG,  sample_fmts_s16,  adpcm_ima_smjpeg,  "ADPCM IMA Loki SDL MJPEG");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ALP,     sample_fmts_s16,  adpcm_ima_alp,     "ADPCM IMA High Voltage Software ALP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV,     sample_fmts_s16p, adpcm_ima_wav,     "ADPCM IMA WAV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS,      sample_fmts_both, adpcm_ima_ws,      "ADPCM IMA Westwood");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS,          sample_fmts_both, adpcm_ms,          "ADPCM Microsoft");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF,        sample_fmts_s16p, adpcm_mtaf,        "ADPCM MTAF");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX,         sample_fmts_s16p, adpcm_psx,         "ADPCM Playstation");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2,     sample_fmts_s16,  adpcm_sbpro_2,     "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3,     sample_fmts_s16,  adpcm_sbpro_3,     "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4,     sample_fmts_s16,  adpcm_sbpro_4,     "ADPCM Sound Blaster Pro 4-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF,         sample_fmts_s16,  adpcm_swf,         "ADPCM Shockwave Flash");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE,      sample_fmts_s16p, adpcm_thp_le,      "ADPCM Nintendo THP (little-endian)");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP,         sample_fmts_s16p, adpcm_thp,         "ADPCM Nintendo THP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA,          sample_fmts_s16p, adpcm_xa,          "ADPCM CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA,      sample_fmts_s16,  adpcm_yamaha,      "ADPCM Yamaha");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ZORK,        sample_fmts_s16,  adpcm_zork,        "ADPCM Zork");
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:535
static const int16_t ea_adpcm_table[]
Definition: adpcm.c:73
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int shift(int a, int b)
Definition: sonic.c:82
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:504
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
#define THP_GET16(g)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
else temp
Definition: vf_mcdeint.c:256
const char * g
Definition: vf_curves.c:115
#define avpriv_request_sample(...)
int size
Definition: packet.h:364
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
static enum AVSampleFormat sample_fmts_s16[]
Definition: adpcm.c:2092
#define sample
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:87
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:1223
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:409
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:90
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1194
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:88
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Definition: adpcm.c:106
float delta
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
static void adpcm_flush(AVCodecContext *avctx)
Definition: adpcm.c:2085
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
Definition: adpcm.c:636
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
ADPCM tables.
uint8_t * data
Definition: packet.h:363
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
static const int8_t mtf_index_table[16]
Definition: adpcm.c:93
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples (per channel) that will be decoded from the packet.
Definition: adpcm.c:721
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:170
static int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:513
bitstream reader API header.
ptrdiff_t size
Definition: opengl_enc.c:100
static const uint8_t header[24]
Definition: sdr2.c:67
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1750
channels
Definition: aptx.h:33
#define av_log(a,...)
static const uint16_t table[]
Definition: prosumer.c:206
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
enum AVCodecID id
Definition: codec.h:204
static int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:340
static const int8_t xa_adpcm_table[5][2]
Definition: adpcm.c:65
const uint16_t ff_adpcm_afc_coeffs[2][16]
Definition: adpcm_data.c:109
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
ADPCM encoder/decoder common header.
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
Definition: adpcm.c:693
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
const int8_t *const ff_adpcm_index_tables[4]
Definition: adpcm_data.c:50
const int16_t ff_adpcm_step_table[89]
This is the step table.
Definition: adpcm_data.c:61
GLsizei count
Definition: opengl_enc.c:108
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:428
#define FFMAX(a, b)
Definition: common.h:94
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
Definition: adpcm.c:360
int8_t exp
Definition: eval.c:72
static int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:301
const int8_t ff_adpcm_index_table[16]
Definition: adpcm_data.c:40
static const int8_t swf_index_tables[4][16]
Definition: adpcm.c:82
const int16_t ff_adpcm_mtaf_stepsize[32][16]
Definition: adpcm_data.c:114
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
Definition: adpcm.c:548
#define FFMIN(a, b)
Definition: common.h:96
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
Definition: adpcm_data.c:95
int vqa_version
VQA version.
Definition: adpcm.c:102
int32_t
static const uint8_t ff_adpcm_ima_block_sizes[4]
Definition: adpcm_data.h:31
static enum AVSampleFormat sample_fmts_s16p[]
Definition: adpcm.c:2094
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
const int16_t ff_adpcm_oki_step_table[49]
Definition: adpcm_data.c:73
#define FF_ARRAY_ELEMS(a)
static const float pred[4]
Definition: siprdata.h:259
static const uint8_t ff_adpcm_ima_block_samples[4]
Definition: adpcm_data.h:32
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
const int16_t ff_adpcm_AdaptationTable[]
Definition: adpcm_data.c:84
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:87
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
#define abs(x)
Definition: cuda_runtime.h:35
main external API structure.
Definition: avcodec.h:526
const int16_t ff_adpcm_ima_cunning_step_table[61]
Definition: adpcm_data.c:185
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
Definition: adpcm.c:490
#define DK3_GET_NEXT_NIBBLE()
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1872
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
Definition: adpcm.c:275
int extradata_size
Definition: avcodec.h:628
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
static int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:324
int index
Definition: gxfenc.c:89
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:231
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
Definition: adpcm.c:383
ADPCMChannelStatus status[14]
Definition: adpcm.c:101
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
Definition: adpcm.c:470
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:420
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
const int8_t ff_adpcm_ima_cunning_index_table[9]
Definition: adpcm_data.c:181
uint8_t level
Definition: svq3.c:205
int
const int8_t ff_adpcm_yamaha_difflookup[]
Definition: adpcm_data.c:104
common internal api header.
const int16_t ff_adpcm_yamaha_indexscale[]
Definition: adpcm_data.c:99
static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
Definition: adpcm.c:937
signed 16 bits
Definition: samplefmt.h:61
#define flag(name)
Definition: cbs_av1.c:552
channel
Use these values when setting the channel map with ebur128_set_channel().
Definition: ebur128.h:39
unsigned bps
Definition: movenc.c:1533
static const int8_t zork_index_table[8]
Definition: adpcm.c:89
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
void * priv_data
Definition: avcodec.h:553
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:663
int channels
number of audio channels
Definition: avcodec.h:1187
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
Definition: adpcm.c:449
static float add(float src0, float src1)
static enum AVSampleFormat sample_fmts_both[]
Definition: adpcm.c:2096
Filter the word “frame” indicates either a video frame or a group of audio samples
int16_t step_index
Definition: adpcm.h:35
signed 16 bits, planar
Definition: samplefmt.h:67
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:355
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:374
for(j=16;j >0;--j)
int i
Definition: input.c:407
#define tb
Definition: regdef.h:68
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_)
Definition: adpcm.c:2100