adpcm.c
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 #include "avcodec.h"
38 #include "get_bits.h"
39 #include "bytestream.h"
40 #include "adpcm.h"
41 #include "adpcm_data.h"
42 #include "internal.h"
43 
44 /**
45  * @file
46  * ADPCM decoders
47  * Features and limitations:
48  *
49  * Reference documents:
50  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
51  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
52  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
53  * http://openquicktime.sourceforge.net/
54  * XAnim sources (xa_codec.c) http://xanim.polter.net/
55  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
56  * SoX source code http://sox.sourceforge.net/
57  *
58  * CD-ROM XA:
59  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
60  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
61  * readstr http://www.geocities.co.jp/Playtown/2004/
62  */
63 
64 /* These are for CD-ROM XA ADPCM */
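/* (K0, K1) prediction filter coefficient pairs, stored in 1/64 units;
 * xa_decode() applies them with a +32 rounding term and a final >>6. */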
65 static const int8_t xa_adpcm_table[5][2] = {
66  { 0, 0 },
67  { 60, 0 },
68  { 115, -52 },
69  { 98, -55 },
70  { 122, -60 }
71 };
72 
73 static const int16_t ea_adpcm_table[] = {
74  0, 240, 460, 392,
75  0, 0, -208, -220,
76  0, 1, 3, 4,
77  7, 8, 10, 11,
78  0, -1, -3, -4
79 };
80 
81 // padded to zero where table size is less than 16
82 static const int8_t swf_index_tables[4][16] = {
83  /*2*/ { -1, 2 },
84  /*3*/ { -1, -1, 2, 4 },
85  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
86  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
87 };
88 
89 static const int8_t zork_index_table[8] = {
90  -1, -1, -1, 1, 4, 7, 10, 12,
91 };
92 
93 static const int8_t mtf_index_table[16] = {
94  8, 6, 4, 2, -1, -1, -1, -1,
95  -1, -1, -1, -1, 2, 4, 6, 8,
96 };
97 
98 /* end of tables */
99 
100 typedef struct ADPCMDecodeContext {
101  ADPCMChannelStatus status[14];
102  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
103  int has_status; /**< Status flag. Reset to 0 after a flush. */
104 } ADPCMDecodeContext;
105 
106 static void adpcm_flush(AVCodecContext *avctx);
107 
108 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
109 {
110  ADPCMDecodeContext *c = avctx->priv_data;
111  unsigned int min_channels = 1;
112  unsigned int max_channels = 2;
113 
114  switch(avctx->codec->id) {
115  case AV_CODEC_ID_ADPCM_IMA_AMV:
116  max_channels = 1;
117  break;
118  case AV_CODEC_ID_ADPCM_DTK:
119  case AV_CODEC_ID_ADPCM_EA:
120  min_channels = 2;
121  break;
122  case AV_CODEC_ID_ADPCM_AFC:
123  case AV_CODEC_ID_ADPCM_EA_R1:
124  case AV_CODEC_ID_ADPCM_EA_R2:
125  case AV_CODEC_ID_ADPCM_EA_R3:
126  case AV_CODEC_ID_ADPCM_EA_XAS:
127  case AV_CODEC_ID_ADPCM_MS:
128  max_channels = 6;
129  break;
130  case AV_CODEC_ID_ADPCM_MTAF:
131  min_channels = 2;
132  max_channels = 8;
133  if (avctx->channels & 1) {
134  avpriv_request_sample(avctx, "channel count %d", avctx->channels);
135  return AVERROR_PATCHWELCOME;
136  }
137  break;
138  case AV_CODEC_ID_ADPCM_PSX:
139  max_channels = 8;
140  if (avctx->channels <= 0 || avctx->block_align % (16 * avctx->channels))
141  return AVERROR_INVALIDDATA;
142  break;
143  case AV_CODEC_ID_ADPCM_IMA_DAT4:
144  case AV_CODEC_ID_ADPCM_THP:
145  case AV_CODEC_ID_ADPCM_THP_LE:
146  max_channels = 14;
147  break;
148  }
149  if (avctx->channels < min_channels || avctx->channels > max_channels) {
150  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
151  return AVERROR(EINVAL);
152  }
153 
154  switch(avctx->codec->id) {
155  case AV_CODEC_ID_ADPCM_IMA_WAV:
156  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
157  return AVERROR_INVALIDDATA;
158  break;
159  case AV_CODEC_ID_ADPCM_ARGO:
160  if (avctx->bits_per_coded_sample != 4 || avctx->block_align != 17 * avctx->channels)
161  return AVERROR_INVALIDDATA;
162  break;
163  case AV_CODEC_ID_ADPCM_ZORK:
164  if (avctx->bits_per_coded_sample != 8)
165  return AVERROR_INVALIDDATA;
166  break;
167  default:
168  break;
169  }
170 
171  switch (avctx->codec->id) {
192  break;
193  case AV_CODEC_ID_ADPCM_IMA_WS:
194  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
195  AV_SAMPLE_FMT_S16;
196  break;
197  case AV_CODEC_ID_ADPCM_MS:
198  avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
199  AV_SAMPLE_FMT_S16;
200  break;
201  default:
202  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
203  }
204 
205  adpcm_flush(avctx);
206  return 0;
207 }
208 
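/* AGM expansion: the predictor moves by ((2*delta+1)*step)>>3 in the direction
 * given by bit 3 of the nibble; the step is then rescaled multiplicatively
 * (by 0x39..0x99 with a >>6, or doubled for code 6) and clamped to [127, 24576]. */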
209 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
210 {
211  int delta, pred, step, add;
212 
213  pred = c->predictor;
214  delta = nibble & 7;
215  step = c->step;
216  add = (delta * 2 + 1) * step;
217  if (add < 0)
218  add = add + 7;
219 
220  if ((nibble & 8) == 0)
221  pred = av_clip(pred + (add >> 3), -32767, 32767);
222  else
223  pred = av_clip(pred - (add >> 3), -32767, 32767);
224 
225  switch (delta) {
226  case 7:
227  step *= 0x99;
228  break;
229  case 6:
230  c->step = av_clip(c->step * 2, 127, 24576);
231  c->predictor = pred;
232  return pred;
233  case 5:
234  step *= 0x66;
235  break;
236  case 4:
237  step *= 0x4d;
238  break;
239  default:
240  step *= 0x39;
241  break;
242  }
243 
244  if (step < 0)
245  step += 0x3f;
246 
247  c->step = step >> 6;
248  c->step = av_clip(c->step, 127, 24576);
249  c->predictor = pred;
250  return pred;
251 }
252 
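/* Standard IMA/DVI expansion: the low 3 bits of the nibble select a fraction of
 * the current step, bit 3 gives the sign, and the step index is adapted via
 * ff_adpcm_index_table and clamped to [0, 88]. */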
253 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
254 {
255  int step_index;
256  int predictor;
257  int sign, delta, diff, step;
258 
259  step = ff_adpcm_step_table[c->step_index];
260  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
261  step_index = av_clip(step_index, 0, 88);
262 
263  sign = nibble & 8;
264  delta = nibble & 7;
265  /* perform direct multiplication instead of series of jumps proposed by
266  * the reference ADPCM implementation since modern CPUs can do the mults
267  * quickly enough */
268  diff = ((2 * delta + 1) * step) >> shift;
269  predictor = c->predictor;
270  if (sign) predictor -= diff;
271  else predictor += diff;
272 
273  c->predictor = av_clip_int16(predictor);
274  c->step_index = step_index;
275 
276  return (int16_t)c->predictor;
277 }
278 
279 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
280 {
281  int step_index;
282  int predictor;
283  int sign, delta, diff, step;
284 
285  step = ff_adpcm_step_table[c->step_index];
286  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
287  step_index = av_clip(step_index, 0, 88);
288 
289  sign = nibble & 8;
290  delta = nibble & 7;
291  diff = (delta * step) >> shift;
292  predictor = c->predictor;
293  if (sign) predictor -= diff;
294  else predictor += diff;
295 
296  c->predictor = av_clip_int16(predictor);
297  c->step_index = step_index;
298 
299  return (int16_t)c->predictor;
300 }
301 
302 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
303 {
304  int step_index, step, delta, predictor;
305 
306  step = ff_adpcm_step_table[c->step_index];
307 
308  delta = step * (2 * nibble - 15);
309  predictor = c->predictor + delta;
310 
311  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
312  c->predictor = av_clip_int16(predictor >> 4);
313  c->step_index = av_clip(step_index, 0, 88);
314 
315  return (int16_t)c->predictor;
316 }
317 
318 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
319 {
320  int step_index;
321  int predictor;
322  int step;
323 
324  nibble = sign_extend(nibble & 0xF, 4);
325 
326  step = ff_adpcm_ima_cunning_step_table[c->step_index];
327  step_index = c->step_index + ff_adpcm_ima_cunning_index_table[abs(nibble)];
328  step_index = av_clip(step_index, 0, 60);
329 
330  predictor = c->predictor + step * nibble;
331 
332  c->predictor = av_clip_int16(predictor);
333  c->step_index = step_index;
334 
335  return c->predictor;
336 }
337 
338 static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
339 {
340  int nibble, step_index, predictor, sign, delta, diff, step, shift;
341 
342  shift = bps - 1;
343  nibble = get_bits_le(gb, bps),
344  step = ff_adpcm_step_table[c->step_index];
345  step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
346  step_index = av_clip(step_index, 0, 88);
347 
348  sign = nibble & (1 << shift);
349  delta = av_mod_uintp2(nibble, shift);
350  diff = ((2 * delta + 1) * step) >> shift;
351  predictor = c->predictor;
352  if (sign) predictor -= diff;
353  else predictor += diff;
354 
355  c->predictor = av_clip_int16(predictor);
356  c->step_index = step_index;
357 
358  return (int16_t)c->predictor;
359 }
360 
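/* QuickTime IMA variant: the difference is built as step/8 plus optional step,
 * step/2 and step/4 terms, i.e. the shift-and-add form of ((2*delta+1)*step)/8. */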
361 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
362 {
363  int step_index;
364  int predictor;
365  int diff, step;
366 
367  step = ff_adpcm_step_table[c->step_index];
368  step_index = c->step_index + ff_adpcm_index_table[nibble];
369  step_index = av_clip(step_index, 0, 88);
370 
371  diff = step >> 3;
372  if (nibble & 4) diff += step;
373  if (nibble & 2) diff += step >> 1;
374  if (nibble & 1) diff += step >> 2;
375 
376  if (nibble & 8)
377  predictor = c->predictor - diff;
378  else
379  predictor = c->predictor + diff;
380 
381  c->predictor = av_clip_int16(predictor);
382  c->step_index = step_index;
383 
384  return c->predictor;
385 }
386 
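/* MS ADPCM: a two-tap linear prediction (coeff1/coeff2 in 1/64 units, from the
 * ff_adpcm_AdaptCoeff tables) plus the signed nibble scaled by idelta; idelta
 * adapts through ff_adpcm_AdaptationTable and never drops below 16. */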
387 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
388 {
389  int predictor;
390 
391  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
392  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
393 
394  c->sample2 = c->sample1;
395  c->sample1 = av_clip_int16(predictor);
396  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
397  if (c->idelta < 16) c->idelta = 16;
398  if (c->idelta > INT_MAX/768) {
399  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
400  c->idelta = INT_MAX/768;
401  }
402 
403  return c->sample1;
404 }
405 
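/* OKI variant of IMA: 12-bit predictor (clipped with av_clip_intp2) and a
 * 49-entry step table; the result is scaled by 16 to cover the 16-bit range. */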
406 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
407 {
408  int step_index, predictor, sign, delta, diff, step;
409 
410  step = ff_adpcm_oki_step_table[c->step_index];
411  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
412  step_index = av_clip(step_index, 0, 48);
413 
414  sign = nibble & 8;
415  delta = nibble & 7;
416  diff = ((2 * delta + 1) * step) >> 3;
417  predictor = c->predictor;
418  if (sign) predictor -= diff;
419  else predictor += diff;
420 
421  c->predictor = av_clip_intp2(predictor, 11);
422  c->step_index = step_index;
423 
424  return c->predictor * 16;
425 }
426 
427 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
428 {
429  int sign, delta, diff;
430  int new_step;
431 
432  sign = nibble & 8;
433  delta = nibble & 7;
434  /* perform direct multiplication instead of series of jumps proposed by
435  * the reference ADPCM implementation since modern CPUs can do the mults
436  * quickly enough */
437  diff = ((2 * delta + 1) * c->step) >> 3;
438  /* predictor update is not so trivial: the predictor is multiplied by 254/256 before updating */
439  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
440  c->predictor = av_clip_int16(c->predictor);
441  /* calculate new step and clamp it to range 511..32767 */
442  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
443  c->step = av_clip(new_step, 511, 32767);
444 
445  return (int16_t)c->predictor;
446 }
447 
448 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
449 {
450  int sign, delta, diff;
451 
452  sign = nibble & (1<<(size-1));
453  delta = nibble & ((1<<(size-1))-1);
454  diff = delta << (7 + c->step + shift);
455 
456  /* clamp result */
457  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
458 
459  /* calculate new step */
460  if (delta >= (2*size - 3) && c->step < 3)
461  c->step++;
462  else if (delta == 0 && c->step > 0)
463  c->step--;
464 
465  return (int16_t) c->predictor;
466 }
467 
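/* Yamaha ADPCM: predictor += step * difflookup[nibble] / 8, then the step is
 * rescaled by indexscale[nibble]/256 and clamped to [127, 24576]. */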
468 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
469 {
470  if(!c->step) {
471  c->predictor = 0;
472  c->step = 127;
473  }
474 
475  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
476  c->predictor = av_clip_int16(c->predictor);
477  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
478  c->step = av_clip(c->step, 127, 24576);
479  return c->predictor;
480 }
481 
482 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
483 {
484  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
485  c->predictor = av_clip_int16(c->predictor);
486  c->step += ff_adpcm_index_table[nibble];
487  c->step = av_clip_uintp2(c->step, 5);
488  return c->predictor;
489 }
490 
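/* Zork ADPCM: bit 7 of the code is the sign, bits 0..6 add step, step/2, ...,
 * step/64 of the current step-table entry; the step index is adapted from the
 * upper code bits via zork_index_table. */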
491 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
492 {
493  int16_t index = c->step_index;
494  uint32_t lookup_sample = ff_adpcm_step_table[index];
495  int32_t sample = 0;
496 
497  if (nibble & 0x40)
498  sample += lookup_sample;
499  if (nibble & 0x20)
500  sample += lookup_sample >> 1;
501  if (nibble & 0x10)
502  sample += lookup_sample >> 2;
503  if (nibble & 0x08)
504  sample += lookup_sample >> 3;
505  if (nibble & 0x04)
506  sample += lookup_sample >> 4;
507  if (nibble & 0x02)
508  sample += lookup_sample >> 5;
509  if (nibble & 0x01)
510  sample += lookup_sample >> 6;
511  if (nibble & 0x80)
512  sample = -sample;
513 
514  sample += c->predictor;
516 
517  index += zork_index_table[(nibble >> 4) & 7];
518  index = av_clip(index, 0, 88);
519 
520  c->predictor = sample;
521  c->step_index = index;
522 
523  return sample;
524 }
525 
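/* Decode one 128-byte CD-ROM XA sound group: bytes 4..11 carry the shift/filter
 * parameters of eight 28-sample sound units, bytes 16..127 carry the nibbles
 * (low nibble -> out0, high nibble -> out1, or two consecutive runs in mono). */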
526 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
527  const uint8_t *in, ADPCMChannelStatus *left,
528  ADPCMChannelStatus *right, int channels, int sample_offset)
529 {
530  int i, j;
531  int shift,filter,f0,f1;
532  int s_1,s_2;
533  int d,s,t;
534 
535  out0 += sample_offset;
536  if (channels == 1)
537  out1 = out0 + 28;
538  else
539  out1 += sample_offset;
540 
541  for(i=0;i<4;i++) {
542  shift = 12 - (in[4+i*2] & 15);
543  filter = in[4+i*2] >> 4;
544  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
545  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
546  filter=0;
547  }
548  if (shift < 0) {
549  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
550  shift = 0;
551  }
552  f0 = xa_adpcm_table[filter][0];
553  f1 = xa_adpcm_table[filter][1];
554 
555  s_1 = left->sample1;
556  s_2 = left->sample2;
557 
558  for(j=0;j<28;j++) {
559  d = in[16+i+j*4];
560 
561  t = sign_extend(d, 4);
562  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
563  s_2 = s_1;
564  s_1 = av_clip_int16(s);
565  out0[j] = s_1;
566  }
567 
568  if (channels == 2) {
569  left->sample1 = s_1;
570  left->sample2 = s_2;
571  s_1 = right->sample1;
572  s_2 = right->sample2;
573  }
574 
575  shift = 12 - (in[5+i*2] & 15);
576  filter = in[5+i*2] >> 4;
577  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
578  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
579  filter=0;
580  }
581  if (shift < 0) {
582  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
583  shift = 0;
584  }
585 
586  f0 = xa_adpcm_table[filter][0];
587  f1 = xa_adpcm_table[filter][1];
588 
589  for(j=0;j<28;j++) {
590  d = in[16+i+j*4];
591 
592  t = sign_extend(d >> 4, 4);
593  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
594  s_2 = s_1;
595  s_1 = av_clip_int16(s);
596  out1[j] = s_1;
597  }
598 
599  if (channels == 2) {
600  right->sample1 = s_1;
601  right->sample2 = s_2;
602  } else {
603  left->sample1 = s_1;
604  left->sample2 = s_2;
605  }
606 
607  out0 += 28 * (3 - channels);
608  out1 += 28 * (3 - channels);
609  }
610 
611  return 0;
612 }
613 
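/* SWF ADPCM: a leading 2-bit field selects the code size (2..5 bits); each block
 * then stores, per channel, a 16-bit seed sample and a 6-bit step index followed
 * by up to 4095 IMA-style codes of that size. */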
614 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
615 {
616  ADPCMDecodeContext *c = avctx->priv_data;
617  GetBitContext gb;
618  const int8_t *table;
619  int k0, signmask, nb_bits, count;
620  int size = buf_size*8;
621  int i;
622 
623  init_get_bits(&gb, buf, size);
624 
625  //read bits & initial values
626  nb_bits = get_bits(&gb, 2)+2;
627  table = swf_index_tables[nb_bits-2];
628  k0 = 1 << (nb_bits-2);
629  signmask = 1 << (nb_bits-1);
630 
631  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
632  for (i = 0; i < avctx->channels; i++) {
633  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
634  c->status[i].step_index = get_bits(&gb, 6);
635  }
636 
637  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
638  int i;
639 
640  for (i = 0; i < avctx->channels; i++) {
641  // similar to IMA adpcm
642  int delta = get_bits(&gb, nb_bits);
643  int step = ff_adpcm_step_table[c->status[i].step_index];
644  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
645  int k = k0;
646 
647  do {
648  if (delta & k)
649  vpdiff += step;
650  step >>= 1;
651  k >>= 1;
652  } while(k);
653  vpdiff += step;
654 
655  if (delta & signmask)
656  c->status[i].predictor -= vpdiff;
657  else
658  c->status[i].predictor += vpdiff;
659 
660  c->status[i].step_index += table[delta & (~signmask)];
661 
662  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
663  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
664 
665  *samples++ = c->status[i].predictor;
666  }
667  }
668  }
669 }
670 
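/* Argonaut Games ADPCM: the shifted nibble is added to either 2*sample1 - sample2
 * (flag set) or sample1 alone, evaluated with 2 extra bits of precision (the >>2). */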
671 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
672 {
673  int sample = sign_extend(nibble, 4) * (1 << shift);
674 
675  if (flag)
676  sample += (8 * cs->sample1) - (4 * cs->sample2);
677  else
678  sample += 4 * cs->sample1;
679 
680  sample = av_clip_int16(sample >> 2);
681 
682  cs->sample2 = cs->sample1;
683  cs->sample1 = sample;
684 
685  return sample;
686 }
687 
688 /**
689  * Get the number of samples (per channel) that will be decoded from the packet.
690  * In one case, this is actually the maximum number of samples possible to
691  * decode with the given buf_size.
692  *
693  * @param[out] coded_samples set to the number of samples as coded in the
694  * packet, or 0 if the codec does not encode the
695  * number of samples in each frame.
696  * @param[out] approx_nb_samples set to non-zero if the number of samples
697  * returned is an approximation.
698  */
699 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
700  int buf_size, int *coded_samples, int *approx_nb_samples)
701 {
702  ADPCMDecodeContext *s = avctx->priv_data;
703  int nb_samples = 0;
704  int ch = avctx->channels;
705  int has_coded_samples = 0;
706  int header_size;
707 
708  *coded_samples = 0;
709  *approx_nb_samples = 0;
710 
711  if(ch <= 0)
712  return 0;
713 
714  switch (avctx->codec->id) {
715  /* constant, only check buf_size */
717  if (buf_size < 76 * ch)
718  return 0;
719  nb_samples = 128;
720  break;
722  if (buf_size < 34 * ch)
723  return 0;
724  nb_samples = 64;
725  break;
726  /* simple 4-bit adpcm */
739  nb_samples = buf_size * 2 / ch;
740  break;
741  }
742  if (nb_samples)
743  return nb_samples;
744 
745  /* simple 4-bit adpcm, with header */
746  header_size = 0;
747  switch (avctx->codec->id) {
752  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
753  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
754  }
755  if (header_size > 0)
756  return (buf_size - header_size) * 2 / ch;
757 
758  /* more complex formats */
759  switch (avctx->codec->id) {
761  bytestream2_skip(gb, 4);
762  has_coded_samples = 1;
763  *coded_samples = bytestream2_get_le32u(gb);
764  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
765  bytestream2_seek(gb, -8, SEEK_CUR);
766  break;
768  has_coded_samples = 1;
769  *coded_samples = bytestream2_get_le32(gb);
770  *coded_samples -= *coded_samples % 28;
771  nb_samples = (buf_size - 12) / 30 * 28;
772  break;
774  has_coded_samples = 1;
775  *coded_samples = bytestream2_get_le32(gb);
776  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
777  break;
779  nb_samples = (buf_size - ch) / ch * 2;
780  break;
784  /* maximum number of samples */
785  /* has internal offsets and a per-frame switch to signal raw 16-bit */
786  has_coded_samples = 1;
787  switch (avctx->codec->id) {
789  header_size = 4 + 9 * ch;
790  *coded_samples = bytestream2_get_le32(gb);
791  break;
793  header_size = 4 + 5 * ch;
794  *coded_samples = bytestream2_get_le32(gb);
795  break;
797  header_size = 4 + 5 * ch;
798  *coded_samples = bytestream2_get_be32(gb);
799  break;
800  }
801  *coded_samples -= *coded_samples % 28;
802  nb_samples = (buf_size - header_size) * 2 / ch;
803  nb_samples -= nb_samples % 28;
804  *approx_nb_samples = 1;
805  break;
807  if (avctx->block_align > 0)
808  buf_size = FFMIN(buf_size, avctx->block_align);
809  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
810  break;
812  if (avctx->block_align > 0)
813  buf_size = FFMIN(buf_size, avctx->block_align);
814  if (buf_size < 4 * ch)
815  return AVERROR_INVALIDDATA;
816  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
817  break;
819  if (avctx->block_align > 0)
820  buf_size = FFMIN(buf_size, avctx->block_align);
821  nb_samples = (buf_size - 4 * ch) * 2 / ch;
822  break;
824  {
825  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
826  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
827  if (avctx->block_align > 0)
828  buf_size = FFMIN(buf_size, avctx->block_align);
829  if (buf_size < 4 * ch)
830  return AVERROR_INVALIDDATA;
831  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
832  break;
833  }
835  if (avctx->block_align > 0)
836  buf_size = FFMIN(buf_size, avctx->block_align);
837  nb_samples = (buf_size - 6 * ch) * 2 / ch;
838  break;
840  if (avctx->block_align > 0)
841  buf_size = FFMIN(buf_size, avctx->block_align);
842  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
843  break;
847  {
848  int samples_per_byte;
849  switch (avctx->codec->id) {
850  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
851  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
852  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
853  }
854  if (!s->status[0].step_index) {
855  if (buf_size < ch)
856  return AVERROR_INVALIDDATA;
857  nb_samples++;
858  buf_size -= ch;
859  }
860  nb_samples += buf_size * samples_per_byte / ch;
861  break;
862  }
864  {
865  int buf_bits = buf_size * 8 - 2;
866  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
867  int block_hdr_size = 22 * ch;
868  int block_size = block_hdr_size + nbits * ch * 4095;
869  int nblocks = buf_bits / block_size;
870  int bits_left = buf_bits - nblocks * block_size;
871  nb_samples = nblocks * 4096;
872  if (bits_left >= block_hdr_size)
873  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
874  break;
875  }
878  if (avctx->extradata) {
879  nb_samples = buf_size * 14 / (8 * ch);
880  break;
881  }
882  has_coded_samples = 1;
883  bytestream2_skip(gb, 4); // channel size
884  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
885  bytestream2_get_le32(gb) :
886  bytestream2_get_be32(gb);
887  buf_size -= 8 + 36 * ch;
888  buf_size /= ch;
889  nb_samples = buf_size / 8 * 14;
890  if (buf_size % 8 > 1)
891  nb_samples += (buf_size % 8 - 1) * 2;
892  *approx_nb_samples = 1;
893  break;
895  nb_samples = buf_size / (9 * ch) * 16;
896  break;
898  nb_samples = (buf_size / 128) * 224 / ch;
899  break;
902  nb_samples = buf_size / (16 * ch) * 28;
903  break;
905  nb_samples = buf_size / avctx->block_align * 32;
906  break;
908  nb_samples = buf_size / ch;
909  break;
910  }
911 
912  /* validate coded sample count */
913  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
914  return AVERROR_INVALIDDATA;
915 
916  return nb_samples;
917 }
918 
919 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
920  int *got_frame_ptr, AVPacket *avpkt)
921 {
922  AVFrame *frame = data;
923  const uint8_t *buf = avpkt->data;
924  int buf_size = avpkt->size;
925  ADPCMDecodeContext *c = avctx->priv_data;
926  ADPCMChannelStatus *cs;
927  int n, m, channel, i;
928  int16_t *samples;
929  int16_t **samples_p;
930  int st; /* stereo */
931  int count1, count2;
932  int nb_samples, coded_samples, approx_nb_samples, ret;
933  GetByteContext gb;
934 
935  bytestream2_init(&gb, buf, buf_size);
936  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
937  if (nb_samples <= 0) {
938  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
939  return AVERROR_INVALIDDATA;
940  }
941 
942  /* get output buffer */
943  frame->nb_samples = nb_samples;
944  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
945  return ret;
946  samples = (int16_t *)frame->data[0];
947  samples_p = (int16_t **)frame->extended_data;
948 
949  /* use coded_samples when applicable */
950  /* it is always <= nb_samples, so the output buffer will be large enough */
951  if (coded_samples) {
952  if (!approx_nb_samples && coded_samples != nb_samples)
953  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
954  frame->nb_samples = nb_samples = coded_samples;
955  }
956 
957  st = avctx->channels == 2 ? 1 : 0;
958 
959  switch(avctx->codec->id) {
960  case AV_CODEC_ID_ADPCM_IMA_QT:
961  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
962  Channel data is interleaved per-chunk. */
963  for (channel = 0; channel < avctx->channels; channel++) {
964  int predictor;
965  int step_index;
966  cs = &(c->status[channel]);
967  /* (pppppp) (piiiiiii) */
968 
969  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
970  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
971  step_index = predictor & 0x7F;
972  predictor &= ~0x7F;
973 
974  if (cs->step_index == step_index) {
975  int diff = predictor - cs->predictor;
976  if (diff < 0)
977  diff = - diff;
978  if (diff > 0x7f)
979  goto update;
980  } else {
981  update:
982  cs->step_index = step_index;
983  cs->predictor = predictor;
984  }
985 
986  if (cs->step_index > 88u){
987  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
988  channel, cs->step_index);
989  return AVERROR_INVALIDDATA;
990  }
991 
992  samples = samples_p[channel];
993 
994  for (m = 0; m < 64; m += 2) {
995  int byte = bytestream2_get_byteu(&gb);
996  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
997  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
998  }
999  }
1000  break;
1001  case AV_CODEC_ID_ADPCM_IMA_WAV:
1002  for(i=0; i<avctx->channels; i++){
1003  cs = &(c->status[i]);
1004  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1005 
1006  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1007  if (cs->step_index > 88u){
1008  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1009  i, cs->step_index);
1010  return AVERROR_INVALIDDATA;
1011  }
1012  }
1013 
1014  if (avctx->bits_per_coded_sample != 4) {
1015  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1016  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1018  GetBitContext g;
1019 
1020  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1021  for (i = 0; i < avctx->channels; i++) {
1022  int j;
1023 
1024  cs = &c->status[i];
1025  samples = &samples_p[i][1 + n * samples_per_block];
1026  for (j = 0; j < block_size; j++) {
1027  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
1028  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
1029  }
1030  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1031  if (ret < 0)
1032  return ret;
1033  for (m = 0; m < samples_per_block; m++) {
1034  samples[m] = adpcm_ima_wav_expand_nibble(&c->status[i], &g,
1035  avctx->bits_per_coded_sample);
1036  }
1037  }
1038  }
1039  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
1040  } else {
1041  for (n = 0; n < (nb_samples - 1) / 8; n++) {
1042  for (i = 0; i < avctx->channels; i++) {
1043  cs = &c->status[i];
1044  samples = &samples_p[i][1 + n * 8];
1045  for (m = 0; m < 8; m += 2) {
1046  int v = bytestream2_get_byteu(&gb);
1047  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1048  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1049  }
1050  }
1051  }
1052  }
1053  break;
1054  case AV_CODEC_ID_ADPCM_4XM:
1055  for (i = 0; i < avctx->channels; i++)
1056  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1057 
1058  for (i = 0; i < avctx->channels; i++) {
1059  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1060  if (c->status[i].step_index > 88u) {
1061  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1062  i, c->status[i].step_index);
1063  return AVERROR_INVALIDDATA;
1064  }
1065  }
1066 
1067  for (i = 0; i < avctx->channels; i++) {
1068  samples = (int16_t *)frame->data[i];
1069  cs = &c->status[i];
1070  for (n = nb_samples >> 1; n > 0; n--) {
1071  int v = bytestream2_get_byteu(&gb);
1072  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1073  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1074  }
1075  }
1076  break;
1077  case AV_CODEC_ID_ADPCM_AGM:
1078  for (i = 0; i < avctx->channels; i++)
1079  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1080  for (i = 0; i < avctx->channels; i++)
1081  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1082 
1083  for (n = 0; n < nb_samples >> (1 - st); n++) {
1084  int v = bytestream2_get_byteu(&gb);
1085  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1086  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1087  }
1088  break;
1089  case AV_CODEC_ID_ADPCM_MS:
1090  {
1091  int block_predictor;
1092 
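/* Each block carries a per-channel preamble: coefficient-set index, initial
 * idelta and the two seed samples (which are also the first two outputs),
 * followed by 4-bit codes stored high nibble first. */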
1093  if (avctx->channels > 2) {
1094  for (channel = 0; channel < avctx->channels; channel++) {
1095  samples = samples_p[channel];
1096  block_predictor = bytestream2_get_byteu(&gb);
1097  if (block_predictor > 6) {
1098  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1099  channel, block_predictor);
1100  return AVERROR_INVALIDDATA;
1101  }
1102  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1103  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1104  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1105  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1106  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1107  *samples++ = c->status[channel].sample2;
1108  *samples++ = c->status[channel].sample1;
1109  for(n = (nb_samples - 2) >> 1; n > 0; n--) {
1110  int byte = bytestream2_get_byteu(&gb);
1111  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1112  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1113  }
1114  }
1115  } else {
1116  block_predictor = bytestream2_get_byteu(&gb);
1117  if (block_predictor > 6) {
1118  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1119  block_predictor);
1120  return AVERROR_INVALIDDATA;
1121  }
1122  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1123  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1124  if (st) {
1125  block_predictor = bytestream2_get_byteu(&gb);
1126  if (block_predictor > 6) {
1127  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1128  block_predictor);
1129  return AVERROR_INVALIDDATA;
1130  }
1131  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1132  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1133  }
1134  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1135  if (st){
1136  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1137  }
1138 
1139  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1140  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1141  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1142  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1143 
1144  *samples++ = c->status[0].sample2;
1145  if (st) *samples++ = c->status[1].sample2;
1146  *samples++ = c->status[0].sample1;
1147  if (st) *samples++ = c->status[1].sample1;
1148  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1149  int byte = bytestream2_get_byteu(&gb);
1150  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1151  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1152  }
1153  }
1154  break;
1155  }
1157  for (channel = 0; channel < avctx->channels; channel+=2) {
1158  bytestream2_skipu(&gb, 4);
1159  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1160  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1161  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1162  bytestream2_skipu(&gb, 2);
1163  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1164  bytestream2_skipu(&gb, 2);
1165  for (n = 0; n < nb_samples; n+=2) {
1166  int v = bytestream2_get_byteu(&gb);
1167  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1168  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1169  }
1170  for (n = 0; n < nb_samples; n+=2) {
1171  int v = bytestream2_get_byteu(&gb);
1172  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1173  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1174  }
1175  }
1176  break;
1178  for (channel = 0; channel < avctx->channels; channel++) {
1179  cs = &c->status[channel];
1180  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1181  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1182  if (cs->step_index > 88u){
1183  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1184  channel, cs->step_index);
1185  return AVERROR_INVALIDDATA;
1186  }
1187  }
1188  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1189  int v = bytestream2_get_byteu(&gb);
1190  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1191  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1192  }
1193  break;
1194  case AV_CODEC_ID_ADPCM_IMA_DK3:
1195  {
1196  int last_byte = 0;
1197  int nibble;
1198  int decode_top_nibble_next = 0;
1199  int diff_channel;
1200  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1201 
1202  bytestream2_skipu(&gb, 10);
1203  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1204  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1205  c->status[0].step_index = bytestream2_get_byteu(&gb);
1206  c->status[1].step_index = bytestream2_get_byteu(&gb);
1207  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1208  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1209  c->status[0].step_index, c->status[1].step_index);
1210  return AVERROR_INVALIDDATA;
1211  }
1212  /* sign extend the predictors */
1213  diff_channel = c->status[1].predictor;
1214 
1215  /* DK3 ADPCM support macro */
1216 #define DK3_GET_NEXT_NIBBLE() \
1217  if (decode_top_nibble_next) { \
1218  nibble = last_byte >> 4; \
1219  decode_top_nibble_next = 0; \
1220  } else { \
1221  last_byte = bytestream2_get_byteu(&gb); \
1222  nibble = last_byte & 0x0F; \
1223  decode_top_nibble_next = 1; \
1224  }
1225 
1226  while (samples < samples_end) {
1227 
1228  /* for this algorithm, c->status[0] is the sum channel and
1229  * c->status[1] is the diff channel */
1230 
1231  /* process the first predictor of the sum channel */
1232  DK3_GET_NEXT_NIBBLE();
1233  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1234 
1235  /* process the diff channel predictor */
1236  DK3_GET_NEXT_NIBBLE();
1237  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1238 
1239  /* process the first pair of stereo PCM samples */
1240  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1241  *samples++ = c->status[0].predictor + c->status[1].predictor;
1242  *samples++ = c->status[0].predictor - c->status[1].predictor;
1243 
1244  /* process the second predictor of the sum channel */
1245  DK3_GET_NEXT_NIBBLE();
1246  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1247 
1248  /* process the second pair of stereo PCM samples */
1249  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1250  *samples++ = c->status[0].predictor + c->status[1].predictor;
1251  *samples++ = c->status[0].predictor - c->status[1].predictor;
1252  }
1253 
1254  if ((bytestream2_tell(&gb) & 1))
1255  bytestream2_skip(&gb, 1);
1256  break;
1257  }
1259  for (channel = 0; channel < avctx->channels; channel++) {
1260  cs = &c->status[channel];
1261  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1262  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1263  if (cs->step_index > 88u){
1264  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1265  channel, cs->step_index);
1266  return AVERROR_INVALIDDATA;
1267  }
1268  }
1269 
1270  for (n = nb_samples >> (1 - st); n > 0; n--) {
1271  int v1, v2;
1272  int v = bytestream2_get_byteu(&gb);
1273  /* nibbles are swapped for mono */
1274  if (st) {
1275  v1 = v >> 4;
1276  v2 = v & 0x0F;
1277  } else {
1278  v2 = v >> 4;
1279  v1 = v & 0x0F;
1280  }
1281  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1282  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1283  }
1284  break;
1286  for (channel = 0; channel < avctx->channels; channel++) {
1287  cs = &c->status[channel];
1288  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1289  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1290  if (cs->step_index > 88u){
1291  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1292  channel, cs->step_index);
1293  return AVERROR_INVALIDDATA;
1294  }
1295  }
1296 
1297  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1298  for (channel = 0; channel < avctx->channels; channel++) {
1299  samples = samples_p[channel] + 256 * subframe;
1300  for (n = 0; n < 256; n += 2) {
1301  int v = bytestream2_get_byteu(&gb);
1302  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1303  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1304  }
1305  }
1306  }
1307  break;
1309  for (channel = 0; channel < avctx->channels; channel++) {
1310  cs = &c->status[channel];
1311  samples = samples_p[channel];
1312  bytestream2_skip(&gb, 4);
1313  for (n = 0; n < nb_samples; n += 2) {
1314  int v = bytestream2_get_byteu(&gb);
1315  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1316  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1317  }
1318  }
1319  break;
1321  for (n = nb_samples >> (1 - st); n > 0; n--) {
1322  int v = bytestream2_get_byteu(&gb);
1323  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1324  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1325  }
1326  break;
1328  for (n = nb_samples >> (1 - st); n > 0; n--) {
1329  int v = bytestream2_get_byteu(&gb);
1330  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1331  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1332  }
1333  break;
1335  for (n = nb_samples / 2; n > 0; n--) {
1336  for (channel = 0; channel < avctx->channels; channel++) {
1337  int v = bytestream2_get_byteu(&gb);
1338  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1339  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1340  }
1341  samples += avctx->channels;
1342  }
1343  break;
1345  for (n = nb_samples / 2; n > 0; n--) {
1346  for (channel = 0; channel < avctx->channels; channel++) {
1347  int v = bytestream2_get_byteu(&gb);
1348  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1349  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1350  }
1351  samples += avctx->channels;
1352  }
1353  break;
1355  for (channel = 0; channel < avctx->channels; channel++) {
1356  int16_t *smp = samples_p[channel];
1357  for (n = 0; n < nb_samples / 2; n++) {
1358  int v = bytestream2_get_byteu(&gb);
1359  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1360  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1361  }
1362  }
1363  break;
1365  for (n = nb_samples >> (1 - st); n > 0; n--) {
1366  int v = bytestream2_get_byteu(&gb);
1367  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1368  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1369  }
1370  break;
1372  for (channel = 0; channel < avctx->channels; channel++) {
1373  cs = &c->status[channel];
1374  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1375  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1376  if (cs->step_index > 88u){
1377  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1378  channel, cs->step_index);
1379  return AVERROR_INVALIDDATA;
1380  }
1381  }
1382  for (n = 0; n < nb_samples / 2; n++) {
1383  int byte[2];
1384 
1385  byte[0] = bytestream2_get_byteu(&gb);
1386  if (st)
1387  byte[1] = bytestream2_get_byteu(&gb);
1388  for(channel = 0; channel < avctx->channels; channel++) {
1389  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1390  }
1391  for(channel = 0; channel < avctx->channels; channel++) {
1392  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1393  }
1394  }
1395  break;
1396  case AV_CODEC_ID_ADPCM_IMA_WS:
1397  if (c->vqa_version == 3) {
1398  for (channel = 0; channel < avctx->channels; channel++) {
1399  int16_t *smp = samples_p[channel];
1400 
1401  for (n = nb_samples / 2; n > 0; n--) {
1402  int v = bytestream2_get_byteu(&gb);
1403  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1404  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1405  }
1406  }
1407  } else {
1408  for (n = nb_samples / 2; n > 0; n--) {
1409  for (channel = 0; channel < avctx->channels; channel++) {
1410  int v = bytestream2_get_byteu(&gb);
1411  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1412  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1413  }
1414  samples += avctx->channels;
1415  }
1416  }
1417  bytestream2_seek(&gb, 0, SEEK_END);
1418  break;
1419  case AV_CODEC_ID_ADPCM_XA:
1420  {
1421  int16_t *out0 = samples_p[0];
1422  int16_t *out1 = samples_p[1];
1423  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1424  int sample_offset = 0;
1425  int bytes_remaining;
1426  while (bytestream2_get_bytes_left(&gb) >= 128) {
1427  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1428  &c->status[0], &c->status[1],
1429  avctx->channels, sample_offset)) < 0)
1430  return ret;
1431  bytestream2_skipu(&gb, 128);
1432  sample_offset += samples_per_block;
1433  }
1434  /* Less than a full block of data left, e.g. when reading from
1435  * 2324 byte per sector XA; the remainder is padding */
1436  bytes_remaining = bytestream2_get_bytes_left(&gb);
1437  if (bytes_remaining > 0) {
1438  bytestream2_skip(&gb, bytes_remaining);
1439  }
1440  break;
1441  }
1443  for (i=0; i<=st; i++) {
1444  c->status[i].step_index = bytestream2_get_le32u(&gb);
1445  if (c->status[i].step_index > 88u) {
1446  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1447  i, c->status[i].step_index);
1448  return AVERROR_INVALIDDATA;
1449  }
1450  }
1451  for (i=0; i<=st; i++) {
1452  c->status[i].predictor = bytestream2_get_le32u(&gb);
1453  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1454  return AVERROR_INVALIDDATA;
1455  }
1456 
1457  for (n = nb_samples >> (1 - st); n > 0; n--) {
1458  int byte = bytestream2_get_byteu(&gb);
1459  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1460  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1461  }
1462  break;
1464  for (n = nb_samples >> (1 - st); n > 0; n--) {
1465  int byte = bytestream2_get_byteu(&gb);
1466  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1467  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1468  }
1469  break;
1470  case AV_CODEC_ID_ADPCM_EA:
1471  {
1472  int previous_left_sample, previous_right_sample;
1473  int current_left_sample, current_right_sample;
1474  int next_left_sample, next_right_sample;
1475  int coeff1l, coeff2l, coeff1r, coeff2r;
1476  int shift_left, shift_right;
1477 
1478  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1479  each coding 28 stereo samples. */
1480 
1481  if(avctx->channels != 2)
1482  return AVERROR_INVALIDDATA;
1483 
1484  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1485  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1486  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1487  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1488 
1489  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1490  int byte = bytestream2_get_byteu(&gb);
1491  coeff1l = ea_adpcm_table[ byte >> 4 ];
1492  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1493  coeff1r = ea_adpcm_table[ byte & 0x0F];
1494  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1495 
1496  byte = bytestream2_get_byteu(&gb);
1497  shift_left = 20 - (byte >> 4);
1498  shift_right = 20 - (byte & 0x0F);
1499 
1500  for (count2 = 0; count2 < 28; count2++) {
1501  byte = bytestream2_get_byteu(&gb);
1502  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1503  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1504 
1505  next_left_sample = (next_left_sample +
1506  (current_left_sample * coeff1l) +
1507  (previous_left_sample * coeff2l) + 0x80) >> 8;
1508  next_right_sample = (next_right_sample +
1509  (current_right_sample * coeff1r) +
1510  (previous_right_sample * coeff2r) + 0x80) >> 8;
1511 
1512  previous_left_sample = current_left_sample;
1513  current_left_sample = av_clip_int16(next_left_sample);
1514  previous_right_sample = current_right_sample;
1515  current_right_sample = av_clip_int16(next_right_sample);
1516  *samples++ = current_left_sample;
1517  *samples++ = current_right_sample;
1518  }
1519  }
1520 
1521  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1522 
1523  break;
1524  }
1526  {
1527  int coeff[2][2], shift[2];
1528 
1529  for(channel = 0; channel < avctx->channels; channel++) {
1530  int byte = bytestream2_get_byteu(&gb);
1531  for (i=0; i<2; i++)
1532  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1533  shift[channel] = 20 - (byte & 0x0F);
1534  }
1535  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1536  int byte[2];
1537 
1538  byte[0] = bytestream2_get_byteu(&gb);
1539  if (st) byte[1] = bytestream2_get_byteu(&gb);
1540  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1541  for(channel = 0; channel < avctx->channels; channel++) {
1542  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1543  sample = (sample +
1544  c->status[channel].sample1 * coeff[channel][0] +
1545  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1546  c->status[channel].sample2 = c->status[channel].sample1;
1547  c->status[channel].sample1 = av_clip_int16(sample);
1548  *samples++ = c->status[channel].sample1;
1549  }
1550  }
1551  }
1552  bytestream2_seek(&gb, 0, SEEK_END);
1553  break;
1554  }
1555  case AV_CODEC_ID_ADPCM_EA_R1:
1556  case AV_CODEC_ID_ADPCM_EA_R2:
1557  case AV_CODEC_ID_ADPCM_EA_R3: {
1558  /* channel numbering
1559  2chan: 0=fl, 1=fr
1560  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1561  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1562  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1563  int previous_sample, current_sample, next_sample;
1564  int coeff1, coeff2;
1565  int shift;
1566  unsigned int channel;
1567  uint16_t *samplesC;
1568  int count = 0;
1569  int offsets[6];
1570 
1571  for (channel=0; channel<avctx->channels; channel++)
1572  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1573  bytestream2_get_le32(&gb)) +
1574  (avctx->channels + 1) * 4;
1575 
1576  for (channel=0; channel<avctx->channels; channel++) {
1577  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1578  samplesC = samples_p[channel];
1579 
1580  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1581  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1582  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1583  } else {
1584  current_sample = c->status[channel].predictor;
1585  previous_sample = c->status[channel].prev_sample;
1586  }
1587 
1588  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1589  int byte = bytestream2_get_byte(&gb);
1590  if (byte == 0xEE) { /* only seen in R2 and R3 */
1591  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1592  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1593 
1594  for (count2=0; count2<28; count2++)
1595  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1596  } else {
1597  coeff1 = ea_adpcm_table[ byte >> 4 ];
1598  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1599  shift = 20 - (byte & 0x0F);
1600 
1601  for (count2=0; count2<28; count2++) {
1602  if (count2 & 1)
1603  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1604  else {
1605  byte = bytestream2_get_byte(&gb);
1606  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1607  }
1608 
1609  next_sample += (current_sample * coeff1) +
1610  (previous_sample * coeff2);
1611  next_sample = av_clip_int16(next_sample >> 8);
1612 
1613  previous_sample = current_sample;
1614  current_sample = next_sample;
1615  *samplesC++ = current_sample;
1616  }
1617  }
1618  }
1619  if (!count) {
1620  count = count1;
1621  } else if (count != count1) {
1622  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1623  count = FFMAX(count, count1);
1624  }
1625 
1626  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1627  c->status[channel].predictor = current_sample;
1628  c->status[channel].prev_sample = previous_sample;
1629  }
1630  }
1631 
1632  frame->nb_samples = count * 28;
1633  bytestream2_seek(&gb, 0, SEEK_END);
1634  break;
1635  }
1637  for (channel=0; channel<avctx->channels; channel++) {
1638  int coeff[2][4], shift[4];
1639  int16_t *s = samples_p[channel];
1640  for (n = 0; n < 4; n++, s += 32) {
1641  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1642  for (i=0; i<2; i++)
1643  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1644  s[0] = val & ~0x0F;
1645 
1646  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1647  shift[n] = 20 - (val & 0x0F);
1648  s[1] = val & ~0x0F;
1649  }
1650 
1651  for (m=2; m<32; m+=2) {
1652  s = &samples_p[channel][m];
1653  for (n = 0; n < 4; n++, s += 32) {
1654  int level, pred;
1655  int byte = bytestream2_get_byteu(&gb);
1656 
1657  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1658  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1659  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1660 
1661  level = sign_extend(byte, 4) * (1 << shift[n]);
1662  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1663  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1664  }
1665  }
1666  }
1667  break;
1668  case AV_CODEC_ID_ADPCM_IMA_AMV:
1669  av_assert0(avctx->channels == 1);
1670 
1671  /*
1672  * Header format:
1673  * int16_t predictor;
1674  * uint8_t step_index;
1675  * uint8_t reserved;
1676  * uint32_t frame_size;
1677  *
1678  * Some implementations have step_index as 16-bits, but others
1679  * only use the lower 8 and store garbage in the upper 8.
1680  */
1681  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1682  c->status[0].step_index = bytestream2_get_byteu(&gb);
1683  bytestream2_skipu(&gb, 5);
1684  if (c->status[0].step_index > 88u) {
1685  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1686  c->status[0].step_index);
1687  return AVERROR_INVALIDDATA;
1688  }
1689 
1690  for (n = nb_samples >> 1; n > 0; n--) {
1691  int v = bytestream2_get_byteu(&gb);
1692 
1693  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1694  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1695  }
1696 
1697  if (nb_samples & 1) {
1698  int v = bytestream2_get_byteu(&gb);
1699  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1700 
1701  if (v & 0x0F) {
1702  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1703  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1704  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1705  }
1706  }
1707  break;
1709  for (i = 0; i < avctx->channels; i++) {
1710  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1711  c->status[i].step_index = bytestream2_get_byteu(&gb);
1712  bytestream2_skipu(&gb, 1);
1713  if (c->status[i].step_index > 88u) {
1714  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1715  c->status[i].step_index);
1716  return AVERROR_INVALIDDATA;
1717  }
1718  }
1719 
1720  for (n = nb_samples >> (1 - st); n > 0; n--) {
1721  int v = bytestream2_get_byteu(&gb);
1722 
1723  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1724  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1725  }
1726  break;
1727  case AV_CODEC_ID_ADPCM_CT:
1728  for (n = nb_samples >> (1 - st); n > 0; n--) {
1729  int v = bytestream2_get_byteu(&gb);
1730  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1731  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1732  }
1733  break;
1734  case AV_CODEC_ID_ADPCM_SBPRO_4:
1735  case AV_CODEC_ID_ADPCM_SBPRO_3:
1736  case AV_CODEC_ID_ADPCM_SBPRO_2:
1737  if (!c->status[0].step_index) {
1738  /* the first byte is a raw sample */
1739  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1740  if (st)
1741  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1742  c->status[0].step_index = 1;
1743  nb_samples--;
1744  }
1745  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1746  for (n = nb_samples >> (1 - st); n > 0; n--) {
1747  int byte = bytestream2_get_byteu(&gb);
1748  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1749  byte >> 4, 4, 0);
1750  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1751  byte & 0x0F, 4, 0);
1752  }
1753  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1754  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1755  int byte = bytestream2_get_byteu(&gb);
1756  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1757  byte >> 5 , 3, 0);
1758  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1759  (byte >> 2) & 0x07, 3, 0);
1760  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1761  byte & 0x03, 2, 0);
1762  }
1763  } else {
1764  for (n = nb_samples >> (2 - st); n > 0; n--) {
1765  int byte = bytestream2_get_byteu(&gb);
1766  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1767  byte >> 6 , 2, 2);
1768  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1769  (byte >> 4) & 0x03, 2, 2);
1770  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1771  (byte >> 2) & 0x03, 2, 2);
1772  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1773  byte & 0x03, 2, 2);
1774  }
1775  }
1776  break;
1777  case AV_CODEC_ID_ADPCM_SWF:
1778  adpcm_swf_decode(avctx, buf, buf_size, samples);
1779  bytestream2_seek(&gb, 0, SEEK_END);
1780  break;
1781  case AV_CODEC_ID_ADPCM_YAMAHA:
1782  for (n = nb_samples >> (1 - st); n > 0; n--) {
1783  int v = bytestream2_get_byteu(&gb);
1784  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1785  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1786  }
1787  break;
1788  case AV_CODEC_ID_ADPCM_AICA:
1789  for (channel = 0; channel < avctx->channels; channel++) {
1790  samples = samples_p[channel];
1791  for (n = nb_samples >> 1; n > 0; n--) {
1792  int v = bytestream2_get_byteu(&gb);
1793  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1794  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1795  }
1796  }
1797  break;
1798  case AV_CODEC_ID_ADPCM_AFC:
1799  {
1800  int samples_per_block;
1801  int blocks;
1802 
1803  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1804  samples_per_block = avctx->extradata[0] / 16;
1805  blocks = nb_samples / avctx->extradata[0];
1806  } else {
1807  samples_per_block = nb_samples / 16;
1808  blocks = 1;
1809  }
1810 
1811  for (m = 0; m < blocks; m++) {
1812  for (channel = 0; channel < avctx->channels; channel++) {
1813  int prev1 = c->status[channel].sample1;
1814  int prev2 = c->status[channel].sample2;
1815 
1816  samples = samples_p[channel] + m * 16;
1817  /* Read in every sample for this channel. */
1818  for (i = 0; i < samples_per_block; i++) {
1819  int byte = bytestream2_get_byteu(&gb);
1820  int scale = 1 << (byte >> 4);
1821  int index = byte & 0xf;
1822  int factor1 = ff_adpcm_afc_coeffs[0][index];
1823  int factor2 = ff_adpcm_afc_coeffs[1][index];
1824 
1825  /* Decode 16 samples. */
1826  for (n = 0; n < 16; n++) {
1827  int32_t sampledat;
1828 
1829  if (n & 1) {
1830  sampledat = sign_extend(byte, 4);
1831  } else {
1832  byte = bytestream2_get_byteu(&gb);
1833  sampledat = sign_extend(byte >> 4, 4);
1834  }
1835 
1836  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1837  sampledat * scale;
1838  *samples = av_clip_int16(sampledat);
1839  prev2 = prev1;
1840  prev1 = *samples++;
1841  }
1842  }
1843 
1844  c->status[channel].sample1 = prev1;
1845  c->status[channel].sample2 = prev2;
1846  }
1847  }
1848  bytestream2_seek(&gb, 0, SEEK_END);
1849  break;
1850  }
1851  case AV_CODEC_ID_ADPCM_THP:
1852  case AV_CODEC_ID_ADPCM_THP_LE:
1853  {
1854  int table[14][16];
1855  int ch;
1856 
1857 #define THP_GET16(g) \
1858  sign_extend( \
1859  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1860  bytestream2_get_le16u(&(g)) : \
1861  bytestream2_get_be16u(&(g)), 16)
1862 
1863  if (avctx->extradata) {
1864  GetByteContext tb;
1865  if (avctx->extradata_size < 32 * avctx->channels) {
1866  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1867  return AVERROR_INVALIDDATA;
1868  }
1869 
1870  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1871  for (i = 0; i < avctx->channels; i++)
1872  for (n = 0; n < 16; n++)
1873  table[i][n] = THP_GET16(tb);
1874  } else {
1875  for (i = 0; i < avctx->channels; i++)
1876  for (n = 0; n < 16; n++)
1877  table[i][n] = THP_GET16(gb);
1878 
1879  if (!c->has_status) {
1880  /* Initialize the previous sample. */
1881  for (i = 0; i < avctx->channels; i++) {
1882  c->status[i].sample1 = THP_GET16(gb);
1883  c->status[i].sample2 = THP_GET16(gb);
1884  }
1885  c->has_status = 1;
1886  } else {
1887  bytestream2_skip(&gb, avctx->channels * 4);
1888  }
1889  }
1890 
1891  for (ch = 0; ch < avctx->channels; ch++) {
1892  samples = samples_p[ch];
1893 
1894  /* Read in every sample for this channel. */
1895  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1896  int byte = bytestream2_get_byteu(&gb);
1897  int index = (byte >> 4) & 7;
1898  unsigned int exp = byte & 0x0F;
1899  int64_t factor1 = table[ch][index * 2];
1900  int64_t factor2 = table[ch][index * 2 + 1];
1901 
1902  /* Decode 14 samples. */
1903  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1904  int32_t sampledat;
1905 
1906  if (n & 1) {
1907  sampledat = sign_extend(byte, 4);
1908  } else {
1909  byte = bytestream2_get_byteu(&gb);
1910  sampledat = sign_extend(byte >> 4, 4);
1911  }
1912 
1913  sampledat = ((c->status[ch].sample1 * factor1
1914  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1915  *samples = av_clip_int16(sampledat);
1916  c->status[ch].sample2 = c->status[ch].sample1;
1917  c->status[ch].sample1 = *samples++;
1918  }
1919  }
1920  }
1921  break;
1922  }
1923  case AV_CODEC_ID_ADPCM_DTK:
1924  for (channel = 0; channel < avctx->channels; channel++) {
1925  samples = samples_p[channel];
1926 
1927  /* Read in every sample for this channel. */
1928  for (i = 0; i < nb_samples / 28; i++) {
1929  int byte, header;
1930  if (channel)
1931  bytestream2_skipu(&gb, 1);
1932  header = bytestream2_get_byteu(&gb);
1933  bytestream2_skipu(&gb, 3 - channel);
1934 
1935  /* Decode 28 samples. */
1936  for (n = 0; n < 28; n++) {
1937  int32_t sampledat, prev;
1938 
1939  switch (header >> 4) {
1940  case 1:
1941  prev = (c->status[channel].sample1 * 0x3c);
1942  break;
1943  case 2:
1944  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1945  break;
1946  case 3:
1947  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1948  break;
1949  default:
1950  prev = 0;
1951  }
1952 
1953  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1954 
1955  byte = bytestream2_get_byteu(&gb);
1956  if (!channel)
1957  sampledat = sign_extend(byte, 4);
1958  else
1959  sampledat = sign_extend(byte >> 4, 4);
1960 
1961  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1962  *samples++ = av_clip_int16(sampledat >> 6);
1963  c->status[channel].sample2 = c->status[channel].sample1;
1964  c->status[channel].sample1 = sampledat;
1965  }
1966  }
1967  if (!channel)
1968  bytestream2_seek(&gb, 0, SEEK_SET);
1969  }
1970  break;
1971  case AV_CODEC_ID_ADPCM_PSX:
1972  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * avctx->channels); block++) {
1973  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * avctx->channels) / (16 * avctx->channels);
1974  for (channel = 0; channel < avctx->channels; channel++) {
1975  samples = samples_p[channel] + block * nb_samples_per_block;
1976  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
1977 
1978  /* Read in every sample for this channel. */
1979  for (i = 0; i < nb_samples_per_block / 28; i++) {
1980  int filter, shift, flag, byte;
1981 
1982  filter = bytestream2_get_byteu(&gb);
1983  shift = filter & 0xf;
1984  filter = filter >> 4;
1985  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
1986  return AVERROR_INVALIDDATA;
1987  flag = bytestream2_get_byteu(&gb);
1988 
1989  /* Decode 28 samples. */
1990  for (n = 0; n < 28; n++) {
1991  int sample = 0, scale;
1992 
1993  if (flag < 0x07) {
1994  if (n & 1) {
1995  scale = sign_extend(byte >> 4, 4);
1996  } else {
1997  byte = bytestream2_get_byteu(&gb);
1998  scale = sign_extend(byte, 4);
1999  }
2000 
2001  scale = scale * (1 << 12);
2002  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2003  }
2004  *samples++ = av_clip_int16(sample);
2005  c->status[channel].sample2 = c->status[channel].sample1;
2006  c->status[channel].sample1 = sample;
2007  }
2008  }
2009  }
2010  }
2011  break;
2012  case AV_CODEC_ID_ADPCM_ARGO:
2013  /*
2014  * The format of each block:
2015  * uint8_t left_control;
2016  * uint4_t left_samples[nb_samples];
2017  * ---- and if stereo ----
2018  * uint8_t right_control;
2019  * uint4_t right_samples[nb_samples];
2020  *
2021  * Format of the control byte:
2022  * MSB [SSSSRDRR] LSB
2023  * S = (Shift Amount - 2)
2024  * D = Decoder flag.
2025  * R = Reserved
2026  *
2027  * Each block relies on the previous two samples of each channel.
2028  * They should be 0 initially.
2029  */
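 /*
  * Worked example (hypothetical control byte, not taken from any sample):
  *
  *   control = 0xA4 = MSB [1010 0 1 00] LSB
  *     S = 0b1010 -> shift = (control >> 4) + 2 = 12
  *     D = 1      -> decoder flag, passed below as (control & 0x04)
  *     R = 0      -> reserved bits
  *
  * The loop below then reads 16 bytes per channel and expands two samples
  * from each byte with ff_adpcm_argo_expand_nibble().
  */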
2030  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2031  for (channel = 0; channel < avctx->channels; channel++) {
2032  int control, shift;
2033 
2034  samples = samples_p[channel] + block * 32;
2035  cs = c->status + channel;
2036 
2037  /* Get the control byte and decode the samples, 2 at a time. */
2038  control = bytestream2_get_byteu(&gb);
2039  shift = (control >> 4) + 2;
2040 
2041  for (n = 0; n < 16; n++) {
2042  int sample = bytestream2_get_byteu(&gb);
2043  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2044  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2045  }
2046  }
2047  }
2048  break;
2049  case AV_CODEC_ID_ADPCM_ZORK:
2050  for (n = 0; n < nb_samples * avctx->channels; n++) {
2051  int v = bytestream2_get_byteu(&gb);
2052  *samples++ = adpcm_zork_expand_nibble(&c->status[n % avctx->channels], v);
2053  }
2054  break;
2055  case AV_CODEC_ID_ADPCM_IMA_MTF:
2056  for (n = nb_samples / 2; n > 0; n--) {
2057  for (channel = 0; channel < avctx->channels; channel++) {
2058  int v = bytestream2_get_byteu(&gb);
2059  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2060  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2061  }
2062  samples += avctx->channels;
2063  }
2064  break;
2065  default:
2066  av_assert0(0); // unsupported codec_id should not happen
2067  }
2068 
2069  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2070  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2071  return AVERROR_INVALIDDATA;
2072  }
2073 
2074  *got_frame_ptr = 1;
2075 
2076  if (avpkt->size < bytestream2_tell(&gb)) {
2077  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2078  return avpkt->size;
2079  }
2080 
2081  return bytestream2_tell(&gb);
2082 }
2083 
2084 static void adpcm_flush(AVCodecContext *avctx)
2085 {
2086  ADPCMDecodeContext *c = avctx->priv_data;
2087 
2088  /* Just nuke the entire state and re-init. */
2089  memset(c, 0, sizeof(ADPCMDecodeContext));
2090 
2091  switch(avctx->codec_id) {
2092  case AV_CODEC_ID_ADPCM_CT:
2093  c->status[0].step = c->status[1].step = 511;
2094  break;
2095 
2096  case AV_CODEC_ID_ADPCM_IMA_APC:
2097  if (avctx->extradata && avctx->extradata_size >= 8) {
2098  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
2099  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2100  }
2101  break;
2102 
2103  case AV_CODEC_ID_ADPCM_IMA_APM:
2104  if (avctx->extradata) {
2105  if (avctx->extradata_size >= 28) {
2106  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
2107  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
2108  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
2109  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
2110  } else if (avctx->extradata_size >= 16) {
2111  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 0), 18);
2112  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 4), 0, 88);
2113  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 8), 18);
2114  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 12), 0, 88);
2115  }
2116  }
2117  break;
2118 
2119  case AV_CODEC_ID_ADPCM_IMA_WS:
2120  if (avctx->extradata && avctx->extradata_size >= 2)
2121  c->vqa_version = AV_RL16(avctx->extradata);
2122  break;
2123  default:
2124  /* Other codecs may want to handle this during decoding. */
2125  c->has_status = 0;
2126  return;
2127  }
2128 
2129  c->has_status = 1;
2130 }
2131 
2132 
2133 static enum AVSampleFormat sample_fmts_s16[]  = { AV_SAMPLE_FMT_S16,
2134                                                   AV_SAMPLE_FMT_NONE };
2135 static enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
2136                                                   AV_SAMPLE_FMT_NONE };
2137 static enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
2138                                                   AV_SAMPLE_FMT_S16P,
2139                                                   AV_SAMPLE_FMT_NONE };
2140 
2141 #define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
2142 AVCodec ff_ ## name_ ## _decoder = { \
2143  .name = #name_, \
2144  .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
2145  .type = AVMEDIA_TYPE_AUDIO, \
2146  .id = id_, \
2147  .priv_data_size = sizeof(ADPCMDecodeContext), \
2148  .init = adpcm_decode_init, \
2149  .decode = adpcm_decode_frame, \
2150  .flush = adpcm_flush, \
2151  .capabilities = AV_CODEC_CAP_DR1, \
2152  .sample_fmts = sample_fmts_, \
2153  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, \
2154 }
2155 
2156 /* Note: Do not forget to add new entries to the Makefile as well. */
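 /*
  * For illustration only (expansion sketch, not present in the source): the
  * first entry below, ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p,
  * adpcm_4xm, "ADPCM 4X Movie"), expands to roughly:
  *
  *   AVCodec ff_adpcm_4xm_decoder = {
  *       .name            = "adpcm_4xm",
  *       .long_name       = NULL_IF_CONFIG_SMALL("ADPCM 4X Movie"),
  *       .type            = AVMEDIA_TYPE_AUDIO,
  *       .id              = AV_CODEC_ID_ADPCM_4XM,
  *       .priv_data_size  = sizeof(ADPCMDecodeContext),
  *       .init            = adpcm_decode_init,
  *       .decode          = adpcm_decode_frame,
  *       .flush           = adpcm_flush,
  *       .capabilities    = AV_CODEC_CAP_DR1,
  *       .sample_fmts     = sample_fmts_s16p,
  *       .caps_internal   = FF_CODEC_CAP_INIT_THREADSAFE,
  *   };
  */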
2157 ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie");
2158 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
2159 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie");
2160 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
2161 ADPCM_DECODER(AV_CODEC_ID_ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games");
2162 ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
2163 ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
2164 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
2165 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
2166 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
2167 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
2168 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
2169 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
2170 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
2171 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
2172 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM");
2173 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments");
2174 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4");
2175 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
2176 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
2177 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
2178 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
2179 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
2180 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX");
2181 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework");
2182 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
2183 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
2184 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
2185 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive");
2186 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
2187 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP");
2188 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
2189 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
2190 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft");
2191 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF");
2192 ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation");
2193 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
2194 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
2195 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
2196 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
2197 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)");
2198 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
2199 ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA");
2200 ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");
2201 ADPCM_DECODER(AV_CODEC_ID_ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork");