/*
 * TAK decoder
 * Copyright (c) 2012 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TAK (Tom's lossless Audio Kompressor) decoder
 * @author Paul B Mahol
 */

#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/mem_internal.h"
#include "libavutil/samplefmt.h"

#define CACHED_BITSTREAM_READER !ARCH_X86_32
#define BITSTREAM_READER_LE
#include "audiodsp.h"
#include "thread.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "unary.h"
#include "tak.h"
#include "takdsp.h"

#define MAX_SUBFRAMES  8   ///< max number of subframes per channel
#define MAX_PREDICTORS 256

typedef struct MCDParam {
    int8_t present;  ///< decorrelation parameter availability for this channel
    int8_t index;    ///< index into array of decorrelation types
    int8_t chan1;
    int8_t chan2;
} MCDParam;

typedef struct TAKDecContext {
    AVCodecContext *avctx;                          ///< parent AVCodecContext
    AudioDSPContext adsp;
    TAKDSPContext   tdsp;
    TAKStreamInfo   ti;
    GetBitContext   gb;                             ///< bitstream reader initialized to start at the current frame

    int             uval;
    int             nb_samples;                     ///< number of samples in the current frame
    uint8_t        *decode_buffer;
    unsigned int    decode_buffer_size;
    int32_t        *decoded[TAK_MAX_CHANNELS];      ///< decoded samples for each channel

    int8_t          lpc_mode[TAK_MAX_CHANNELS];
    int8_t          sample_shift[TAK_MAX_CHANNELS]; ///< shift applied to every sample in the channel
    int16_t         predictors[MAX_PREDICTORS];
    int             nb_subframes;                   ///< number of subframes in the current frame
    int16_t         subframe_len[MAX_SUBFRAMES];    ///< subframe length in samples
    int             subframe_scale;

    int8_t          dmode;                          ///< channel decorrelation type in the current frame

    MCDParam        mcdparams[TAK_MAX_CHANNELS];    ///< multichannel decorrelation parameters

    int8_t          coding_mode[128];
    DECLARE_ALIGNED(16, int16_t, filter)[MAX_PREDICTORS];
    DECLARE_ALIGNED(16, int16_t, residues)[544];
} TAKDecContext;

static const int8_t mc_dmodes[] = { 1, 3, 4, 6, };

static const uint16_t predictor_sizes[] = {
    4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 160, 192, 224, 256, 0,
};

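/*
 * Parameters for the escape-coded residuals read by decode_segment().
 * For a given mode, 'init' is the number of bits read unconditionally for
 * each value; 'escape' and 'aescape' are thresholds above which additional
 * escape/scale information follows in the bitstream, with 'scale' and
 * 'bias' extending the representable range.  Most widths appear twice,
 * differing in their escape/scale split.  See decode_segment() below for
 * the exact bit layout.
 */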
static const struct CParam {
    int init;
    int escape;
    int scale;
    int aescape;
    int bias;
} xcodes[50] = {
    { 0x01, 0x0000001, 0x0000001, 0x0000003, 0x0000008 },
    { 0x02, 0x0000003, 0x0000001, 0x0000007, 0x0000006 },
    { 0x03, 0x0000005, 0x0000002, 0x000000E, 0x000000D },
    { 0x03, 0x0000003, 0x0000003, 0x000000D, 0x0000018 },
    { 0x04, 0x000000B, 0x0000004, 0x000001C, 0x0000019 },
    { 0x04, 0x0000006, 0x0000006, 0x000001A, 0x0000030 },
    { 0x05, 0x0000016, 0x0000008, 0x0000038, 0x0000032 },
    { 0x05, 0x000000C, 0x000000C, 0x0000034, 0x0000060 },
    { 0x06, 0x000002C, 0x0000010, 0x0000070, 0x0000064 },
    { 0x06, 0x0000018, 0x0000018, 0x0000068, 0x00000C0 },
    { 0x07, 0x0000058, 0x0000020, 0x00000E0, 0x00000C8 },
    { 0x07, 0x0000030, 0x0000030, 0x00000D0, 0x0000180 },
    { 0x08, 0x00000B0, 0x0000040, 0x00001C0, 0x0000190 },
    { 0x08, 0x0000060, 0x0000060, 0x00001A0, 0x0000300 },
    { 0x09, 0x0000160, 0x0000080, 0x0000380, 0x0000320 },
    { 0x09, 0x00000C0, 0x00000C0, 0x0000340, 0x0000600 },
    { 0x0A, 0x00002C0, 0x0000100, 0x0000700, 0x0000640 },
    { 0x0A, 0x0000180, 0x0000180, 0x0000680, 0x0000C00 },
    { 0x0B, 0x0000580, 0x0000200, 0x0000E00, 0x0000C80 },
    { 0x0B, 0x0000300, 0x0000300, 0x0000D00, 0x0001800 },
    { 0x0C, 0x0000B00, 0x0000400, 0x0001C00, 0x0001900 },
    { 0x0C, 0x0000600, 0x0000600, 0x0001A00, 0x0003000 },
    { 0x0D, 0x0001600, 0x0000800, 0x0003800, 0x0003200 },
    { 0x0D, 0x0000C00, 0x0000C00, 0x0003400, 0x0006000 },
    { 0x0E, 0x0002C00, 0x0001000, 0x0007000, 0x0006400 },
    { 0x0E, 0x0001800, 0x0001800, 0x0006800, 0x000C000 },
    { 0x0F, 0x0005800, 0x0002000, 0x000E000, 0x000C800 },
    { 0x0F, 0x0003000, 0x0003000, 0x000D000, 0x0018000 },
    { 0x10, 0x000B000, 0x0004000, 0x001C000, 0x0019000 },
    { 0x10, 0x0006000, 0x0006000, 0x001A000, 0x0030000 },
    { 0x11, 0x0016000, 0x0008000, 0x0038000, 0x0032000 },
    { 0x11, 0x000C000, 0x000C000, 0x0034000, 0x0060000 },
    { 0x12, 0x002C000, 0x0010000, 0x0070000, 0x0064000 },
    { 0x12, 0x0018000, 0x0018000, 0x0068000, 0x00C0000 },
    { 0x13, 0x0058000, 0x0020000, 0x00E0000, 0x00C8000 },
    { 0x13, 0x0030000, 0x0030000, 0x00D0000, 0x0180000 },
    { 0x14, 0x00B0000, 0x0040000, 0x01C0000, 0x0190000 },
    { 0x14, 0x0060000, 0x0060000, 0x01A0000, 0x0300000 },
    { 0x15, 0x0160000, 0x0080000, 0x0380000, 0x0320000 },
    { 0x15, 0x00C0000, 0x00C0000, 0x0340000, 0x0600000 },
    { 0x16, 0x02C0000, 0x0100000, 0x0700000, 0x0640000 },
    { 0x16, 0x0180000, 0x0180000, 0x0680000, 0x0C00000 },
    { 0x17, 0x0580000, 0x0200000, 0x0E00000, 0x0C80000 },
    { 0x17, 0x0300000, 0x0300000, 0x0D00000, 0x1800000 },
    { 0x18, 0x0B00000, 0x0400000, 0x1C00000, 0x1900000 },
    { 0x18, 0x0600000, 0x0600000, 0x1A00000, 0x3000000 },
    { 0x19, 0x1600000, 0x0800000, 0x3800000, 0x3200000 },
    { 0x19, 0x0C00000, 0x0C00000, 0x3400000, 0x6000000 },
    { 0x1A, 0x2C00000, 0x1000000, 0x7000000, 0x6400000 },
    { 0x1A, 0x1800000, 0x1800000, 0x6800000, 0xC000000 },
};

static int set_bps_params(AVCodecContext *avctx)
{
    switch (avctx->bits_per_raw_sample) {
    case 8:
        avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
        break;
    case 16:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
        break;
    case 24:
        avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "invalid/unsupported bits per sample: %d\n",
               avctx->bits_per_raw_sample);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

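/*
 * Derive the coding-mode window unit (uval) and the subframe-length unit
 * (subframe_scale) from the sample rate: roughly sample_rate / 512, rounded
 * up to a multiple of 4, then scaled up for low sample rates.  Worked
 * example at 44100 Hz: (44100 + 511) >> 9 = 87, FFALIGN(87, 4) = 88,
 * shift = 0, so uval = 88 and subframe_scale = 176.
 */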
static void set_sample_rate_params(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;
    int shift;

    if (avctx->sample_rate < 11025) {
        shift = 3;
    } else if (avctx->sample_rate < 22050) {
        shift = 2;
    } else if (avctx->sample_rate < 44100) {
        shift = 1;
    } else {
        shift = 0;
    }
    s->uval           = FFALIGN(avctx->sample_rate + 511LL >> 9, 4) << shift;
    s->subframe_scale = FFALIGN(avctx->sample_rate + 511LL >> 9, 4) << 1;
}

static av_cold int tak_decode_init(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    ff_audiodsp_init(&s->adsp);
    ff_takdsp_init(&s->tdsp);

    s->avctx = avctx;
    avctx->bits_per_raw_sample = avctx->bits_per_coded_sample;

    set_sample_rate_params(avctx);

    return set_bps_params(avctx);
}

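/*
 * Undo the fixed difference coding applied on top of the values: modes 1,
 * 2 and 3 correspond, in effect, to first-, second- and third-order
 * integration (running prefix sums) of the decoded sequence.
 */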
static void decode_lpc(int32_t *coeffs, int mode, int length)
{
    int i;

    if (length < 2)
        return;

    if (mode == 1) {
        unsigned a1 = *coeffs++;
        for (i = 0; i < length - 1 >> 1; i++) {
            *coeffs   += a1;
            coeffs[1] += (unsigned)*coeffs;
            a1         = coeffs[1];
            coeffs    += 2;
        }
        if (length - 1 & 1)
            *coeffs += a1;
    } else if (mode == 2) {
        unsigned a1 = coeffs[1];
        unsigned a2 = a1 + *coeffs;
        coeffs[1]   = a2;
        if (length > 2) {
            coeffs += 2;
            for (i = 0; i < length - 2 >> 1; i++) {
                unsigned a3 = *coeffs + a1;
                unsigned a4 = a3 + a2;
                *coeffs     = a4;
                a1          = coeffs[1] + a3;
                a2          = a1 + a4;
                coeffs[1]   = a2;
                coeffs     += 2;
            }
            if (length & 1)
                *coeffs += a1 + a2;
        }
    } else if (mode == 3) {
        unsigned a1 = coeffs[1];
        unsigned a2 = a1 + *coeffs;
        coeffs[1]   = a2;
        if (length > 2) {
            unsigned a3 = coeffs[2];
            unsigned a4 = a3 + a1;
            unsigned a5 = a4 + a2;
            coeffs[2]   = a5;
            coeffs     += 3;
            for (i = 0; i < length - 3; i++) {
                a3      += *coeffs;
                a4      += a3;
                a5      += a4;
                *coeffs  = a5;
                coeffs++;
            }
        }
    }
}

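/*
 * Decode one run of residuals that share a single coding mode.  Mode 0
 * means silence (all zeros); otherwise each value is read with the
 * parameters from xcodes[mode - 1] and finally unfolded from its unsigned
 * zig-zag representation back to a signed residual.
 */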
static int decode_segment(TAKDecContext *s, int8_t mode, int32_t *decoded, int len)
{
    struct CParam code;
    GetBitContext *gb = &s->gb;
    int i;

    if (!mode) {
        memset(decoded, 0, len * sizeof(*decoded));
        return 0;
    }

    if (mode > FF_ARRAY_ELEMS(xcodes))
        return AVERROR_INVALIDDATA;
    code = xcodes[mode - 1];

    for (i = 0; i < len; i++) {
        unsigned x = get_bits_long(gb, code.init);
        if (x >= code.escape && get_bits1(gb)) {
            x |= 1 << code.init;
            if (x >= code.aescape) {
                unsigned scale = get_unary(gb, 1, 9);
                if (scale == 9) {
                    int scale_bits = get_bits(gb, 3);
                    if (scale_bits > 0) {
                        if (scale_bits == 7) {
                            scale_bits += get_bits(gb, 5);
                            if (scale_bits > 29)
                                return AVERROR_INVALIDDATA;
                        }
                        scale = get_bits_long(gb, scale_bits) + 1;
                        x    += code.scale * scale;
                    }
                    x += code.bias;
                } else
                    x += code.scale * scale - code.escape;
            } else
                x -= code.escape;
        }
        decoded[i] = (x >> 1) ^ -(x & 1);
    }

    return 0;
}

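/*
 * Decode 'length' residuals.  Either the whole block uses one coding mode,
 * or (when the first bit is set) the block is split into windows of
 * s->uval samples, each window carrying a delta-coded 6-bit mode; adjacent
 * windows that share a mode are then decoded together as one segment.
 */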
static int decode_residues(TAKDecContext *s, int32_t *decoded, int length)
{
    GetBitContext *gb = &s->gb;
    int i, mode, ret;

    if (length > s->nb_samples)
        return AVERROR_INVALIDDATA;

    if (get_bits1(gb)) {
        int wlength, rval;

        wlength = length / s->uval;

        rval = length - (wlength * s->uval);

        if (rval < s->uval / 2)
            rval += s->uval;
        else
            wlength++;

        if (wlength <= 1 || wlength > 128)
            return AVERROR_INVALIDDATA;

        s->coding_mode[0] = mode = get_bits(gb, 6);

        for (i = 1; i < wlength; i++) {
            int c = get_unary(gb, 1, 6);

            switch (c) {
            case 6:
                mode = get_bits(gb, 6);
                break;
            case 5:
            case 4:
            case 3: {
                /* mode += sign ? (1 - c) : (c - 1) */
                int sign = get_bits1(gb);
                mode    += (-sign ^ (c - 1)) + sign;
                break;
            }
            case 2:
                mode++;
                break;
            case 1:
                mode--;
                break;
            }
            s->coding_mode[i] = mode;
        }

        i = 0;
        while (i < wlength) {
            int len = 0;

            mode = s->coding_mode[i];
            do {
                if (i >= wlength - 1)
                    len += rval;
                else
                    len += s->uval;
                i++;

                if (i == wlength)
                    break;
            } while (s->coding_mode[i] == mode);

            if ((ret = decode_segment(s, mode, decoded, len)) < 0)
                return ret;
            decoded += len;
        }
    } else {
        mode = get_bits(gb, 6);
        if ((ret = decode_segment(s, mode, decoded, length)) < 0)
            return ret;
    }

    return 0;
}

static int get_bits_esc4(GetBitContext *gb)
{
    if (get_bits1(gb))
        return get_bits(gb, 4) + 1;
    else
        return 0;
}

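/*
 * Decode one subframe.  A subframe is either raw residuals or an LPC-coded
 * block: quantized predictor coefficients are read, converted into the
 * final filter (tfilter -> s->filter), and then applied to the decoded
 * residuals through a sliding window over s->residues, with samples
 * rescaled by the per-subframe shift 'dshift'.
 */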
static int decode_subframe(TAKDecContext *s, int32_t *decoded,
                           int subframe_size, int prev_subframe_size)
{
    GetBitContext *gb = &s->gb;
    int x, y, i, j, ret = 0;
    int dshift, size, filter_quant, filter_order;
    int tfilter[MAX_PREDICTORS];

    if (!get_bits1(gb))
        return decode_residues(s, decoded, subframe_size);

    filter_order = predictor_sizes[get_bits(gb, 4)];

    if (prev_subframe_size > 0 && get_bits1(gb)) {
        if (filter_order > prev_subframe_size)
            return AVERROR_INVALIDDATA;

        decoded       -= filter_order;
        subframe_size += filter_order;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;
    } else {
        int lpc_mode;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;

        lpc_mode = get_bits(gb, 2);
        if (lpc_mode > 2)
            return AVERROR_INVALIDDATA;

        if ((ret = decode_residues(s, decoded, filter_order)) < 0)
            return ret;

        if (lpc_mode)
            decode_lpc(decoded, lpc_mode, filter_order);
    }

    dshift = get_bits_esc4(gb);
    size   = get_bits1(gb) + 6;

    filter_quant = 10;
    if (get_bits1(gb)) {
        filter_quant -= get_bits(gb, 3) + 1;
        if (filter_quant < 3)
            return AVERROR_INVALIDDATA;
    }

    s->predictors[0] = get_sbits(gb, 10);
    s->predictors[1] = get_sbits(gb, 10);
    s->predictors[2] = get_sbits(gb, size) * (1 << (10 - size));
    s->predictors[3] = get_sbits(gb, size) * (1 << (10 - size));
    if (filter_order > 4) {
        int tmp = size - get_bits1(gb);

        for (i = 4; i < filter_order; i++) {
            if (!(i & 3))
                x = tmp - get_bits(gb, 2);
            s->predictors[i] = get_sbits(gb, x) * (1 << (10 - size));
        }
    }

    tfilter[0] = s->predictors[0] * 64;
    for (i = 1; i < filter_order; i++) {
        uint32_t *p1 = &tfilter[0];
        uint32_t *p2 = &tfilter[i - 1];

        for (j = 0; j < (i + 1) / 2; j++) {
            x     = *p1 + ((int32_t)(s->predictors[i] * *p2 + 256) >> 9);
            *p2  += (int32_t)(s->predictors[i] * *p1 + 256) >> 9;
            *p1++ = x;
            p2--;
        }

        tfilter[i] = s->predictors[i] * 64;
    }

    x = 1 << (32 - (15 - filter_quant));
    y = 1 << ((15 - filter_quant) - 1);
    for (i = 0, j = filter_order - 1; i < filter_order / 2; i++, j--) {
        s->filter[j] = x - ((tfilter[i] + y) >> (15 - filter_quant));
        s->filter[i] = x - ((tfilter[j] + y) >> (15 - filter_quant));
    }

    if ((ret = decode_residues(s, &decoded[filter_order],
                               subframe_size - filter_order)) < 0)
        return ret;

    for (i = 0; i < filter_order; i++)
        s->residues[i] = *decoded++ >> dshift;

    y = FF_ARRAY_ELEMS(s->residues) - filter_order;
    x = subframe_size - filter_order;
    while (x > 0) {
        int tmp = FFMIN(y, x);

        for (i = 0; i < tmp; i++) {
            int v = 1 << (filter_quant - 1);

            if (filter_order & -16)
                v += (unsigned)s->adsp.scalarproduct_int16(&s->residues[i], s->filter,
                                                           filter_order & -16);
            for (j = filter_order & -16; j < filter_order; j += 4) {
                v += s->residues[i + j + 3] * (unsigned)s->filter[j + 3] +
                     s->residues[i + j + 2] * (unsigned)s->filter[j + 2] +
                     s->residues[i + j + 1] * (unsigned)s->filter[j + 1] +
                     s->residues[i + j    ] * (unsigned)s->filter[j    ];
            }
            v = (av_clip_intp2(v >> filter_quant, 13) * (1 << dshift)) - (unsigned)*decoded;
            *decoded++ = v;
            s->residues[filter_order + i] = v >> dshift;
        }

        x -= tmp;
        if (x > 0)
            memcpy(s->residues, &s->residues[y], 2 * filter_order);
    }

    return 0;
}

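/*
 * Decode all samples of one channel: the first sample is coded verbatim,
 * the rest is split into up to 8 subframes whose boundaries are sent as
 * cumulative 6-bit positions scaled by subframe_scale; each subframe is
 * then handed to decode_subframe().
 */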
static int decode_channel(TAKDecContext *s, int chan)
{
    AVCodecContext *avctx = s->avctx;
    GetBitContext *gb     = &s->gb;
    int32_t *decoded      = s->decoded[chan];
    int left              = s->nb_samples - 1;
    int i = 0, ret, prev  = 0;

    s->sample_shift[chan] = get_bits_esc4(gb);
    if (s->sample_shift[chan] >= avctx->bits_per_raw_sample)
        return AVERROR_INVALIDDATA;

    *decoded++        = get_sbits(gb, avctx->bits_per_raw_sample - s->sample_shift[chan]);
    s->lpc_mode[chan] = get_bits(gb, 2);
    s->nb_subframes   = get_bits(gb, 3) + 1;

    if (s->nb_subframes > 1) {
        if (get_bits_left(gb) < (s->nb_subframes - 1) * 6)
            return AVERROR_INVALIDDATA;

        for (; i < s->nb_subframes - 1; i++) {
            int v = get_bits(gb, 6);

            s->subframe_len[i] = (v - prev) * s->subframe_scale;
            if (s->subframe_len[i] <= 0)
                return AVERROR_INVALIDDATA;

            left -= s->subframe_len[i];
            prev  = v;
        }

        if (left <= 0)
            return AVERROR_INVALIDDATA;
    }
    s->subframe_len[i] = left;

    prev = 0;
    for (i = 0; i < s->nb_subframes; i++) {
        if ((ret = decode_subframe(s, decoded, s->subframe_len[i], prev)) < 0)
            return ret;
        decoded += s->subframe_len[i];
        prev     = s->subframe_len[i];
    }

    return 0;
}

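/*
 * Undo inter-channel decorrelation between channels c1 and c2.  Modes 1-3
 * are simple mid/side style combinations handled by the DSP helpers,
 * modes 4-5 additionally apply a scale factor, and modes 6-7 run an
 * adaptive 8- or 16-tap FIR filter over one channel using the other as
 * reference (the 4->5 and 6->7 fall-throughs only swap the operands).
 */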
static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
{
    GetBitContext *gb = &s->gb;
    int32_t *p1 = s->decoded[c1] + (s->dmode > 5);
    int32_t *p2 = s->decoded[c2] + (s->dmode > 5);
    int32_t bp1 = p1[0];
    int32_t bp2 = p2[0];
    int i;
    int dshift, dfactor;

    length += s->dmode < 6;

    switch (s->dmode) {
    case 1: /* left/side */
        s->tdsp.decorrelate_ls(p1, p2, length);
        break;
    case 2: /* side/right */
        s->tdsp.decorrelate_sr(p1, p2, length);
        break;
    case 3: /* side/mid */
        s->tdsp.decorrelate_sm(p1, p2, length);
        break;
    case 4: /* side/left with scale factor */
        FFSWAP(int32_t*, p1, p2);
        FFSWAP(int32_t, bp1, bp2);
    case 5: /* side/right with scale factor */
        dshift  = get_bits_esc4(gb);
        dfactor = get_sbits(gb, 10);
        s->tdsp.decorrelate_sf(p1, p2, length, dshift, dfactor);
        break;
    case 6:
        FFSWAP(int32_t*, p1, p2);
    case 7: {
        int length2, order_half, filter_order, dval1, dval2;
        int tmp, x, code_size;

        if (length < 256)
            return AVERROR_INVALIDDATA;

        dshift       = get_bits_esc4(gb);
        filter_order = 8 << get_bits1(gb);
        dval1        = get_bits1(gb);
        dval2        = get_bits1(gb);

        for (i = 0; i < filter_order; i++) {
            if (!(i & 3))
                code_size = 14 - get_bits(gb, 3);
            s->filter[i] = get_sbits(gb, code_size);
        }

        order_half = filter_order / 2;
        length2    = length - (filter_order - 1);

        /* decorrelate beginning samples */
        if (dval1) {
            for (i = 0; i < order_half; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        /* decorrelate ending samples */
        if (dval2) {
            for (i = length2 + order_half; i < length; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        for (i = 0; i < filter_order; i++)
            s->residues[i] = *p2++ >> dshift;

        p1 += order_half;
        x = FF_ARRAY_ELEMS(s->residues) - filter_order;
        for (; length2 > 0; length2 -= tmp) {
            tmp = FFMIN(length2, x);

            for (i = 0; i < tmp - (tmp == length2); i++)
                s->residues[filter_order + i] = *p2++ >> dshift;

            for (i = 0; i < tmp; i++) {
                int v = 1 << 9;

                if (filter_order == 16) {
                    v += s->adsp.scalarproduct_int16(&s->residues[i], s->filter,
                                                     filter_order);
                } else {
                    v += s->residues[i + 7] * s->filter[7] +
                         s->residues[i + 6] * s->filter[6] +
                         s->residues[i + 5] * s->filter[5] +
                         s->residues[i + 4] * s->filter[4] +
                         s->residues[i + 3] * s->filter[3] +
                         s->residues[i + 2] * s->filter[2] +
                         s->residues[i + 1] * s->filter[1] +
                         s->residues[i    ] * s->filter[0];
                }

                v = av_clip_intp2(v >> 10, 13) * (1U << dshift) - *p1;
                *p1++ = v;
            }

            memmove(s->residues, &s->residues[tmp], 2 * filter_order);
        }
        break;
    }
    }

    if (s->dmode > 0 && s->dmode < 6) {
        p1[0] = bp1;
        p2[0] = bp2;
    }

    return 0;
}

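/*
 * Decode one TAK frame: parse and CRC-check the frame header, (re)configure
 * the output format from the stream info, decode every channel, undo
 * stereo/multichannel decorrelation, apply the per-channel LPC and sample
 * shift, and finally convert the 32-bit working buffer to the output
 * sample format.
 */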
static int tak_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame_ptr, AVPacket *pkt)
{
    TAKDecContext *s = avctx->priv_data;
    GetBitContext *gb = &s->gb;
    int chan, i, ret, hsize;

    if (pkt->size < TAK_MIN_FRAME_HEADER_BYTES)
        return AVERROR_INVALIDDATA;

    if ((ret = init_get_bits8(gb, pkt->data, pkt->size)) < 0)
        return ret;

    if ((ret = ff_tak_decode_frame_header(avctx, gb, &s->ti, 0)) < 0)
        return ret;

    hsize = get_bits_count(gb) / 8;
    if (avctx->err_recognition & (AV_EF_CRCCHECK | AV_EF_COMPLIANT)) {
        if (ff_tak_check_crc(pkt->data, hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }

    if (s->ti.codec != TAK_CODEC_MONO_STEREO &&
        s->ti.codec != TAK_CODEC_MULTICHANNEL) {
        avpriv_report_missing_feature(avctx, "TAK codec type %d", s->ti.codec);
        return AVERROR_PATCHWELCOME;
    }
    if (s->ti.data_type) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported data type: %d\n", s->ti.data_type);
        return AVERROR_INVALIDDATA;
    }
    if (s->ti.codec == TAK_CODEC_MONO_STEREO && s->ti.channels > 2) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }
    if (s->ti.channels > 6) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }

    if (s->ti.frame_samples <= 0) {
        av_log(avctx, AV_LOG_ERROR, "unsupported/invalid number of samples\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->bits_per_raw_sample = s->ti.bps;
    if ((ret = set_bps_params(avctx)) < 0)
        return ret;
    if (s->ti.sample_rate != avctx->sample_rate) {
        avctx->sample_rate = s->ti.sample_rate;
        set_sample_rate_params(avctx);
    }

    av_channel_layout_uninit(&avctx->ch_layout);
    if (s->ti.ch_layout) {
        av_channel_layout_from_mask(&avctx->ch_layout, s->ti.ch_layout);
    } else {
        avctx->ch_layout.order       = AV_CHANNEL_ORDER_UNSPEC;
        avctx->ch_layout.nb_channels = s->ti.channels;
    }

    s->nb_samples = s->ti.last_frame_samples ? s->ti.last_frame_samples
                                             : s->ti.frame_samples;

    frame->nb_samples = s->nb_samples;
    if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    if (avctx->bits_per_raw_sample <= 16) {
        int buf_size = av_samples_get_buffer_size(NULL, avctx->ch_layout.nb_channels,
                                                  s->nb_samples,
                                                  AV_SAMPLE_FMT_S32P, 0);
        if (buf_size < 0)
            return buf_size;
        av_fast_malloc(&s->decode_buffer, &s->decode_buffer_size, buf_size);
        if (!s->decode_buffer)
            return AVERROR(ENOMEM);
        ret = av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
                                     s->decode_buffer, avctx->ch_layout.nb_channels,
                                     s->nb_samples, AV_SAMPLE_FMT_S32P, 0);
        if (ret < 0)
            return ret;
    } else {
        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++)
            s->decoded[chan] = (int32_t *)frame->extended_data[chan];
    }

    if (s->nb_samples < 16) {
        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++) {
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                decoded[i] = get_sbits(gb, avctx->bits_per_raw_sample);
        }
    } else {
        if (s->ti.codec == TAK_CODEC_MONO_STEREO) {
            for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++)
                if (ret = decode_channel(s, chan))
                    return ret;

            if (avctx->ch_layout.nb_channels == 2) {
                s->nb_subframes = get_bits(gb, 1) + 1;
                if (s->nb_subframes > 1) {
                    s->subframe_len[1] = get_bits(gb, 6);
                }

                s->dmode = get_bits(gb, 3);
                if (ret = decorrelate(s, 0, 1, s->nb_samples - 1))
                    return ret;
            }
        } else if (s->ti.codec == TAK_CODEC_MULTICHANNEL) {
            if (get_bits1(gb)) {
                int ch_mask = 0;

                chan = get_bits(gb, 4) + 1;
                if (chan > avctx->ch_layout.nb_channels)
                    return AVERROR_INVALIDDATA;

                for (i = 0; i < chan; i++) {
                    int nbit = get_bits(gb, 4);

                    if (nbit >= avctx->ch_layout.nb_channels)
                        return AVERROR_INVALIDDATA;

                    if (ch_mask & 1 << nbit)
                        return AVERROR_INVALIDDATA;

                    s->mcdparams[i].present = get_bits1(gb);
                    if (s->mcdparams[i].present) {
                        s->mcdparams[i].index = get_bits(gb, 2);
                        s->mcdparams[i].chan2 = get_bits(gb, 4);
                        if (s->mcdparams[i].chan2 >= avctx->ch_layout.nb_channels) {
                            av_log(avctx, AV_LOG_ERROR,
                                   "invalid channel 2 (%d) for %d channel(s)\n",
                                   s->mcdparams[i].chan2, avctx->ch_layout.nb_channels);
                            return AVERROR_INVALIDDATA;
                        }
                        if (s->mcdparams[i].index == 1) {
                            if ((nbit == s->mcdparams[i].chan2) ||
                                (ch_mask & 1 << s->mcdparams[i].chan2))
                                return AVERROR_INVALIDDATA;

                            ch_mask |= 1 << s->mcdparams[i].chan2;
                        } else if (!(ch_mask & 1 << s->mcdparams[i].chan2)) {
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    s->mcdparams[i].chan1 = nbit;

                    ch_mask |= 1 << nbit;
                }
            } else {
                chan = avctx->ch_layout.nb_channels;
                for (i = 0; i < chan; i++) {
                    s->mcdparams[i].present = 0;
                    s->mcdparams[i].chan1   = i;
                }
            }

            for (i = 0; i < chan; i++) {
                if (s->mcdparams[i].present && s->mcdparams[i].index == 1)
                    if (ret = decode_channel(s, s->mcdparams[i].chan2))
                        return ret;

                if (ret = decode_channel(s, s->mcdparams[i].chan1))
                    return ret;

                if (s->mcdparams[i].present) {
                    s->dmode = mc_dmodes[s->mcdparams[i].index];
                    if (ret = decorrelate(s,
                                          s->mcdparams[i].chan2,
                                          s->mcdparams[i].chan1,
                                          s->nb_samples - 1))
                        return ret;
                }
            }
        }

        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++) {
            int32_t *decoded = s->decoded[chan];

            if (s->lpc_mode[chan])
                decode_lpc(decoded, s->lpc_mode[chan], s->nb_samples);

            if (s->sample_shift[chan] > 0)
                for (i = 0; i < s->nb_samples; i++)
                    decoded[i] *= 1U << s->sample_shift[chan];
        }
    }

    align_get_bits(gb);
    skip_bits(gb, 24);
    if (get_bits_left(gb) < 0)
        av_log(avctx, AV_LOG_DEBUG, "overread\n");
    else if (get_bits_left(gb) > 0)
        av_log(avctx, AV_LOG_DEBUG, "underread\n");

    if (avctx->err_recognition & (AV_EF_CRCCHECK | AV_EF_COMPLIANT)) {
        if (ff_tak_check_crc(pkt->data + hsize,
                             get_bits_count(gb) / 8 - hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }

    /* convert to output buffer */
    switch (avctx->sample_fmt) {
    case AV_SAMPLE_FMT_U8P:
        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++) {
            uint8_t *samples = (uint8_t *)frame->extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i] + 0x80U;
        }
        break;
    case AV_SAMPLE_FMT_S16P:
        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++) {
            int16_t *samples = (int16_t *)frame->extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i];
        }
        break;
    case AV_SAMPLE_FMT_S32P:
        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++) {
            int32_t *samples = (int32_t *)frame->extended_data[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] *= 1U << 8;
        }
        break;
    }

    *got_frame_ptr = 1;

    return pkt->size;
}

#if HAVE_THREADS
static int update_thread_context(AVCodecContext *dst,
                                 const AVCodecContext *src)
{
    TAKDecContext *tsrc = src->priv_data;
    TAKDecContext *tdst = dst->priv_data;

    if (dst == src)
        return 0;
    memcpy(&tdst->ti, &tsrc->ti, sizeof(TAKStreamInfo));
    return 0;
}
#endif

static av_cold int tak_decode_close(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    av_freep(&s->decode_buffer);

    return 0;
}

const FFCodec ff_tak_decoder = {
    .p.name         = "tak",
    CODEC_LONG_NAME("TAK (Tom's lossless Audio Kompressor)"),
    .p.type         = AVMEDIA_TYPE_AUDIO,
    .p.id           = AV_CODEC_ID_TAK,
    .priv_data_size = sizeof(TAKDecContext),
    .init           = tak_decode_init,
    .close          = tak_decode_close,
    FF_CODEC_DECODE_CB(tak_decode_frame),
    UPDATE_THREAD_CONTEXT(update_thread_context),
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
                      AV_CODEC_CAP_CHANNEL_CONF,
    .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                      AV_SAMPLE_FMT_S16P,
                                                      AV_SAMPLE_FMT_S32P,
                                                      AV_SAMPLE_FMT_NONE },
};