FFmpeg
takdec.c
Go to the documentation of this file.
1 /*
2  * TAK decoder
3  * Copyright (c) 2012 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * TAK (Tom's lossless Audio Kompressor) decoder
25  * @author Paul B Mahol
26  */
27 
28 #include "libavutil/internal.h"
29 #include "libavutil/samplefmt.h"
30 
31 #define BITSTREAM_READER_LE
32 #include "audiodsp.h"
33 #include "thread.h"
34 #include "avcodec.h"
35 #include "internal.h"
36 #include "unary.h"
37 #include "tak.h"
38 #include "takdsp.h"
39 
40 #define MAX_SUBFRAMES 8 ///< max number of subframes per channel
41 #define MAX_PREDICTORS 256
42 
/**
 * Per-channel-pair multichannel decorrelation parameters,
 * filled from the bitstream in tak_decode_frame().
 */
 43 typedef struct MCDParam {
 44  int8_t present; ///< decorrelation parameter availability for this channel
 45  int8_t index; ///< index into array of decorrelation types
 46  int8_t chan1; ///< first channel index of the decorrelated pair
 47  int8_t chan2; ///< second channel index of the decorrelated pair
 48 } MCDParam;
49 
50 typedef struct TAKDecContext {
51  AVCodecContext *avctx; ///< parent AVCodecContext
55  GetBitContext gb; ///< bitstream reader initialized to start at the current frame
56 
57  int uval;
58  int nb_samples; ///< number of samples in the current frame
60  unsigned int decode_buffer_size;
61  int32_t *decoded[TAK_MAX_CHANNELS]; ///< decoded samples for each channel
62 
64  int8_t sample_shift[TAK_MAX_CHANNELS]; ///< shift applied to every sample in the channel
66  int nb_subframes; ///< number of subframes in the current frame
67  int16_t subframe_len[MAX_SUBFRAMES]; ///< subframe length in samples
69 
70  int8_t dmode; ///< channel decorrelation type in the current frame
71 
72  MCDParam mcdparams[TAK_MAX_CHANNELS]; ///< multichannel decorrelation parameters
73 
74  int8_t coding_mode[128];
76  DECLARE_ALIGNED(16, int16_t, residues)[544];
78 
/* maps MCDParam.index (a 2-bit code) to the decorrelate() dmode value */
 79 static const int8_t mc_dmodes[] = { 1, 3, 4, 6, };
 80 
/* maps the 4-bit code read in decode_subframe() to a predictor (filter) order */
 81 static const uint16_t predictor_sizes[] = {
 82  4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 160, 192, 224, 256, 0,
 83 };
84 
/**
 * Parameters of the 50 escaped variable-length codes used by
 * decode_segment(); a segment's 1-based coding mode selects xcodes[mode - 1].
 */
 85 static const struct CParam {
 86  int init; ///< number of bits read up front for each value
 87  int escape; ///< threshold at which (with a set flag bit) escape decoding starts
 88  int scale; ///< multiplier applied to the decoded scale value
 89  int aescape; ///< threshold selecting the secondary escape path
 90  int bias; ///< offset added on the secondary escape path
 91 } xcodes[50] = {
 92  { 0x01, 0x0000001, 0x0000001, 0x0000003, 0x0000008 },
 93  { 0x02, 0x0000003, 0x0000001, 0x0000007, 0x0000006 },
 94  { 0x03, 0x0000005, 0x0000002, 0x000000E, 0x000000D },
 95  { 0x03, 0x0000003, 0x0000003, 0x000000D, 0x0000018 },
 96  { 0x04, 0x000000B, 0x0000004, 0x000001C, 0x0000019 },
 97  { 0x04, 0x0000006, 0x0000006, 0x000001A, 0x0000030 },
 98  { 0x05, 0x0000016, 0x0000008, 0x0000038, 0x0000032 },
 99  { 0x05, 0x000000C, 0x000000C, 0x0000034, 0x0000060 },
 100  { 0x06, 0x000002C, 0x0000010, 0x0000070, 0x0000064 },
 101  { 0x06, 0x0000018, 0x0000018, 0x0000068, 0x00000C0 },
 102  { 0x07, 0x0000058, 0x0000020, 0x00000E0, 0x00000C8 },
 103  { 0x07, 0x0000030, 0x0000030, 0x00000D0, 0x0000180 },
 104  { 0x08, 0x00000B0, 0x0000040, 0x00001C0, 0x0000190 },
 105  { 0x08, 0x0000060, 0x0000060, 0x00001A0, 0x0000300 },
 106  { 0x09, 0x0000160, 0x0000080, 0x0000380, 0x0000320 },
 107  { 0x09, 0x00000C0, 0x00000C0, 0x0000340, 0x0000600 },
 108  { 0x0A, 0x00002C0, 0x0000100, 0x0000700, 0x0000640 },
 109  { 0x0A, 0x0000180, 0x0000180, 0x0000680, 0x0000C00 },
 110  { 0x0B, 0x0000580, 0x0000200, 0x0000E00, 0x0000C80 },
 111  { 0x0B, 0x0000300, 0x0000300, 0x0000D00, 0x0001800 },
 112  { 0x0C, 0x0000B00, 0x0000400, 0x0001C00, 0x0001900 },
 113  { 0x0C, 0x0000600, 0x0000600, 0x0001A00, 0x0003000 },
 114  { 0x0D, 0x0001600, 0x0000800, 0x0003800, 0x0003200 },
 115  { 0x0D, 0x0000C00, 0x0000C00, 0x0003400, 0x0006000 },
 116  { 0x0E, 0x0002C00, 0x0001000, 0x0007000, 0x0006400 },
 117  { 0x0E, 0x0001800, 0x0001800, 0x0006800, 0x000C000 },
 118  { 0x0F, 0x0005800, 0x0002000, 0x000E000, 0x000C800 },
 119  { 0x0F, 0x0003000, 0x0003000, 0x000D000, 0x0018000 },
 120  { 0x10, 0x000B000, 0x0004000, 0x001C000, 0x0019000 },
 121  { 0x10, 0x0006000, 0x0006000, 0x001A000, 0x0030000 },
 122  { 0x11, 0x0016000, 0x0008000, 0x0038000, 0x0032000 },
 123  { 0x11, 0x000C000, 0x000C000, 0x0034000, 0x0060000 },
 124  { 0x12, 0x002C000, 0x0010000, 0x0070000, 0x0064000 },
 125  { 0x12, 0x0018000, 0x0018000, 0x0068000, 0x00C0000 },
 126  { 0x13, 0x0058000, 0x0020000, 0x00E0000, 0x00C8000 },
 127  { 0x13, 0x0030000, 0x0030000, 0x00D0000, 0x0180000 },
 128  { 0x14, 0x00B0000, 0x0040000, 0x01C0000, 0x0190000 },
 129  { 0x14, 0x0060000, 0x0060000, 0x01A0000, 0x0300000 },
 130  { 0x15, 0x0160000, 0x0080000, 0x0380000, 0x0320000 },
 131  { 0x15, 0x00C0000, 0x00C0000, 0x0340000, 0x0600000 },
 132  { 0x16, 0x02C0000, 0x0100000, 0x0700000, 0x0640000 },
 133  { 0x16, 0x0180000, 0x0180000, 0x0680000, 0x0C00000 },
 134  { 0x17, 0x0580000, 0x0200000, 0x0E00000, 0x0C80000 },
 135  { 0x17, 0x0300000, 0x0300000, 0x0D00000, 0x1800000 },
 136  { 0x18, 0x0B00000, 0x0400000, 0x1C00000, 0x1900000 },
 137  { 0x18, 0x0600000, 0x0600000, 0x1A00000, 0x3000000 },
 138  { 0x19, 0x1600000, 0x0800000, 0x3800000, 0x3200000 },
 139  { 0x19, 0x0C00000, 0x0C00000, 0x3400000, 0x6000000 },
 140  { 0x1A, 0x2C00000, 0x1000000, 0x7000000, 0x6400000 },
 141  { 0x1A, 0x1800000, 0x1800000, 0x6800000, 0xC000000 },
 142 };
143 
144 static int set_bps_params(AVCodecContext *avctx)
145 {
146  switch (avctx->bits_per_raw_sample) {
147  case 8:
148  avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
149  break;
150  case 16:
152  break;
153  case 24:
155  break;
156  default:
157  av_log(avctx, AV_LOG_ERROR, "invalid/unsupported bits per sample: %d\n",
158  avctx->bits_per_raw_sample);
159  return AVERROR_INVALIDDATA;
160  }
161 
162  return 0;
163 }
164 
166 {
167  TAKDecContext *s = avctx->priv_data;
168  int shift;
169 
170  if (avctx->sample_rate < 11025) {
171  shift = 3;
172  } else if (avctx->sample_rate < 22050) {
173  shift = 2;
174  } else if (avctx->sample_rate < 44100) {
175  shift = 1;
176  } else {
177  shift = 0;
178  }
179  s->uval = FFALIGN(avctx->sample_rate + 511LL >> 9, 4) << shift;
180  s->subframe_scale = FFALIGN(avctx->sample_rate + 511LL >> 9, 4) << 1;
181 }
182 
184 {
185  TAKDecContext *s = avctx->priv_data;
186 
187  ff_audiodsp_init(&s->adsp);
188  ff_takdsp_init(&s->tdsp);
189 
190  s->avctx = avctx;
192 
193  set_sample_rate_params(avctx);
194 
195  return set_bps_params(avctx);
196 }
197 
/**
 * Integrate (cumulative-sum) an array of deltas in place, undoing the
 * delta coding applied by the encoder.  mode selects the integration
 * order (1, 2 or 3 nested running sums).  All arithmetic is done in
 * unsigned so overflow wraps without undefined behavior.
 * Arrays shorter than 2 entries are left untouched.
 */
198 static void decode_lpc(int32_t *coeffs, int mode, int length)
199 {
200  int i;
201 
202  if (length < 2)
203  return;
204 
/* first-order integration, two elements per iteration */
205  if (mode == 1) {
206  unsigned a1 = *coeffs++;
207  for (i = 0; i < length - 1 >> 1; i++) {
208  *coeffs += a1;
209  coeffs[1] += (unsigned)*coeffs;
210  a1 = coeffs[1];
211  coeffs += 2;
212  }
/* odd tail element */
213  if (length - 1 & 1)
214  *coeffs += a1;
/* second-order integration */
215  } else if (mode == 2) {
216  unsigned a1 = coeffs[1];
217  unsigned a2 = a1 + *coeffs;
218  coeffs[1] = a2;
219  if (length > 2) {
220  coeffs += 2;
221  for (i = 0; i < length - 2 >> 1; i++) {
222  unsigned a3 = *coeffs + a1;
223  unsigned a4 = a3 + a2;
224  *coeffs = a4;
225  a1 = coeffs[1] + a3;
226  a2 = a1 + a4;
227  coeffs[1] = a2;
228  coeffs += 2;
229  }
230  if (length & 1)
231  *coeffs += a1 + a2;
232  }
/* third-order integration */
233  } else if (mode == 3) {
234  unsigned a1 = coeffs[1];
235  unsigned a2 = a1 + *coeffs;
236  coeffs[1] = a2;
237  if (length > 2) {
238  unsigned a3 = coeffs[2];
239  unsigned a4 = a3 + a1;
240  unsigned a5 = a4 + a2;
241  coeffs[2] = a5;
242  coeffs += 3;
243  for (i = 0; i < length - 3; i++) {
244  a3 += *coeffs;
245  a4 += a3;
246  a5 += a4;
247  *coeffs = a5;
248  coeffs++;
249  }
250  }
251  }
252 }
253 
/**
 * Decode one run of 'len' residues that all share coding mode 'mode'.
 * mode == 0 means the segment is entirely zero; otherwise
 * xcodes[mode - 1] supplies the parameters of an escaped
 * variable-length code.
 * @return 0 on success, AVERROR_INVALIDDATA on an out-of-range mode
 *         or scale value
 */
254 static int decode_segment(TAKDecContext *s, int8_t mode, int32_t *decoded, int len)
255 {
256  struct CParam code;
257  GetBitContext *gb = &s->gb;
258  int i;
259 
260  if (!mode) {
261  memset(decoded, 0, len * sizeof(*decoded));
262  return 0;
263  }
264 
265  if (mode > FF_ARRAY_ELEMS(xcodes))
266  return AVERROR_INVALIDDATA;
267  code = xcodes[mode - 1];
268 
269  for (i = 0; i < len; i++) {
/* read the base bits; a value above the escape threshold plus a set
 * flag bit selects one of two escape continuation paths */
270  unsigned x = get_bits_long(gb, code.init);
271  if (x >= code.escape && get_bits1(gb)) {
272  x |= 1 << code.init;
273  if (x >= code.aescape) {
274  unsigned scale = get_unary(gb, 1, 9);
275  if (scale == 9) {
276  int scale_bits = get_bits(gb, 3);
277  if (scale_bits > 0) {
278  if (scale_bits == 7) {
279  scale_bits += get_bits(gb, 5);
280  if (scale_bits > 29)
281  return AVERROR_INVALIDDATA;
282  }
283  scale = get_bits_long(gb, scale_bits) + 1;
284  x += code.scale * scale;
285  }
286  x += code.bias;
287  } else
288  x += code.scale * scale - code.escape;
289  } else
290  x -= code.escape;
291  }
/* undo zigzag mapping: even -> non-negative, odd -> negative */
292  decoded[i] = (x >> 1) ^ -(x & 1);
293  }
294 
295  return 0;
296 }
297 
/**
 * Decode 'length' residues into 'decoded'.
 * A flag bit selects either a single segment with one 6-bit coding mode,
 * or a partition into 2..128 windows of roughly s->uval samples whose
 * per-window modes are delta-coded with a unary selector; consecutive
 * windows that share a mode are merged into one decode_segment() call.
 * @return 0 on success, a negative AVERROR code on failure
 */
298 static int decode_residues(TAKDecContext *s, int32_t *decoded, int length)
299 {
300  GetBitContext *gb = &s->gb;
301  int i, mode, ret;
302 
303  if (length > s->nb_samples)
304  return AVERROR_INVALIDDATA;
305 
306  if (get_bits1(gb)) {
307  int wlength, rval;
308 
/* split length into wlength windows of s->uval samples; the remainder
 * is either folded into the last window or becomes a short extra one */
309  wlength = length / s->uval;
310 
311  rval = length - (wlength * s->uval);
312 
313  if (rval < s->uval / 2)
314  rval += s->uval;
315  else
316  wlength++;
317 
318  if (wlength <= 1 || wlength > 128)
319  return AVERROR_INVALIDDATA;
320 
321  s->coding_mode[0] = mode = get_bits(gb, 6);
322 
/* per-window mode updates, delta-coded via a unary selector */
323  for (i = 1; i < wlength; i++) {
324  int c = get_unary(gb, 1, 6);
325 
326  switch (c) {
327  case 6:
328  mode = get_bits(gb, 6);
329  break;
330  case 5:
331  case 4:
332  case 3: {
333  /* mode += sign ? (1 - c) : (c - 1) */
334  int sign = get_bits1(gb);
335  mode += (-sign ^ (c - 1)) + sign;
336  break;
337  }
338  case 2:
339  mode++;
340  break;
341  case 1:
342  mode--;
343  break;
344  }
345  s->coding_mode[i] = mode;
346  }
347 
/* coalesce runs of equal-mode windows and decode each run as one segment */
348  i = 0;
349  while (i < wlength) {
350  int len = 0;
351 
352  mode = s->coding_mode[i];
353  do {
354  if (i >= wlength - 1)
355  len += rval;
356  else
357  len += s->uval;
358  i++;
359 
360  if (i == wlength)
361  break;
362  } while (s->coding_mode[i] == mode);
363 
364  if ((ret = decode_segment(s, mode, decoded, len)) < 0)
365  return ret;
366  decoded += len;
367  }
368  } else {
369  mode = get_bits(gb, 6);
370  if ((ret = decode_segment(s, mode, decoded, length)) < 0)
371  return ret;
372  }
373 
374  return 0;
375 }
376 
378 {
379  if (get_bits1(gb))
380  return get_bits(gb, 4) + 1;
381  else
382  return 0;
383 }
384 
/**
 * Decode one subframe of 'subframe_size' samples into 'decoded'.
 * A flag bit selects plain residues or LPC-filtered data.  In the filtered
 * case the predictor coefficients are read and folded into s->filter, the
 * warm-up samples come either from the tail of the previous subframe or
 * from dedicated residues, and the remaining residues are run through the
 * fixed-point filter in chunks sized to fit s->residues[].
 * @return 0 on success, a negative AVERROR code on failure
 */
385 static int decode_subframe(TAKDecContext *s, int32_t *decoded,
386  int subframe_size, int prev_subframe_size)
387 {
388  GetBitContext *gb = &s->gb;
389  int x, y, i, j, ret = 0;
390  int dshift, size, filter_quant, filter_order;
391  int tfilter[MAX_PREDICTORS];
392 
393  if (!get_bits1(gb))
394  return decode_residues(s, decoded, subframe_size);
395 
396  filter_order = predictor_sizes[get_bits(gb, 4)];
397 
/* warm-up samples: either reuse the end of the previous subframe ... */
398  if (prev_subframe_size > 0 && get_bits1(gb)) {
399  if (filter_order > prev_subframe_size)
400  return AVERROR_INVALIDDATA;
401 
402  decoded -= filter_order;
403  subframe_size += filter_order;
404 
405  if (filter_order > subframe_size)
406  return AVERROR_INVALIDDATA;
/* ... or decode them as residues with optional delta integration */
407  } else {
408  int lpc_mode;
409 
410  if (filter_order > subframe_size)
411  return AVERROR_INVALIDDATA;
412 
413  lpc_mode = get_bits(gb, 2);
414  if (lpc_mode > 2)
415  return AVERROR_INVALIDDATA;
416 
417  if ((ret = decode_residues(s, decoded, filter_order)) < 0)
418  return ret;
419 
420  if (lpc_mode)
421  decode_lpc(decoded, lpc_mode, filter_order);
422  }
423 
424  dshift = get_bits_esc4(gb);
425  size = get_bits1(gb) + 6;
426 
427  filter_quant = 10;
428  if (get_bits1(gb)) {
429  filter_quant -= get_bits(gb, 3) + 1;
430  if (filter_quant < 3)
431  return AVERROR_INVALIDDATA;
432  }
433 
/* read the quantized predictor coefficients; from index 4 on, the coded
 * width is adjusted every 4 coefficients */
434  s->predictors[0] = get_sbits(gb, 10);
435  s->predictors[1] = get_sbits(gb, 10);
436  s->predictors[2] = get_sbits(gb, size) * (1 << (10 - size));
437  s->predictors[3] = get_sbits(gb, size) * (1 << (10 - size));
438  if (filter_order > 4) {
439  int tmp = size - get_bits1(gb);
440 
441  for (i = 4; i < filter_order; i++) {
442  if (!(i & 3))
443  x = tmp - get_bits(gb, 2);
444  s->predictors[i] = get_sbits(gb, x) * (1 << (10 - size));
445  }
446  }
447 
/* expand s->predictors into the working filter tfilter[]; the statement
 * order and intermediate widths here are significant for bit-exactness */
448  tfilter[0] = s->predictors[0] * 64;
449  for (i = 1; i < filter_order; i++) {
450  uint32_t *p1 = &tfilter[0];
451  uint32_t *p2 = &tfilter[i - 1];
452 
453  for (j = 0; j < (i + 1) / 2; j++) {
454  x = *p1 + ((int32_t)(s->predictors[i] * *p2 + 256) >> 9);
455  *p2 += (int32_t)(s->predictors[i] * *p1 + 256) >> 9;
456  *p1++ = x;
457  p2--;
458  }
459 
460  tfilter[i] = s->predictors[i] * 64;
461  }
462 
/* requantize tfilter[] into s->filter with reversed coefficient order */
463  x = 1 << (32 - (15 - filter_quant));
464  y = 1 << ((15 - filter_quant) - 1);
465  for (i = 0, j = filter_order - 1; i < filter_order / 2; i++, j--) {
466  s->filter[j] = x - ((tfilter[i] + y) >> (15 - filter_quant));
467  s->filter[i] = x - ((tfilter[j] + y) >> (15 - filter_quant));
468  }
469 
470  if ((ret = decode_residues(s, &decoded[filter_order],
471  subframe_size - filter_order)) < 0)
472  return ret;
473 
/* seed the sliding residue window with the warm-up samples */
474  for (i = 0; i < filter_order; i++)
475  s->residues[i] = *decoded++ >> dshift;
476 
477  y = FF_ARRAY_ELEMS(s->residues) - filter_order;
478  x = subframe_size - filter_order;
479  while (x > 0) {
480  int tmp = FFMIN(y, x);
481 
482  for (i = 0; i < tmp; i++) {
483  int v = 1 << (filter_quant - 1);
484 
/* bulk of the dot product goes through the DSP routine (multiples of
 * 16 taps); the remaining taps are handled 4 at a time below */
485  if (filter_order & -16)
486  v += (unsigned)s->adsp.scalarproduct_int16(&s->residues[i], s->filter,
487  filter_order & -16);
488  for (j = filter_order & -16; j < filter_order; j += 4) {
489  v += s->residues[i + j + 3] * (unsigned)s->filter[j + 3] +
490  s->residues[i + j + 2] * (unsigned)s->filter[j + 2] +
491  s->residues[i + j + 1] * (unsigned)s->filter[j + 1] +
492  s->residues[i + j ] * (unsigned)s->filter[j ];
493  }
494  v = (av_clip_intp2(v >> filter_quant, 13) * (1 << dshift)) - (unsigned)*decoded;
495  *decoded++ = v;
496  s->residues[filter_order + i] = v >> dshift;
497  }
498 
499  x -= tmp;
500  if (x > 0)
501  memcpy(s->residues, &s->residues[y], 2 * filter_order);
502  }
503 
504  emms_c();
505 
506  return 0;
507 }
508 
/**
 * Decode all subframes of one channel.
 * Reads the per-channel sample shift, one verbatim first sample, the
 * per-channel post-processing (lpc_mode) and the subframe layout, then
 * decodes each subframe in turn; the final subframe takes whatever
 * sample count remains.
 * @return 0 on success, a negative AVERROR code on failure
 */
509 static int decode_channel(TAKDecContext *s, int chan)
510 {
511  AVCodecContext *avctx = s->avctx;
512  GetBitContext *gb = &s->gb;
513  int32_t *decoded = s->decoded[chan];
514  int left = s->nb_samples - 1;
515  int i = 0, ret, prev = 0;
516 
517  s->sample_shift[chan] = get_bits_esc4(gb);
518  if (s->sample_shift[chan] >= avctx->bits_per_raw_sample)
519  return AVERROR_INVALIDDATA;
520 
/* the very first sample is stored verbatim at reduced width */
521  *decoded++ = get_sbits(gb, avctx->bits_per_raw_sample - s->sample_shift[chan]);
522  s->lpc_mode[chan] = get_bits(gb, 2);
523  s->nb_subframes = get_bits(gb, 3) + 1;
524 
/* subframe lengths are delta-coded in units of s->subframe_scale */
525  if (s->nb_subframes > 1) {
526  if (get_bits_left(gb) < (s->nb_subframes - 1) * 6)
527  return AVERROR_INVALIDDATA;
528 
529  for (; i < s->nb_subframes - 1; i++) {
530  int v = get_bits(gb, 6);
531 
532  s->subframe_len[i] = (v - prev) * s->subframe_scale;
533  if (s->subframe_len[i] <= 0)
534  return AVERROR_INVALIDDATA;
535 
536  left -= s->subframe_len[i];
537  prev = v;
538  }
539 
540  if (left <= 0)
541  return AVERROR_INVALIDDATA;
542  }
543  s->subframe_len[i] = left;
544 
545  prev = 0;
546  for (i = 0; i < s->nb_subframes; i++) {
547  if ((ret = decode_subframe(s, decoded, s->subframe_len[i], prev)) < 0)
548  return ret;
549  decoded += s->subframe_len[i];
550  prev = s->subframe_len[i];
551  }
552 
553  return 0;
554 }
555 
/**
 * Undo inter-channel decorrelation between channels c1 and c2 according
 * to s->dmode: modes 1-3 are fixed transforms, modes 4/5 apply a coded
 * scale factor, modes 6/7 reconstruct through an adaptive FIR filter read
 * from the bitstream.  For modes 1-5 the stored first sample of each
 * channel is preserved (bp1/bp2 are written back at the end).
 * @return 0 on success, AVERROR_INVALIDDATA on failure
 */
556 static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
557 {
558  GetBitContext *gb = &s->gb;
559  int32_t *p1 = s->decoded[c1] + (s->dmode > 5);
560  int32_t *p2 = s->decoded[c2] + (s->dmode > 5);
561  int32_t bp1 = p1[0];
562  int32_t bp2 = p2[0];
563  int i;
564  int dshift, dfactor;
565 
566  length += s->dmode < 6;
567 
568  switch (s->dmode) {
569  case 1: /* left/side */
570  s->tdsp.decorrelate_ls(p1, p2, length);
571  break;
572  case 2: /* side/right */
573  s->tdsp.decorrelate_sr(p1, p2, length);
574  break;
575  case 3: /* side/mid */
576  s->tdsp.decorrelate_sm(p1, p2, length);
577  break;
578  case 4: /* side/left with scale factor */
579  FFSWAP(int32_t*, p1, p2);
580  FFSWAP(int32_t, bp1, bp2);
/* fall through: case 4 only swaps operands, then shares case 5's code */
581  case 5: /* side/right with scale factor */
582  dshift = get_bits_esc4(gb);
583  dfactor = get_sbits(gb, 10);
584  s->tdsp.decorrelate_sf(p1, p2, length, dshift, dfactor);
585  break;
586  case 6:
587  FFSWAP(int32_t*, p1, p2);
/* fall through: case 6 only swaps operands, then shares case 7's code */
588  case 7: {
589  int length2, order_half, filter_order, dval1, dval2;
590  int tmp, x, code_size;
591 
592  if (length < 256)
593  return AVERROR_INVALIDDATA;
594 
595  dshift = get_bits_esc4(gb);
596  filter_order = 8 << get_bits1(gb);
597  dval1 = get_bits1(gb);
598  dval2 = get_bits1(gb);
599 
/* read the 8- or 16-tap filter; coded width changes every 4 taps */
600  for (i = 0; i < filter_order; i++) {
601  if (!(i & 3))
602  code_size = 14 - get_bits(gb, 3);
603  s->filter[i] = get_sbits(gb, code_size);
604  }
605 
606  order_half = filter_order / 2;
607  length2 = length - (filter_order - 1);
608 
609  /* decorrelate beginning samples */
610  if (dval1) {
611  for (i = 0; i < order_half; i++) {
612  int32_t a = p1[i];
613  int32_t b = p2[i];
614  p1[i] = a + b;
615  }
616  }
617 
618  /* decorrelate ending samples */
619  if (dval2) {
620  for (i = length2 + order_half; i < length; i++) {
621  int32_t a = p1[i];
622  int32_t b = p2[i];
623  p1[i] = a + b;
624  }
625  }
626 
627 
/* feed p2 through the sliding residue window and filter into p1 */
628  for (i = 0; i < filter_order; i++)
629  s->residues[i] = *p2++ >> dshift;
630 
631  p1 += order_half;
632  x = FF_ARRAY_ELEMS(s->residues) - filter_order;
633  for (; length2 > 0; length2 -= tmp) {
634  tmp = FFMIN(length2, x);
635 
636  for (i = 0; i < tmp - (tmp == length2); i++)
637  s->residues[filter_order + i] = *p2++ >> dshift;
638 
639  for (i = 0; i < tmp; i++) {
640  int v = 1 << 9;
641 
642  if (filter_order == 16) {
643  v += s->adsp.scalarproduct_int16(&s->residues[i], s->filter,
644  filter_order);
645  } else {
646  v += s->residues[i + 7] * s->filter[7] +
647  s->residues[i + 6] * s->filter[6] +
648  s->residues[i + 5] * s->filter[5] +
649  s->residues[i + 4] * s->filter[4] +
650  s->residues[i + 3] * s->filter[3] +
651  s->residues[i + 2] * s->filter[2] +
652  s->residues[i + 1] * s->filter[1] +
653  s->residues[i ] * s->filter[0];
654  }
655 
656  v = av_clip_intp2(v >> 10, 13) * (1U << dshift) - *p1;
657  *p1++ = v;
658  }
659 
660  memmove(s->residues, &s->residues[tmp], 2 * filter_order);
661  }
662 
663  emms_c();
664  break;
665  }
666  }
667 
/* modes 1-5: restore the verbatim first sample of each channel */
668  if (s->dmode > 0 && s->dmode < 6) {
669  p1[0] = bp1;
670  p2[0] = bp2;
671  }
672 
673  return 0;
674 }
675 
/**
 * Decode one TAK frame into 'data' (an AVFrame).
 * Parses and CRC-checks the frame header, validates the stream parameters,
 * decodes every channel (with optional multichannel decorrelation), undoes
 * per-channel LPC integration and sample shifting, and converts the result
 * to the negotiated planar output format.
 * @return pkt->size on success, a negative AVERROR code on failure
 */
676 static int tak_decode_frame(AVCodecContext *avctx, void *data,
677  int *got_frame_ptr, AVPacket *pkt)
678 {
679  TAKDecContext *s = avctx->priv_data;
680  AVFrame *frame = data;
681  ThreadFrame tframe = { .f = data };
682  GetBitContext *gb = &s->gb;
683  int chan, i, ret, hsize;
684 
/* NOTE(review): the guard condition for this early return (original line
 * 685, a minimum packet-size check) was lost in extraction — confirm
 * against upstream takdec.c */
686  return AVERROR_INVALIDDATA;
687 
688  if ((ret = init_get_bits8(gb, pkt->data, pkt->size)) < 0)
689  return ret;
690 
691  if ((ret = ff_tak_decode_frame_header(avctx, gb, &s->ti, 0)) < 0)
692  return ret;
693 
694  hsize = get_bits_count(gb) / 8;
/* NOTE(review): the enclosing err_recognition condition (original line
 * 695) guarding this header CRC check is missing from this extraction */
696  if (ff_tak_check_crc(pkt->data, hsize)) {
697  av_log(avctx, AV_LOG_ERROR, "CRC error\n");
698  if (avctx->err_recognition & AV_EF_EXPLODE)
699  return AVERROR_INVALIDDATA;
700  }
701  }
702 
703  if (s->ti.codec != TAK_CODEC_MONO_STEREO &&
704  s->ti.codec != TAK_CODEC_MULTICHANNEL) {
705  avpriv_report_missing_feature(avctx, "TAK codec type %d", s->ti.codec);
706  return AVERROR_PATCHWELCOME;
707  }
708  if (s->ti.data_type) {
709  av_log(avctx, AV_LOG_ERROR,
710  "unsupported data type: %d\n", s->ti.data_type);
711  return AVERROR_INVALIDDATA;
712  }
713  if (s->ti.codec == TAK_CODEC_MONO_STEREO && s->ti.channels > 2) {
714  av_log(avctx, AV_LOG_ERROR,
715  "invalid number of channels: %d\n", s->ti.channels);
716  return AVERROR_INVALIDDATA;
717  }
718  if (s->ti.channels > 6) {
719  av_log(avctx, AV_LOG_ERROR,
720  "unsupported number of channels: %d\n", s->ti.channels);
721  return AVERROR_INVALIDDATA;
722  }
723 
724  if (s->ti.frame_samples <= 0) {
725  av_log(avctx, AV_LOG_ERROR, "unsupported/invalid number of samples\n");
726  return AVERROR_INVALIDDATA;
727  }
728 
/* propagate stream parameters from the frame header to the context */
729  avctx->bits_per_raw_sample = s->ti.bps;
730  if ((ret = set_bps_params(avctx)) < 0)
731  return ret;
732  if (s->ti.sample_rate != avctx->sample_rate) {
733  avctx->sample_rate = s->ti.sample_rate;
734  set_sample_rate_params(avctx);
735  }
736  if (s->ti.ch_layout)
737  avctx->channel_layout = s->ti.ch_layout;
738  avctx->channels = s->ti.channels;
739 
740  s->nb_samples = s->ti.last_frame_samples ? s->ti.last_frame_samples
741  : s->ti.frame_samples;
742 
743  frame->nb_samples = s->nb_samples;
744  if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
745  return ret;
746  ff_thread_finish_setup(avctx);
747 
/* <= 16 bit output needs a separate 32-bit work buffer; 24-bit decodes
 * directly into the frame's own S32P planes */
748  if (avctx->bits_per_raw_sample <= 16) {
749  int buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
750  s->nb_samples,
751  AV_SAMPLE_FMT_S32P, 0);
752  if (buf_size < 0)
753  return buf_size;
754  av_fast_malloc(&s->decode_buffer, &s->decode_buffer_size, buf_size);
755  if (!s->decode_buffer)
756  return AVERROR(ENOMEM);
757  ret = av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
758  s->decode_buffer, avctx->channels,
759  s->nb_samples, AV_SAMPLE_FMT_S32P, 0);
760  if (ret < 0)
761  return ret;
762  } else {
763  for (chan = 0; chan < avctx->channels; chan++)
764  s->decoded[chan] = (int32_t *)frame->extended_data[chan];
765  }
766 
/* very short frames store every sample verbatim */
767  if (s->nb_samples < 16) {
768  for (chan = 0; chan < avctx->channels; chan++) {
769  int32_t *decoded = s->decoded[chan];
770  for (i = 0; i < s->nb_samples; i++)
771  decoded[i] = get_sbits(gb, avctx->bits_per_raw_sample);
772  }
773  } else {
774  if (s->ti.codec == TAK_CODEC_MONO_STEREO) {
775  for (chan = 0; chan < avctx->channels; chan++)
776  if (ret = decode_channel(s, chan))
777  return ret;
778 
779  if (avctx->channels == 2) {
780  s->nb_subframes = get_bits(gb, 1) + 1;
781  if (s->nb_subframes > 1) {
782  s->subframe_len[1] = get_bits(gb, 6);
783  }
784 
785  s->dmode = get_bits(gb, 3);
786  if (ret = decorrelate(s, 0, 1, s->nb_samples - 1))
787  return ret;
788  }
789  } else if (s->ti.codec == TAK_CODEC_MULTICHANNEL) {
/* explicit channel map: read decorrelation pairs, validating that no
 * channel is referenced twice */
790  if (get_bits1(gb)) {
791  int ch_mask = 0;
792 
793  chan = get_bits(gb, 4) + 1;
794  if (chan > avctx->channels)
795  return AVERROR_INVALIDDATA;
796 
797  for (i = 0; i < chan; i++) {
798  int nbit = get_bits(gb, 4);
799 
800  if (nbit >= avctx->channels)
801  return AVERROR_INVALIDDATA;
802 
803  if (ch_mask & 1 << nbit)
804  return AVERROR_INVALIDDATA;
805 
806  s->mcdparams[i].present = get_bits1(gb);
807  if (s->mcdparams[i].present) {
808  s->mcdparams[i].index = get_bits(gb, 2);
809  s->mcdparams[i].chan2 = get_bits(gb, 4);
810  if (s->mcdparams[i].chan2 >= avctx->channels) {
811  av_log(avctx, AV_LOG_ERROR,
812  "invalid channel 2 (%d) for %d channel(s)\n",
813  s->mcdparams[i].chan2, avctx->channels);
814  return AVERROR_INVALIDDATA;
815  }
816  if (s->mcdparams[i].index == 1) {
817  if ((nbit == s->mcdparams[i].chan2) ||
818  (ch_mask & 1 << s->mcdparams[i].chan2))
819  return AVERROR_INVALIDDATA;
820 
821  ch_mask |= 1 << s->mcdparams[i].chan2;
822  } else if (!(ch_mask & 1 << s->mcdparams[i].chan2)) {
823  return AVERROR_INVALIDDATA;
824  }
825  }
826  s->mcdparams[i].chan1 = nbit;
827 
828  ch_mask |= 1 << nbit;
829  }
/* implicit map: every channel independent, no decorrelation */
830  } else {
831  chan = avctx->channels;
832  for (i = 0; i < chan; i++) {
833  s->mcdparams[i].present = 0;
834  s->mcdparams[i].chan1 = i;
835  }
836  }
837 
838  for (i = 0; i < chan; i++) {
839  if (s->mcdparams[i].present && s->mcdparams[i].index == 1)
840  if (ret = decode_channel(s, s->mcdparams[i].chan2))
841  return ret;
842 
843  if (ret = decode_channel(s, s->mcdparams[i].chan1))
844  return ret;
845 
846  if (s->mcdparams[i].present) {
847  s->dmode = mc_dmodes[s->mcdparams[i].index];
848  if (ret = decorrelate(s,
849  s->mcdparams[i].chan2,
850  s->mcdparams[i].chan1,
851  s->nb_samples - 1))
852  return ret;
853  }
854  }
855  }
856 
/* per-channel post-processing: delta integration then sample shift */
857  for (chan = 0; chan < avctx->channels; chan++) {
858  int32_t *decoded = s->decoded[chan];
859 
860  if (s->lpc_mode[chan])
861  decode_lpc(decoded, s->lpc_mode[chan], s->nb_samples);
862 
863  if (s->sample_shift[chan] > 0)
864  for (i = 0; i < s->nb_samples; i++)
865  decoded[i] *= 1U << s->sample_shift[chan];
866  }
867  }
868 
869  align_get_bits(gb);
870  skip_bits(gb, 24);
871  if (get_bits_left(gb) < 0)
872  av_log(avctx, AV_LOG_DEBUG, "overread\n");
873  else if (get_bits_left(gb) > 0)
874  av_log(avctx, AV_LOG_DEBUG, "underread\n");
875 
/* NOTE(review): the enclosing err_recognition condition (original line
 * 876) guarding this payload CRC check is missing from this extraction */
877  if (ff_tak_check_crc(pkt->data + hsize,
878  get_bits_count(gb) / 8 - hsize)) {
879  av_log(avctx, AV_LOG_ERROR, "CRC error\n");
880  if (avctx->err_recognition & AV_EF_EXPLODE)
881  return AVERROR_INVALIDDATA;
882  }
883  }
884 
885  /* convert to output buffer */
886  switch (avctx->sample_fmt) {
887  case AV_SAMPLE_FMT_U8P:
888  for (chan = 0; chan < avctx->channels; chan++) {
889  uint8_t *samples = (uint8_t *)frame->extended_data[chan];
890  int32_t *decoded = s->decoded[chan];
891  for (i = 0; i < s->nb_samples; i++)
892  samples[i] = decoded[i] + 0x80U;
893  }
894  break;
895  case AV_SAMPLE_FMT_S16P:
896  for (chan = 0; chan < avctx->channels; chan++) {
897  int16_t *samples = (int16_t *)frame->extended_data[chan];
898  int32_t *decoded = s->decoded[chan];
899  for (i = 0; i < s->nb_samples; i++)
900  samples[i] = decoded[i];
901  }
902  break;
903  case AV_SAMPLE_FMT_S32P:
/* 24-bit data was decoded in place; shift up into the top 24 bits */
904  for (chan = 0; chan < avctx->channels; chan++) {
905  int32_t *samples = (int32_t *)frame->extended_data[chan];
906  for (i = 0; i < s->nb_samples; i++)
907  samples[i] *= 1U << 8;
908  }
909  break;
910  }
911 
912  *got_frame_ptr = 1;
913 
914  return pkt->size;
915 }
916 
917 #if HAVE_THREADS
/**
 * Frame-threading worker init: the copied private context only needs
 * its avctx back-pointer fixed up.
 */
918 static int init_thread_copy(AVCodecContext *avctx)
919 {
920  TAKDecContext *s = avctx->priv_data;
921  s->avctx = avctx;
922  return 0;
923 }
924 
925 static int update_thread_context(AVCodecContext *dst,
926  const AVCodecContext *src)
927 {
928  TAKDecContext *tsrc = src->priv_data;
929  TAKDecContext *tdst = dst->priv_data;
930 
931  if (dst == src)
932  return 0;
933  memcpy(&tdst->ti, &tsrc->ti, sizeof(TAKStreamInfo));
934  return 0;
935 }
936 #endif
937 
939 {
940  TAKDecContext *s = avctx->priv_data;
941 
942  av_freep(&s->decode_buffer);
943 
944  return 0;
945 }
946 
948  .name = "tak",
949  .long_name = NULL_IF_CONFIG_SMALL("TAK (Tom's lossless Audio Kompressor)"),
950  .type = AVMEDIA_TYPE_AUDIO,
951  .id = AV_CODEC_ID_TAK,
952  .priv_data_size = sizeof(TAKDecContext),
954  .close = tak_decode_close,
957  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
959  .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
963 };
AVCodec
AVCodec.
Definition: avcodec.h:3481
TAK_MAX_CHANNELS
#define TAK_MAX_CHANNELS
Definition: tak.h:64
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
TAKDecContext::avctx
AVCodecContext * avctx
parent AVCodecContext
Definition: takdec.c:51
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
TAKDecContext::sample_shift
int8_t sample_shift[TAK_MAX_CHANNELS]
shift applied to every sample in the channel
Definition: takdec.c:64
MCDParam::chan1
int8_t chan1
Definition: takdec.c:46
MCDParam::present
int8_t present
decorrelation parameter availability for this channel
Definition: takdec.c:44
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2276
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:2225
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
TAKDecContext::ti
TAKStreamInfo ti
Definition: takdec.c:54
MAX_PREDICTORS
#define MAX_PREDICTORS
Definition: takdec.c:41
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2694
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
ff_tak_decode_frame_header
int ff_tak_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb, TAKStreamInfo *ti, int log_level_offset)
Validate and decode a frame header.
Definition: tak.c:141
av_samples_fill_arrays
int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, const uint8_t *buf, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Fill plane data pointers and linesize for samples with sample format sample_fmt.
Definition: samplefmt.c:151
TAKDecContext::tdsp
TAKDSPContext tdsp
Definition: takdec.c:53
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
TAKDecContext::mcdparams
MCDParam mcdparams[TAK_MAX_CHANNELS]
multichannel decorrelation parameters
Definition: takdec.c:72
CParam::scale
int scale
Definition: takdec.c:88
internal.h
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
b
#define b
Definition: input.c:41
TAKDSPContext
Definition: takdsp.h:24
AV_SAMPLE_FMT_S32P
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
Definition: samplefmt.h:68
data
const char data[16]
Definition: mxf.c:91
ff_audiodsp_init
av_cold void ff_audiodsp_init(AudioDSPContext *c)
Definition: audiodsp.c:106
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: avcodec.h:2709
c1
static const uint64_t c1
Definition: murmur3.c:49
TAKDecContext::lpc_mode
int8_t lpc_mode[TAK_MAX_CHANNELS]
Definition: takdec.c:63
thread.h
ThreadFrame::f
AVFrame * f
Definition: thread.h:35
TAKDecContext::subframe_len
int16_t subframe_len[MAX_SUBFRAMES]
subframe length in samples
Definition: takdec.c:67
tak_decode_close
static av_cold int tak_decode_close(AVCodecContext *avctx)
Definition: takdec.c:938
predictor_sizes
static const uint16_t predictor_sizes[]
Definition: takdec.c:81
MCDParam::chan2
int8_t chan2
Definition: takdec.c:47
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
TAKDecContext::coding_mode
int8_t coding_mode[128]
Definition: takdec.c:74
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
MCDParam::index
int8_t index
index into array of decorrelation types
Definition: takdec.c:45
TAKDecContext::uval
int uval
Definition: takdec.c:57
U
#define U(x)
Definition: vp56_arith.h:37
GetBitContext
Definition: get_bits.h:61
samplefmt.h
MAX_SUBFRAMES
#define MAX_SUBFRAMES
max number of subframes per channel
Definition: takdec.c:40
src
#define src
Definition: vp8dsp.c:254
a1
#define a1
Definition: regdef.h:47
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:84
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
tak_decode_init
static av_cold int tak_decode_init(AVCodecContext *avctx)
Definition: takdec.c:183
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
CParam::bias
int bias
Definition: takdec.c:90
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2796
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
a4
#define a4
Definition: regdef.h:50
TAK_CODEC_MONO_STEREO
@ TAK_CODEC_MONO_STEREO
Definition: tak.h:100
TAKDecContext::adsp
AudioDSPContext adsp
Definition: takdec.c:52
int32_t
int32_t
Definition: audio_convert.c:194
decorrelate
static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
Definition: takdec.c:556
if
if(ret)
Definition: filter_design.txt:179
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1037
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
TAKDecContext::dmode
int8_t dmode
channel decorrelation type in the current frame
Definition: takdec.c:70
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
TAKDecContext::decode_buffer_size
unsigned int decode_buffer_size
Definition: takdec.c:60
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:227
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:964
tak.h
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. If the codec allocates writable tables in its init()
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2705
decode_channel
static int decode_channel(TAKDecContext *s, int chan)
Definition: takdec.c:509
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
get_unary
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:46
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
takdsp.h
CParam::aescape
int aescape
Definition: takdec.c:89
TAKDecContext
Definition: takdec.c:50
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
AVPacket::size
int size
Definition: avcodec.h:1478
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
AV_SAMPLE_FMT_U8P
@ AV_SAMPLE_FMT_U8P
unsigned 8 bits, planar
Definition: samplefmt.h:66
TAKDecContext::residues
int16_t residues[544]
Definition: takdec.c:76
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2233
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
init_thread_copy
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an init_thread_copy() which re-allocates them for other threads. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. If there are inter-frame dependencies
size
int size
Definition: twinvq_data.h:11134
decode_residues
static int decode_residues(TAKDecContext *s, int32_t *decoded, int length)
Definition: takdec.c:298
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
ff_tak_check_crc
int ff_tak_check_crc(const uint8_t *buf, unsigned int buf_size)
Definition: tak.c:77
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
unary.h
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:67
CParam::init
int init
Definition: takdec.c:86
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:2226
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:112
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2789
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
internal.h
TAKDecContext::decode_buffer
uint8_t * decode_buffer
Definition: takdec.c:59
get_bits_esc4
static int get_bits_esc4(GetBitContext *gb)
Definition: takdec.c:377
a2
#define a2
Definition: regdef.h:48
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
tak_decode_frame
static int tak_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *pkt)
Definition: takdec.c:676
uint8_t
uint8_t
Definition: audio_convert.c:194
AVCodec::name
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
len
int len
Definition: vorbis_enc_data.h:452
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
avcodec.h
set_sample_rate_params
static void set_sample_rate_params(AVCodecContext *avctx)
Definition: takdec.c:165
ret
ret
Definition: filter_design.txt:187
decode_lpc
static void decode_lpc(int32_t *coeffs, int mode, int length)
Definition: takdec.c:198
mc_dmodes
static const int8_t mc_dmodes[]
Definition: takdec.c:79
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_EF_CRCCHECK
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
Definition: avcodec.h:2702
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
CParam::escape
int escape
Definition: takdec.c:87
ff_tak_decoder
AVCodec ff_tak_decoder
Definition: takdec.c:947
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
c2
static const uint64_t c2
Definition: murmur3.c:50
a5
#define a5
Definition: regdef.h:51
TAKDecContext::predictors
int16_t predictors[MAX_PREDICTORS]
Definition: takdec.c:65
ThreadFrame
Definition: thread.h:34
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
mode
mode
Definition: ebur128.h:83
TAKDecContext::gb
GetBitContext gb
bitstream reader initialized to start at the current frame
Definition: takdec.c:55
xcodes
static const struct CParam xcodes[50]
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
TAKDecContext::filter
int16_t filter[MAX_PREDICTORS]
Definition: takdec.c:75
set_bps_params
static int set_bps_params(AVCodecContext *avctx)
Definition: takdec.c:144
MCDParam
Definition: takdec.c:43
shift
static int shift(int a, int b)
Definition: sonic.c:82
audiodsp.h
TAKStreamInfo
Definition: tak.h:128
AudioDSPContext
Definition: audiodsp.h:24
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:1592
CParam
Definition: takdec.c:85
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:500
AV_CODEC_ID_TAK
@ AV_CODEC_ID_TAK
Definition: avcodec.h:626
TAKDecContext::nb_samples
int nb_samples
number of samples in the current frame
Definition: takdec.c:58
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
length
const char int length
Definition: avisynth_c.h:860
TAKDecContext::decoded
int32_t * decoded[TAK_MAX_CHANNELS]
decoded samples for each channel
Definition: takdec.c:61
decode_subframe
static int decode_subframe(TAKDecContext *s, int32_t *decoded, int subframe_size, int prev_subframe_size)
Definition: takdec.c:385
ff_takdsp_init
av_cold void ff_takdsp_init(TAKDSPContext *c)
Definition: takdsp.c:73
TAK_MIN_FRAME_HEADER_BYTES
#define TAK_MIN_FRAME_HEADER_BYTES
Definition: tak.h:97
TAK_CODEC_MULTICHANNEL
@ TAK_CODEC_MULTICHANNEL
Definition: tak.h:101
a3
#define a3
Definition: regdef.h:49
TAKDecContext::subframe_scale
int subframe_scale
Definition: takdec.c:68
TAKDecContext::nb_subframes
int nb_subframes
number of subframes in the current frame
Definition: takdec.c:66
decode_segment
static int decode_segment(TAKDecContext *s, int8_t mode, int32_t *decoded, int len)
Definition: takdec.c:254