FFmpeg
takdec.c
/*
 * TAK decoder
 * Copyright (c) 2012 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TAK (Tom's lossless Audio Kompressor) decoder
 * @author Paul B Mahol
 */

#include "libavutil/internal.h"
#include "libavutil/mem_internal.h"
#include "libavutil/samplefmt.h"

#define CACHED_BITSTREAM_READER !ARCH_X86_32
#define BITSTREAM_READER_LE
#include "audiodsp.h"
#include "thread.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "unary.h"
#include "tak.h"
#include "takdsp.h"

#define MAX_SUBFRAMES    8  ///< max number of subframes per channel
#define MAX_PREDICTORS 256

typedef struct MCDParam {
    int8_t present;  ///< decorrelation parameter availability for this channel
    int8_t index;    ///< index into array of decorrelation types
    int8_t chan1;
    int8_t chan2;
} MCDParam;

typedef struct TAKDecContext {
    AVCodecContext *avctx;                          ///< parent AVCodecContext
    AudioDSPContext adsp;
    TAKDSPContext   tdsp;
    TAKStreamInfo   ti;
    GetBitContext   gb;                             ///< bitstream reader initialized to start at the current frame

    int             uval;
    int             nb_samples;                     ///< number of samples in the current frame
    uint8_t        *decode_buffer;
    unsigned int    decode_buffer_size;
    int32_t        *decoded[TAK_MAX_CHANNELS];      ///< decoded samples for each channel

    int8_t          lpc_mode[TAK_MAX_CHANNELS];
    int8_t          sample_shift[TAK_MAX_CHANNELS]; ///< shift applied to every sample in the channel
    int16_t         predictors[MAX_PREDICTORS];
    int             nb_subframes;                   ///< number of subframes in the current frame
    int16_t         subframe_len[MAX_SUBFRAMES];    ///< subframe length in samples
    int             subframe_scale;

    int8_t          dmode;                          ///< channel decorrelation type in the current frame

    MCDParam        mcdparams[TAK_MAX_CHANNELS];    ///< multichannel decorrelation parameters

    int8_t          coding_mode[128];
    DECLARE_ALIGNED(16, int16_t, filter)[MAX_PREDICTORS];
    DECLARE_ALIGNED(16, int16_t, residues)[544];
} TAKDecContext;

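/* decorrelation modes selectable via the 2-bit multichannel decorrelation index */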
static const int8_t mc_dmodes[] = { 1, 3, 4, 6, };

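/* LPC filter orders selectable via a 4-bit index in the subframe header */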
static const uint16_t predictor_sizes[] = {
    4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 160, 192, 224, 256, 0,
};

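/* Escape-code parameters for the variable-length residual coding used by
 * decode_segment() below; entry [mode - 1] is selected by the coding mode. */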
static const struct CParam {
    int init;
    int escape;
    int scale;
    int aescape;
    int bias;
} xcodes[50] = {
    { 0x01, 0x0000001, 0x0000001, 0x0000003, 0x0000008 },
    { 0x02, 0x0000003, 0x0000001, 0x0000007, 0x0000006 },
    { 0x03, 0x0000005, 0x0000002, 0x000000E, 0x000000D },
    { 0x03, 0x0000003, 0x0000003, 0x000000D, 0x0000018 },
    { 0x04, 0x000000B, 0x0000004, 0x000001C, 0x0000019 },
    { 0x04, 0x0000006, 0x0000006, 0x000001A, 0x0000030 },
    { 0x05, 0x0000016, 0x0000008, 0x0000038, 0x0000032 },
    { 0x05, 0x000000C, 0x000000C, 0x0000034, 0x0000060 },
    { 0x06, 0x000002C, 0x0000010, 0x0000070, 0x0000064 },
    { 0x06, 0x0000018, 0x0000018, 0x0000068, 0x00000C0 },
    { 0x07, 0x0000058, 0x0000020, 0x00000E0, 0x00000C8 },
    { 0x07, 0x0000030, 0x0000030, 0x00000D0, 0x0000180 },
    { 0x08, 0x00000B0, 0x0000040, 0x00001C0, 0x0000190 },
    { 0x08, 0x0000060, 0x0000060, 0x00001A0, 0x0000300 },
    { 0x09, 0x0000160, 0x0000080, 0x0000380, 0x0000320 },
    { 0x09, 0x00000C0, 0x00000C0, 0x0000340, 0x0000600 },
    { 0x0A, 0x00002C0, 0x0000100, 0x0000700, 0x0000640 },
    { 0x0A, 0x0000180, 0x0000180, 0x0000680, 0x0000C00 },
    { 0x0B, 0x0000580, 0x0000200, 0x0000E00, 0x0000C80 },
    { 0x0B, 0x0000300, 0x0000300, 0x0000D00, 0x0001800 },
    { 0x0C, 0x0000B00, 0x0000400, 0x0001C00, 0x0001900 },
    { 0x0C, 0x0000600, 0x0000600, 0x0001A00, 0x0003000 },
    { 0x0D, 0x0001600, 0x0000800, 0x0003800, 0x0003200 },
    { 0x0D, 0x0000C00, 0x0000C00, 0x0003400, 0x0006000 },
    { 0x0E, 0x0002C00, 0x0001000, 0x0007000, 0x0006400 },
    { 0x0E, 0x0001800, 0x0001800, 0x0006800, 0x000C000 },
    { 0x0F, 0x0005800, 0x0002000, 0x000E000, 0x000C800 },
    { 0x0F, 0x0003000, 0x0003000, 0x000D000, 0x0018000 },
    { 0x10, 0x000B000, 0x0004000, 0x001C000, 0x0019000 },
    { 0x10, 0x0006000, 0x0006000, 0x001A000, 0x0030000 },
    { 0x11, 0x0016000, 0x0008000, 0x0038000, 0x0032000 },
    { 0x11, 0x000C000, 0x000C000, 0x0034000, 0x0060000 },
    { 0x12, 0x002C000, 0x0010000, 0x0070000, 0x0064000 },
    { 0x12, 0x0018000, 0x0018000, 0x0068000, 0x00C0000 },
    { 0x13, 0x0058000, 0x0020000, 0x00E0000, 0x00C8000 },
    { 0x13, 0x0030000, 0x0030000, 0x00D0000, 0x0180000 },
    { 0x14, 0x00B0000, 0x0040000, 0x01C0000, 0x0190000 },
    { 0x14, 0x0060000, 0x0060000, 0x01A0000, 0x0300000 },
    { 0x15, 0x0160000, 0x0080000, 0x0380000, 0x0320000 },
    { 0x15, 0x00C0000, 0x00C0000, 0x0340000, 0x0600000 },
    { 0x16, 0x02C0000, 0x0100000, 0x0700000, 0x0640000 },
    { 0x16, 0x0180000, 0x0180000, 0x0680000, 0x0C00000 },
    { 0x17, 0x0580000, 0x0200000, 0x0E00000, 0x0C80000 },
    { 0x17, 0x0300000, 0x0300000, 0x0D00000, 0x1800000 },
    { 0x18, 0x0B00000, 0x0400000, 0x1C00000, 0x1900000 },
    { 0x18, 0x0600000, 0x0600000, 0x1A00000, 0x3000000 },
    { 0x19, 0x1600000, 0x0800000, 0x3800000, 0x3200000 },
    { 0x19, 0x0C00000, 0x0C00000, 0x3400000, 0x6000000 },
    { 0x1A, 0x2C00000, 0x1000000, 0x7000000, 0x6400000 },
    { 0x1A, 0x1800000, 0x1800000, 0x6800000, 0xC000000 },
};

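/* select the planar output sample format matching the stream bit depth;
 * 24-bit audio is decoded into S32P and shifted up by 8 bits on output */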
static int set_bps_params(AVCodecContext *avctx)
{
    switch (avctx->bits_per_raw_sample) {
    case 8:
        avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
        break;
    case 16:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
        break;
    case 24:
        avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "invalid/unsupported bits per sample: %d\n",
               avctx->bits_per_raw_sample);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

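/* derive the residual window size (uval) and the subframe length unit
 * (subframe_scale) from the sample rate */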
static void set_sample_rate_params(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;
    int shift;

    if (avctx->sample_rate < 11025) {
        shift = 3;
    } else if (avctx->sample_rate < 22050) {
        shift = 2;
    } else if (avctx->sample_rate < 44100) {
        shift = 1;
    } else {
        shift = 0;
    }
    s->uval           = FFALIGN(avctx->sample_rate + 511LL >> 9, 4) << shift;
    s->subframe_scale = FFALIGN(avctx->sample_rate + 511LL >> 9, 4) << 1;
}

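/* codec init: set up the DSP contexts and the bit-depth and sample-rate
 * dependent parameters */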
static av_cold int tak_decode_init(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    ff_audiodsp_init(&s->adsp);
    ff_takdsp_init(&s->tdsp);

    s->avctx = avctx;
    avctx->bits_per_raw_sample = avctx->bits_per_coded_sample;

    set_sample_rate_params(avctx);

    return set_bps_params(avctx);
}

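/**
 * Undo fixed first-, second- or third-order difference coding (selected by
 * mode 1..3), in place over the array.
 */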
static void decode_lpc(int32_t *coeffs, int mode, int length)
{
    int i;

    if (length < 2)
        return;

    if (mode == 1) {
        unsigned a1 = *coeffs++;
        for (i = 0; i < length - 1 >> 1; i++) {
            *coeffs   += a1;
            coeffs[1] += (unsigned)*coeffs;
            a1         = coeffs[1];
            coeffs    += 2;
        }
        if (length - 1 & 1)
            *coeffs += a1;
    } else if (mode == 2) {
        unsigned a1 = coeffs[1];
        unsigned a2 = a1 + *coeffs;
        coeffs[1]   = a2;
        if (length > 2) {
            coeffs += 2;
            for (i = 0; i < length - 2 >> 1; i++) {
                unsigned a3 = *coeffs + a1;
                unsigned a4 = a3 + a2;
                *coeffs     = a4;
                a1          = coeffs[1] + a3;
                a2          = a1 + a4;
                coeffs[1]   = a2;
                coeffs     += 2;
            }
            if (length & 1)
                *coeffs += a1 + a2;
        }
    } else if (mode == 3) {
        unsigned a1 = coeffs[1];
        unsigned a2 = a1 + *coeffs;
        coeffs[1]   = a2;
        if (length > 2) {
            unsigned a3 = coeffs[2];
            unsigned a4 = a3 + a1;
            unsigned a5 = a4 + a2;
            coeffs[2]   = a5;
            coeffs     += 3;
            for (i = 0; i < length - 3; i++) {
                a3      += *coeffs;
                a4      += a3;
                a5      += a4;
                *coeffs  = a5;
                coeffs++;
            }
        }
    }
}

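/**
 * Decode one segment of residuals coded with a single coding mode.
 * Mode 0 means all-zero samples; otherwise unsigned escape codes with the
 * parameters from xcodes[mode - 1] are read and zigzag-mapped to signed values.
 */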
static int decode_segment(TAKDecContext *s, int8_t mode, int32_t *decoded, int len)
{
    struct CParam code;
    GetBitContext *gb = &s->gb;
    int i;

    if (!mode) {
        memset(decoded, 0, len * sizeof(*decoded));
        return 0;
    }

    if (mode > FF_ARRAY_ELEMS(xcodes))
        return AVERROR_INVALIDDATA;
    code = xcodes[mode - 1];

    for (i = 0; i < len; i++) {
        unsigned x = get_bits_long(gb, code.init);
        if (x >= code.escape && get_bits1(gb)) {
            x |= 1 << code.init;
            if (x >= code.aescape) {
                unsigned scale = get_unary(gb, 1, 9);
                if (scale == 9) {
                    int scale_bits = get_bits(gb, 3);
                    if (scale_bits > 0) {
                        if (scale_bits == 7) {
                            scale_bits += get_bits(gb, 5);
                            if (scale_bits > 29)
                                return AVERROR_INVALIDDATA;
                        }
                        scale = get_bits_long(gb, scale_bits) + 1;
                        x    += code.scale * scale;
                    }
                    x += code.bias;
                } else
                    x += code.scale * scale - code.escape;
            } else
                x -= code.escape;
        }
        decoded[i] = (x >> 1) ^ -(x & 1);
    }

    return 0;
}

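/**
 * Decode the residuals of a (sub)frame: either a single segment with one
 * coding mode, or up to 128 windows of s->uval samples whose modes are coded
 * differentially from the previous window.
 */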
static int decode_residues(TAKDecContext *s, int32_t *decoded, int length)
{
    GetBitContext *gb = &s->gb;
    int i, mode, ret;

    if (length > s->nb_samples)
        return AVERROR_INVALIDDATA;

    if (get_bits1(gb)) {
        int wlength, rval;

        wlength = length / s->uval;

        rval = length - (wlength * s->uval);

        if (rval < s->uval / 2)
            rval += s->uval;
        else
            wlength++;

        if (wlength <= 1 || wlength > 128)
            return AVERROR_INVALIDDATA;

        s->coding_mode[0] = mode = get_bits(gb, 6);

        for (i = 1; i < wlength; i++) {
            int c = get_unary(gb, 1, 6);

            switch (c) {
            case 6:
                mode = get_bits(gb, 6);
                break;
            case 5:
            case 4:
            case 3: {
                /* mode += sign ? (1 - c) : (c - 1) */
                int sign = get_bits1(gb);
                mode    += (-sign ^ (c - 1)) + sign;
                break;
            }
            case 2:
                mode++;
                break;
            case 1:
                mode--;
                break;
            }
            s->coding_mode[i] = mode;
        }

        i = 0;
        while (i < wlength) {
            int len = 0;

            mode = s->coding_mode[i];
            do {
                if (i >= wlength - 1)
                    len += rval;
                else
                    len += s->uval;
                i++;

                if (i == wlength)
                    break;
            } while (s->coding_mode[i] == mode);

            if ((ret = decode_segment(s, mode, decoded, len)) < 0)
                return ret;
            decoded += len;
        }
    } else {
        mode = get_bits(gb, 6);
        if ((ret = decode_segment(s, mode, decoded, length)) < 0)
            return ret;
    }

    return 0;
}

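/* read an escaped 4-bit value: a set flag bit is followed by 4 bits plus one,
 * a clear flag bit means zero */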
static int get_bits_esc4(GetBitContext *gb)
{
    if (get_bits1(gb))
        return get_bits(gb, 4) + 1;
    else
        return 0;
}

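/**
 * Decode one subframe of a channel: either plain residuals, or residuals
 * filtered through an LPC predictor whose quantized coefficients are read
 * from the bitstream, optionally reusing history from the previous subframe.
 */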
static int decode_subframe(TAKDecContext *s, int32_t *decoded,
                           int subframe_size, int prev_subframe_size)
{
    GetBitContext *gb = &s->gb;
    int x, y, i, j, ret = 0;
    int dshift, size, filter_quant, filter_order;
    int tfilter[MAX_PREDICTORS];

    if (!get_bits1(gb))
        return decode_residues(s, decoded, subframe_size);

    filter_order = predictor_sizes[get_bits(gb, 4)];

    if (prev_subframe_size > 0 && get_bits1(gb)) {
        if (filter_order > prev_subframe_size)
            return AVERROR_INVALIDDATA;

        decoded       -= filter_order;
        subframe_size += filter_order;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;
    } else {
        int lpc_mode;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;

        lpc_mode = get_bits(gb, 2);
        if (lpc_mode > 2)
            return AVERROR_INVALIDDATA;

        if ((ret = decode_residues(s, decoded, filter_order)) < 0)
            return ret;

        if (lpc_mode)
            decode_lpc(decoded, lpc_mode, filter_order);
    }

    dshift = get_bits_esc4(gb);
    size   = get_bits1(gb) + 6;

    filter_quant = 10;
    if (get_bits1(gb)) {
        filter_quant -= get_bits(gb, 3) + 1;
        if (filter_quant < 3)
            return AVERROR_INVALIDDATA;
    }

    s->predictors[0] = get_sbits(gb, 10);
    s->predictors[1] = get_sbits(gb, 10);
    s->predictors[2] = get_sbits(gb, size) * (1 << (10 - size));
    s->predictors[3] = get_sbits(gb, size) * (1 << (10 - size));
    if (filter_order > 4) {
        int tmp = size - get_bits1(gb);

        for (i = 4; i < filter_order; i++) {
            if (!(i & 3))
                x = tmp - get_bits(gb, 2);
            s->predictors[i] = get_sbits(gb, x) * (1 << (10 - size));
        }
    }

    tfilter[0] = s->predictors[0] * 64;
    for (i = 1; i < filter_order; i++) {
        uint32_t *p1 = &tfilter[0];
        uint32_t *p2 = &tfilter[i - 1];

        for (j = 0; j < (i + 1) / 2; j++) {
            x     = *p1 + ((int32_t)(s->predictors[i] * *p2 + 256) >> 9);
            *p2  += (int32_t)(s->predictors[i] * *p1 + 256) >> 9;
            *p1++ = x;
            p2--;
        }

        tfilter[i] = s->predictors[i] * 64;
    }

    x = 1 << (32 - (15 - filter_quant));
    y = 1 << ((15 - filter_quant) - 1);
    for (i = 0, j = filter_order - 1; i < filter_order / 2; i++, j--) {
        s->filter[j] = x - ((tfilter[i] + y) >> (15 - filter_quant));
        s->filter[i] = x - ((tfilter[j] + y) >> (15 - filter_quant));
    }

    if ((ret = decode_residues(s, &decoded[filter_order],
                               subframe_size - filter_order)) < 0)
        return ret;

    for (i = 0; i < filter_order; i++)
        s->residues[i] = *decoded++ >> dshift;

    y = FF_ARRAY_ELEMS(s->residues) - filter_order;
    x = subframe_size - filter_order;
    while (x > 0) {
        int tmp = FFMIN(y, x);

        for (i = 0; i < tmp; i++) {
            int v = 1 << (filter_quant - 1);

            if (filter_order & -16)
                v += (unsigned)s->adsp.scalarproduct_int16(&s->residues[i], s->filter,
                                                           filter_order & -16);
            for (j = filter_order & -16; j < filter_order; j += 4) {
                v += s->residues[i + j + 3] * (unsigned)s->filter[j + 3] +
                     s->residues[i + j + 2] * (unsigned)s->filter[j + 2] +
                     s->residues[i + j + 1] * (unsigned)s->filter[j + 1] +
                     s->residues[i + j    ] * (unsigned)s->filter[j    ];
            }
            v = (av_clip_intp2(v >> filter_quant, 13) * (1 << dshift)) - (unsigned)*decoded;
            *decoded++ = v;
            s->residues[filter_order + i] = v >> dshift;
        }

        x -= tmp;
        if (x > 0)
            memcpy(s->residues, &s->residues[y], 2 * filter_order);
    }

    return 0;
}

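/**
 * Decode all subframes of one channel: read the sample shift, the first
 * sample, the channel LPC mode and the subframe layout, then decode each
 * subframe in sequence.
 */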
static int decode_channel(TAKDecContext *s, int chan)
{
    AVCodecContext *avctx = s->avctx;
    GetBitContext *gb     = &s->gb;
    int32_t *decoded      = s->decoded[chan];
    int left              = s->nb_samples - 1;
    int i = 0, ret, prev  = 0;

    s->sample_shift[chan] = get_bits_esc4(gb);
    if (s->sample_shift[chan] >= avctx->bits_per_raw_sample)
        return AVERROR_INVALIDDATA;

    *decoded++        = get_sbits(gb, avctx->bits_per_raw_sample - s->sample_shift[chan]);
    s->lpc_mode[chan] = get_bits(gb, 2);
    s->nb_subframes   = get_bits(gb, 3) + 1;

    if (s->nb_subframes > 1) {
        if (get_bits_left(gb) < (s->nb_subframes - 1) * 6)
            return AVERROR_INVALIDDATA;

        for (; i < s->nb_subframes - 1; i++) {
            int v = get_bits(gb, 6);

            s->subframe_len[i] = (v - prev) * s->subframe_scale;
            if (s->subframe_len[i] <= 0)
                return AVERROR_INVALIDDATA;

            left -= s->subframe_len[i];
            prev  = v;
        }

        if (left <= 0)
            return AVERROR_INVALIDDATA;
    }
    s->subframe_len[i] = left;

    prev = 0;
    for (i = 0; i < s->nb_subframes; i++) {
        if ((ret = decode_subframe(s, decoded, s->subframe_len[i], prev)) < 0)
            return ret;
        decoded += s->subframe_len[i];
        prev     = s->subframe_len[i];
    }

    return 0;
}

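/**
 * Undo inter-channel decorrelation between channels c1 and c2 according to
 * s->dmode: mid/side style modes (1-3), scaled variants (4-5), or an
 * adaptive cross-channel filter (6-7).
 */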
static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
{
    GetBitContext *gb = &s->gb;
    int32_t *p1       = s->decoded[c1] + (s->dmode > 5);
    int32_t *p2       = s->decoded[c2] + (s->dmode > 5);
    int32_t bp1       = p1[0];
    int32_t bp2       = p2[0];
    int i;
    int dshift, dfactor;

    length += s->dmode < 6;

    switch (s->dmode) {
    case 1: /* left/side */
        s->tdsp.decorrelate_ls(p1, p2, length);
        break;
    case 2: /* side/right */
        s->tdsp.decorrelate_sr(p1, p2, length);
        break;
    case 3: /* side/mid */
        s->tdsp.decorrelate_sm(p1, p2, length);
        break;
    case 4: /* side/left with scale factor */
        FFSWAP(int32_t*, p1, p2);
        FFSWAP(int32_t, bp1, bp2);
    case 5: /* side/right with scale factor */
        dshift  = get_bits_esc4(gb);
        dfactor = get_sbits(gb, 10);
        s->tdsp.decorrelate_sf(p1, p2, length, dshift, dfactor);
        break;
    case 6:
        FFSWAP(int32_t*, p1, p2);
    case 7: {
        int length2, order_half, filter_order, dval1, dval2;
        int tmp, x, code_size;

        if (length < 256)
            return AVERROR_INVALIDDATA;

        dshift       = get_bits_esc4(gb);
        filter_order = 8 << get_bits1(gb);
        dval1        = get_bits1(gb);
        dval2        = get_bits1(gb);

        for (i = 0; i < filter_order; i++) {
            if (!(i & 3))
                code_size = 14 - get_bits(gb, 3);
            s->filter[i] = get_sbits(gb, code_size);
        }

        order_half = filter_order / 2;
        length2    = length - (filter_order - 1);

        /* decorrelate beginning samples */
        if (dval1) {
            for (i = 0; i < order_half; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        /* decorrelate ending samples */
        if (dval2) {
            for (i = length2 + order_half; i < length; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        for (i = 0; i < filter_order; i++)
            s->residues[i] = *p2++ >> dshift;

        p1 += order_half;
        x = FF_ARRAY_ELEMS(s->residues) - filter_order;
        for (; length2 > 0; length2 -= tmp) {
            tmp = FFMIN(length2, x);

            for (i = 0; i < tmp - (tmp == length2); i++)
                s->residues[filter_order + i] = *p2++ >> dshift;

            for (i = 0; i < tmp; i++) {
                int v = 1 << 9;

                if (filter_order == 16) {
                    v += s->adsp.scalarproduct_int16(&s->residues[i], s->filter,
                                                     filter_order);
                } else {
                    v += s->residues[i + 7] * s->filter[7] +
                         s->residues[i + 6] * s->filter[6] +
                         s->residues[i + 5] * s->filter[5] +
                         s->residues[i + 4] * s->filter[4] +
                         s->residues[i + 3] * s->filter[3] +
                         s->residues[i + 2] * s->filter[2] +
                         s->residues[i + 1] * s->filter[1] +
                         s->residues[i    ] * s->filter[0];
                }

                v = av_clip_intp2(v >> 10, 13) * (1U << dshift) - *p1;
                *p1++ = v;
            }

            memmove(s->residues, &s->residues[tmp], 2 * filter_order);
        }
        break;
    }
    }

    if (s->dmode > 0 && s->dmode < 6) {
        p1[0] = bp1;
        p2[0] = bp2;
    }

    return 0;
}

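/**
 * Decode one TAK frame: parse and CRC-check the frame header, set up the
 * output buffer, decode every channel, undo inter-channel decorrelation and
 * per-channel LPC/sample shifts, then convert to the output sample format.
 */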
static int tak_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame_ptr, AVPacket *pkt)
{
    TAKDecContext *s  = avctx->priv_data;
    GetBitContext *gb = &s->gb;
    int chan, i, ret, hsize;

    if (pkt->size < TAK_MIN_FRAME_HEADER_BYTES)
        return AVERROR_INVALIDDATA;

    if ((ret = init_get_bits8(gb, pkt->data, pkt->size)) < 0)
        return ret;

    if ((ret = ff_tak_decode_frame_header(avctx, gb, &s->ti, 0)) < 0)
        return ret;

    hsize = get_bits_count(gb) / 8;

    if (avctx->err_recognition & (AV_EF_CRCCHECK | AV_EF_COMPLIANT)) {
        if (ff_tak_check_crc(pkt->data, hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }

    if (s->ti.codec != TAK_CODEC_MONO_STEREO &&
        s->ti.codec != TAK_CODEC_MULTICHANNEL) {
        avpriv_report_missing_feature(avctx, "TAK codec type %d", s->ti.codec);
        return AVERROR_PATCHWELCOME;
    }
    if (s->ti.data_type) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported data type: %d\n", s->ti.data_type);
        return AVERROR_INVALIDDATA;
    }
    if (s->ti.codec == TAK_CODEC_MONO_STEREO && s->ti.channels > 2) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }
    if (s->ti.channels > 6) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }

    if (s->ti.frame_samples <= 0) {
        av_log(avctx, AV_LOG_ERROR, "unsupported/invalid number of samples\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->bits_per_raw_sample = s->ti.bps;
    if ((ret = set_bps_params(avctx)) < 0)
        return ret;
    if (s->ti.sample_rate != avctx->sample_rate) {
        avctx->sample_rate = s->ti.sample_rate;
        set_sample_rate_params(avctx);
    }

    av_channel_layout_uninit(&avctx->ch_layout);
    if (s->ti.ch_layout) {
        av_channel_layout_from_mask(&avctx->ch_layout, s->ti.ch_layout);
    } else {
        avctx->ch_layout.order       = AV_CHANNEL_ORDER_UNSPEC;
        avctx->ch_layout.nb_channels = s->ti.channels;
    }

    s->nb_samples = s->ti.last_frame_samples ? s->ti.last_frame_samples
                                             : s->ti.frame_samples;

    frame->nb_samples = s->nb_samples;
    if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    if (avctx->bits_per_raw_sample <= 16) {
        int buf_size = av_samples_get_buffer_size(NULL, avctx->ch_layout.nb_channels,
                                                  s->nb_samples,
                                                  AV_SAMPLE_FMT_S32P, 0);
        if (buf_size < 0)
            return buf_size;
        av_fast_malloc(&s->decode_buffer, &s->decode_buffer_size, buf_size);
        if (!s->decode_buffer)
            return AVERROR(ENOMEM);
        ret = av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
                                     s->decode_buffer, avctx->ch_layout.nb_channels,
                                     s->nb_samples, AV_SAMPLE_FMT_S32P, 0);
        if (ret < 0)
            return ret;
    } else {
        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++)
            s->decoded[chan] = (int32_t *)frame->extended_data[chan];
    }

    if (s->nb_samples < 16) {
        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++) {
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                decoded[i] = get_sbits(gb, avctx->bits_per_raw_sample);
        }
    } else {
        if (s->ti.codec == TAK_CODEC_MONO_STEREO) {
            for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++)
                if (ret = decode_channel(s, chan))
                    return ret;

            if (avctx->ch_layout.nb_channels == 2) {
                s->nb_subframes = get_bits(gb, 1) + 1;
                if (s->nb_subframes > 1) {
                    s->subframe_len[1] = get_bits(gb, 6);
                }

                s->dmode = get_bits(gb, 3);
                if (ret = decorrelate(s, 0, 1, s->nb_samples - 1))
                    return ret;
            }
        } else if (s->ti.codec == TAK_CODEC_MULTICHANNEL) {
            if (get_bits1(gb)) {
                int ch_mask = 0;

                chan = get_bits(gb, 4) + 1;
                if (chan > avctx->ch_layout.nb_channels)
                    return AVERROR_INVALIDDATA;

                for (i = 0; i < chan; i++) {
                    int nbit = get_bits(gb, 4);

                    if (nbit >= avctx->ch_layout.nb_channels)
                        return AVERROR_INVALIDDATA;

                    if (ch_mask & 1 << nbit)
                        return AVERROR_INVALIDDATA;

                    s->mcdparams[i].present = get_bits1(gb);
                    if (s->mcdparams[i].present) {
                        s->mcdparams[i].index = get_bits(gb, 2);
                        s->mcdparams[i].chan2 = get_bits(gb, 4);
                        if (s->mcdparams[i].chan2 >= avctx->ch_layout.nb_channels) {
                            av_log(avctx, AV_LOG_ERROR,
                                   "invalid channel 2 (%d) for %d channel(s)\n",
                                   s->mcdparams[i].chan2, avctx->ch_layout.nb_channels);
                            return AVERROR_INVALIDDATA;
                        }
                        if (s->mcdparams[i].index == 1) {
                            if ((nbit == s->mcdparams[i].chan2) ||
                                (ch_mask & 1 << s->mcdparams[i].chan2))
                                return AVERROR_INVALIDDATA;

                            ch_mask |= 1 << s->mcdparams[i].chan2;
                        } else if (!(ch_mask & 1 << s->mcdparams[i].chan2)) {
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    s->mcdparams[i].chan1 = nbit;

                    ch_mask |= 1 << nbit;
                }
            } else {
                chan = avctx->ch_layout.nb_channels;
                for (i = 0; i < chan; i++) {
                    s->mcdparams[i].present = 0;
                    s->mcdparams[i].chan1   = i;
                }
            }

            for (i = 0; i < chan; i++) {
                if (s->mcdparams[i].present && s->mcdparams[i].index == 1)
                    if (ret = decode_channel(s, s->mcdparams[i].chan2))
                        return ret;

                if (ret = decode_channel(s, s->mcdparams[i].chan1))
                    return ret;

                if (s->mcdparams[i].present) {
                    s->dmode = mc_dmodes[s->mcdparams[i].index];
                    if (ret = decorrelate(s,
                                          s->mcdparams[i].chan2,
                                          s->mcdparams[i].chan1,
                                          s->nb_samples - 1))
                        return ret;
                }
            }
        }

        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++) {
            int32_t *decoded = s->decoded[chan];

            if (s->lpc_mode[chan])
                decode_lpc(decoded, s->lpc_mode[chan], s->nb_samples);

            if (s->sample_shift[chan] > 0)
                for (i = 0; i < s->nb_samples; i++)
                    decoded[i] *= 1U << s->sample_shift[chan];
        }
    }

    align_get_bits(gb);
    skip_bits(gb, 24);
    if (get_bits_left(gb) < 0)
        av_log(avctx, AV_LOG_DEBUG, "overread\n");
    else if (get_bits_left(gb) > 0)
        av_log(avctx, AV_LOG_DEBUG, "underread\n");

    if (avctx->err_recognition & (AV_EF_CRCCHECK | AV_EF_COMPLIANT)) {
        if (ff_tak_check_crc(pkt->data + hsize,
                             get_bits_count(gb) / 8 - hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }

    /* convert to output buffer */
    switch (avctx->sample_fmt) {
    case AV_SAMPLE_FMT_U8P:
        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++) {
            uint8_t *samples = (uint8_t *)frame->extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i] + 0x80U;
        }
        break;
    case AV_SAMPLE_FMT_S16P:
        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++) {
            int16_t *samples = (int16_t *)frame->extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i];
        }
        break;
    case AV_SAMPLE_FMT_S32P:
        for (chan = 0; chan < avctx->ch_layout.nb_channels; chan++) {
            int32_t *samples = (int32_t *)frame->extended_data[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] *= 1U << 8;
        }
        break;
    }

    *got_frame_ptr = 1;

    return pkt->size;
}

#if HAVE_THREADS
static int update_thread_context(AVCodecContext *dst,
                                 const AVCodecContext *src)
{
    TAKDecContext *tsrc = src->priv_data;
    TAKDecContext *tdst = dst->priv_data;

    if (dst == src)
        return 0;
    memcpy(&tdst->ti, &tsrc->ti, sizeof(TAKStreamInfo));
    return 0;
}
#endif

static av_cold int tak_decode_close(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    av_freep(&s->decode_buffer);

    return 0;
}

const FFCodec ff_tak_decoder = {
    .p.name         = "tak",
    CODEC_LONG_NAME("TAK (Tom's lossless Audio Kompressor)"),
    .p.type         = AVMEDIA_TYPE_AUDIO,
    .p.id           = AV_CODEC_ID_TAK,
    .priv_data_size = sizeof(TAKDecContext),
    .init           = tak_decode_init,
    .close          = tak_decode_close,
    FF_CODEC_DECODE_CB(tak_decode_frame),
    UPDATE_THREAD_CONTEXT(update_thread_context),
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
                      AV_CODEC_CAP_CHANNEL_CONF,
    .p.sample_fmts  = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                      AV_SAMPLE_FMT_S16P,
                                                      AV_SAMPLE_FMT_S32P,
                                                      AV_SAMPLE_FMT_NONE },
};