FFmpeg
atrac3.c
Go to the documentation of this file.
1 /*
2  * ATRAC3 compatible decoder
3  * Copyright (c) 2006-2008 Maxim Poliakovski
4  * Copyright (c) 2006-2008 Benjamin Larsson
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * ATRAC3 compatible decoder.
26  * This decoder handles Sony's ATRAC3 data.
27  *
28  * Container formats used to store ATRAC3 data:
29  * RealMedia (.rm), RIFF WAV (.wav, .at3), Sony OpenMG (.oma, .aa3).
30  *
31  * To use this decoder, a calling application must supply the extradata
32  * bytes provided in the containers above.
33  */
34 
35 #include <math.h>
36 #include <stddef.h>
37 
38 #include "libavutil/attributes.h"
39 #include "libavutil/float_dsp.h"
40 #include "libavutil/libm.h"
41 #include "libavutil/mem_internal.h"
42 #include "libavutil/thread.h"
43 #include "libavutil/tx.h"
44 
45 #include "avcodec.h"
46 #include "bytestream.h"
47 #include "codec_internal.h"
48 #include "decode.h"
49 #include "get_bits.h"
50 
51 #include "atrac.h"
52 #include "atrac3data.h"
53 
54 #define MIN_CHANNELS 1
55 #define MAX_CHANNELS 8
56 #define MAX_JS_PAIRS 8 / 2
57 
58 #define JOINT_STEREO 0x12
59 #define SINGLE 0x2
60 
61 #define SAMPLES_PER_FRAME 1024
62 #define MDCT_SIZE 512
63 
64 #define ATRAC3_VLC_BITS 8
65 
66 typedef struct GainBlock {
68 } GainBlock;
69 
70 typedef struct TonalComponent {
71  int pos;
72  int num_coefs;
73  float coef[8];
75 
76 typedef struct ChannelUnit {
83 
86 
87  float delay_buf1[46]; ///<qmf delay buffers
88  float delay_buf2[46];
89  float delay_buf3[46];
90 } ChannelUnit;
91 
92 typedef struct ATRAC3Context {
94  //@{
95  /** stream data */
97 
99  //@}
100  //@{
101  /** joint-stereo related variables */
106  //@}
107  //@{
108  /** data buffers */
110  float temp_buf[1070];
111  //@}
112  //@{
113  /** extradata */
115  //@}
116 
120  void (*vector_fmul)(float *dst, const float *src0, const float *src1,
121  int len);
122 } ATRAC3Context;
123 
127 
128 /**
129  * Regular 512 points IMDCT without overlapping, with the exception of the
130  * swapping of odd bands caused by the reverse spectra of the QMF.
131  *
132  * @param odd_band 1 if the band is an odd band
133  */
134 static void imlt(ATRAC3Context *q, float *input, float *output, int odd_band)
135 {
136  int i;
137 
138  if (odd_band) {
139  /**
140  * Reverse the odd bands before IMDCT, this is an effect of the QMF
141  * transform or it gives better compression to do it this way.
142  * FIXME: It should be possible to handle this in imdct_calc
143  * for that to happen a modification of the prerotation step of
144  * all SIMD code and C code is needed.
145  * Or fix the functions before so they generate a pre reversed spectrum.
146  */
147  for (i = 0; i < 128; i++)
148  FFSWAP(float, input[i], input[255 - i]);
149  }
150 
151  q->mdct_fn(q->mdct_ctx, output, input, sizeof(float));
152 
153  /* Perform windowing on the output. */
155 }
156 
157 /*
158  * indata descrambling, only used for data coming from the rm container
159  */
160 static int decode_bytes(const uint8_t *input, uint8_t *out, int bytes)
161 {
162  int i, off;
163  uint32_t c;
164  const uint32_t *buf;
165  uint32_t *output = (uint32_t *)out;
166 
167  off = (intptr_t)input & 3;
168  buf = (const uint32_t *)(input - off);
169  if (off)
170  c = av_be2ne32((0x537F6103U >> (off * 8)) | (0x537F6103U << (32 - (off * 8))));
171  else
172  c = av_be2ne32(0x537F6103U);
173  bytes += 3 + off;
174  for (i = 0; i < bytes / 4; i++)
175  output[i] = c ^ buf[i];
176 
177  if (off)
178  avpriv_request_sample(NULL, "Offset of %d", off);
179 
180  return off;
181 }
182 
183 static av_cold void init_imdct_window(void)
184 {
185  int i, j;
186 
187  /* generate the mdct window, for details see
188  * http://wiki.multimedia.cx/index.php?title=RealAudio_atrc#Windows */
189  for (i = 0, j = 255; i < 128; i++, j--) {
190  float wi = sin(((i + 0.5) / 256.0 - 0.5) * M_PI) + 1.0;
191  float wj = sin(((j + 0.5) / 256.0 - 0.5) * M_PI) + 1.0;
192  float w = 0.5 * (wi * wi + wj * wj);
193  mdct_window[i] = mdct_window[511 - i] = wi / w;
194  mdct_window[j] = mdct_window[511 - j] = wj / w;
195  }
196 }
197 
199 {
200  ATRAC3Context *q = avctx->priv_data;
201 
202  av_freep(&q->units);
204 
205  av_tx_uninit(&q->mdct_ctx);
206 
207  return 0;
208 }
209 
210 /**
211  * Mantissa decoding
212  *
213  * @param selector which table the output values are coded with
214  * @param coding_flag constant length coding or variable length coding
215  * @param mantissas mantissa output table
216  * @param num_codes number of values to get
217  */
static void read_quant_spectral_coeffs(GetBitContext *gb, int selector,
                                       int coding_flag, int *mantissas,
                                       int num_codes)
{
    int i, code, huff_symb;

    /* Selector 1 packs two mantissas per code word, so half as many
     * code words are read from the stream. */
    if (selector == 1)
        num_codes /= 2;

    if (coding_flag != 0) {
        /* constant length coding (CLC) */
        int num_bits = clc_length_tab[selector];

        if (selector > 1) {
            /* One signed mantissa per code word. */
            for (i = 0; i < num_codes; i++) {
                if (num_bits)
                    code = get_sbits(gb, num_bits);
                else
                    code = 0; /* zero-length code: mantissa is implicitly 0 */
                mantissas[i] = code;
            }
        } else {
            /* Selector 0/1: each 4-bit code word holds two 2-bit indices
             * into the CLC mantissa lookup table. */
            for (i = 0; i < num_codes; i++) {
                if (num_bits)
                    code = get_bits(gb, num_bits); // num_bits is always 4 in this case
                else
                    code = 0;
                mantissas[i * 2    ] = mantissa_clc_tab[code >> 2];
                mantissas[i * 2 + 1] = mantissa_clc_tab[code & 3];
            }
        }
    } else {
        /* variable length coding (VLC) */
        if (selector != 1) {
            /* One mantissa per Huffman symbol. */
            for (i = 0; i < num_codes; i++) {
                mantissas[i] = get_vlc2(gb, spectral_coeff_tab[selector-1].table,
                                        ATRAC3_VLC_BITS, 1);
            }
        } else {
            /* Selector 1: each Huffman symbol maps to a pair of mantissas
             * via the paired VLC lookup table. */
            for (i = 0; i < num_codes; i++) {
                huff_symb = get_vlc2(gb, spectral_coeff_tab[selector - 1].table,
                                     ATRAC3_VLC_BITS, 1);
                mantissas[i * 2    ] = mantissa_vlc_tab[huff_symb * 2    ];
                mantissas[i * 2 + 1] = mantissa_vlc_tab[huff_symb * 2 + 1];
            }
        }
    }
}
266 
267 /**
268  * Restore the quantized band spectrum coefficients
269  *
270  * @return subband count, fix for broken specification/files
271  */
static int decode_spectrum(GetBitContext *gb, float *output)
{
    int num_subbands, coding_mode, i, j, first, last, subband_size;
    int subband_vlc_index[32], sf_index[32];
    int mantissas[128];
    float scale_factor;

    num_subbands = get_bits(gb, 5); // number of coded subbands
    coding_mode = get_bits1(gb);    // coding Mode: 0 - VLC/ 1-CLC

    /* get the VLC selector table for the subbands, 0 means not coded */
    for (i = 0; i <= num_subbands; i++)
        subband_vlc_index[i] = get_bits(gb, 3);

    /* read the scale factor indexes from the stream
     * (present only for subbands whose selector is non-zero; entries for
     * uncoded subbands stay uninitialized but are never read below) */
    for (i = 0; i <= num_subbands; i++) {
        if (subband_vlc_index[i] != 0)
            sf_index[i] = get_bits(gb, 6);
    }

    for (i = 0; i <= num_subbands; i++) {
        /* subband_tab maps subband index to its first spectral line;
         * assumes it has one more entry than the max subband count
         * so [i + 1] is always valid. */
        first = subband_tab[i    ];
        last  = subband_tab[i + 1];

        subband_size = last - first;

        if (subband_vlc_index[i] != 0) {
            /* decode spectral coefficients for this subband */
            /* TODO: This can be done faster is several blocks share the
             * same VLC selector (subband_vlc_index) */
            read_quant_spectral_coeffs(gb, subband_vlc_index[i], coding_mode,
                                       mantissas, subband_size);

            /* decode the scale factor for this subband */
            scale_factor = ff_atrac_sf_table[sf_index[i]] *
                           inv_max_quant[subband_vlc_index[i]];

            /* inverse quantize the coefficients */
            for (j = 0; first < last; first++, j++)
                output[first] = mantissas[j] * scale_factor;
        } else {
            /* this subband was not coded, so zero the entire subband */
            memset(output + first, 0, subband_size * sizeof(*output));
        }
    }

    /* clear the subbands that were not coded */
    first = subband_tab[i];
    memset(output + first, 0, (SAMPLES_PER_FRAME - first) * sizeof(*output));
    return num_subbands;
}
323 
324 /**
325  * Restore the quantized tonal components
326  *
327  * @param components tonal components
328  * @param num_bands number of coded bands
329  */
331  TonalComponent *components, int num_bands)
332 {
333  int i, b, c, m;
334  int nb_components, coding_mode_selector, coding_mode;
335  int band_flags[4], mantissa[8];
336  int component_count = 0;
337 
338  nb_components = get_bits(gb, 5);
339 
340  /* no tonal components */
341  if (nb_components == 0)
342  return 0;
343 
344  coding_mode_selector = get_bits(gb, 2);
345  if (coding_mode_selector == 2)
346  return AVERROR_INVALIDDATA;
347 
348  coding_mode = coding_mode_selector & 1;
349 
350  for (i = 0; i < nb_components; i++) {
351  int coded_values_per_component, quant_step_index;
352 
353  for (b = 0; b <= num_bands; b++)
354  band_flags[b] = get_bits1(gb);
355 
356  coded_values_per_component = get_bits(gb, 3);
357 
358  quant_step_index = get_bits(gb, 3);
359  if (quant_step_index <= 1)
360  return AVERROR_INVALIDDATA;
361 
362  if (coding_mode_selector == 3)
363  coding_mode = get_bits1(gb);
364 
365  for (b = 0; b < (num_bands + 1) * 4; b++) {
366  int coded_components;
367 
368  if (band_flags[b >> 2] == 0)
369  continue;
370 
371  coded_components = get_bits(gb, 3);
372 
373  for (c = 0; c < coded_components; c++) {
374  TonalComponent *cmp = &components[component_count];
375  int sf_index, coded_values, max_coded_values;
376  float scale_factor;
377 
378  sf_index = get_bits(gb, 6);
379  if (component_count >= 64)
380  return AVERROR_INVALIDDATA;
381 
382  cmp->pos = b * 64 + get_bits(gb, 6);
383 
384  max_coded_values = SAMPLES_PER_FRAME - cmp->pos;
385  coded_values = coded_values_per_component + 1;
386  coded_values = FFMIN(max_coded_values, coded_values);
387 
388  scale_factor = ff_atrac_sf_table[sf_index] *
389  inv_max_quant[quant_step_index];
390 
391  read_quant_spectral_coeffs(gb, quant_step_index, coding_mode,
392  mantissa, coded_values);
393 
394  cmp->num_coefs = coded_values;
395 
396  /* inverse quant */
397  for (m = 0; m < coded_values; m++)
398  cmp->coef[m] = mantissa[m] * scale_factor;
399 
400  component_count++;
401  }
402  }
403  }
404 
405  return component_count;
406 }
407 
408 /**
409  * Decode gain parameters for the coded bands
410  *
411  * @param block the gainblock for the current band
412  * @param num_bands amount of coded bands
413  */
415  int num_bands)
416 {
417  int b, j;
418  int *level, *loc;
419 
420  AtracGainInfo *gain = block->g_block;
421 
422  for (b = 0; b <= num_bands; b++) {
423  gain[b].num_points = get_bits(gb, 3);
424  level = gain[b].lev_code;
425  loc = gain[b].loc_code;
426 
427  for (j = 0; j < gain[b].num_points; j++) {
428  level[j] = get_bits(gb, 4);
429  loc[j] = get_bits(gb, 5);
430  if (j && loc[j] <= loc[j - 1])
431  return AVERROR_INVALIDDATA;
432  }
433  }
434 
435  /* Clear the unused blocks. */
436  for (; b < 4 ; b++)
437  gain[b].num_points = 0;
438 
439  return 0;
440 }
441 
442 /**
443  * Combine the tonal band spectrum and regular band spectrum
444  *
445  * @param spectrum output spectrum buffer
446  * @param num_components number of tonal components
447  * @param components tonal components for this band
448  * @return position of the last tonal coefficient
449  */
450 static int add_tonal_components(float *spectrum, int num_components,
451  TonalComponent *components)
452 {
453  int i, j, last_pos = -1;
454  float *input, *output;
455 
456  for (i = 0; i < num_components; i++) {
457  last_pos = FFMAX(components[i].pos + components[i].num_coefs, last_pos);
458  input = components[i].coef;
459  output = &spectrum[components[i].pos];
460 
461  for (j = 0; j < components[i].num_coefs; j++)
462  output[j] += input[j];
463  }
464 
465  return last_pos;
466 }
467 
468 #define INTERPOLATE(old, new, nsample) \
469  ((old) + (nsample) * 0.125 * ((new) - (old)))
470 
static void reverse_matrixing(float *su1, float *su2, int *prev_code,
                              int *curr_code)
{
    int i, nsample, band;
    float mc1_l, mc1_r, mc2_l, mc2_r;

    /* Process the four 256-sample bands; each band has its own 2-bit
     * matrixing selector for the previous and current frame. */
    for (i = 0, band = 0; band < 4 * 256; band += 256, i++) {
        int s1 = prev_code[i];
        int s2 = curr_code[i];
        nsample = band;

        if (s1 != s2) {
            /* Selector value changed, interpolation needed. */
            mc1_l = matrix_coeffs[s1 * 2    ];
            mc1_r = matrix_coeffs[s1 * 2 + 1];
            mc2_l = matrix_coeffs[s2 * 2    ];
            mc2_r = matrix_coeffs[s2 * 2 + 1];

            /* Interpolation is done over the first eight samples. */
            for (; nsample < band + 8; nsample++) {
                float c1 = su1[nsample];
                float c2 = su2[nsample];
                /* Crossfade from the old matrix to the new one; the pair is
                 * reconstructed so that c1 is recoverable as (su1+su2)/2. */
                c2 = c1 * INTERPOLATE(mc1_l, mc2_l, nsample - band) +
                     c2 * INTERPOLATE(mc1_r, mc2_r, nsample - band);
                su1[nsample] = c2;
                su2[nsample] = c1 * 2.0 - c2;
            }
        }

        /* Apply the matrix without interpolation. */
        switch (s2) {
        case 0: /* M/S decoding */
            for (; nsample < band + 256; nsample++) {
                float c1 = su1[nsample];
                float c2 = su2[nsample];
                su1[nsample] = c2 * 2.0;
                su2[nsample] = (c1 - c2) * 2.0;
            }
            break;
        case 1:
            for (; nsample < band + 256; nsample++) {
                float c1 = su1[nsample];
                float c2 = su2[nsample];
                su1[nsample] = (c1 + c2) * 2.0;
                su2[nsample] = c2 * -2.0;
            }
            break;
        case 2:
        case 3:
            /* Plain sum/difference (L/R) reconstruction. */
            for (; nsample < band + 256; nsample++) {
                float c1 = su1[nsample];
                float c2 = su2[nsample];
                su1[nsample] = c1 + c2;
                su2[nsample] = c1 - c2;
            }
            break;
        default:
            av_assert1(0); /* s2 is read as 2 bits, so 0-3 is exhaustive */
        }
    }
}
532 
533 static void get_channel_weights(int index, int flag, float ch[2])
534 {
535  if (index == 7) {
536  ch[0] = 1.0;
537  ch[1] = 1.0;
538  } else {
539  ch[0] = (index & 7) / 7.0;
540  ch[1] = sqrt(2 - ch[0] * ch[0]);
541  if (flag)
542  FFSWAP(float, ch[0], ch[1]);
543  }
544 }
545 
static void channel_weighting(float *su1, float *su2, int *p3)
{
    /* Apply joint-stereo channel weighting to bands 1-3 (band 0 is left
     * untouched). p3 appears to hold {flag, index} pairs for the previous
     * and current weights — when both indices are 7 the weights are unity
     * and nothing needs to be done. */
    int band, ns;
    /* w[x][y] y=0 is left y=1 is right */
    float w[2][2];

    if (p3[1] == 7 && p3[3] == 7)
        return;

    get_channel_weights(p3[1], p3[0], w[0]);
    get_channel_weights(p3[3], p3[2], w[1]);

    for (band = 1; band < 4; band++) {
        int base = band * 256;

        /* Ramp su1 between w[0][0] and w[0][1], and su2 between w[1][0]
         * and w[1][1], over the first eight samples of the band. */
        for (ns = 0; ns < 8; ns++) {
            su1[base + ns] *= INTERPOLATE(w[0][0], w[0][1], ns);
            su2[base + ns] *= INTERPOLATE(w[1][0], w[1][1], ns);
        }
        /* The rest of the band gets the steady-state weights. */
        for (; ns < 256; ns++) {
            su1[base + ns] *= w[1][0];
            su2[base + ns] *= w[1][1];
        }
    }
}
568 
569 /**
570  * Decode a Sound Unit
571  *
572  * @param snd the channel unit to be used
573  * @param output the decoded samples before IQMF in float representation
574  * @param channel_num channel number
575  * @param coding_mode the coding mode (JOINT_STEREO or single channels)
576  */
578  ChannelUnit *snd, float *output,
579  int channel_num, int coding_mode)
580 {
581  int band, ret, num_subbands, last_tonal, num_bands;
582  GainBlock *gain1 = &snd->gain_block[ snd->gc_blk_switch];
583  GainBlock *gain2 = &snd->gain_block[1 - snd->gc_blk_switch];
584 
585  if (coding_mode == JOINT_STEREO && (channel_num % 2) == 1) {
586  if (get_bits(gb, 2) != 3) {
587  av_log(NULL,AV_LOG_ERROR,"JS mono Sound Unit id != 3.\n");
588  return AVERROR_INVALIDDATA;
589  }
590  } else {
591  if (get_bits(gb, 6) != 0x28) {
592  av_log(NULL,AV_LOG_ERROR,"Sound Unit id != 0x28.\n");
593  return AVERROR_INVALIDDATA;
594  }
595  }
596 
597  /* number of coded QMF bands */
598  snd->bands_coded = get_bits(gb, 2);
599 
600  ret = decode_gain_control(gb, gain2, snd->bands_coded);
601  if (ret)
602  return ret;
603 
605  snd->bands_coded);
606  if (snd->num_components < 0)
607  return snd->num_components;
608 
609  num_subbands = decode_spectrum(gb, snd->spectrum);
610 
611  /* Merge the decoded spectrum and tonal components. */
612  last_tonal = add_tonal_components(snd->spectrum, snd->num_components,
613  snd->components);
614 
615 
616  /* calculate number of used MLT/QMF bands according to the amount of coded
617  spectral lines */
618  num_bands = (subband_tab[num_subbands] - 1) >> 8;
619  if (last_tonal >= 0)
620  num_bands = FFMAX((last_tonal + 256) >> 8, num_bands);
621 
622 
623  /* Reconstruct time domain samples. */
624  for (band = 0; band < 4; band++) {
625  /* Perform the IMDCT step without overlapping. */
626  if (band <= num_bands)
627  imlt(q, &snd->spectrum[band * 256], snd->imdct_buf, band & 1);
628  else
629  memset(snd->imdct_buf, 0, 512 * sizeof(*snd->imdct_buf));
630 
631  /* gain compensation and overlapping */
633  &snd->prev_frame[band * 256],
634  &gain1->g_block[band], &gain2->g_block[band],
635  256, &output[band * 256]);
636  }
637 
638  /* Swap the gain control buffers for the next frame. */
639  snd->gc_blk_switch ^= 1;
640 
641  return 0;
642 }
643 
/**
 * Decode one frame into planar float samples. In joint-stereo mode channels
 * are decoded as pairs (the second sound unit of each pair is stored
 * byte-reversed in the bitstream); otherwise each channel has its own
 * independent sound unit. Returns 0 on success or a negative error code.
 */
static int decode_frame(AVCodecContext *avctx, const uint8_t *databuf,
                        float **out_samples)
{
    ATRAC3Context *q = avctx->priv_data;
    int ret, i, ch;
    uint8_t *ptr1;
    int channels = avctx->ch_layout.nb_channels;

    if (q->coding_mode == JOINT_STEREO) {
        /* channel coupling mode */

        /* Decode sound unit pairs (channels are expected to be even).
         * Multichannel joint stereo interleaves pairs (6ch: 2ch + 2ch + 2ch) */
        const uint8_t *js_databuf;
        int js_pair, js_block_align;

        js_block_align = (avctx->block_align / channels) * 2; /* block pair */

        for (ch = 0; ch < channels; ch = ch + 2) {
            js_pair = ch/2;
            js_databuf = databuf + js_pair * js_block_align; /* align to current pair */

            /* Set the bitstream reader at the start of first channel sound unit. */
            init_get_bits(&q->gb,
                          js_databuf, js_block_align * 8);

            /* decode Sound Unit 1 */
            ret = decode_channel_sound_unit(q, &q->gb, &q->units[ch],
                                            out_samples[ch], ch, JOINT_STEREO);
            if (ret != 0)
                return ret;

            /* Framedata of the su2 in the joint-stereo mode is encoded in
             * reverse byte order so we need to swap it first. */
            if (js_databuf == q->decoded_bytes_buffer) {
                /* In-place reversal when the input already lives in our
                 * scratch buffer (descrambled stream case). */
                uint8_t *ptr2 = q->decoded_bytes_buffer + js_block_align - 1;
                ptr1 = q->decoded_bytes_buffer;
                for (i = 0; i < js_block_align / 2; i++, ptr1++, ptr2--)
                    FFSWAP(uint8_t, *ptr1, *ptr2);
            } else {
                /* Otherwise copy reversed into the scratch buffer. */
                const uint8_t *ptr2 = js_databuf + js_block_align - 1;
                for (i = 0; i < js_block_align; i++)
                    q->decoded_bytes_buffer[i] = *ptr2--;
            }

            /* Skip the sync codes (0xF8). */
            ptr1 = q->decoded_bytes_buffer;
            for (i = 4; *ptr1 == 0xF8; i++, ptr1++) {
                if (i >= js_block_align)
                    return AVERROR_INVALIDDATA;
            }


            /* set the bitstream reader at the start of the second Sound Unit */
            ret = init_get_bits8(&q->gb,
                                 ptr1, q->decoded_bytes_buffer + js_block_align - ptr1);
            if (ret < 0)
                return ret;

            /* Fill the Weighting coeffs delay buffer */
            /* Shift the two previous {flag, index} pairs down and append the
             * new pair read from the stream. */
            memmove(q->weighting_delay[js_pair], &q->weighting_delay[js_pair][2],
                    4 * sizeof(*q->weighting_delay[js_pair]));
            q->weighting_delay[js_pair][4] = get_bits1(&q->gb);
            q->weighting_delay[js_pair][5] = get_bits(&q->gb, 3);

            /* Rotate the per-band matrixing selectors one frame forward. */
            for (i = 0; i < 4; i++) {
                q->matrix_coeff_index_prev[js_pair][i] = q->matrix_coeff_index_now[js_pair][i];
                q->matrix_coeff_index_now[js_pair][i] = q->matrix_coeff_index_next[js_pair][i];
                q->matrix_coeff_index_next[js_pair][i] = get_bits(&q->gb, 2);
            }

            /* Decode Sound Unit 2. */
            ret = decode_channel_sound_unit(q, &q->gb, &q->units[ch+1],
                                            out_samples[ch+1], ch+1, JOINT_STEREO);
            if (ret != 0)
                return ret;

            /* Reconstruct the channel coefficients. */
            reverse_matrixing(out_samples[ch], out_samples[ch+1],
                              q->matrix_coeff_index_prev[js_pair],
                              q->matrix_coeff_index_now[js_pair]);

            channel_weighting(out_samples[ch], out_samples[ch+1], q->weighting_delay[js_pair]);
        }
    } else {
        /* single channels */
        /* Decode the channel sound units. */
        for (i = 0; i < channels; i++) {
            /* Set the bitstream reader at the start of a channel sound unit. */
            init_get_bits(&q->gb,
                          databuf + i * avctx->block_align / channels,
                          avctx->block_align * 8 / channels);

            ret = decode_channel_sound_unit(q, &q->gb, &q->units[i],
                                            out_samples[i], i, q->coding_mode);
            if (ret != 0)
                return ret;
        }
    }

    /* Apply the iQMF synthesis filter. */
    for (i = 0; i < channels; i++) {
        /* Each channel holds four 256-sample bands; combine them pairwise,
         * then merge the two halves into the final 1024-sample output. */
        float *p1 = out_samples[i];
        float *p2 = p1 + 256;
        float *p3 = p2 + 256;
        float *p4 = p3 + 256;
        ff_atrac_iqmf(p1, p2, 256, p1, q->units[i].delay_buf1, q->temp_buf);
        ff_atrac_iqmf(p4, p3, 256, p3, q->units[i].delay_buf2, q->temp_buf);
        ff_atrac_iqmf(p1, p3, 512, p1, q->units[i].delay_buf3, q->temp_buf);
    }

    return 0;
}
757 
/**
 * Decode one ATRAC3 AL (Advanced Lossless) frame: all channel sound units
 * share a single bitstream, so the reader is initialized once and advanced
 * channel by channel. Returns 0 on success or a negative error code.
 */
static int al_decode_frame(AVCodecContext *avctx, const uint8_t *databuf,
                           int size, float **out_samples)
{
    ATRAC3Context *q = avctx->priv_data;
    int channels = avctx->ch_layout.nb_channels;
    int ret, i;

    /* Set the bitstream reader at the start of a channel sound unit. */
    init_get_bits(&q->gb, databuf, size * 8);
    /* single channels */
    /* Decode the channel sound units. */
    for (i = 0; i < channels; i++) {
        ret = decode_channel_sound_unit(q, &q->gb, &q->units[i],
                                        out_samples[i], i, q->coding_mode);
        if (ret != 0)
            return ret;
        /* Resync: skip bits until the next sound unit start marker (0x28)
         * or until fewer than 7 bits remain.
         * NOTE(review): the `i < channels` guard is always true inside this
         * loop body (i is the loop index) — appears redundant. */
        while (i < channels && get_bits_left(&q->gb) > 6 && show_bits(&q->gb, 6) != 0x28) {
            skip_bits(&q->gb, 1);
        }
    }

    /* Apply the iQMF synthesis filter. */
    for (i = 0; i < channels; i++) {
        /* Combine the four 256-sample bands into 1024 output samples. */
        float *p1 = out_samples[i];
        float *p2 = p1 + 256;
        float *p3 = p2 + 256;
        float *p4 = p3 + 256;
        ff_atrac_iqmf(p1, p2, 256, p1, q->units[i].delay_buf1, q->temp_buf);
        ff_atrac_iqmf(p4, p3, 256, p3, q->units[i].delay_buf2, q->temp_buf);
        ff_atrac_iqmf(p1, p3, 512, p1, q->units[i].delay_buf3, q->temp_buf);
    }

    return 0;
}
792 
794  int *got_frame_ptr, AVPacket *avpkt)
795 {
796  const uint8_t *buf = avpkt->data;
797  int buf_size = avpkt->size;
798  ATRAC3Context *q = avctx->priv_data;
799  int ret;
800  const uint8_t *databuf;
801 
802  if (buf_size < avctx->block_align) {
803  av_log(avctx, AV_LOG_ERROR,
804  "Frame too small (%d bytes). Truncated file?\n", buf_size);
805  return AVERROR_INVALIDDATA;
806  }
807 
808  /* get output buffer */
809  frame->nb_samples = SAMPLES_PER_FRAME;
810  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
811  return ret;
812 
813  /* Check if we need to descramble and what buffer to pass on. */
814  if (q->scrambled_stream) {
816  databuf = q->decoded_bytes_buffer;
817  } else {
818  databuf = buf;
819  }
820 
821  ret = decode_frame(avctx, databuf, (float **)frame->extended_data);
822  if (ret) {
823  av_log(avctx, AV_LOG_ERROR, "Frame decoding error!\n");
824  return ret;
825  }
826 
827  *got_frame_ptr = 1;
828 
829  return avctx->block_align;
830 }
831 
833  int *got_frame_ptr, AVPacket *avpkt)
834 {
835  int ret;
836 
837  frame->nb_samples = SAMPLES_PER_FRAME;
838  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
839  return ret;
840 
841  ret = al_decode_frame(avctx, avpkt->data, avpkt->size,
842  (float **)frame->extended_data);
843  if (ret) {
844  av_log(avctx, AV_LOG_ERROR, "Frame decoding error!\n");
845  return ret;
846  }
847 
848  *got_frame_ptr = 1;
849 
850  return avpkt->size;
851 }
852 
854 {
856  const uint8_t (*hufftabs)[2] = atrac3_hufftabs;
857  int i;
858 
861 
862  /* Initialize the VLC tables. */
863  for (i = 0; i < 7; i++) {
867  &hufftabs[0][1], 2,
868  &hufftabs[0][0], 2, 1,
870  hufftabs += huff_tab_sizes[i];
871  table += 256;
872  }
873 }
874 
876 {
877  static AVOnce init_static_once = AV_ONCE_INIT;
878  int i, js_pair, ret;
879  int version, delay, samples_per_frame, frame_factor;
880  const uint8_t *edata_ptr = avctx->extradata;
881  ATRAC3Context *q = avctx->priv_data;
882  AVFloatDSPContext *fdsp;
883  float scale = 1.0 / 32768;
884  int channels = avctx->ch_layout.nb_channels;
885 
887  av_log(avctx, AV_LOG_ERROR, "Channel configuration error!\n");
888  return AVERROR(EINVAL);
889  }
890 
891  /* Take care of the codec-specific extradata. */
892  if (avctx->codec_id == AV_CODEC_ID_ATRAC3AL) {
893  version = 4;
894  samples_per_frame = SAMPLES_PER_FRAME * channels;
895  delay = 0x88E;
896  q->coding_mode = SINGLE;
897  } else if (avctx->extradata_size == 14) {
898  /* Parse the extradata, WAV format */
899  av_log(avctx, AV_LOG_DEBUG, "[0-1] %d\n",
900  bytestream_get_le16(&edata_ptr)); // Unknown value always 1
901  edata_ptr += 4; // samples per channel
902  q->coding_mode = bytestream_get_le16(&edata_ptr);
903  av_log(avctx, AV_LOG_DEBUG,"[8-9] %d\n",
904  bytestream_get_le16(&edata_ptr)); //Dupe of coding mode
905  frame_factor = bytestream_get_le16(&edata_ptr); // Unknown always 1
906  av_log(avctx, AV_LOG_DEBUG,"[12-13] %d\n",
907  bytestream_get_le16(&edata_ptr)); // Unknown always 0
908 
909  /* setup */
910  samples_per_frame = SAMPLES_PER_FRAME * channels;
911  version = 4;
912  delay = 0x88E;
914  q->scrambled_stream = 0;
915 
916  if (avctx->block_align != 96 * channels * frame_factor &&
917  avctx->block_align != 152 * channels * frame_factor &&
918  avctx->block_align != 192 * channels * frame_factor) {
919  av_log(avctx, AV_LOG_ERROR, "Unknown frame/channel/frame_factor "
920  "configuration %d/%d/%d\n", avctx->block_align,
921  channels, frame_factor);
922  return AVERROR_INVALIDDATA;
923  }
924  } else if (avctx->extradata_size == 12 || avctx->extradata_size == 10) {
925  /* Parse the extradata, RM format. */
926  version = bytestream_get_be32(&edata_ptr);
927  samples_per_frame = bytestream_get_be16(&edata_ptr);
928  delay = bytestream_get_be16(&edata_ptr);
929  q->coding_mode = bytestream_get_be16(&edata_ptr);
930  q->scrambled_stream = 1;
931 
932  } else {
933  av_log(avctx, AV_LOG_ERROR, "Unknown extradata size %d.\n",
934  avctx->extradata_size);
935  return AVERROR(EINVAL);
936  }
937 
938  /* Check the extradata */
939 
940  if (version != 4) {
941  av_log(avctx, AV_LOG_ERROR, "Version %d != 4.\n", version);
942  return AVERROR_INVALIDDATA;
943  }
944 
945  if (samples_per_frame != SAMPLES_PER_FRAME * channels) {
946  av_log(avctx, AV_LOG_ERROR, "Unknown amount of samples per frame %d.\n",
947  samples_per_frame);
948  return AVERROR_INVALIDDATA;
949  }
950 
951  if (delay != 0x88E) {
952  av_log(avctx, AV_LOG_ERROR, "Unknown amount of delay %x != 0x88E.\n",
953  delay);
954  return AVERROR_INVALIDDATA;
955  }
956 
957  if (q->coding_mode == SINGLE)
958  av_log(avctx, AV_LOG_DEBUG, "Single channels detected.\n");
959  else if (q->coding_mode == JOINT_STEREO) {
960  if (channels % 2 == 1) { /* Joint stereo channels must be even */
961  av_log(avctx, AV_LOG_ERROR, "Invalid joint stereo channel configuration.\n");
962  return AVERROR_INVALIDDATA;
963  }
964  av_log(avctx, AV_LOG_DEBUG, "Joint stereo detected.\n");
965  } else {
966  av_log(avctx, AV_LOG_ERROR, "Unknown channel coding mode %x!\n",
967  q->coding_mode);
968  return AVERROR_INVALIDDATA;
969  }
970 
971  if (avctx->block_align > 4096 || avctx->block_align <= 0)
972  return AVERROR(EINVAL);
973 
976  if (!q->decoded_bytes_buffer)
977  return AVERROR(ENOMEM);
978 
980 
981  /* initialize the MDCT transform */
982  if ((ret = av_tx_init(&q->mdct_ctx, &q->mdct_fn, AV_TX_FLOAT_MDCT, 1, 256,
983  &scale, AV_TX_FULL_IMDCT)) < 0) {
984  av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n");
985  return ret;
986  }
987 
988  /* init the joint-stereo decoding data */
989  for (js_pair = 0; js_pair < MAX_JS_PAIRS; js_pair++) {
990  q->weighting_delay[js_pair][0] = 0;
991  q->weighting_delay[js_pair][1] = 7;
992  q->weighting_delay[js_pair][2] = 0;
993  q->weighting_delay[js_pair][3] = 7;
994  q->weighting_delay[js_pair][4] = 0;
995  q->weighting_delay[js_pair][5] = 7;
996 
997  for (i = 0; i < 4; i++) {
998  q->matrix_coeff_index_prev[js_pair][i] = 3;
999  q->matrix_coeff_index_now[js_pair][i] = 3;
1000  q->matrix_coeff_index_next[js_pair][i] = 3;
1001  }
1002  }
1003 
1006  if (!fdsp)
1007  return AVERROR(ENOMEM);
1008  q->vector_fmul = fdsp->vector_fmul;
1009  av_free(fdsp);
1010 
1011  q->units = av_calloc(channels, sizeof(*q->units));
1012  if (!q->units)
1013  return AVERROR(ENOMEM);
1014 
1015  ff_thread_once(&init_static_once, atrac3_init_static_data);
1016 
1017  return 0;
1018 }
1019 
1021  .p.name = "atrac3",
1022  CODEC_LONG_NAME("ATRAC3 (Adaptive TRansform Acoustic Coding 3)"),
1023  .p.type = AVMEDIA_TYPE_AUDIO,
1024  .p.id = AV_CODEC_ID_ATRAC3,
1025  .priv_data_size = sizeof(ATRAC3Context),
1027  .close = atrac3_decode_close,
1029  .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
1030  .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
1032  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1033 };
1034 
1036  .p.name = "atrac3al",
1037  CODEC_LONG_NAME("ATRAC3 AL (Adaptive TRansform Acoustic Coding 3 Advanced Lossless)"),
1038  .p.type = AVMEDIA_TYPE_AUDIO,
1039  .p.id = AV_CODEC_ID_ATRAC3AL,
1040  .priv_data_size = sizeof(ATRAC3Context),
1042  .close = atrac3_decode_close,
1044  .p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
1045  .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
1047  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1048 };
mantissa_vlc_tab
static const int8_t mantissa_vlc_tab[18]
Definition: atrac3data.h:82
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:66
level
uint8_t level
Definition: svq3.c:204
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
ff_atrac3_decoder
const FFCodec ff_atrac3_decoder
Definition: atrac3.c:1020
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
imlt
static void imlt(ATRAC3Context *q, float *input, float *output, int odd_band)
Regular 512 points IMDCT without overlapping, with the exception of the swapping of odd bands caused ...
Definition: atrac3.c:134
JOINT_STEREO
#define JOINT_STEREO
Definition: atrac3.c:58
libm.h
mem_internal.h
out
FILE * out
Definition: movenc.c:54
GainBlock::g_block
AtracGainInfo g_block[4]
Definition: atrac3.c:67
thread.h
src1
const pixel * src1
Definition: h264pred_template.c:421
AVTXContext
Definition: tx_priv.h:228
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ATRAC3Context::gb
GetBitContext gb
Definition: atrac3.c:93
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
SAMPLES_PER_FRAME
#define SAMPLES_PER_FRAME
Definition: atrac3.c:61
w
uint8_t w
Definition: llviddspenc.c:38
AVPacket::data
uint8_t * data
Definition: packet.h:374
ChannelUnit::delay_buf1
float delay_buf1[46]
qmf delay buffers
Definition: atrac3.c:87
ChannelUnit::delay_buf3
float delay_buf3[46]
Definition: atrac3.c:89
b
#define b
Definition: input.c:41
channel_weighting
static void channel_weighting(float *su1, float *su2, int *p3)
Definition: atrac3.c:546
table
static const uint16_t table[]
Definition: prosumer.c:205
FFCodec
Definition: codec_internal.h:119
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
c1
static const uint64_t c1
Definition: murmur3.c:51
ATRAC3Context::matrix_coeff_index_now
int matrix_coeff_index_now[MAX_JS_PAIRS][4]
Definition: atrac3.c:103
atrac3al_decode_frame
static int atrac3al_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
Definition: atrac3.c:832
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:311
clc_length_tab
static const uint8_t clc_length_tab[8]
Definition: atrac3data.h:78
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
ChannelUnit::components
TonalComponent components[64]
Definition: atrac3.c:81
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:883
ChannelUnit::spectrum
float spectrum[SAMPLES_PER_FRAME]
Definition: atrac3.c:84
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
MDCT_SIZE
#define MDCT_SIZE
Definition: atrac3.c:62
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:123
inv_max_quant
static const float inv_max_quant[8]
Definition: atrac3data.h:89
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:2059
GetBitContext
Definition: get_bits.h:61
ATRAC3Context::mdct_fn
av_tx_fn mdct_fn
Definition: atrac3.c:119
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:478
atrac3_vlc_table
static VLCElem atrac3_vlc_table[7 *1<< ATRAC3_VLC_BITS]
Definition: atrac3.c:125
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
ff_atrac_init_gain_compensation
av_cold void ff_atrac_init_gain_compensation(AtracGCContext *gctx, int id2exp_offset, int loc_scale)
Initialize gain compensation context.
Definition: atrac.c:67
AV_CODEC_ID_ATRAC3
@ AV_CODEC_ID_ATRAC3
Definition: codec_id.h:466
first
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But first
Definition: rate_distortion.txt:12
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:184
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:127
mantissa_clc_tab
static const int8_t mantissa_clc_tab[4]
Definition: atrac3data.h:80
decode_tonal_components
static int decode_tonal_components(GetBitContext *gb, TonalComponent *components, int num_bands)
Restore the quantized tonal components.
Definition: atrac3.c:330
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:500
AV_TX_FLOAT_MDCT
@ AV_TX_FLOAT_MDCT
Standard MDCT with a sample data type of float, double or int32_t, respecively.
Definition: tx.h:68
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:298
atrac.h
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
TonalComponent::coef
float coef[8]
Definition: atrac3.c:73
s1
#define s1
Definition: regdef.h:38
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:363
AtracGainInfo::num_points
int num_points
number of gain control points
Definition: atrac.h:36
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AtracGCContext
Gain compensation context structure.
Definition: atrac.h:44
channels
channels
Definition: aptx.h:31
decode.h
get_bits.h
atrac3data.h
decode_bytes
static int decode_bytes(const uint8_t *input, uint8_t *out, int bytes)
Definition: atrac3.c:160
cmp
static av_always_inline int cmp(MpegEncContext *s, const int x, const int y, const int subx, const int suby, const int size, const int h, int ref_index, int src_index, me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, const int flags)
compares a block (either a full macroblock or a partition thereof) against a proposed motion-compensa...
Definition: motion_est.c:262
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:264
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:408
av_be2ne32
#define av_be2ne32(x)
Definition: bswap.h:95
add_tonal_components
static int add_tonal_components(float *spectrum, int num_components, TonalComponent *components)
Combine the tonal band spectrum and regular band spectrum.
Definition: atrac3.c:450
AV_TX_FULL_IMDCT
@ AV_TX_FULL_IMDCT
Performs a full inverse MDCT rather than leaving out samples that can be derived through symmetry.
Definition: tx.h:151
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:182
ATRAC3Context::temp_buf
float temp_buf[1070]
Definition: atrac3.c:110
ChannelUnit::delay_buf2
float delay_buf2[46]
Definition: atrac3.c:88
NULL
#define NULL
Definition: coverity.c:32
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
ff_init_vlc_from_lengths
int ff_init_vlc_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:328
INIT_VLC_USE_NEW_STATIC
#define INIT_VLC_USE_NEW_STATIC
Definition: vlc.h:100
ff_atrac_sf_table
float ff_atrac_sf_table[64]
Definition: atrac.c:36
GainBlock
Definition: atrac3.c:66
ATRAC3Context
Definition: atrac3.c:92
AtracGainInfo
Gain control parameters for one subband.
Definition: atrac.h:35
ChannelUnit::gain_block
GainBlock gain_block[2]
Definition: atrac3.c:82
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:787
ATRAC3Context::units
ChannelUnit * units
Definition: atrac3.c:98
AVOnce
#define AVOnce
Definition: thread.h:181
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
read_quant_spectral_coeffs
static void read_quant_spectral_coeffs(GetBitContext *gb, int selector, int coding_flag, int *mantissas, int num_codes)
Mantissa decoding.
Definition: atrac3.c:218
float_dsp.h
ATRAC3Context::matrix_coeff_index_next
int matrix_coeff_index_next[MAX_JS_PAIRS][4]
Definition: atrac3.c:104
AV_CODEC_ID_ATRAC3AL
@ AV_CODEC_ID_ATRAC3AL
Definition: codec_id.h:517
atrac3_decode_frame
static int atrac3_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
Definition: atrac3.c:793
VLC::table_allocated
int table_allocated
Definition: vlc.h:34
s2
#define s2
Definition: regdef.h:39
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1450
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:375
TonalComponent::num_coefs
int num_coefs
Definition: atrac3.c:72
codec_internal.h
AVFloatDSPContext::vector_fmul
void(* vector_fmul)(float *dst, const float *src0, const float *src1, int len)
Calculate the entry wise product of two vectors of floats and store the result in a vector of floats.
Definition: float_dsp.h:38
ChannelUnit::num_components
int num_components
Definition: atrac3.c:78
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1023
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
AtracGainInfo::loc_code
int loc_code[7]
location of gain control points
Definition: atrac.h:38
size
int size
Definition: twinvq_data.h:10344
VLCElem
Definition: vlc.h:27
AVFloatDSPContext
Definition: float_dsp.h:24
init_imdct_window
static av_cold void init_imdct_window(void)
Definition: atrac3.c:183
TonalComponent::pos
int pos
Definition: atrac3.c:71
ChannelUnit::imdct_buf
float imdct_buf[SAMPLES_PER_FRAME]
Definition: atrac3.c:85
decode_spectrum
static int decode_spectrum(GetBitContext *gb, float *output)
Restore the quantized band spectrum coefficients.
Definition: atrac3.c:272
get_channel_weights
static void get_channel_weights(int index, int flag, float ch[2])
Definition: atrac3.c:533
ff_atrac_generate_tables
av_cold void ff_atrac_generate_tables(void)
Generate common tables.
Definition: atrac.c:61
attributes.h
ChannelUnit::bands_coded
int bands_coded
Definition: atrac3.c:77
version
version
Definition: libkvazaar.c:313
MAX_JS_PAIRS
#define MAX_JS_PAIRS
Definition: atrac3.c:56
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
M_PI
#define M_PI
Definition: mathematics.h:52
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:294
ATRAC3_VLC_BITS
#define ATRAC3_VLC_BITS
Definition: atrac3.c:64
flag
#define flag(name)
Definition: cbs_av1.c:553
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:116
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:499
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
ATRAC3Context::coding_mode
int coding_mode
stream data
Definition: atrac3.c:96
ATRAC3Context::mdct_ctx
AVTXContext * mdct_ctx
Definition: atrac3.c:118
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:211
len
int len
Definition: vorbis_enc_data.h:426
atrac3_decode_close
static av_cold int atrac3_decode_close(AVCodecContext *avctx)
Definition: atrac3.c:198
ATRAC3Context::scrambled_stream
int scrambled_stream
extradata
Definition: atrac3.c:114
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
decode_channel_sound_unit
static int decode_channel_sound_unit(ATRAC3Context *q, GetBitContext *gb, ChannelUnit *snd, float *output, int channel_num, int coding_mode)
Decode a Sound Unit.
Definition: atrac3.c:577
huff_tab_sizes
static const uint8_t huff_tab_sizes[7]
Definition: atrac3data.h:72
avcodec.h
decode_frame
static int decode_frame(AVCodecContext *avctx, const uint8_t *databuf, float **out_samples)
Definition: atrac3.c:644
atrac3_decode_init
static av_cold int atrac3_decode_init(AVCodecContext *avctx)
Definition: atrac3.c:875
ret
ret
Definition: filter_design.txt:187
AVCodecContext::block_align
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs.
Definition: avcodec.h:1052
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
pos
unsigned int pos
Definition: spdifenc.c:412
AtracGainInfo::lev_code
int lev_code[7]
level at corresponding control point
Definition: atrac.h:37
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AVCodecContext
main external API structure.
Definition: avcodec.h:398
c2
static const uint64_t c2
Definition: murmur3.c:52
ff_atrac_gain_compensation
void ff_atrac_gain_compensation(AtracGCContext *gctx, float *in, float *prev, AtracGainInfo *gc_now, AtracGainInfo *gc_next, int num_samples, float *out)
Apply gain compensation and perform the MDCT overlapping part.
Definition: atrac.c:85
VLC
Definition: vlc.h:31
spectral_coeff_tab
static VLC spectral_coeff_tab[7]
Definition: atrac3.c:126
TonalComponent
Definition: atrac3.c:70
subband_tab
static const uint16_t subband_tab[33]
Definition: atrac3data.h:94
al_decode_frame
static int al_decode_frame(AVCodecContext *avctx, const uint8_t *databuf, int size, float **out_samples)
Definition: atrac3.c:758
ChannelUnit::prev_frame
float prev_frame[SAMPLES_PER_FRAME]
Definition: atrac3.c:79
VLC::table
VLCElem * table
Definition: vlc.h:33
ChannelUnit::gc_blk_switch
int gc_blk_switch
Definition: atrac3.c:80
src0
const pixel *const src0
Definition: h264pred_template.c:420
ATRAC3Context::matrix_coeff_index_prev
int matrix_coeff_index_prev[MAX_JS_PAIRS][4]
joint-stereo related variables
Definition: atrac3.c:102
AV_CODEC_CAP_SUBFRAMES
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket Normally demuxers return one frame at a time,...
Definition: codec.h:100
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:288
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
ATRAC3Context::weighting_delay
int weighting_delay[MAX_JS_PAIRS][6]
Definition: atrac3.c:105
INTERPOLATE
#define INTERPOLATE(old, new, nsample)
Definition: atrac3.c:468
bytestream.h
reverse_matrixing
static void reverse_matrixing(float *su1, float *su2, int *prev_code, int *curr_code)
Definition: atrac3.c:471
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
matrix_coeffs
static const float matrix_coeffs[8]
Definition: atrac3data.h:103
atrac3_init_static_data
static av_cold void atrac3_init_static_data(void)
Definition: atrac3.c:853
ATRAC3Context::decoded_bytes_buffer
uint8_t * decoded_bytes_buffer
data buffers
Definition: atrac3.c:109
ff_atrac3al_decoder
const FFCodec ff_atrac3al_decoder
Definition: atrac3.c:1035
SINGLE
#define SINGLE
Definition: atrac3.c:59
ChannelUnit
Definition: atrac3.c:76
ATRAC3Context::vector_fmul
void(* vector_fmul)(float *dst, const float *src0, const float *src1, int len)
Definition: atrac3.c:120
ff_atrac_iqmf
void ff_atrac_iqmf(float *inlo, float *inhi, unsigned int nIn, float *pOut, float *delayBuf, float *temp)
Quadrature mirror synthesis filter.
Definition: atrac.c:128
decode_gain_control
static int decode_gain_control(GetBitContext *gb, GainBlock *block, int num_bands)
Decode gain parameters for the coded bands.
Definition: atrac3.c:414
ATRAC3Context::gainc_ctx
AtracGCContext gainc_ctx
Definition: atrac3.c:117
mdct_window
static float mdct_window[MDCT_SIZE]
Definition: atrac3.c:124
tx.h
MAX_CHANNELS
#define MAX_CHANNELS
Definition: atrac3.c:55
atrac3_hufftabs
static const uint8_t atrac3_hufftabs[][2]
Definition: atrac3data.h:35