FFmpeg
atrac3plusdec.c
/*
 * ATRAC3+ compatible decoder
 *
 * Copyright (c) 2010-2013 Maxim Poliakovski
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sony ATRAC3+ compatible decoder.
 *
 * Container formats used to store its data:
 * RIFF WAV (.at3) and Sony OpenMG (.oma, .aa3).
 *
 * Technical description of this codec can be found here:
 * http://wiki.multimedia.cx/index.php?title=ATRAC3plus
 *
 * Kudos to Benjamin Larsson and Michael Karcher
 * for their precious technical help!
 */
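
/*
 * Decoding pipeline, as implemented below: each packet is split into channel
 * units, whose residual spectrum is decoded and dequantized
 * (decode_residual_spectrum()); the spectrum is then brought back to the time
 * domain and gain-compensated, tonal components are resynthesized, and the
 * subbands are merged by the inverse polyphase quadrature filter bank
 * (reconstruct_frame()).
 */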

#include <stdint.h>
#include <string.h>

#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "get_bits.h"
#include "internal.h"
#include "atrac.h"
#include "atrac3plus.h"

typedef struct ATRAC3PContext {
    GetBitContext gb;
    AVFloatDSPContext *fdsp;

    DECLARE_ALIGNED(32, float, samples)[2][ATRAC3P_FRAME_SAMPLES];  ///< quantized MDCT spectrum
    DECLARE_ALIGNED(32, float, mdct_buf)[2][ATRAC3P_FRAME_SAMPLES]; ///< output of the IMDCT
    DECLARE_ALIGNED(32, float, time_buf)[2][ATRAC3P_FRAME_SAMPLES]; ///< output of the gain compensation
    DECLARE_ALIGNED(32, float, outp_buf)[2][ATRAC3P_FRAME_SAMPLES];

    AtracGCContext gainc_ctx;   ///< gain compensation context
    FFTContext mdct_ctx;
    FFTContext ipqf_dct_ctx;    ///< IDCT context used by IPQF

    Atrac3pChanUnitCtx *ch_units;   ///< global channel units

    int num_channel_blocks;     ///< number of channel blocks
    uint8_t channel_blocks[5];  ///< channel configuration descriptor
    uint64_t my_channel_layout; ///< current channel layout
} ATRAC3PContext;

static av_cold int atrac3p_decode_close(AVCodecContext *avctx)
{
    ATRAC3PContext *ctx = avctx->priv_data;

    av_freep(&ctx->ch_units);
    av_freep(&ctx->fdsp);

    ff_mdct_end(&ctx->mdct_ctx);
    ff_mdct_end(&ctx->ipqf_dct_ctx);

    return 0;
}

static av_cold int set_channel_params(ATRAC3PContext *ctx,
                                      AVCodecContext *avctx)
{
    memset(ctx->channel_blocks, 0, sizeof(ctx->channel_blocks));

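    /* Each supported channel count maps to a fixed sequence of mono/stereo
     * channel blocks; the channel units signalled in the bitstream must match
     * this sequence (checked in atrac3p_decode_frame()). */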
    switch (avctx->channels) {
    case 1:
        if (avctx->channel_layout != AV_CH_FRONT_LEFT)
            avctx->channel_layout = AV_CH_LAYOUT_MONO;

        ctx->num_channel_blocks = 1;
        ctx->channel_blocks[0]  = CH_UNIT_MONO;
        break;
    case 2:
        avctx->channel_layout   = AV_CH_LAYOUT_STEREO;
        ctx->num_channel_blocks = 1;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        break;
    case 3:
        avctx->channel_layout   = AV_CH_LAYOUT_SURROUND;
        ctx->num_channel_blocks = 2;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        ctx->channel_blocks[1]  = CH_UNIT_MONO;
        break;
    case 4:
        avctx->channel_layout   = AV_CH_LAYOUT_4POINT0;
        ctx->num_channel_blocks = 3;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        ctx->channel_blocks[1]  = CH_UNIT_MONO;
        ctx->channel_blocks[2]  = CH_UNIT_MONO;
        break;
    case 6:
        avctx->channel_layout   = AV_CH_LAYOUT_5POINT1_BACK;
        ctx->num_channel_blocks = 4;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        ctx->channel_blocks[1]  = CH_UNIT_MONO;
        ctx->channel_blocks[2]  = CH_UNIT_STEREO;
        ctx->channel_blocks[3]  = CH_UNIT_MONO;
        break;
    case 7:
        avctx->channel_layout   = AV_CH_LAYOUT_6POINT1_BACK;
        ctx->num_channel_blocks = 5;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        ctx->channel_blocks[1]  = CH_UNIT_MONO;
        ctx->channel_blocks[2]  = CH_UNIT_STEREO;
        ctx->channel_blocks[3]  = CH_UNIT_MONO;
        ctx->channel_blocks[4]  = CH_UNIT_MONO;
        break;
    case 8:
        avctx->channel_layout   = AV_CH_LAYOUT_7POINT1;
        ctx->num_channel_blocks = 5;
        ctx->channel_blocks[0]  = CH_UNIT_STEREO;
        ctx->channel_blocks[1]  = CH_UNIT_MONO;
        ctx->channel_blocks[2]  = CH_UNIT_STEREO;
        ctx->channel_blocks[3]  = CH_UNIT_STEREO;
        ctx->channel_blocks[4]  = CH_UNIT_MONO;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "Unsupported channel count: %d!\n", avctx->channels);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

static av_cold int atrac3p_decode_init(AVCodecContext *avctx)
{
    ATRAC3PContext *ctx = avctx->priv_data;
    int i, ch, ret;

    if (!avctx->block_align) {
        av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
        return AVERROR(EINVAL);
    }

    ff_atrac3p_init_vlcs();

    /* initialize IPQF */
    ff_mdct_init(&ctx->ipqf_dct_ctx, 5, 1, 32.0 / 32768.0);
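    /* Note: nbits = 5 requests a 32-point (1 << 5) inverse transform, scaled
     * by 32/32768; this context is the DCT used by ff_atrac3p_ipqf() for
     * subband synthesis below. */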

    ff_atrac3p_init_imdct(avctx, &ctx->mdct_ctx);

    ff_atrac_init_gain_compensation(&ctx->gainc_ctx, 6, 2);

    ff_atrac3p_init_wave_synth();

    if ((ret = set_channel_params(ctx, avctx)) < 0)
        return ret;

    ctx->my_channel_layout = avctx->channel_layout;

    ctx->ch_units = av_mallocz_array(ctx->num_channel_blocks, sizeof(*ctx->ch_units));
    ctx->fdsp     = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);

    if (!ctx->ch_units || !ctx->fdsp) {
        atrac3p_decode_close(avctx);
        return AVERROR(ENOMEM);
    }

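    /* Window shape, gain and tone parameters are double-buffered: the
     * "current" and "previous" pointers set up below address the two halves
     * of the *_hist arrays and are swapped after every frame (see the
     * FFSWAPs at the end of reconstruct_frame()). */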
    for (i = 0; i < ctx->num_channel_blocks; i++) {
        for (ch = 0; ch < 2; ch++) {
            ctx->ch_units[i].channels[ch].ch_num          = ch;
            ctx->ch_units[i].channels[ch].wnd_shape       = &ctx->ch_units[i].channels[ch].wnd_shape_hist[0][0];
            ctx->ch_units[i].channels[ch].wnd_shape_prev  = &ctx->ch_units[i].channels[ch].wnd_shape_hist[1][0];
            ctx->ch_units[i].channels[ch].gain_data       = &ctx->ch_units[i].channels[ch].gain_data_hist[0][0];
            ctx->ch_units[i].channels[ch].gain_data_prev  = &ctx->ch_units[i].channels[ch].gain_data_hist[1][0];
            ctx->ch_units[i].channels[ch].tones_info      = &ctx->ch_units[i].channels[ch].tones_info_hist[0][0];
            ctx->ch_units[i].channels[ch].tones_info_prev = &ctx->ch_units[i].channels[ch].tones_info_hist[1][0];
        }

        ctx->ch_units[i].waves_info      = &ctx->ch_units[i].wave_synth_hist[0];
        ctx->ch_units[i].waves_info_prev = &ctx->ch_units[i].wave_synth_hist[1];
    }

    avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;

    return 0;
}

static void decode_residual_spectrum(ATRAC3PContext *ctx, Atrac3pChanUnitCtx *ch_unit,
                                     float out[2][ATRAC3P_FRAME_SAMPLES],
                                     int num_channels,
                                     AVCodecContext *avctx)
{
    int i, sb, ch, qu, nspeclines, RNG_index;
    float *dst, q;
    int16_t *src;
    /* calculate RNG table index for each subband */
    int sb_RNG_index[ATRAC3P_SUBBANDS] = { 0 };

    if (ch_unit->mute_flag) {
        for (ch = 0; ch < num_channels; ch++)
            memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));
        return;
    }

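    /* The per-subband RNG indexes depend only on the transmitted scale
     * factor indexes of both channels, so the noise dithering applied by
     * ff_atrac3p_power_compensation() below is fully determined by the
     * bitstream. */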
    for (qu = 0, RNG_index = 0; qu < ch_unit->used_quant_units; qu++)
        RNG_index += ch_unit->channels[0].qu_sf_idx[qu] +
                     ch_unit->channels[1].qu_sf_idx[qu];

    for (sb = 0; sb < ch_unit->num_coded_subbands; sb++, RNG_index += 128)
        sb_RNG_index[sb] = RNG_index & 0x3FC;

    /* inverse quant and power compensation */
    for (ch = 0; ch < num_channels; ch++) {
        /* clear channel's residual spectrum */
        memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));

        for (qu = 0; qu < ch_unit->used_quant_units; qu++) {
            src        = &ch_unit->channels[ch].spectrum[ff_atrac3p_qu_to_spec_pos[qu]];
            dst        = &out[ch][ff_atrac3p_qu_to_spec_pos[qu]];
            nspeclines = ff_atrac3p_qu_to_spec_pos[qu + 1] -
                         ff_atrac3p_qu_to_spec_pos[qu];

            if (ch_unit->channels[ch].qu_wordlen[qu] > 0) {
                q = ff_atrac3p_sf_tab[ch_unit->channels[ch].qu_sf_idx[qu]] *
                    ff_atrac3p_mant_tab[ch_unit->channels[ch].qu_wordlen[qu]];
                for (i = 0; i < nspeclines; i++)
                    dst[i] = src[i] * q;
            }
        }

        for (sb = 0; sb < ch_unit->num_coded_subbands; sb++)
            ff_atrac3p_power_compensation(ch_unit, ctx->fdsp, ch, &out[ch][0],
                                          sb_RNG_index[sb], sb);
    }

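    /* Joint-stereo units carry per-subband flags requesting channel swapping
     * (swap_channels) and sign inversion of the second channel's
     * coefficients (negate_coeffs). */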
    if (ch_unit->unit_type == CH_UNIT_STEREO) {
        for (sb = 0; sb < ch_unit->num_coded_subbands; sb++) {
            if (ch_unit->swap_channels[sb]) {
                for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)
                    FFSWAP(float, out[0][sb * ATRAC3P_SUBBAND_SAMPLES + i],
                                  out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i]);
            }

            /* flip coefficients' sign if requested */
            if (ch_unit->negate_coeffs[sb])
                for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)
                    out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i] = -(out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i]);
        }
    }
}

static void reconstruct_frame(ATRAC3PContext *ctx, Atrac3pChanUnitCtx *ch_unit,
                              int num_channels, AVCodecContext *avctx)
{
    int ch, sb;

    for (ch = 0; ch < num_channels; ch++) {
        for (sb = 0; sb < ch_unit->num_subbands; sb++) {
            /* inverse transform and windowing */
            ff_atrac3p_imdct(ctx->fdsp, &ctx->mdct_ctx,
                             &ctx->samples[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                             &ctx->mdct_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                             (ch_unit->channels[ch].wnd_shape_prev[sb] << 1) +
                             ch_unit->channels[ch].wnd_shape[sb], sb);

            /* gain compensation and overlapping */
            ff_atrac_gain_compensation(&ctx->gainc_ctx,
                                       &ctx->mdct_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                                       &ch_unit->prev_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                                       &ch_unit->channels[ch].gain_data_prev[sb],
                                       &ch_unit->channels[ch].gain_data[sb],
                                       ATRAC3P_SUBBAND_SAMPLES,
                                       &ctx->time_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES]);
        }

        /* zero unused subbands in both output and overlapping buffers */
        memset(&ch_unit->prev_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES],
               0,
               (ATRAC3P_SUBBANDS - ch_unit->num_subbands) *
               ATRAC3P_SUBBAND_SAMPLES *
               sizeof(ch_unit->prev_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES]));
        memset(&ctx->time_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES],
               0,
               (ATRAC3P_SUBBANDS - ch_unit->num_subbands) *
               ATRAC3P_SUBBAND_SAMPLES *
               sizeof(ctx->time_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES]));

        /* resynthesize and add tonal signal */
        if (ch_unit->waves_info->tones_present ||
            ch_unit->waves_info_prev->tones_present) {
            for (sb = 0; sb < ch_unit->num_subbands; sb++)
                if (ch_unit->channels[ch].tones_info[sb].num_wavs ||
                    ch_unit->channels[ch].tones_info_prev[sb].num_wavs) {
                    ff_atrac3p_generate_tones(ch_unit, ctx->fdsp, ch, sb,
                                              &ctx->time_buf[ch][sb * 128]);
                }
        }

        /* subband synthesis and acoustic signal output */
        ff_atrac3p_ipqf(&ctx->ipqf_dct_ctx, &ch_unit->ipqf_ctx[ch],
                        &ctx->time_buf[ch][0], &ctx->outp_buf[ch][0]);
    }

    /* swap window shape and gain control buffers. */
    for (ch = 0; ch < num_channels; ch++) {
        FFSWAP(uint8_t *, ch_unit->channels[ch].wnd_shape,
                          ch_unit->channels[ch].wnd_shape_prev);
        FFSWAP(AtracGainInfo *, ch_unit->channels[ch].gain_data,
                                ch_unit->channels[ch].gain_data_prev);
        FFSWAP(Atrac3pWavesData *, ch_unit->channels[ch].tones_info,
                                   ch_unit->channels[ch].tones_info_prev);
    }

    FFSWAP(Atrac3pWaveSynthParams *, ch_unit->waves_info, ch_unit->waves_info_prev);
}

static int atrac3p_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
{
    ATRAC3PContext *ctx = avctx->priv_data;
    AVFrame *frame      = data;
    int i, ret, ch_unit_id, ch_block = 0, out_ch_index = 0, channels_to_process;
    float **samples_p = (float **)frame->extended_data;

    frame->nb_samples = ATRAC3P_FRAME_SAMPLES;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    if ((ret = init_get_bits8(&ctx->gb, avpkt->data, avpkt->size)) < 0)
        return ret;

    if (get_bits1(&ctx->gb)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid start bit!\n");
        return AVERROR_INVALIDDATA;
    }

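    /* A frame is a sequence of channel units, each introduced by a 2-bit
     * unit type; decoding stops at the terminator id or when fewer than
     * two bits are left. */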
    while (get_bits_left(&ctx->gb) >= 2 &&
           (ch_unit_id = get_bits(&ctx->gb, 2)) != CH_UNIT_TERMINATOR) {
        if (ch_unit_id == CH_UNIT_EXTENSION) {
            avpriv_report_missing_feature(avctx, "Channel unit extension");
            return AVERROR_PATCHWELCOME;
        }
        if (ch_block >= ctx->num_channel_blocks ||
            ctx->channel_blocks[ch_block] != ch_unit_id) {
            av_log(avctx, AV_LOG_ERROR,
                   "Frame data doesn't match channel configuration!\n");
            return AVERROR_INVALIDDATA;
        }

        ctx->ch_units[ch_block].unit_type = ch_unit_id;
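        /* CH_UNIT_MONO = 0 and CH_UNIT_STEREO = 1, so the unit type id plus
         * one gives the number of channels carried by this unit. */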
        channels_to_process = ch_unit_id + 1;

        if ((ret = ff_atrac3p_decode_channel_unit(&ctx->gb,
                                                  &ctx->ch_units[ch_block],
                                                  channels_to_process,
                                                  avctx)) < 0)
            return ret;

        decode_residual_spectrum(ctx, &ctx->ch_units[ch_block], ctx->samples,
                                 channels_to_process, avctx);
        reconstruct_frame(ctx, &ctx->ch_units[ch_block],
                          channels_to_process, avctx);

        for (i = 0; i < channels_to_process; i++)
            memcpy(samples_p[out_ch_index + i], ctx->outp_buf[i],
                   ATRAC3P_FRAME_SAMPLES * sizeof(**samples_p));

        ch_block++;
        out_ch_index += channels_to_process;
    }

    *got_frame_ptr = 1;

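    /* For plain ATRAC3+ report at most one block_align's worth of bytes as
     * consumed; ATRAC3+AL packets are consumed whole. */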
    return avctx->codec_id == AV_CODEC_ID_ATRAC3P ? FFMIN(avctx->block_align, avpkt->size) : avpkt->size;
}

AVCodec ff_atrac3p_decoder = {
    .name           = "atrac3plus",
    .long_name      = NULL_IF_CONFIG_SMALL("ATRAC3+ (Adaptive TRansform Acoustic Coding 3+)"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_ATRAC3P,
    .capabilities   = AV_CODEC_CAP_DR1,
    .priv_data_size = sizeof(ATRAC3PContext),
    .init           = atrac3p_decode_init,
    .close          = atrac3p_decode_close,
    .decode         = atrac3p_decode_frame,
};

AVCodec ff_atrac3pal_decoder = {
    .name           = "atrac3plusal",
    .long_name      = NULL_IF_CONFIG_SMALL("ATRAC3+ AL (Adaptive TRansform Acoustic Coding 3+ Advanced Lossless)"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_ATRAC3PAL,
    .capabilities   = AV_CODEC_CAP_DR1,
    .priv_data_size = sizeof(ATRAC3PContext),
    .init           = atrac3p_decode_init,
    .close          = atrac3p_decode_close,
    .decode         = atrac3p_decode_frame,
};