hcadec.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/crc.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/tx.h"

#include "avcodec.h"
#include "get_bits.h"
#include "internal.h"
#include "hca_data.h"

typedef struct ChannelContext {
    float    base[128];
    DECLARE_ALIGNED(32, float, imdct_in)[128];
    DECLARE_ALIGNED(32, float, imdct_out)[128];
    DECLARE_ALIGNED(32, float, imdct_prev)[128];
    int8_t   scale_factors[128];
    uint8_t  scale[128];
    int8_t   intensity[8];
    int8_t  *hfr_scale;
    unsigned count;
    int      chan_type;
} ChannelContext;

typedef struct HCAContext {
    GetBitContext gb;

    const AVCRC *crc_table;

    ChannelContext ch[16];

    uint8_t ath[128];

    int      ath_type;
    unsigned hfr_group_count;
    uint8_t  track_count;
    uint8_t  channel_config;
    uint8_t  total_band_count;
    uint8_t  base_band_count;
    uint8_t  stereo_band_count;
    uint8_t  bands_per_hfr_group;

    av_tx_fn           tx_fn;
    AVTXContext       *tx_ctx;
    AVFloatDSPContext *fdsp;
} HCAContext;

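/*
 * Build the absolute-threshold-of-hearing table: step through
 * ath_base_curve at a rate proportional to the sample rate and saturate
 * the remaining entries to 0xFF once the curve is exhausted.
 */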
static void ath_init1(uint8_t *ath, int sample_rate)
{
    unsigned int index;
    unsigned int acc = 0;

    for (int i = 0; i < 128; i++) {
        acc += sample_rate;
        index = acc >> 13;

        if (index >= 654) {
            memset(ath + i, 0xFF, (128 - i));
            break;
        }

        ath[i] = ath_base_curve[index];
    }
}

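/* Initialize the ATH table for the given mode: type 0 performs no
 * initialization, type 1 derives the table from the base curve, and any
 * other value is rejected as invalid data. */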
static int ath_init(uint8_t *ath, int type, int sample_rate)
{
    switch (type) {
    case 0:
        /* nothing to do */
        break;
    case 1:
        ath_init1(ath, sample_rate);
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

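/* Integer division rounding up, returning 0 when the divisor is 0. */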
static inline unsigned ceil2(unsigned a, unsigned b)
{
    return (b > 0) ? (a / b + ((a % b) ? 1 : 0)) : 0;
}

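/*
 * Parse the HCA header carried in extradata: the mandatory 'fmt' chunk,
 * one of 'comp'/'dec' describing the band layout, and optional chunks
 * ('vbr', 'ath', 'rva', 'comm', 'ciph', 'loop', 'pad'), then derive the
 * per-channel band types and set up the DSP and MDCT contexts.
 */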
static av_cold int decode_init(AVCodecContext *avctx)
{
    HCAContext *c = avctx->priv_data;
    GetBitContext *gb = &c->gb;
    int8_t r[16] = { 0 };
    float scale = 1.f / 8.f;
    unsigned b, chunk;
    int version, ret;

    avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
    c->crc_table = av_crc_get_table(AV_CRC_16_ANSI_LE);

    if (avctx->channels <= 0 || avctx->channels > 16)
        return AVERROR(EINVAL);

    ret = init_get_bits8(gb, avctx->extradata, avctx->extradata_size);
    if (ret < 0)
        return ret;
    skip_bits_long(gb, 32);
    version = get_bits(gb, 16);
    skip_bits_long(gb, 16);

    c->ath_type = version >= 0x200 ? 0 : 1;

    if (get_bits_long(gb, 32) != MKBETAG('f', 'm', 't', 0))
        return AVERROR_INVALIDDATA;
    skip_bits_long(gb, 32);
    skip_bits_long(gb, 32);
    skip_bits_long(gb, 32);

    chunk = get_bits_long(gb, 32);
    if (chunk == MKBETAG('c', 'o', 'm', 'p')) {
        skip_bits_long(gb, 16);
        skip_bits_long(gb, 8);
        skip_bits_long(gb, 8);
        c->track_count         = get_bits(gb, 8);
        c->channel_config      = get_bits(gb, 8);
        c->total_band_count    = get_bits(gb, 8);
        c->base_band_count     = get_bits(gb, 8);
        c->stereo_band_count   = get_bits(gb, 8);
        c->bands_per_hfr_group = get_bits(gb, 8);
    } else if (chunk == MKBETAG('d', 'e', 'c', 0)) {
        skip_bits_long(gb, 16);
        skip_bits_long(gb, 8);
        skip_bits_long(gb, 8);
        c->total_band_count = get_bits(gb, 8) + 1;
        c->base_band_count  = get_bits(gb, 8) + 1;
        c->track_count      = get_bits(gb, 4);
        c->channel_config   = get_bits(gb, 4);
        if (!get_bits(gb, 8))
            c->base_band_count = c->total_band_count;
        c->stereo_band_count   = c->total_band_count - c->base_band_count;
        c->bands_per_hfr_group = 0;
    } else
        return AVERROR_INVALIDDATA;

    while (get_bits_left(gb) >= 32) {
        chunk = get_bits_long(gb, 32);
        if (chunk == MKBETAG('v', 'b', 'r', 0)) {
            skip_bits_long(gb, 16);
            skip_bits_long(gb, 16);
        } else if (chunk == MKBETAG('a', 't', 'h', 0)) {
            c->ath_type = get_bits(gb, 16);
        } else if (chunk == MKBETAG('r', 'v', 'a', 0)) {
            skip_bits_long(gb, 32);
        } else if (chunk == MKBETAG('c', 'o', 'm', 'm')) {
            skip_bits_long(gb, get_bits(gb, 8) * 8);
        } else if (chunk == MKBETAG('c', 'i', 'p', 'h')) {
            skip_bits_long(gb, 16);
        } else if (chunk == MKBETAG('l', 'o', 'o', 'p')) {
            skip_bits_long(gb, 32);
            skip_bits_long(gb, 32);
            skip_bits_long(gb, 16);
            skip_bits_long(gb, 16);
        } else if (chunk == MKBETAG('p', 'a', 'd', 0)) {
            break;
        } else {
            break;
        }
    }

    ret = ath_init(c->ath, c->ath_type, avctx->sample_rate);
    if (ret < 0)
        return ret;

    if (!c->track_count)
        c->track_count = 1;

191 
192  b = avctx->channels / c->track_count;
193  if (c->stereo_band_count && b > 1) {
194  int8_t *x = r;
195 
196  for (int i = 0; i < c->track_count; i++, x+=b) {
197  switch (b) {
198  case 2:
199  case 3:
200  x[0] = 1;
201  x[1] = 2;
202  break;
203  case 4:
204  x[0]=1; x[1] = 2;
205  if (c->channel_config == 0) {
206  x[2]=1;
207  x[3]=2;
208  }
209  break;
210  case 5:
211  x[0]=1; x[1] = 2;
212  if (c->channel_config <= 2) {
213  x[3]=1;
214  x[4]=2;
215  }
216  break;
217  case 6:
218  case 7:
219  x[0] = 1; x[1] = 2; x[4] = 1; x[5] = 2;
220  break;
221  case 8:
222  x[0] = 1; x[1] = 2; x[4] = 1; x[5] = 2; x[6] = 1; x[7] = 2;
223  break;
224  }
225  }
226  }
227 
    if (c->total_band_count < c->base_band_count)
        return AVERROR_INVALIDDATA;

    c->hfr_group_count = ceil2(c->total_band_count - (c->base_band_count + c->stereo_band_count),
                               c->bands_per_hfr_group);

    if (c->base_band_count + c->stereo_band_count + (unsigned long)c->hfr_group_count > 128ULL)
        return AVERROR_INVALIDDATA;

    for (int i = 0; i < avctx->channels; i++) {
        c->ch[i].chan_type = r[i];
        c->ch[i].count     = c->base_band_count + ((r[i] != 2) ? c->stereo_band_count : 0);
        c->ch[i].hfr_scale = &c->ch[i].scale_factors[c->base_band_count + c->stereo_band_count];
        if (c->ch[i].count > 128)
            return AVERROR_INVALIDDATA;
    }

    c->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
    if (!c->fdsp)
        return AVERROR(ENOMEM);

    return av_tx_init(&c->tx_ctx, &c->tx_fn, AV_TX_FLOAT_MDCT, 1, 128, &scale, 0);
}

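/* Inverse MDCT of one 128-coefficient block, windowed and overlap-added
 * with the previous block; the raw transform output is kept for the next
 * call. */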
static void run_imdct(HCAContext *c, ChannelContext *ch, int index, float *out)
{
    c->tx_fn(c->tx_ctx, ch->imdct_out, ch->imdct_in, sizeof(float));

    c->fdsp->vector_fmul_window(out, ch->imdct_prev + (128 >> 1),
                                ch->imdct_out, window, 128 >> 1);

    memcpy(ch->imdct_prev, ch->imdct_out, 128 * sizeof(float));
}

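/* Reconstruct the second channel of an intensity-stereo pair from the
 * first one, scaling both sides by ratios looked up from the per-block
 * intensity index. */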
static void apply_intensity_stereo(HCAContext *s, ChannelContext *ch1, ChannelContext *ch2,
                                   int index, unsigned band_count, unsigned base_band_count,
                                   unsigned stereo_band_count)
{
    float ratio_l = intensity_ratio_table[ch1->intensity[index]];
    float ratio_r = ratio_l - 2.0f;
    float *c1 = &ch1->imdct_in[base_band_count];
    float *c2 = &ch2->imdct_in[base_band_count];

    if (ch1->chan_type != 1 || !stereo_band_count)
        return;

    for (int i = 0; i < band_count; i++) {
        *(c2++)  = *c1 * ratio_r;
        *(c1++) *= ratio_l;
    }
}

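/* High-frequency reconstruction: mirror the highest coded bands upwards,
 * rescaling each copy with a scale_conversion_table entry derived from the
 * per-group HFR scale and the source band's scale factor. */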
static void reconstruct_hfr(HCAContext *s, ChannelContext *ch,
                            unsigned hfr_group_count,
                            unsigned bands_per_hfr_group,
                            unsigned start_band, unsigned total_band_count)
{
    if (ch->chan_type == 2 || !bands_per_hfr_group)
        return;

    for (int i = 0, k = start_band, l = start_band - 1; i < hfr_group_count; i++) {
        for (int j = 0; j < bands_per_hfr_group && k < total_band_count; j++, k++, l--) {
            ch->imdct_in[k] = scale_conversion_table[ch->hfr_scale[i] - ch->scale_factors[l]] * ch->imdct_in[l];
        }
    }

    ch->imdct_in[127] = 0;
}

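/* Read the quantized spectral coefficients for one channel and scale them
 * by the per-band base gains; bands above ch->count are zeroed. */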
static void dequantize_coefficients(HCAContext *c, ChannelContext *ch)
{
    GetBitContext *gb = &c->gb;

    for (int i = 0; i < ch->count; i++) {
        unsigned scale = ch->scale[i];
        int nb_bits = max_bits_table[scale];
        int value = get_bitsz(gb, nb_bits);
        float factor;

        if (scale > 7) {
            value = (1 - ((value & 1) << 1)) * (value >> 1);
            if (!value)
                skip_bits_long(gb, -1);
            factor = value;
        } else {
            value += scale << 4;
            skip_bits_long(gb, quant_spectrum_bits[value] - nb_bits);
            factor = quant_spectrum_value[value];
        }
        ch->imdct_in[i] = factor * ch->base[i];
    }

    memset(ch->imdct_in + ch->count, 0, sizeof(ch->imdct_in) - ch->count * sizeof(ch->imdct_in[0]));
}

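/*
 * Unpack the per-frame, per-channel side data: scale factors (raw or
 * delta coded), intensity indices or HFR scales depending on the channel
 * type, and the resulting per-band quantizer resolutions and base gains.
 */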
static void unpack(HCAContext *c, ChannelContext *ch,
                   unsigned hfr_group_count,
                   int packed_noise_level,
                   const uint8_t *ath)
{
    GetBitContext *gb = &c->gb;
    int delta_bits = get_bits(gb, 3);

    if (delta_bits > 5) {
        for (int i = 0; i < ch->count; i++)
            ch->scale_factors[i] = get_bits(gb, 6);
    } else if (delta_bits) {
        int factor = get_bits(gb, 6);
        int max_value = (1 << delta_bits) - 1;
        int half_max = max_value >> 1;

        ch->scale_factors[0] = factor;
        for (int i = 1; i < ch->count; i++) {
            int delta = get_bits(gb, delta_bits);

            if (delta == max_value) {
                factor = get_bits(gb, 6);
            } else {
                factor += delta - half_max;
            }
            factor = av_clip_uintp2(factor, 6);

            ch->scale_factors[i] = factor;
        }
    } else {
        memset(ch->scale_factors, 0, 128);
    }

    if (ch->chan_type == 2) {
        ch->intensity[0] = get_bits(gb, 4);
        if (ch->intensity[0] < 15) {
            for (int i = 1; i < 8; i++)
                ch->intensity[i] = get_bits(gb, 4);
        }
    } else {
        for (int i = 0; i < hfr_group_count; i++)
            ch->hfr_scale[i] = get_bits(gb, 6);
    }

    for (int i = 0; i < ch->count; i++) {
        int scale = ch->scale_factors[i];

        if (scale) {
            scale = c->ath[i] + ((packed_noise_level + i) >> 8) - ((scale * 5) >> 1) + 2;
            scale = scale_table[av_clip(scale, 0, 58)];
        }
        ch->scale[i] = scale;
    }

    memset(ch->scale + ch->count, 0, sizeof(ch->scale) - ch->count);

    for (int i = 0; i < ch->count; i++)
        ch->base[i] = dequantizer_scaling_table[ch->scale_factors[i]] * quant_step_size[ch->scale[i]];
}

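/* Decode one HCA frame: optional CRC check, sync word, then eight blocks
 * of 128 samples per channel (1024 samples total) run through
 * dequantization, HFR reconstruction, intensity stereo and the windowed
 * IMDCT. */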
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;
    HCAContext *c = avctx->priv_data;
    int ch, ret, packed_noise_level;
    GetBitContext *gb = &c->gb;
    float **samples;

    if (avctx->err_recognition & AV_EF_CRCCHECK) {
        if (av_crc(c->crc_table, 0, avpkt->data, avpkt->size))
            return AVERROR_INVALIDDATA;
    }

    if ((ret = init_get_bits8(gb, avpkt->data, avpkt->size)) < 0)
        return ret;

    if (get_bits(gb, 16) != 0xFFFF)
        return AVERROR_INVALIDDATA;

    frame->nb_samples = 1024;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    samples = (float **)frame->extended_data;

    packed_noise_level = (get_bits(gb, 9) << 8) - get_bits(gb, 7);

    for (ch = 0; ch < avctx->channels; ch++)
        unpack(c, &c->ch[ch], c->hfr_group_count, packed_noise_level, c->ath);

    for (int i = 0; i < 8; i++) {
        for (ch = 0; ch < avctx->channels; ch++)
            dequantize_coefficients(c, &c->ch[ch]);
        for (ch = 0; ch < avctx->channels; ch++)
            reconstruct_hfr(c, &c->ch[ch], c->hfr_group_count, c->bands_per_hfr_group,
                            c->stereo_band_count + c->base_band_count, c->total_band_count);
        for (ch = 0; ch < avctx->channels - 1; ch++)
            apply_intensity_stereo(c, &c->ch[ch], &c->ch[ch+1], i,
                                   c->total_band_count - c->base_band_count,
                                   c->base_band_count, c->stereo_band_count);
        for (ch = 0; ch < avctx->channels; ch++)
            run_imdct(c, &c->ch[ch], i, samples[ch] + i * 128);
    }

    *got_frame_ptr = 1;

    return avpkt->size;
}

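/* Free the float DSP and transform contexts allocated in decode_init(). */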
static av_cold int decode_close(AVCodecContext *avctx)
{
    HCAContext *c = avctx->priv_data;

    av_freep(&c->fdsp);
    av_tx_uninit(&c->tx_ctx);

    return 0;
}

AVCodec ff_hca_decoder = {
    .name           = "hca",
    .long_name      = NULL_IF_CONFIG_SMALL("CRI HCA"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_HCA,
    .priv_data_size = sizeof(HCAContext),
    .init           = decode_init,
    .decode         = decode_frame,
    .close          = decode_close,
    .capabilities   = AV_CODEC_CAP_DR1,
    .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                      AV_SAMPLE_FMT_NONE },
};