#define FFT_FIXED_32 1

#define MAX_CHANNELS 6
#define DCA_MAX_FRAME_SIZE 16384
#define DCA_HEADER_SIZE 13
#define DCA_LFE_SAMPLES 8

#define DCAENC_SUBBANDS 32

#define SUBSUBFRAMES 2
#define SUBBAND_SAMPLES (SUBFRAMES * SUBSUBFRAMES * 8)

#define COS_T(x) (c->cos_table[(x) & 2047])
/* hom(): hearing-threshold model, f in Hz */
double f1 = f / 1000;

return -3.64 * pow(f1, -0.8)
       + 6.8 * exp(-0.6 * (f1 - 3.4) * (f1 - 3.4))
       - 6.0 * exp(-0.15 * (f1 - 8.7) * (f1 - 8.7))
       - 0.0006 * (f1 * f1) * (f1 * f1);
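Written out as a formula, the return value above is exactly (with f1 = f / 1000, f in Hz):

hom(f) = -3.64 f1^{-0.8} + 6.8 e^{-0.6 (f1 - 3.4)^2} - 6.0 e^{-0.15 (f1 - 8.7)^2} - 0.0006 f1^4

which has the shape of the usual absolute-threshold-of-hearing approximation used by perceptual coders.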
/* gammafilter(): filter response expressed in dB */
return 20 * log10(h);
/* subband_bufer_alloc() */
c->subband[ch][band] = bufer +

/* subband_bufer_free() */
if (c->subband[0][0]) {
    c->subband[0][0] = NULL;
/* encode_init() */
int i, j, k, min_frame_bits;

c->fullband_channels = c->channels = avctx->channels;

c->band_interpolation = c->band_interpolation_tab[1];
c->band_spectrum = c->band_spectrum_tab[1];
c->worst_quantization_noise = -2047;
c->worst_noise_ever = -2047;
c->consumed_adpcm_bits = 0;

/* continuation of the AV_LOG_WARNING about a missing channel layout */
       "encoder will guess the layout, but it "
       "might be incorrect.\n");

if (c->lfe_channel) {
    c->fullband_channels--;

c->bit_allocation_sel[i] = 6;

c->prediction_mode[i][j] = -1;

for (i = 0; i < 9; i++) {

c->samplerate_index = i;

c->bitrate_index = i;

min_frame_bits = 132 + (493 + 28 * 32) * c->fullband_channels +
                 c->lfe_channel * 72;

c->frame_size = (c->frame_bits + 7) / 8;
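As a worked example of the framing arithmetic above (my own numbers, not from the source): for a 5.1 input, fullband_channels is 5 and lfe_channel is 1, so min_frame_bits = 132 + (493 + 28 * 32) * 5 + 72 = 132 + 1389 * 5 + 72 = 7149 bits, i.e. the chosen bitrate has to provide at least roughly 894 bytes per frame.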
/* encode_init(): 2048-entry full-period cosine table in Q31,
 * filled from the first quadrant by symmetry */
c->cos_table[0]    = 0x7fffffff;
c->cos_table[512]  = 0;
c->cos_table[1024] = -c->cos_table[0];
for (i = 1; i < 512; i++) {
    c->cos_table[1024-i] = -c->cos_table[i];
    c->cos_table[1024+i] = -c->cos_table[i];
    c->cos_table[2048-i] = +c->cos_table[i];
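A minimal double-precision sketch of the same symmetry trick, for reference (my own illustration; the encoder stores Q31 integers and handles the quadrant boundaries explicitly as above):

#include <math.h>

#define N 2048                            /* one full period */

static double tab[N];

static void fill_cos_table(void)
{
    /* compute only the first quadrant ... */
    for (int i = 0; i <= N / 4; i++)
        tab[i] = cos(2.0 * M_PI * i / N);
    /* ... and mirror it into the other three:
     * cos(pi - x) = -cos(x), cos(pi + x) = -cos(x), cos(2*pi - x) = cos(x) */
    for (int i = 0; i <= N / 4; i++) {
        tab[N / 2 - i]   = -tab[i];
        tab[N / 2 + i]   = -tab[i];
        tab[(N - i) % N] =  tab[i];
    }
}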
for (i = 0; i < 2048; i++)

for (k = 0; k < 32; k++) {
    for (j = 0; j < 8; j++) {

for (i = 0; i < 512; i++) {

for (i = 0; i < 9; i++) {
    for (j = 0; j < AUBANDS; j++) {
        for (k = 0; k < 256; k++) {

for (i = 0; i < 256; i++) {
    c->cb_to_add[i] = (int32_t)(100 * log10(add));

for (j = 0; j < 8; j++) {
    for (i = 0; i < 512; i++) {
        accum += reconst * cos(2 * M_PI * (i + 0.5 - 256) * (j + 0.5) / 512);
    c->band_spectrum_tab[0][j] = (int32_t)(200 * log10(accum));

for (j = 0; j < 8; j++) {
    for (i = 0; i < 512; i++) {
        accum += reconst * cos(2 * M_PI * (i + 0.5 - 256) * (j + 0.5) / 512);
    c->band_spectrum_tab[1][j] = (int32_t)(200 * log10(accum));
/* subband_transform() */
int ch, subs, i, k, j;

for (ch = 0; ch < c->fullband_channels; ch++) {
    const int chi = c->channel_order_tab[ch];

    memcpy(hist, &c->history[ch][0], 512 * sizeof(int32_t));

        memset(accum, 0, 64 * sizeof(int32_t));

        for (k = 0, i = hist_start, j = 0;
             i < 512; k = (k + 1) & 63, i++, j++)
            accum[k] += mul32(hist[i], c->band_interpolation[j]);
        for (i = 0; i < hist_start; k = (k + 1) & 63, i++, j++)
            accum[k] += mul32(hist[i], c->band_interpolation[j]);

        for (k = 16; k < 32; k++)
            accum[k] = accum[k] - accum[31 - k];
        for (k = 32; k < 48; k++)
            accum[k] = accum[k] + accum[95 - k];

        for (band = 0; band < 32; band++) {
            for (i = 16; i < 48; i++) {
                int s = (2 * band + 1) * (2 * (i + 16) + 1);

            c->subband[ch][band][subs] = ((band + 1) & 2) ? -resp : resp;

        for (i = 0; i < 32; i++)
            hist[i + hist_start] = input[(subs * 32 + i) * c->channels + chi];

        hist_start = (hist_start + 32) & 511;
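The routine above is an optimized fixed-point analysis step: 512 history samples are weighted by band_interpolation[], folded into 64 accumulators, and then modulated per band via the shared cosine table. For orientation only, a slow floating-point reference of a generic 32-band cosine-modulated analysis has this shape (the prototype window h[] and the phase term are illustrative placeholders, not the exact DCA constants):

#include <math.h>

static void analyze_32band_once(const double h[512],
                                const double x[512],   /* newest sample last */
                                double out[32])
{
    for (int band = 0; band < 32; band++) {
        double acc = 0.0;
        for (int n = 0; n < 512; n++)
            acc += h[n] * x[511 - n] *
                   cos(M_PI / 32.0 * (band + 0.5) * (n + 0.5));  /* assumed phase */
        out[band] = acc;
    }
}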
/* lfe_downsample() */
const int lfech = lfe_index[c->channel_config];

memcpy(hist, &c->history[c->channels - 1][0], 512 * sizeof(int32_t));

    for (i = hist_start, j = 0; i < 512; i++, j++)
        accum += mul32(hist[i], c->lfe_fir_64i[j]);
    for (i = 0; i < hist_start; i++, j++)
        accum += mul32(hist[i], c->lfe_fir_64i[j]);

    c->downsampled_lfe[lfes] = accum;

    for (i = 0; i < 64; i++)
        hist[i + hist_start] = input[(lfes * 64 + i) * c->channels + lfech];

    hist_start = (hist_start + 64) & 511;
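The LFE path is a 512-tap FIR evaluated over a ring buffer, producing one output for every 64 new input samples (64:1 decimation). A minimal floating-point sketch of that evaluation pattern, with illustrative names:

static double fir_decimate_64(const double hist[512], int hist_start,
                              const double fir[512])
{
    double acc = 0.0;
    int j = 0;

    for (int i = hist_start; i < 512; i++, j++)   /* walk the ring from hist_start ... */
        acc += hist[i] * fir[j];
    for (int i = 0; i < hist_start; i++, j++)     /* ... and wrap around to the start */
        acc += hist[i] * fir[j];
    return acc;                                   /* one decimated LFE sample */
}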
/* get_cb(): binary search over the cb_to_level table */
for (i = 1024; i > 0; i >>= 1) {
    if (c->cb_to_level[i + res] >= in)

/* add_cb(): log-domain addition via a correction table */
return a + c->cb_to_add[a - b];
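Both helpers operate on levels stored in the file's logarithmic unit. A generic sketch of the two ideas, with hypothetical table names standing in for cb_to_level[] and cb_to_add[] (which encode_init() precomputes above):

/* Largest index whose entry is still >= `in`, assuming level_tab[] is
 * monotonically non-increasing; the index bits are decided from high to low. */
static int log_index_of(const int32_t level_tab[2048], int32_t in)
{
    int res = 0;
    for (int i = 1024; i > 0; i >>= 1)
        if (level_tab[res + i] >= in)
            res += i;
    return res;
}

/* Sum of two log-domain values: take the larger one and add a small
 * correction that depends only on their difference. */
static int32_t log_add(const int32_t add_tab[256], int32_t a, int32_t b)
{
    if (a < b) {                      /* keep a as the larger term */
        int32_t t = a; a = b; b = t;
    }
    if (a - b >= 256)                 /* difference too large, correction ~ 0 */
        return a;
    return a + add_tab[a - b];
}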
/* calc_power() */
for (i = 0; i < 512; i++)

for (i = 0; i < 256; i++) {
/* adjust_jnd() */
const int samplerate_index = c->samplerate_index;

for (j = 0; j < 256; j++)
    out_cb_unnorm[j] = -2047;

    for (j = 0; j < 256; j++)
        denom = add_cb(c, denom, power[j] + c->auf[samplerate_index][i][j]);
    for (j = 0; j < 256; j++)
        out_cb_unnorm[j] = add_cb(c, out_cb_unnorm[j],
                                  -denom + c->auf[samplerate_index][i][j]);

for (j = 0; j < 256; j++)
    out_cb[j] = add_cb(c, out_cb[j], -out_cb_unnorm[j] - ca_cb - cs_cb);
/* walk_band_low() */
for (f = 0; f < 4; f++)

for (f = 0; f < 8; f++)
    walk(c, band, band - 1, 8 * band - 4 + f,

/* walk_band_high() */
for (f = 0; f < 4; f++)

for (f = 0; f < 8; f++)
    walk(c, band, band + 1, 8 * band + 4 + f,

/* update_band_masking() */
if (value < c->band_masking_cb[band1])
    c->band_masking_cb[band1] = value;
/* calc_masking() */
int i, k, band, ch, ssf;

for (i = 0; i < 256; i++)
        c->masking_curve_cb[ssf][i] = -2047;

for (ch = 0; ch < c->fullband_channels; ch++) {
    const int chi = c->channel_order_tab[ch];

    for (i = 0, k = 128 + 256 * ssf; k < 512; i++, k++)
    for (k -= 512; i < 512; i++, k++)

for (i = 0; i < 256; i++) {
        if (c->masking_curve_cb[ssf][i] < m)
            m = c->masking_curve_cb[ssf][i];
    c->eff_masking_curve_cb[i] = m;

for (band = 0; band < 32; band++) {
    c->band_masking_cb[band] = 2048;
/* find_peaks() */
for (ch = 0; ch < c->fullband_channels; ch++) {
    for (band = 0; band < 32; band++)
/* adpcm_analysis() */
c->consumed_adpcm_bits = 0;
for (ch = 0; ch < c->fullband_channels; ch++) {
    for (band = 0; band < 32; band++) {
        if (pred_vq_id >= 0) {
            c->prediction_mode[ch][band] = pred_vq_id;
            c->consumed_adpcm_bits += 12;
            c->diff_peak_cb[ch][band] = find_peak(c, estimated_diff, 16);
        } else {
            c->prediction_mode[ch][band] = -1;
#define USED_1ABITS 1
#define USED_26ABITS 4
/* get_step_size() */
if (c->bitrate_index == 3)

/* calc_one_scale() */
int our_nscale, try_remove;

peak = c->cb_to_level[-peak_cb];

for (try_remove = 64; try_remove > 0; try_remove >>= 1) {
    our_nscale -= try_remove;

if (our_nscale >= 125)
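calc_one_scale() starts from the largest scale-factor index and uses halving steps to settle on the smallest index for which the subband peak still fits the quantizer range. The search has this generic shape (fits() is a hypothetical monotone predicate standing in for the softfloat range test in the real code):

static int smallest_fitting_index(int max_index, int (*fits)(int index))
{
    int idx = max_index;

    for (int step = 64; step > 0; step >>= 1)     /* halving descent */
        if (idx - step >= 0 && fits(idx - step))
            idx -= step;
    return idx;
}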
/* quantize_adpcm_subband() */
int32_t diff_peak_cb = c->diff_peak_cb[ch][band];

                                           &c->quant[ch][band]);

                    step_size, c->adpcm_history[ch][band], c->subband[ch][band],
                    c->adpcm_history[ch][band] + 4, c->quantized[ch][band],
/* quantize_adpcm() */
for (ch = 0; ch < c->fullband_channels; ch++)
    for (band = 0; band < 32; band++)
        if (c->prediction_mode[ch][band] >= 0)

/* quantize_pcm() */
for (ch = 0; ch < c->fullband_channels; ch++) {
    for (band = 0; band < 32; band++) {
        if (c->prediction_mode[ch][band] == -1) {
/* set_best_code() */
uint32_t t, bits = 0;

if (vlc_bits[i][0] == 0) {

best_sel_bits[i] = vlc_bits[i][0];

if (best_sel_bits[i] > vlc_bits[i][sel] && vlc_bits[i][sel]) {
    best_sel_bits[i] = vlc_bits[i][sel];
    best_sel_id[i] = sel;

t = best_sel_bits[i] + 2;
if (t < clc_bits[i]) {
    res[i] = best_sel_id[i];
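Per code book, the cheapest Huffman (VLC) variant is found, charged 2 extra bits for signalling which variant was chosen, and compared against plain fixed-length coding. A standalone sketch of that decision (array sizes and the FIXED_LENGTH marker are illustrative; it also assumes every variant is usable, which the real code checks separately):

enum { NUM_VARIANTS = 7, FIXED_LENGTH = NUM_VARIANTS };

static unsigned choose_coding(const unsigned vlc_bits[NUM_VARIANTS],
                              unsigned clc_bits, int *choice)
{
    unsigned best = vlc_bits[0];
    int best_id = 0;

    for (int sel = 1; sel < NUM_VARIANTS; sel++)
        if (vlc_bits[sel] < best) {
            best = vlc_bits[sel];
            best_id = sel;
        }
    if (best + 2 < clc_bits) {        /* +2 bits to transmit the selector */
        *choice = best_id;
        return best + 2;
    }
    *choice = FIXED_LENGTH;           /* fixed-length coding is cheaper */
    return clc_bits;
}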
/* set_best_abits_code() */
if (abits[i] > 12 || abits[i] == 0) {
/* init_quantization_noise() */
uint32_t bits_counter = 0;

c->consumed_bits = 132 + 333 * c->fullband_channels;
c->consumed_bits += c->consumed_adpcm_bits;

    c->consumed_bits += 72;

for (ch = 0; ch < c->fullband_channels; ch++) {
    for (band = 0; band < 32; band++) {
        int snr_cb = c->peak_cb[ch][band] - c->band_masking_cb[band] - noise;

        if (snr_cb >= 1312) {
            c->abits[ch][band] = 26;
        } else if (snr_cb >= 222) {
            c->abits[ch][band] = 8 + mul32(snr_cb - 222, 69000000);
        } else if (snr_cb >= 0) {
            c->abits[ch][band] = 2 + mul32(snr_cb, 106000000);
        } else if (forbid_zero || snr_cb >= -140) {
            c->abits[ch][band] = 1;
        } else {
            c->abits[ch][band] = 0;
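This chain maps the per-band signal-to-mask ratio (peak minus masking minus the trial noise level) onto an allocation index: a hard cap of 26 bits, two linear ramps implemented with mul32() fixed-point scaling, a 1-bit floor near the threshold, and zero bits only when forbid_zero allows it. A plain-integer sketch of the same piecewise shape, with assumed slopes in place of the mul32() constants:

static int snr_to_abits(int snr_cb, int forbid_zero)
{
    if (snr_cb >= 1312)
        return 26;                        /* cap */
    if (snr_cb >= 222)
        return 8 + (snr_cb - 222) / 62;   /* assumed slope */
    if (snr_cb >= 0)
        return 2 + snr_cb / 40;           /* assumed slope */
    if (forbid_zero || snr_cb >= -140)
        return 1;                         /* minimum allocation near threshold */
    return 0;                             /* band not transmitted */
}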
                                    &c->bit_allocation_sel[ch]);

for (ch = 0; ch < c->fullband_channels; ch++) {
    for (band = 0; band < 32; band++) {
        if (c->prediction_mode[ch][band] == -1) {
                                                       &c->quant[ch][band]);

for (ch = 0; ch < c->fullband_channels; ch++) {
    for (band = 0; band < 32; band++) {
                                            c->quantized[ch][band],
                                            huff_bit_count_accum[ch][c->abits[ch][band] - 1]);

for (ch = 0; ch < c->fullband_channels; ch++) {
                                  clc_bit_count_accum[ch],
                                  c->quant_index_sel[ch]);

c->consumed_bits += bits_counter;
/* assign_bits() */
low = high = c->worst_quantization_noise;
if (c->consumed_bits > c->frame_bits) {
    while (c->consumed_bits > c->frame_bits) {

    while (c->consumed_bits <= c->frame_bits) {

for (down = snr_fudge >> 1; down; down >>= 1) {
    if (c->consumed_bits <= c->frame_bits)

c->worst_quantization_noise = high;
if (high > c->worst_noise_ever)
    c->worst_noise_ever = high;
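The rate control first brackets an acceptable quantization-noise level by stepping in snr_fudge increments (up if the frame is too big, down if it still fits), then bisects within that bracket. The same bracket-and-bisect pattern in isolation (cost() is a hypothetical non-increasing function of the noise parameter, standing in for re-running init_quantization_noise()):

static int fit_budget(int start, int step, long budget, long (*cost)(int noise))
{
    int low = start, high = start;

    if (cost(high) > budget) {
        while (cost(high) > budget) {          /* raise noise until the frame fits */
            low = high;
            high += step;
        }
    } else {
        while (cost(low) <= budget) {          /* lower noise while it still fits */
            high = low;
            low -= step;
        }
    }
    for (int down = step >> 1; down; down >>= 1)   /* bisect inside [low, high] */
        if (cost(high - down) <= budget)
            high -= down;
    return high;                               /* lowest noise level that fits */
}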
/* shift_history() */
for (k = 0; k < 512; k++)
    for (ch = 0; ch < c->channels; ch++) {
        const int chi = c->channel_order_tab[ch];

        c->history[ch][k] = input[k * c->channels + chi];
/* fill_in_adpcm_bufer() */
for (ch = 0; ch < c->channels; ch++) {
    for (band = 0; band < 32; band++) {
        if (c->prediction_mode[ch][band] == -1) {

                               c->quantized[ch][band]+12, step_size,

            samples[0] = c->adpcm_history[ch][band][0] << 7;
            samples[1] = c->adpcm_history[ch][band][1] << 7;
            samples[2] = c->adpcm_history[ch][band][2] << 7;
            samples[3] = c->adpcm_history[ch][band][3] << 7;
/* put_primary_audio_header() */
put_bits(&c->pb, 3, c->fullband_channels - 1);

/* one loop per per-channel header field */
for (ch = 0; ch < c->fullband_channels; ch++)

for (ch = 0; ch < c->fullband_channels; ch++)

for (ch = 0; ch < c->fullband_channels; ch++)

for (ch = 0; ch < c->fullband_channels; ch++)

for (ch = 0; ch < c->fullband_channels; ch++)

for (ch = 0; ch < c->fullband_channels; ch++)

for (ch = 0; ch < c->fullband_channels; ch++)

for (ch = 0; ch < c->fullband_channels; ch++)
/* put_subframe_samples() */
int i, j, sum, bits, sel;

sel = c->quant_index_sel[ch][c->abits[ch][band] - 1];

                         sel, c->abits[ch][band] - 1);

if (c->abits[ch][band] <= 7) {
    for (i = 0; i < 8; i += 4) {
        for (j = 3; j >= 0; j--) {
            sum += c->quantized[ch][band][ss * 8 + i + j];

} else {
    for (i = 0; i < 8; i++) {
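For small allocation indices the eight quantized samples of a sub-subframe are written as two block codes, each folding four signed values into one base-L integer (L being the number of quantizer levels). A standalone sketch of that packing (the captured lines above show only part of the inner loop; level counts and bit widths come from ff_dca_quant_levels[] and bit_consumption[] in the real encoder):

static unsigned pack_block_code(const int v[4], unsigned levels)
{
    unsigned code = 0;

    for (int j = 3; j >= 0; j--) {
        code *= levels;                   /* shift by one base-`levels` digit */
        code += v[j] + (levels - 1) / 2;  /* bias the signed value into 0..levels-1 */
    }
    return code;                          /* v[0] ends up as the least significant digit */
}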
/* put_subframe() */
int i, band, ss, ch;

for (ch = 0; ch < c->fullband_channels; ch++)
    put_bits(&c->pb, 1, !(c->prediction_mode[ch][band] == -1));

for (ch = 0; ch < c->fullband_channels; ch++)
    if (c->prediction_mode[ch][band] >= 0)

for (ch = 0; ch < c->fullband_channels; ch++) {
    if (c->bit_allocation_sel[ch] == 6) {

                           c->bit_allocation_sel[ch]);

for (ch = 0; ch < c->fullband_channels; ch++)
    if (c->abits[ch][band])

for (ch = 0; ch < c->fullband_channels; ch++)
    if (c->abits[ch][band])

if (c->lfe_channel) {

for (ch = 0; ch < c->fullband_channels; ch++)
    if (c->abits[ch][band])
/* encode_frame() */
if (c->options.adpcm_mode)

*got_packet_ptr = 1;
#define DCAENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
/* dcaenc_class */
.class_name = "DCA (DTS Coherent Acoustics)",
static void lfe_downsample(DCAEncContext *c, const int32_t *input)
int frame_size
Number of samples per channel in an audio frame.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
static av_always_inline double ff_exp10(double x)
Compute 10^x for floating point values.
static av_cold int init(AVCodecContext *avctx)
uint64_t channel_layout
Audio channel layout.
int ff_dcaadpcm_do_real(int pred_vq_index, softfloat quant, int32_t scale_factor, int32_t step_size, const int32_t *prev_hist, const int32_t *in, int32_t *next_hist, int32_t *out, int len, int32_t peak)
int32_t * subband[MAX_CHANNELS][DCAENC_SUBBANDS]
int sample_rate
samples per second
#define FFSWAP(type, a, b)
static double cb(void *priv, double x, double y)
static const AVOption options[]
static enum AVSampleFormat sample_fmts[]
static void walk_band_low(DCAEncContext *c, int band, int channel, walk_band_t walk, int32_t *arg)
const uint32_t ff_dca_bit_rates[32]
#define AV_CH_LAYOUT_MONO
av_cold void ff_dcaadpcm_free(DCAADPCMEncContext *s)
static const softfloat scalefactor_inv[128]
static void put_sbits(PutBitContext *pb, int n, int32_t value)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static const uint16_t erb[]
static const uint8_t lfe_index[7]
static void put_subframe(DCAEncContext *c, int subframe)
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
CompressionOptions options
static int32_t get_step_size(DCAEncContext *c, int ch, int band)
const uint32_t ff_dca_lossy_quant[32]
static void calc_lfe_scales(DCAEncContext *c)
int32_t adpcm_history[MAX_CHANNELS][DCAENC_SUBBANDS][DCA_ADPCM_COEFFS *2]
#define fc(width, name, range_min, range_max)
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
static void update_band_masking(DCAEncContext *c, int band1, int band2, int f, int32_t spectrum1, int32_t spectrum2, int channel, int32_t *arg)
static int calc_one_scale(DCAEncContext *c, int32_t peak_cb, int abits, softfloat *quant)
static int32_t quantize_value(int32_t value, softfloat quant)
const int32_t * band_interpolation
static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
static void put_frame_header(DCAEncContext *c)
DCAADPCMEncContext adpcm_ctx
uint32_t ff_dca_vlc_calc_alloc_bits(int *values, uint8_t n, uint8_t sel)
int32_t history[MAX_CHANNELS][512]
static void calc_masking(DCAEncContext *c, const int32_t *input)
static void adpcm_analysis(DCAEncContext *c)
const float ff_dca_fir_32bands_nonperfect[512]
const uint8_t ff_dca_quant_index_group_size[DCA_CODE_BOOKS]
static void accumulate_huff_bit_consumption(int abits, int32_t *quantized, uint32_t *result)
static int init_quantization_noise(DCAEncContext *c, int noise, int forbid_zero)
const uint32_t ff_dca_quant_levels[32]
#define ss(width, name, subs,...)
int32_t auf[9][AUBANDS][256]
#define AV_CH_LAYOUT_STEREO
static const int bit_consumption[27]
static void walk_band_high(DCAEncContext *c, int band, int channel, walk_band_t walk, int32_t *arg)
static void quantize_adpcm_subband(DCAEncContext *c, int ch, int band)
int32_t quantized[MAX_CHANNELS][DCAENC_SUBBANDS][SUBBAND_SAMPLES]
static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx, int64_t samples)
Rescale from sample rate to AVCodecContext.time_base.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_CODEC_CAP_EXPERIMENTAL
Codec is experimental and is thus avoided in favor of non experimental encoders.
static void quantize_adpcm(DCAEncContext *c)
int abits[MAX_CHANNELS][DCAENC_SUBBANDS]
int32_t peak_cb[MAX_CHANNELS][DCAENC_SUBBANDS]
const int32_t * band_spectrum
static double hom(double f)
int32_t eff_masking_curve_cb[256]
int32_t downsampled_lfe[DCA_LFE_SAMPLES]
static uint32_t set_best_abits_code(int abits[DCAENC_SUBBANDS], int bands, int32_t *res)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static const float bands[]
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
static void adjust_jnd(DCAEncContext *c, const int32_t in[512], int32_t out_cb[256])
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
const uint32_t ff_dca_lossless_quant[32]
static int32_t mul32(int32_t a, int32_t b)
const float ff_dca_lfe_fir_64[256]
#define AV_COPY128U(d, s)
int64_t bit_rate
the average bitrate
const char * av_default_item_name(void *ptr)
Return the context name.
uint32_t ff_dca_vlc_calc_quant_bits(int *values, uint8_t n, uint8_t sel, uint8_t table)
static const softfloat stepsize_inv[27]
#define AV_CH_LAYOUT_5POINT1
int32_t band_masking_cb[32]
int ff_dcaadpcm_subband_analysis(const DCAADPCMEncContext *s, const int32_t *in, int len, int *diff)
static av_cold int encode_close(AVCodecContext *avctx)
int32_t worst_quantization_noise
int32_t band_interpolation_tab[2][512]
const uint32_t ff_dca_scale_factor_quant7[128]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static void subband_bufer_free(DCAEncContext *c)
softfloat quant[MAX_CHANNELS][DCAENC_SUBBANDS]
void(* walk_band_t)(DCAEncContext *c, int band1, int band2, int f, int32_t spectrum1, int32_t spectrum2, int channel, int32_t *arg)
static int32_t add_cb(DCAEncContext *c, int32_t a, int32_t b)
#define DCA_BITALLOC_12_COUNT
static int encode_init(AVCodecContext *avctx)
static void fill_in_adpcm_bufer(DCAEncContext *c)
#define DCA_MAX_FRAME_SIZE
static void quantize_pcm(DCAEncContext *c)
int32_t masking_curve_cb[SUBSUBFRAMES][256]
static void put_primary_audio_header(DCAEncContext *c)
int32_t quant_index_sel[MAX_CHANNELS][DCA_CODE_BOOKS]
int channels
number of audio channels
#define AV_CH_LAYOUT_5POINT0
static void find_peaks(DCAEncContext *c)
const uint8_t ff_dca_quant_index_sel_nbits[DCA_CODE_BOOKS]
void ff_dca_vlc_enc_quant(PutBitContext *pb, int *values, uint8_t n, uint8_t sel, uint8_t table)
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
static int put_bits_count(PutBitContext *s)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
AVSampleFormat
Audio sample formats.
const char * name
Name of the codec implementation.
static int32_t norm__(int64_t a, int bits)
static const int8_t channel_reorder_nolfe[7][5]
static const int snr_fudge
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
static void subband_transform(DCAEncContext *c, const int32_t *input)
int consumed_adpcm_bits
Number of bits to transmit ADPCM related info.
static const int8_t channel_reorder_lfe[7][5]
static void ff_dca_core_dequantize(int32_t *output, const int32_t *input, int32_t step_size, int32_t scale, int residual, int len)
av_cold int ff_dcaadpcm_init(DCAADPCMEncContext *s)
main external API structure.
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
static int noise(AVBSFContext *ctx, AVPacket *pkt)
static int subband_bufer_alloc(DCAEncContext *c)
static void assign_bits(DCAEncContext *c)
static int32_t get_cb(DCAEncContext *c, int32_t in)
static const uint8_t bitstream_sfreq[]
static int32_t find_peak(DCAEncContext *c, const int32_t *in, int len)
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
This structure stores compressed data.
const float ff_dca_fir_32bands_perfect[512]
static const AVCodecDefault defaults[]
static void shift_history(DCAEncContext *c, const int32_t *input)
static uint32_t set_best_code(uint32_t vlc_bits[DCA_CODE_BOOKS][7], uint32_t clc_bits[DCA_CODE_BOOKS], int32_t res[DCA_CODE_BOOKS])
int32_t prediction_mode[MAX_CHANNELS][DCAENC_SUBBANDS]
void ff_dca_vlc_enc_alloc(PutBitContext *pb, int *values, uint8_t n, uint8_t sel)
static const double coeff[2][5]
#define LOCAL_ALIGNED_32(t, v,...)
int32_t cb_to_level[2048]
static void put_subframe_samples(DCAEncContext *c, int ss, int band, int ch)
int32_t bit_allocation_sel[MAX_CHANNELS]
int32_t band_spectrum_tab[2][8]
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
static void calc_power(DCAEncContext *c, const int32_t in[2 *256], int32_t power[256])
@ AV_SAMPLE_FMT_S32
signed 32 bits
int32_t diff_peak_cb[MAX_CHANNELS][DCAENC_SUBBANDS]
expected peak of residual signal
static const AVClass dcaenc_class
int scale_factor[MAX_CHANNELS][DCAENC_SUBBANDS]
static double gammafilter(int i, double f)
const int8_t * channel_order_tab
channel reordering table, LFE and non-LFE variants