/* Size in bytes of the layer-3 bit-reservoir seek-back buffer
 * (presumably sized to cover the 9-bit main_data_begin maximum of 511
 * bytes — TODO confirm against the spec). */
#define BACKSTEP_SIZE 512
/* Total size of the last-frame seek-back buffer: two backstep regions plus
 * extra padding bytes.  Fully parenthesized so the macro expands safely
 * inside larger expressions (the unparenthesized form mis-associates under
 * a surrounding multiplication or shift). */
#define LAST_BUF_SIZE (2 * BACKSTEP_SIZE + EXTRABYTES)
/* Expand one value into its three scale variants used by the exponent
 * tables: v, v * 2^(-1/3), v * 2^(-2/3)
 * (0.7937005259 = 2^(-1/3), 0.6299605249 = 2^(-2/3)). */
#define SCALE_GEN(v) \
{ FIXR_OLD(1.0 * (v)), FIXR_OLD(0.7937005259 * (v)), FIXR_OLD(0.6299605249 * (v)) }
126 g->region_size[2] = 576 / 2;
127 for (
i = 0;
i < 3;
i++) {
128 k =
FFMIN(
g->region_size[
i],
g->big_values);
129 g->region_size[
i] = k - j;
136 if (
g->block_type == 2) {
137 if (
s->sample_rate_index != 8)
138 g->region_size[0] = (36 / 2);
140 g->region_size[0] = (72 / 2);
142 if (
s->sample_rate_index <= 2)
143 g->region_size[0] = (36 / 2);
144 else if (
s->sample_rate_index != 8)
145 g->region_size[0] = (54 / 2);
147 g->region_size[0] = (108 / 2);
149 g->region_size[1] = (576 / 2);
158 l =
FFMIN(ra1 + ra2 + 2, 22);
164 if (
g->block_type == 2) {
165 if (
g->switch_point) {
166 if(
s->sample_rate_index == 8)
171 if (
s->sample_rate_index <= 2)
189 static inline int l1_unscale(
int n,
int mant,
int scale_factor)
233 m = (m + ((1U << e) >> 1)) >> e;
243 for (
i = 0;
i < 15;
i++) {
246 norm = ((INT64_C(1) << n) *
FRAC_ONE) / ((1 << n) - 1);
250 ff_dlog(
NULL,
"%d: norm=%x s=%"PRIx32
" %"PRIx32
" %"PRIx32
"\n",
i,
261 for (
i = 0;
i < 16;
i++) {
265 for (j = 0; j < 2; j++) {
266 e = -(j + 1) * ((
i + 1) >> 1);
276 RENAME(ff_mpa_synth_init)();
/* IMDCT twiddle constants. */
#define C3 FIXHR(0.86602540378443864676/2) // cos(pi/6) = sqrt(3)/2
#define C4 FIXHR(0.70710678118654752439/2) //0.5 / cos(pi*(9)/36)
/* NOTE(review): 0.51763809... equals 0.5/cos(pi*(3)/36) (= 2*sin(pi/12));
 * the previous comment said "(5)", but 0.5/cos(pi*5/36) = 0.5517, which
 * does not match this constant. */
#define C5 FIXHR(0.51763809020504152469/2) //0.5 / cos(pi*(3)/36)
#define C6 FIXHR(1.93185165257813657349/4) //0.5 / cos(pi*(15)/36)
327 in1 = in[1*3] + in[0*3];
328 in2 = in[2*3] + in[1*3];
329 in3 = in[3*3] + in[2*3];
330 in4 = in[4*3] + in[3*3];
331 in5 = in[5*3] + in[4*3];
367 int sec_byte_len = sec_len >> 3;
368 int sec_rem_bits = sec_len & 7;
371 uint32_t crc_val =
av_crc(crc_tab, UINT16_MAX, &buf[2], 2);
372 crc_val =
av_crc(crc_tab, crc_val, &buf[6], sec_byte_len);
375 ((buf[6 + sec_byte_len] & (0xFF00 >> sec_rem_bits)) << 24) +
376 ((
s->crc << 16) >> sec_rem_bits));
378 crc_val =
av_crc(crc_tab, crc_val, tmp_buf, 3);
392 int bound,
i, v, n, ch, j, mant;
402 bound = (
s->mode_ext + 1) * 4;
408 for (ch = 0; ch <
s->nb_channels; ch++) {
417 for (ch = 0; ch <
s->nb_channels; ch++) {
418 if (allocation[ch][
i])
423 if (allocation[0][
i]) {
430 for (j = 0; j < 12; j++) {
432 for (ch = 0; ch <
s->nb_channels; ch++) {
433 n = allocation[ch][
i];
440 s->sb_samples[ch][j][
i] = v;
444 n = allocation[0][
i];
448 s->sb_samples[0][j][
i] = v;
450 s->sb_samples[1][j][
i] = v;
452 s->sb_samples[0][j][
i] = 0;
453 s->sb_samples[1][j][
i] = 0;
473 s->sample_rate,
s->lsf);
478 bound = (
s->mode_ext + 1) * 4;
492 for (ch = 0; ch <
s->nb_channels; ch++)
494 j += 1 << bit_alloc_bits;
501 j += 1 << bit_alloc_bits;
505 for (
i = 0;
i < sblimit;
i++) {
506 for (ch = 0; ch <
s->nb_channels; ch++) {
517 for (
i = 0;
i < sblimit;
i++) {
518 for (ch = 0; ch <
s->nb_channels; ch++) {
520 sf = scale_factors[ch][
i];
521 switch (scale_code[ch][
i]) {
549 for (k = 0; k < 3; k++) {
550 for (l = 0; l < 12; l += 3) {
554 for (ch = 0; ch <
s->nb_channels; ch++) {
557 scale = scale_factors[ch][
i][k];
567 s->sb_samples[ch][k * 12 + l + 0][
i] =
569 s->sb_samples[ch][k * 12 + l + 1][
i] =
571 s->sb_samples[ch][k * 12 + l + 2][
i] =
574 for (m = 0; m < 3; m++) {
577 s->sb_samples[ch][k * 12 + l + m][
i] = v;
581 s->sb_samples[ch][k * 12 + l + 0][
i] = 0;
582 s->sb_samples[ch][k * 12 + l + 1][
i] = 0;
583 s->sb_samples[ch][k * 12 + l + 2][
i] = 0;
587 j += 1 << bit_alloc_bits;
594 int mant, scale0, scale1;
595 scale0 = scale_factors[0][
i][k];
596 scale1 = scale_factors[1][
i][k];
605 s->sb_samples[0][k * 12 + l + 0][
i] =
607 s->sb_samples[1][k * 12 + l + 0][
i] =
611 s->sb_samples[0][k * 12 + l + 1][
i] =
613 s->sb_samples[1][k * 12 + l + 1][
i] =
615 s->sb_samples[0][k * 12 + l + 2][
i] =
617 s->sb_samples[1][k * 12 + l + 2][
i] =
620 for (m = 0; m < 3; m++) {
622 s->sb_samples[0][k * 12 + l + m][
i] =
624 s->sb_samples[1][k * 12 + l + m][
i] =
629 s->sb_samples[0][k * 12 + l + 0][
i] = 0;
630 s->sb_samples[0][k * 12 + l + 1][
i] = 0;
631 s->sb_samples[0][k * 12 + l + 2][
i] = 0;
632 s->sb_samples[1][k * 12 + l + 0][
i] = 0;
633 s->sb_samples[1][k * 12 + l + 1][
i] = 0;
634 s->sb_samples[1][k * 12 + l + 2][
i] = 0;
637 j += 1 << bit_alloc_bits;
641 for (ch = 0; ch <
s->nb_channels; ch++) {
642 s->sb_samples[ch][k * 12 + l + 0][
i] = 0;
643 s->sb_samples[ch][k * 12 + l + 1][
i] = 0;
644 s->sb_samples[ch][k * 12 + l + 2][
i] = 0;
652 #define SPLIT(dst,sf,n) \
654 int m = (sf * 171) >> 9; \
657 } else if (n == 4) { \
660 } else if (n == 5) { \
661 int m = (sf * 205) >> 10; \
664 } else if (n == 6) { \
665 int m = (sf * 171) >> 10; \
675 SPLIT(slen[3], sf, n3)
676 SPLIT(slen[2], sf, n2)
677 SPLIT(slen[1], sf, n1)
684 const uint8_t *bstab, *pretab;
689 gain =
g->global_gain - 210;
690 shift =
g->scalefac_scale + 1;
694 for (
i = 0;
i <
g->long_end;
i++) {
695 v0 = gain - ((
g->scale_factors[
i] + pretab[
i]) <<
shift) + 400;
697 for (j =
len; j > 0; j--)
701 if (
g->short_start < 13) {
703 gains[0] = gain - (
g->subblock_gain[0] << 3);
704 gains[1] = gain - (
g->subblock_gain[1] << 3);
705 gains[2] = gain - (
g->subblock_gain[2] << 3);
707 for (
i =
g->short_start;
i < 13;
i++) {
709 for (l = 0; l < 3; l++) {
710 v0 = gains[l] - (
g->scale_factors[k++] <<
shift) + 400;
711 for (j =
len; j > 0; j--)
721 if (
s->in_gb.buffer && *
pos >=
s->gb.size_in_bits -
s->extrasize * 8) {
723 s->in_gb.buffer =
NULL;
740 #define READ_FLIP_SIGN(dst,src) \
741 v = AV_RN32A(src) ^ (get_bits1(&s->gb) << 31); \
744 #define READ_FLIP_SIGN(dst,src) \
745 v = -get_bits1(&s->gb); \
746 *(dst) = (*(src) ^ v) - v;
750 int16_t *exponents,
int end_pos2)
754 int last_pos, bits_left;
756 int end_pos =
FFMIN(end_pos2,
s->gb.size_in_bits -
s->extrasize * 8);
760 for (
i = 0;
i < 3;
i++) {
761 int j, k, l, linbits;
762 j =
g->region_size[
i];
766 k =
g->table_select[
i];
772 memset(&
g->sb_hybrid[s_index], 0,
sizeof(*
g->sb_hybrid) * 2 * j);
791 g->sb_hybrid[s_index ] =
792 g->sb_hybrid[s_index + 1] = 0;
797 exponent= exponents[s_index];
799 ff_dlog(
s->avctx,
"region=%d n=%d y=%d exp=%d\n",
800 i,
g->region_size[
i] - j, y, exponent);
811 g->sb_hybrid[s_index] = v;
820 g->sb_hybrid[s_index + 1] = v;
833 g->sb_hybrid[s_index+!!y] = v;
835 g->sb_hybrid[s_index + !y] = 0;
844 while (s_index <= 572) {
847 if (
pos >= end_pos) {
848 if (
pos > end_pos2 && last_pos) {
865 ff_dlog(
s->avctx,
"t=%d code=%d\n",
g->count1table_select,
code);
866 g->sb_hybrid[s_index + 0] =
867 g->sb_hybrid[s_index + 1] =
868 g->sb_hybrid[s_index + 2] =
869 g->sb_hybrid[s_index + 3] = 0;
871 static const int idxtab[16] = { 3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0 };
873 int pos = s_index + idxtab[
code];
888 memset(&
g->sb_hybrid[s_index], 0,
sizeof(*
g->sb_hybrid) * (576 - s_index));
906 if (
g->block_type != 2)
909 if (
g->switch_point) {
910 if (
s->sample_rate_index != 8)
911 ptr =
g->sb_hybrid + 36;
913 ptr =
g->sb_hybrid + 72;
918 for (
i =
g->short_start;
i < 13;
i++) {
922 for (j =
len; j > 0; j--) {
929 memcpy(ptr1,
tmp,
len * 3 *
sizeof(*ptr1));
#define ISQRT2 FIXR(0.70710678118654752440) // 1/sqrt(2), mid/side stereo scaling
938 int sf_max, sf,
len, non_zero_found;
942 int non_zero_found_short[3];
957 non_zero_found_short[0] = 0;
958 non_zero_found_short[1] = 0;
959 non_zero_found_short[2] = 0;
966 for (l = 2; l >= 0; l--) {
969 if (!non_zero_found_short[l]) {
971 for (j = 0; j <
len; j++) {
973 non_zero_found_short[l] = 1;
983 for (j = 0; j <
len; j++) {
993 for (j = 0; j <
len; j++) {
1004 non_zero_found = non_zero_found_short[0] |
1005 non_zero_found_short[1] |
1006 non_zero_found_short[2];
1013 if (!non_zero_found) {
1014 for (j = 0; j <
len; j++) {
1021 k = (
i == 21) ? 20 :
i;
1027 for (j = 0; j <
len; j++) {
1037 for (j = 0; j <
len; j++) {
1055 for (
i = 0;
i < 576;
i++) {
1058 tab0[
i] = tmp0 + tmp1;
1059 tab1[
i] = tmp0 - tmp1;
1075 #ifndef compute_antialias
1077 #define AA(j) do { \
1078 float tmp0 = ptr[-1-j]; \
1079 float tmp1 = ptr[ j]; \
1080 ptr[-1-j] = tmp0 * csa_table[j][0] - tmp1 * csa_table[j][1]; \
1081 ptr[ j] = tmp0 * csa_table[j][1] + tmp1 * csa_table[j][0]; \
1084 #define AA(j) do { \
1085 SUINT tmp0 = ptr[-1-j]; \
1086 SUINT tmp1 = ptr[ j]; \
1087 SUINT tmp2 = MULH(tmp0 + tmp1, csa_table[j][0]); \
1088 ptr[-1-j] = 4 * (tmp2 - MULH(tmp1, csa_table[j][2])); \
1089 ptr[ j] = 4 * (tmp2 + MULH(tmp0, csa_table[j][3])); \
1099 if (
g->block_type == 2) {
1100 if (!
g->switch_point)
1108 ptr =
g->sb_hybrid + 18;
1109 for (
i = n;
i > 0;
i--) {
1129 int i, j, mdct_long_end, sblimit;
1132 ptr =
g->sb_hybrid + 576;
1133 ptr1 =
g->sb_hybrid + 2 * 18;
1134 while (ptr >= ptr1) {
1138 if (p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
1141 sblimit = ((ptr -
g->sb_hybrid) / 18) + 1;
1143 if (
g->block_type == 2) {
1145 if (
g->switch_point)
1150 mdct_long_end = sblimit;
1153 s->mpadsp.RENAME(imdct36_blocks)(sb_samples, mdct_buf,
g->sb_hybrid,
1154 mdct_long_end,
g->switch_point,
1157 buf = mdct_buf + 4*18*(mdct_long_end >> 2) + (mdct_long_end & 3);
1158 ptr =
g->sb_hybrid + 18 * mdct_long_end;
1160 for (j = mdct_long_end; j < sblimit; j++) {
1162 win =
RENAME(ff_mdct_win)[2 + (4 & -(j & 1))];
1163 out_ptr = sb_samples + j;
1165 for (
i = 0;
i < 6;
i++) {
1166 *out_ptr = buf[4*
i];
1170 for (
i = 0;
i < 6;
i++) {
1171 *out_ptr =
MULH3(out2[
i ],
win[
i ], 1) + buf[4*(
i + 6*1)];
1172 buf[4*(
i + 6*2)] =
MULH3(out2[
i + 6],
win[
i + 6], 1);
1176 for (
i = 0;
i < 6;
i++) {
1177 *out_ptr =
MULH3(out2[
i ],
win[
i ], 1) + buf[4*(
i + 6*2)];
1178 buf[4*(
i + 6*0)] =
MULH3(out2[
i + 6],
win[
i + 6], 1);
1182 for (
i = 0;
i < 6;
i++) {
1183 buf[4*(
i + 6*0)] =
MULH3(out2[
i ],
win[
i ], 1) + buf[4*(
i + 6*0)];
1184 buf[4*(
i + 6*1)] =
MULH3(out2[
i + 6],
win[
i + 6], 1);
1185 buf[4*(
i + 6*2)] = 0;
1188 buf += (j&3) != 3 ? 1 : (4*18-3);
1191 for (j = sblimit; j <
SBLIMIT; j++) {
1193 out_ptr = sb_samples + j;
1194 for (
i = 0;
i < 18;
i++) {
1195 *out_ptr = buf[4*
i];
1199 buf += (j&3) != 3 ? 1 : (4*18-3);
1206 int nb_granules, main_data_begin;
1207 int gr, ch, blocksplit_flag,
i, j, k, n, bits_pos;
1209 int16_t exponents[576];
1221 if (
s->nb_channels == 2)
1226 for (ch = 0; ch <
s->nb_channels; ch++) {
1227 s->granules[ch][0].scfsi = 0;
1228 s->granules[ch][1].scfsi =
get_bits(&
s->gb, 4);
1234 for (gr = 0; gr < nb_granules; gr++) {
1235 for (ch = 0; ch <
s->nb_channels; ch++) {
1236 ff_dlog(
s->avctx,
"gr=%d ch=%d: side_info\n", gr, ch);
1237 g = &
s->granules[ch][gr];
1240 if (
g->big_values > 288) {
1250 g->global_gain -= 2;
1256 if (blocksplit_flag) {
1258 if (
g->block_type == 0) {
1263 for (
i = 0;
i < 2;
i++)
1265 for (
i = 0;
i < 3;
i++)
1269 int region_address1, region_address2;
1271 g->switch_point = 0;
1272 for (
i = 0;
i < 3;
i++)
1277 ff_dlog(
s->avctx,
"region1=%d region2=%d\n",
1278 region_address1, region_address2);
1289 ff_dlog(
s->avctx,
"block_type=%d switch_point=%d\n",
1290 g->block_type,
g->switch_point);
1301 ff_dlog(
s->avctx,
"seekback:%d, lastbuf:%d\n",
1302 main_data_begin,
s->last_buf_size);
1304 memcpy(
s->last_buf +
s->last_buf_size, ptr,
s->extrasize);
1307 s->last_buf_size <<= 3;
1308 for (gr = 0; gr < nb_granules && (
s->last_buf_size >> 3) < main_data_begin; gr++) {
1309 for (ch = 0; ch <
s->nb_channels; ch++) {
1310 g = &
s->granules[ch][gr];
1311 s->last_buf_size +=
g->part2_3_length;
1312 memset(
g->sb_hybrid, 0,
sizeof(
g->sb_hybrid));
1316 skip =
s->last_buf_size - 8 * main_data_begin;
1317 if (skip >=
s->gb.size_in_bits -
s->extrasize * 8 &&
s->in_gb.buffer) {
1320 s->in_gb.buffer =
NULL;
1330 for (; gr < nb_granules; gr++) {
1331 for (ch = 0; ch <
s->nb_channels; ch++) {
1332 g = &
s->granules[ch][gr];
1337 int slen, slen1, slen2;
1342 ff_dlog(
s->avctx,
"slen1=%d slen2=%d\n", slen1, slen2);
1343 if (
g->block_type == 2) {
1344 n =
g->switch_point ? 17 : 18;
1347 for (
i = 0;
i < n;
i++)
1348 g->scale_factors[j++] =
get_bits(&
s->gb, slen1);
1350 for (
i = 0;
i < n;
i++)
1351 g->scale_factors[j++] = 0;
1354 for (
i = 0;
i < 18;
i++)
1355 g->scale_factors[j++] =
get_bits(&
s->gb, slen2);
1356 for (
i = 0;
i < 3;
i++)
1357 g->scale_factors[j++] = 0;
1359 for (
i = 0;
i < 21;
i++)
1360 g->scale_factors[j++] = 0;
1363 sc =
s->granules[ch][0].scale_factors;
1365 for (k = 0; k < 4; k++) {
1367 if ((
g->scfsi & (0x8 >> k)) == 0) {
1368 slen = (k < 2) ? slen1 : slen2;
1370 for (
i = 0;
i < n;
i++)
1371 g->scale_factors[j++] =
get_bits(&
s->gb, slen);
1373 for (
i = 0;
i < n;
i++)
1374 g->scale_factors[j++] = 0;
1378 for (
i = 0;
i < n;
i++) {
1379 g->scale_factors[j] = sc[j];
1384 g->scale_factors[j++] = 0;
1387 int tindex, tindex2, slen[4], sl, sf;
1390 if (
g->block_type == 2)
1391 tindex =
g->switch_point ? 2 : 1;
1395 sf =
g->scalefac_compress;
1402 }
else if (sf < 244) {
1414 }
else if (sf < 500) {
1425 for (k = 0; k < 4; k++) {
1429 for (
i = 0;
i < n;
i++)
1430 g->scale_factors[j++] =
get_bits(&
s->gb, sl);
1432 for (
i = 0;
i < n;
i++)
1433 g->scale_factors[j++] = 0;
1438 g->scale_factors[j] = 0;
1450 for (ch = 0; ch <
s->nb_channels; ch++) {
1451 g = &
s->granules[ch][gr];
1460 return nb_granules * 18;
1464 const uint8_t *buf,
int buf_size)
1466 int i, nb_frames, ch,
ret;
1470 if (
s->error_protection)
1475 s->avctx->frame_size = 384;
1479 s->avctx->frame_size = 1152;
1483 s->avctx->frame_size =
s->lsf ? 576 : 1152;
1488 if (
s->in_gb.buffer) {
1497 s->in_gb.buffer =
NULL;
1509 av_assert1(i <= buf_size - HEADER_SIZE && i >= 0);
1510 memcpy(
s->last_buf +
s->last_buf_size,
s->gb.buffer + buf_size -
HEADER_SIZE -
i,
i);
1511 s->last_buf_size +=
i;
1520 s->frame->nb_samples =
s->avctx->frame_size;
1527 for (ch = 0; ch <
s->nb_channels; ch++) {
1533 samples_ptr =
samples[0] + ch;
1534 sample_stride =
s->nb_channels;
1536 for (
i = 0;
i < nb_frames;
i++) {
1537 RENAME(ff_mpa_synth_filter)(&
s->mpadsp,
s->synth_buf[ch],
1538 &(
s->synth_buf_offset[ch]),
1539 RENAME(ff_mpa_synth_window),
1540 &
s->dither_state, samples_ptr,
1541 sample_stride,
s->sb_samples[ch][
i]);
1542 samples_ptr += 32 * sample_stride;
1546 return nb_frames * 32 *
sizeof(
OUT_INT) *
s->nb_channels;
1552 const uint8_t *buf = avpkt->
data;
1553 int buf_size = avpkt->
size;
1559 while(buf_size && !*buf){
1571 return buf_size + skipped;
1577 }
else if (
ret == 1) {
1588 if (
s->frame_size <= 0) {
1591 }
else if (
s->frame_size < buf_size) {
1593 buf_size=
s->frame_size;
1616 return buf_size + skipped;
1621 memset(
ctx->synth_buf, 0,
sizeof(
ctx->synth_buf));
1622 memset(
ctx->mdct_buf, 0,
sizeof(
ctx->mdct_buf));
1623 ctx->last_buf_size = 0;
1624 ctx->dither_state = 0;
1632 #if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
1634 int *got_frame_ptr,
AVPacket *avpkt)
1636 const uint8_t *buf = avpkt->
data;
1637 int buf_size = avpkt->
size;
1669 s->frame_size =
len;
1685 #if CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER
1690 typedef struct MP3On4DecodeContext {
1693 const uint8_t *coff;
1695 } MP3On4DecodeContext;
/* Number of mp3 decoder instances (sub-frames) per MPEG-4
 * channelConfiguration index 0-7; used as the loop bound on s->frames
 * when iterating s->mp3decctx[].  Index 0 (invalid config) maps to 0. */
static const uint8_t mp3Frames[8] = { 0, 1, 1, 2, 3, 3, 4, 5 };
1705 static const uint8_t chan_offset[8][5] = {
1717 static const int16_t chan_layout[8] = {
1733 for (
i = 0;
i <
s->frames;
i++)
1763 s->syncword = 0xffe00000;
1765 s->syncword = 0xfff00000;
1774 if (!
s->mp3decctx[0])
1783 s->mp3decctx[0]->adu_mode = 1;
1788 for (
i = 1;
i <
s->frames;
i++) {
1790 if (!
s->mp3decctx[
i])
1792 s->mp3decctx[
i]->adu_mode = 1;
1793 s->mp3decctx[
i]->avctx = avctx;
1794 s->mp3decctx[
i]->mpadsp =
s->mp3decctx[0]->mpadsp;
1795 s->mp3decctx[
i]->butterflies_float =
s->mp3decctx[0]->butterflies_float;
1807 for (
i = 0;
i <
s->frames;
i++)
1813 int *got_frame_ptr,
AVPacket *avpkt)
1816 const uint8_t *buf = avpkt->
data;
1817 int buf_size = avpkt->
size;
1839 for (fr = 0; fr <
s->frames; fr++) {
1842 m =
s->mp3decctx[fr];
1857 if (ch + m->nb_channels > avctx->
channels ||
1858 s->coff[fr] + m->nb_channels > avctx->
channels) {
1863 ch += m->nb_channels;
1865 outptr[0] = out_samples[
s->coff[fr]];
1866 if (m->nb_channels > 1)
1867 outptr[1] = out_samples[
s->coff[fr] + 1];
1872 if (m->nb_channels > 1)