113 #define WMAPRO_MAX_CHANNELS 8
114 #define MAX_SUBFRAMES 32
116 #define MAX_FRAMESIZE 32768
117 #define XMA_MAX_STREAMS 8
118 #define XMA_MAX_CHANNELS_STREAM 2
119 #define XMA_MAX_CHANNELS (XMA_MAX_STREAMS * XMA_MAX_CHANNELS_STREAM)
121 #define WMAPRO_BLOCK_MIN_BITS 6
122 #define WMAPRO_BLOCK_MAX_BITS 13
123 #define WMAPRO_BLOCK_MIN_SIZE (1 << WMAPRO_BLOCK_MIN_BITS)
124 #define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS)
125 #define WMAPRO_BLOCK_SIZES (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1)
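Worked out, these macros give a minimum block of 1 << 6 = 64 samples, a maximum of 1 << 13 = 8192 samples, and 13 - 6 + 1 = 8 distinct block sizes. A minimal standalone sketch (only the macro definitions above are assumed):

/* hedged sketch: derived WMA Pro block-size constants */
#include <stdio.h>

#define WMAPRO_BLOCK_MIN_BITS 6
#define WMAPRO_BLOCK_MAX_BITS 13
#define WMAPRO_BLOCK_MIN_SIZE (1 << WMAPRO_BLOCK_MIN_BITS)
#define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS)
#define WMAPRO_BLOCK_SIZES    (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1)

int main(void)
{
    /* prints: min block 64, max block 8192, 8 block sizes */
    printf("min block %d, max block %d, %d block sizes\n",
           WMAPRO_BLOCK_MIN_SIZE, WMAPRO_BLOCK_MAX_SIZE, WMAPRO_BLOCK_SIZES);
    return 0;
}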
129 #define SCALEVLCBITS 8
130 #define VEC4MAXDEPTH ((HUFF_VEC4_MAXBITS+VLCBITS-1)/VLCBITS)
131 #define VEC2MAXDEPTH ((HUFF_VEC2_MAXBITS+VLCBITS-1)/VLCBITS)
132 #define VEC1MAXDEPTH ((HUFF_VEC1_MAXBITS+VLCBITS-1)/VLCBITS)
133 #define SCALEMAXDEPTH ((HUFF_SCALE_MAXBITS+SCALEVLCBITS-1)/SCALEVLCBITS)
134 #define SCALERLMAXDEPTH ((HUFF_SCALE_RL_MAXBITS+VLCBITS-1)/VLCBITS)
269 #define PRINT(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b);
270 #define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %"PRIx32"\n", a, b);
272 PRINT("ed sample bit depth", s->bits_per_sample);
273 PRINT_HEX("ed decode flags", s->decode_flags);
274 PRINT("samples per frame", s->samples_per_frame);
275 PRINT("log2 frame size", s->log2_frame_size);
276 PRINT("max num subframes", s->max_num_subframes);
277 PRINT("len prefix", s->len_prefix);
278 PRINT("num channels", s->nb_channels);
324 static VLCElem vlc_buf[2108 + 3912];
352 for (int i = 0; i < 33; i++)
368 unsigned int channel_mask;
370 int log2_max_num_subframes;
371 int num_possible_block_sizes;
386 s->decode_flags = 0x10d6;
387 s->bits_per_sample = 16;
394 s->decode_flags = 0x10d6;
395 s->bits_per_sample = 16;
397 s->nb_channels = edata_ptr[32 + ((edata_ptr[0]==3)?0:8) + 4*num_stream + 0];
399 s->decode_flags = 0x10d6;
400 s->bits_per_sample = 16;
402 s->nb_channels = edata_ptr[8 + 20*num_stream + 17];
404 s->decode_flags = AV_RL16(edata_ptr+14);
405 channel_mask = AV_RL32(edata_ptr+2);
406 s->bits_per_sample = AV_RL16(edata_ptr);
409 if (s->bits_per_sample > 32 || s->bits_per_sample < 1) {
420 if (s->log2_frame_size > 25) {
429 s->len_prefix = (s->decode_flags & 0x40);
438 s->samples_per_frame = 1 << bits;
440 s->samples_per_frame = 512;
444 log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
445 s->max_num_subframes = 1 << log2_max_num_subframes;
446 if (s->max_num_subframes == 16 || s->max_num_subframes == 4)
447 s->max_subframe_len_bit = 1;
448 s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;
450 num_possible_block_sizes = log2_max_num_subframes + 1;
451 s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
452 s->dynamic_range_compression = (s->decode_flags & 0x80);
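As a worked example of the flag tests above, the XMA default decode_flags of 0x10d6 set earlier in this function has bit 0x40 set (length-prefixed frames), (0x10d6 & 0x38) >> 3 == 2 so max_num_subframes becomes 1 << 2 = 4, and bit 0x80 set (DRC data present). A minimal standalone sketch of the same arithmetic:

/* hedged sketch: splitting decode_flags = 0x10d6 with the masks used above */
#include <stdio.h>

int main(void)
{
    unsigned decode_flags      = 0x10d6;
    int      len_prefix        = decode_flags & 0x40;        /* frame length is prefixed */
    int      log2_max_subf     = (decode_flags & 0x38) >> 3; /* bits 3..5 -> 2 */
    int      max_num_subframes = 1 << log2_max_subf;         /* 4 subframes per channel */
    int      drc               = decode_flags & 0x80;        /* DRC data present */

    /* prints: len_prefix=1 max_num_subframes=4 drc=1 */
    printf("len_prefix=%d max_num_subframes=%d drc=%d\n",
           !!len_prefix, max_num_subframes, !!drc);
    return 0;
}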
456 s->max_num_subframes);
462 s->min_samples_per_subframe);
466 if (s->nb_channels <= 0) {
481 for (i = 0; i < s->nb_channels; i++)
482 s->channel[i].prev_block_len = s->samples_per_frame;
487 if (channel_mask & 8) {
490 if (channel_mask & mask)
497 for (i = 0; i < num_possible_block_sizes; i++) {
498 int subframe_len = s->samples_per_frame >> i;
503 s->sfb_offsets[i][0] = 0;
505 for (x = 0; x < MAX_BANDS-1 && s->sfb_offsets[i][band - 1] < subframe_len; x++) {
508 if (offset > s->sfb_offsets[i][band - 1])
511 if (offset >= subframe_len)
514 s->sfb_offsets[i][band - 1] = subframe_len;
515 s->num_sfb[i] = band - 1;
516 if (s->num_sfb[i] <= 0) {
528 for (i = 0; i < num_possible_block_sizes; i++) {
530 for (b = 0; b < s->num_sfb[i]; b++) {
533 + s->sfb_offsets[i][b + 1] - 1) << i) >> 1;
534 for (x = 0; x < num_possible_block_sizes; x++) {
536 while (s->sfb_offsets[x][v + 1] << x < offset) {
540 s->sf_offsets[i][x][b] = v;
552 / (1ll << (s->bits_per_sample - 1));
566 for (i = 0; i < num_possible_block_sizes; i++) {
567 int block_size = s->samples_per_frame >> i;
568 int cutoff = (440*block_size + 3LL * (s->avctx->sample_rate >> 1) - 1)
569 / s->avctx->sample_rate;
570 s->subwoofer_cutoffs[i] = av_clip(cutoff, 4, block_size);
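A small standalone sketch of the cutoff formula above; the 2048-sample block and 44100 Hz sample rate are illustrative values, not taken from the listing:

/* hedged sketch of the subwoofer-cutoff computation */
#include <stdio.h>

int main(void)
{
    int sample_rate = 44100;                 /* illustrative */
    int block_size  = 2048;                  /* illustrative */
    long long cutoff = (440 * (long long)block_size
                        + 3LL * (sample_rate >> 1) - 1) / sample_rate;
    /* same clamp as av_clip(cutoff, 4, block_size) */
    if (cutoff < 4)          cutoff = 4;
    if (cutoff > block_size) cutoff = block_size;
    printf("cutoff = %lld bins\n", cutoff);  /* (901120 + 66150 - 1) / 44100 = 21 */
    return 0;
}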
614 int frame_len_shift = 0;
618 if (offset == s->samples_per_frame - s->min_samples_per_subframe)
619 return s->min_samples_per_subframe;
625 if (s->max_subframe_len_bit) {
627 frame_len_shift = 1 + get_bits(&s->gb, s->subframe_len_bits-1);
629 frame_len_shift = get_bits(&s->gb, s->subframe_len_bits);
631 subframe_len = s->samples_per_frame >> frame_len_shift;
634 if (subframe_len < s->min_samples_per_subframe ||
635 subframe_len > s->samples_per_frame) {
667 int channels_for_cur_subframe = s->nb_channels;
668 int fixed_channel_layout = 0;
669 int min_channel_len = 0;
679 for (c = 0; c < s->nb_channels; c++)
680 s->channel[c].num_subframes = 0;
683 fixed_channel_layout = 1;
690 for (c = 0; c < s->nb_channels; c++) {
691 if (num_samples[c] == min_channel_len) {
692 if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
693 (min_channel_len == s->samples_per_frame - s->min_samples_per_subframe))
694 contains_subframe[c] = 1;
698 contains_subframe[c] = 0;
706 min_channel_len += subframe_len;
707 for (c = 0; c < s->nb_channels; c++) {
710 if (contains_subframe[c]) {
713 "broken frame: num subframes > 31\n");
717 num_samples[c] += subframe_len;
719 if (num_samples[c] > s->samples_per_frame) {
721 "channel len > samples_per_frame\n");
724 } else if (num_samples[c] <= min_channel_len) {
725 if (num_samples[c] < min_channel_len) {
726 channels_for_cur_subframe = 0;
727 min_channel_len = num_samples[c];
729 ++channels_for_cur_subframe;
732 } while (min_channel_len < s->samples_per_frame);
734 for (c = 0; c < s->nb_channels; c++) {
737 for (i = 0; i < s->channel[c].num_subframes; i++) {
738 ff_dlog(s->avctx, "frame[%"PRIu32"] channel[%i] subframe[%i]"
739 " len %i\n", s->frame_num, c, i,
740 s->channel[c].subframe_len[i]);
741 s->channel[c].subframe_offset[i] = offset;
772 for (x = 0; x < i; x++) {
774 for (y = 0; y < i + 1; y++) {
777 int n = rotation_offset[offset + x];
783 cosv = sin64[32 - n];
785 sinv = sin64[64 - n];
786 cosv = -sin64[n - 32];
790 (v1 * sinv) - (v2 * cosv);
792 (v1 * cosv) + (v2 * sinv);
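The two updates above are a plain 2-D (Givens) rotation of the pair (v1, v2). A minimal sketch, assuming sin64[] holds sin(i * M_PI / 64) for i = 0..32; only the table's 33-entry size is visible in this listing, so its exact contents are an assumption:

/* hedged sketch of the rotation step used while building the decorrelation matrix */
#include <math.h>
#include <stdio.h>

static float sin64[33];

static void rotate_pair(float *v1, float *v2, int n)
{
    /* reconstruct sin/cos of n*pi/64 from a quarter-wave sine table */
    float sinv = (n < 32) ? sin64[n]      : sin64[64 - n];
    float cosv = (n < 32) ? sin64[32 - n] : -sin64[n - 32];
    float a = *v1, b = *v2;
    *v1 = a * sinv - b * cosv;
    *v2 = a * cosv + b * sinv;
}

int main(void)
{
    for (int i = 0; i <= 32; i++)
        sin64[i] = sinf(i * (float)M_PI / 64.0f);

    float v1 = 1.0f, v2 = 0.0f;
    rotate_pair(&v1, &v2, 16);   /* rotate by pi/4 */
    printf("%f %f\n", v1, v2);   /* ~0.707107 0.707107 */
    return 0;
}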
814 if (s->nb_channels > 1) {
815 int remaining_channels = s->channels_for_cur_subframe;
819 "Channel transform bit");
823 for (s->num_chgroups = 0; remaining_channels &&
824 s->num_chgroups < s->channels_for_cur_subframe; s->num_chgroups++) {
831 if (remaining_channels > 2) {
832 for (i = 0; i < s->channels_for_cur_subframe; i++) {
833 int channel_idx = s->channel_indexes_for_cur_subframe[i];
834 if (!s->channel[channel_idx].grouped
837 s->channel[channel_idx].grouped = 1;
838 *channel_data++ = s->channel[channel_idx].coeffs;
843 for (i = 0; i < s->channels_for_cur_subframe; i++) {
844 int channel_idx = s->channel_indexes_for_cur_subframe[i];
845 if (!s->channel[channel_idx].grouped)
846 *channel_data++ = s->channel[channel_idx].coeffs;
847 s->channel[channel_idx].grouped = 1;
856 "Unknown channel transform type");
861 if (s->nb_channels == 2) {
883 "Coupled channels > 6");
899 for (i = 0; i < s->num_bands; i++) {
923 static const uint32_t fval_tab[16] = {
924 0x00000000, 0x3f800000, 0x40000000, 0x40400000,
925 0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
926 0x41000000, 0x41100000, 0x41200000, 0x41300000,
927 0x41400000, 0x41500000, 0x41600000, 0x41700000,
938 ff_dlog(s->avctx, "decode coefficients for channel %i\n", c);
953 while ((s->transmit_num_vec_coeffs || !rl_mode) &&
962 for (i = 0; i < 4; i += 2) {
975 vals[i] = fval_tab[idx >> 4 ];
976 vals[i+1] = fval_tab[idx & 0xF];
980 vals[0] = fval_tab[ idx >> 12 ];
981 vals[1] = fval_tab[(idx >> 8) & 0xF];
982 vals[2] = fval_tab[(idx >> 4) & 0xF];
983 vals[3] = fval_tab[ idx & 0xF];
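The fval_tab lookups above turn each 4-bit index straight into a float: the table holds the IEEE-754 bit patterns of 0.0f through 15.0f (0x3f800000 is 1.0f, 0x41700000 is 15.0f), so no integer-to-float conversion is needed per coefficient. A minimal standalone sketch that reproduces the table:

/* hedged sketch: fval_tab[i] is the bit pattern of (float)i */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    for (uint32_t i = 0; i < 16; i++) {
        float    f = (float)i;
        uint32_t bits;
        memcpy(&bits, &f, sizeof(bits));
        printf("%2u -> 0x%08x\n", i, bits);  /* 1 -> 0x3f800000, 15 -> 0x41700000 */
    }
    return 0;
}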
987 for (i = 0; i < 4; i++) {
993 ci->coeffs[cur_coeff] = 0;
996 rl_mode |= (++num_zeros > s->subframe_len >> 8);
1003 if (cur_coeff < s->subframe_len) {
1006 memset(&ci->coeffs[cur_coeff], 0,
1007 sizeof(*ci->coeffs) * (s->subframe_len - cur_coeff));
1010 cur_coeff, s->subframe_len,
1011 s->subframe_len, s->esc_len, 0);
1032 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1033 int c = s->channel_indexes_for_cur_subframe[i];
1036 s->channel[c].scale_factors = s->channel[c].saved_scale_factors[!s->channel[c].scale_factor_idx];
1037 sf_end = s->channel[c].scale_factors + s->num_bands;
1044 if (s->channel[c].reuse_sf) {
1045 const int8_t* sf_offsets = s->sf_offsets[s->table_idx][s->channel[c].table_idx];
1047 for (b = 0; b < s->num_bands; b++)
1048 s->channel[c].scale_factors[b] =
1049 s->channel[c].saved_scale_factors[s->channel[c].scale_factor_idx][*sf_offsets++];
1052 if (!s->channel[c].cur_subframe || get_bits1(&s->gb)) {
1054 if (!s->channel[c].reuse_sf) {
1057 s->channel[c].scale_factor_step = get_bits(&s->gb, 2) + 1;
1058 val = 45 / s->channel[c].scale_factor_step;
1059 for (sf = s->channel[c].scale_factors; sf < sf_end; sf++) {
1066 for (i = 0; i < s->num_bands; i++) {
1077 sign = (code & 1) - 1;
1079 } else if (idx == 1) {
1088 if (i >= s->num_bands) {
1090 "invalid scale factor coding\n");
1093 s->channel[c].scale_factors[i] += (val ^ sign) - sign;
1097 s->channel[c].scale_factor_idx = !s->channel[c].scale_factor_idx;
1098 s->channel[c].table_idx = s->table_idx;
1099 s->channel[c].reuse_sf = 1;
1103 s->channel[c].max_scale_factor = s->channel[c].scale_factors[0];
1104 for (sf = s->channel[c].scale_factors + 1; sf < sf_end; sf++) {
1105 s->channel[c].max_scale_factor =
1106 FFMAX(s->channel[c].max_scale_factor, *sf);
1121 for (i = 0; i < s->num_chgroups; i++) {
1122 if (s->chgroup[i].transform) {
1124 const int num_channels = s->chgroup[i].num_channels;
1125 float** ch_data = s->chgroup[i].channel_data;
1126 float** ch_end = ch_data + num_channels;
1127 const int8_t* tb = s->chgroup[i].transform_band;
1131 for (sfb = s->cur_sfb_offsets;
1132 sfb < s->cur_sfb_offsets + s->num_bands; sfb++) {
1136 for (y = sfb[0]; y < FFMIN(sfb[1], s->subframe_len); y++) {
1137 const float* mat = s->chgroup[i].decorrelation_matrix;
1138 const float* data_end = data + num_channels;
1139 float* data_ptr = data;
1142 for (ch = ch_data; ch < ch_end; ch++)
1143 *data_ptr++ = (*ch)[y];
1145 for (ch = ch_data; ch < ch_end; ch++) {
1148 while (data_ptr < data_end)
1149 sum += *data_ptr++ * *mat++;
1154 } else if (s->nb_channels == 2) {
1155 int len = FFMIN(sfb[1], s->subframe_len) - sfb[0];
1156 s->fdsp->vector_fmul_scalar(ch_data[0] + sfb[0],
1157 ch_data[0] + sfb[0],
1159 s->fdsp->vector_fmul_scalar(ch_data[1] + sfb[0],
1160 ch_data[1] + sfb[0],
1175 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1176 int c = s->channel_indexes_for_cur_subframe[i];
1178 int winlen = s->channel[c].prev_block_len;
1179 float* start = s->channel[c].coeffs - (winlen >> 1);
1181 if (s->subframe_len < winlen) {
1182 start += (winlen - s->subframe_len) >> 1;
1183 winlen = s->subframe_len;
1190 s->fdsp->vector_fmul_window(start, start, start + winlen,
1193 s->channel[c].prev_block_len = s->subframe_len;
1204 int offset = s->samples_per_frame;
1205 int subframe_len = s->samples_per_frame;
1207 int total_samples = s->samples_per_frame * s->nb_channels;
1208 int transmit_coeffs = 0;
1209 int cur_subwoofer_cutoff;
1217 for (i = 0; i < s->nb_channels; i++) {
1218 s->channel[i].grouped = 0;
1219 if (offset > s->channel[i].decoded_samples) {
1220 offset = s->channel[i].decoded_samples;
1222 s->channel[i].subframe_len[s->channel[i].cur_subframe];
1227 "processing subframe with offset %i len %i\n", offset, subframe_len);
1230 s->channels_for_cur_subframe = 0;
1231 for (i = 0; i < s->nb_channels; i++) {
1232 const int cur_subframe = s->channel[i].cur_subframe;
1234 total_samples -= s->channel[i].decoded_samples;
1237 if (offset == s->channel[i].decoded_samples &&
1238 subframe_len == s->channel[i].subframe_len[cur_subframe]) {
1239 total_samples -= s->channel[i].subframe_len[cur_subframe];
1240 s->channel[i].decoded_samples +=
1241 s->channel[i].subframe_len[cur_subframe];
1242 s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] = i;
1243 ++s->channels_for_cur_subframe;
1250 s->parsed_all_subframes = 1;
1253 ff_dlog(s->avctx, "subframe is part of %i channels\n",
1254 s->channels_for_cur_subframe);
1257 s->table_idx = av_log2(s->samples_per_frame/subframe_len);
1258 s->num_bands = s->num_sfb[s->table_idx];
1259 s->cur_sfb_offsets = s->sfb_offsets[s->table_idx];
1260 cur_subwoofer_cutoff = s->subwoofer_cutoffs[s->table_idx];
1263 offset += s->samples_per_frame >> 1;
1265 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1266 int c = s->channel_indexes_for_cur_subframe[i];
1268 s->channel[c].coeffs = &s->channel[c].out[offset];
1271 s->subframe_len = subframe_len;
1272 s->esc_len = av_log2(s->subframe_len - 1) + 1;
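A small worked example of the two derived values above, using an illustrative 2048-sample frame split into 512-sample subframes (these sizes are not taken from the listing):

/* hedged sketch: table_idx selects the per-block-size tables, esc_len is the
 * bit width needed to address any coefficient index inside the subframe */
#include <stdio.h>

static int ilog2(unsigned v)   /* stand-in for av_log2() */
{
    int n = -1;
    while (v) { n++; v >>= 1; }
    return n;
}

int main(void)
{
    int samples_per_frame = 2048, subframe_len = 512;
    int table_idx = ilog2(samples_per_frame / subframe_len); /* 2 */
    int esc_len   = ilog2(subframe_len - 1) + 1;             /* 9 bits for indexes 0..511 */
    printf("table_idx=%d esc_len=%d\n", table_idx, esc_len);
    return 0;
}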
1277 if (!(num_fill_bits = get_bits(&s->gb, 2))) {
1282 if (num_fill_bits >= 0) {
1303 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1304 int c = s->channel_indexes_for_cur_subframe[i];
1305 if ((s->channel[c].transmit_coefs = get_bits1(&s->gb)))
1306 transmit_coeffs = 1;
1310 if (transmit_coeffs) {
1312 int quant_step = 90 * s->bits_per_sample >> 4;
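With the common 16-bit sample size seen earlier in this decoder, this evaluates as (90 * 16) >> 4 = 1440 >> 4 = 90, the base quantization step before the per-channel modifiers read below are added.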
1315 if ((s->transmit_num_vec_coeffs = get_bits1(&s->gb))) {
1316 int num_bits = av_log2((s->subframe_len + 3)/4) + 1;
1317 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1318 int c = s->channel_indexes_for_cur_subframe[i];
1319 int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
1320 if (num_vec_coeffs > s->subframe_len) {
1325 s->channel[c].num_vec_coeffs = num_vec_coeffs;
1328 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1329 int c = s->channel_indexes_for_cur_subframe[i];
1330 s->channel[c].num_vec_coeffs = s->subframe_len;
1337 const int sign = (step == 31) - 1;
1343 quant_step += ((quant + step) ^ sign) - sign;
1345 if (quant_step < 0) {
1351 if (s->channels_for_cur_subframe == 1) {
1352 s->channel[s->channel_indexes_for_cur_subframe[0]].quant_step = quant_step;
1355 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1356 int c = s->channel_indexes_for_cur_subframe[i];
1357 s->channel[c].quant_step = quant_step;
1360 s->channel[c].quant_step += get_bits(&s->gb, modifier_len) + 1;
1362 ++s->channel[c].quant_step;
1372 ff_dlog(s->avctx, "BITSTREAM: subframe header length was %i\n",
1376 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1377 int c = s->channel_indexes_for_cur_subframe[i];
1378 if (s->channel[c].transmit_coefs &&
1382 memset(s->channel[c].coeffs, 0,
1383 sizeof(*s->channel[c].coeffs) * subframe_len);
1386 ff_dlog(s->avctx, "BITSTREAM: subframe length was %i\n",
1389 if (transmit_coeffs) {
1394 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1395 int c = s->channel_indexes_for_cur_subframe[i];
1396 const int* sf = s->channel[c].scale_factors;
1399 if (c == s->lfe_channel)
1400 memset(&s->tmp[cur_subwoofer_cutoff], 0, sizeof(*s->tmp) *
1401 (subframe_len - cur_subwoofer_cutoff));
1404 for (b = 0; b < s->num_bands; b++) {
1405 const int end = FFMIN(s->cur_sfb_offsets[b+1], s->subframe_len);
1406 const int exp = s->channel[c].quant_step -
1407 (s->channel[c].max_scale_factor - *sf++) *
1408 s->channel[c].scale_factor_step;
1410 int start = s->cur_sfb_offsets[b];
1411 s->fdsp->vector_fmul_scalar(s->tmp + start,
1412 s->channel[c].coeffs + start,
1413 quant, end - start);
1417 tx_fn(tx, s->channel[c].coeffs, s->tmp, sizeof(float));
1425 for (i = 0; i < s->channels_for_cur_subframe; i++) {
1426 int c = s->channel_indexes_for_cur_subframe[i];
1427 if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {
1431 ++s->channel[c].cur_subframe;
1446 int more_frames = 0;
1454 ff_dlog(s->avctx, "decoding frame with length %x\n", len);
1465 for (i = 0; i < s->nb_channels * s->nb_channels; i++)
1471 if (s->dynamic_range_compression) {
1473 ff_dlog(s->avctx, "drc_gain %i\n", s->drc_gain);
1483 s->trim_start = s->trim_end = 0;
1486 ff_dlog(s->avctx, "BITSTREAM: frame header length was %i\n",
1490 s->parsed_all_subframes = 0;
1491 for (i = 0; i < s->nb_channels; i++) {
1492 s->channel[i].decoded_samples = 0;
1493 s->channel[i].cur_subframe = 0;
1494 s->channel[i].reuse_sf = 0;
1498 while (!s->parsed_all_subframes) {
1506 for (i = 0; i < s->nb_channels; i++)
1507 memcpy(frame->extended_data[i], s->channel[i].out,
1508 s->samples_per_frame * sizeof(*s->channel[i].out));
1510 for (i = 0; i < s->nb_channels; i++) {
1512 memcpy(&s->channel[i].out[0],
1513 &s->channel[i].out[s->samples_per_frame],
1514 s->samples_per_frame * sizeof(*s->channel[i].out) >> 1);
1517 if (s->skip_frame) {
1525 if (s->len_prefix) {
1529 "frame[%"PRIu32"] would have to skip %i bits\n",
1579 s->num_saved_bits = s->frame_offset;
1581 buflen = (s->num_saved_bits + len + 7) >> 3;
1593 s->num_saved_bits += len;
1619 const uint8_t* buf = avpkt->data;
1620 int buf_size = avpkt->size;
1621 int num_bits_prev_frame;
1622 int packet_sequence_number;
1637 for (i = 0; i < s->nb_channels; i++) {
1638 memset(frame->extended_data[i], 0,
1639 s->samples_per_frame * sizeof(*s->channel[i].out));
1641 memcpy(frame->extended_data[i], s->channel[i].out,
1642 s->samples_per_frame * sizeof(*s->channel[i].out) >> 1);
1650 } else if (s->packet_done || s->packet_loss) {
1668 s->buf_bit_size = buf_size << 3;
1675 packet_sequence_number = get_bits(gb, 4);
1679 ff_dlog(avctx, "packet[%"PRId64"]: number of frames %d\n", avctx->frame_num, num_frames);
1680 packet_sequence_number = 0;
1684 num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
1688 ff_dlog(avctx, "packet[%"PRId64"]: skip packets %d\n", avctx->frame_num, s->skip_packets);
1692 num_bits_prev_frame);
1696 ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {
1699 "Packet loss detected! seq %"PRIx8" vs %x\n",
1700 s->packet_sequence_number, packet_sequence_number);
1702 s->packet_sequence_number = packet_sequence_number;
1704 if (num_bits_prev_frame > 0) {
1706 if (num_bits_prev_frame >= remaining_packet_bits) {
1707 num_bits_prev_frame = remaining_packet_bits;
1714 ff_dlog(avctx, "accumulated %x bits of frame data\n",
1715 s->num_saved_bits - s->frame_offset);
1718 if (!s->packet_loss)
1720 } else if (s->num_saved_bits - s->frame_offset) {
1721 ff_dlog(avctx, "ignoring %x previously saved bits\n",
1722 s->num_saved_bits - s->frame_offset);
1725 if (s->packet_loss) {
1729 s->num_saved_bits = 0;
1735 if (avpkt->size < s->next_packet_start) {
1740 s->buf_bit_size = (avpkt->size - s->next_packet_start) << 3;
1749 if (!s->packet_loss)
1751 } else if (!s->len_prefix
1771 if (s->packet_done && !s->packet_loss &&
1783 if (s->trim_start < frame->nb_samples) {
1784 for (int ch = 0; ch < frame->ch_layout.nb_channels; ch++)
1785 frame->extended_data[ch] += s->trim_start * 4;
1787 frame->nb_samples -= s->trim_start;
1796 if (s->trim_end < frame->nb_samples) {
1797 frame->nb_samples -= s->trim_end;
1816 int *got_frame_ptr, AVPacket *avpkt)
1822 frame->nb_samples = s->samples_per_frame;
1832 int *got_frame_ptr, AVPacket *avpkt)
1835 int got_stream_frame_ptr = 0;
1836 int i, ret = 0, eof = 0;
1838 if (!s->frames[s->current_stream]->data[0]) {
1840 s->frames[s->current_stream]->nb_samples = 512;
1843 } else if (s->frames[s->current_stream]->nb_samples != 512) {
1846 s->frames[s->current_stream]->nb_samples = 512;
1851 if (!s->xma[s->current_stream].eof_done) {
1853 &got_stream_frame_ptr, avpkt);
1859 for (i = 0; i < s->num_streams; i++) {
1860 if (!s->xma[i].eof_done && s->frames[i]->data[0]) {
1862 &got_stream_frame_ptr, avpkt);
1865 eof &= s->xma[i].eof_done;
1869 if (s->xma[0].trim_start)
1870 s->trim_start = s->xma[0].trim_start;
1871 if (s->xma[0].trim_end)
1872 s->trim_end = s->xma[0].trim_end;
1875 if (got_stream_frame_ptr) {
1876 const int nb_samples = s->frames[s->current_stream]->nb_samples;
1877 void *left[1] = { s->frames[s->current_stream]->extended_data[0] };
1878 void *right[1] = { s->frames[s->current_stream]->extended_data[1] };
1881 if (s->xma[s->current_stream].nb_channels > 1)
1883 } else if (ret < 0) {
1884 s->current_stream = 0;
1891 if (s->xma[s->current_stream].packet_done ||
1892 s->xma[s->current_stream].packet_loss) {
1893 int nb_samples = INT_MAX;
1896 if (s->xma[s->current_stream].skip_packets != 0) {
1899 min[0] = s->xma[0].skip_packets;
1902 for (i = 1; i < s->num_streams; i++) {
1903 if (s->xma[i].skip_packets < min[0]) {
1904 min[0] = s->xma[i].skip_packets;
1909 s->current_stream = min[1];
1913 for (i = 0; i < s->num_streams; i++) {
1914 s->xma[i].skip_packets = FFMAX(0, s->xma[i].skip_packets - 1);
1918 if (!eof && avpkt->size)
1919 nb_samples -= FFMIN(nb_samples, 4096);
1922 if ((nb_samples > 0 || eof || !avpkt->size) && !s->flushed) {
1926 nb_samples -= av_clip(s->trim_end + s->trim_start - 128 - 64, 0, nb_samples);
1930 frame->nb_samples = nb_samples;
1934 for (i = 0; i < s->num_streams; i++) {
1935 const int start_ch = s->start_channel[i];
1936 void *left[1] = { frame->extended_data[start_ch + 0] };
1939 if (s->xma[i].nb_channels > 1) {
1940 void *right[1] = { frame->extended_data[start_ch + 1] };
1945 *got_frame_ptr = nb_samples > 0;
1955 int i, ret, start_channels = 0;
2000 for (i = 0; i < s->num_streams; i++) {
2008 s->start_channel[i] = start_channels;
2009 start_channels += s->xma[i].nb_channels;
2017 if (!s->samples[0][i] || !s->samples[1][i])
2029 for (i = 0; i < s->num_streams; i++) {
2048 for (i = 0; i < s->nb_channels; i++)
2049 memset(s->channel[i].out, 0, s->samples_per_frame *
2050 sizeof(*s->channel[i].out));
2052 s->skip_packets = 0;
2078 for (i = 0; i < s->num_streams; i++)
2081 s->current_stream = 0;
uint16_t num_vec_coeffs
number of vector coded coefficients
void av_audio_fifo_free(AVAudioFifo *af)
Free an AVAudioFifo.
static const float *const default_decorrelation[]
default decorrelation matrix offsets
static av_cold int xma_decode_init(AVCodecContext *avctx)
int subframe_offset
subframe offset in the bit reservoir
@ AV_SAMPLE_FMT_FLTP
float, planar
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
static av_always_inline double ff_exp10(double x)
Compute 10^x for floating point values.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
static int get_bits_left(GetBitContext *gb)
static int decode_subframe(WMAProDecodeCtx *s)
Decode a single subframe (block).
GetBitContext gb
bitstream reader context
uint16_t samples_per_frame
number of samples to output
SINETABLE_CONST float *const ff_sine_windows[]
int8_t scale_factor_step
scaling step for the current subframe
static const uint8_t vec4_lens[HUFF_VEC4_SIZE]
static void wmapro_window(WMAProDecodeCtx *s)
Apply sine window and reconstruct the output buffer.
#define WMAPRO_BLOCK_MAX_BITS
log2 of max block size
uint16_t min_samples_per_subframe
int sample_rate
samples per second
uint16_t subframe_offset[MAX_SUBFRAMES]
subframe positions in the current frame
static int decode_tilehdr(WMAProDecodeCtx *s)
Decode how the data in the frame is split into subframes.
int av_audio_fifo_write(AVAudioFifo *af, void *const *data, int nb_samples)
Write data to an AVAudioFifo.
static VLCElem vec2_vlc[562]
2 coefficients per symbol
static const uint8_t vec2_table[HUFF_VEC2_SIZE][2]
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static uint8_t * append(uint8_t *buf, const uint8_t *src, int size)
static int get_bits_count(const GetBitContext *s)
static const uint16_t coef0_run[HUFF_COEF0_SIZE]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
AVCodecContext * avctx
codec context for av_log
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
static av_cold int wmapro_decode_init(AVCodecContext *avctx)
Initialize the decoder.
static void flush(WMAProDecodeCtx *s)
static av_cold int get_rate(AVCodecContext *avctx)
#define WMAPRO_BLOCK_MIN_SIZE
minimum block size
static int decode_scale_factors(WMAProDecodeCtx *s)
Extract scale factors from the bitstream.
int ff_wma_run_level_decode(AVCodecContext *avctx, GetBitContext *gb, const VLCElem *vlc, const float *level_table, const uint16_t *run_table, int version, WMACoef *ptr, int offset, int num_coefs, int block_len, int frame_len_bits, int coef_nb_bits)
Decode run level compressed coefficients.
#define WMAPRO_BLOCK_MAX_SIZE
maximum block size
enum AVChannelOrder order
Channel order used in this layout.
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
static VLCElem vec4_vlc[604]
4 coefficients per symbol
int nb_channels
Number of channels in this layout.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
PutBitContext pb
context for filling the frame_data buffer
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
static av_cold int decode_init(WMAProDecodeCtx *s, AVCodecContext *avctx, int num_stream)
Initialize the decoder.
static av_cold int decode_end(WMAProDecodeCtx *s)
Uninitialize the decoder and free all resources.
static const VLCElem * coef_vlc[2]
coefficient run length vlc codes
int16_t sfb_offsets[WMAPRO_BLOCK_SIZES][MAX_BANDS]
scale factor band offsets (multiples of 4)
static void skip_bits(GetBitContext *s, int n)
static av_cold void close(AVCodecParserContext *s)
static float sin64[33]
sine table for decorrelation
#define HUFF_SCALE_RL_SIZE
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
static SDL_Window * window
Context for an Audio FIFO Buffer.
AVCodec p
The public AVCodec.
static VLCElem vec1_vlc[562]
1 coefficient per symbol
AVChannelLayout ch_layout
Audio channel layout.
static const uint16_t coef0_syms[HUFF_COEF0_SIZE]
static av_cold int wmapro_decode_end(AVCodecContext *avctx)
uint8_t num_chgroups
number of channel groups
uint8_t drc_gain
gain for the DRC tool
static int put_bits_left(PutBitContext *s)
int flags
AV_CODEC_FLAG_*.
static double val(void *priv, double ch)
int8_t num_bands
number of scale factor bands
float tmp[WMAPRO_BLOCK_MAX_SIZE]
IMDCT output buffer.
int8_t sf_offsets[WMAPRO_BLOCK_SIZES][WMAPRO_BLOCK_SIZES][MAX_BANDS]
scale factor resample matrix
WMAProChannelGrp chgroup[WMAPRO_MAX_CHANNELS]
channel group information
int max_scale_factor
maximum scale factor for the current subframe
int quant_step
quantization step for the current subframe
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
uint8_t table_idx
index in sf_offsets for the scale factor reference block
static int decode_subframe_length(WMAProDecodeCtx *s, int offset)
Decode the subframe length.
static VLCElem sf_vlc[616]
scale factor DPCM vlc
static const uint8_t quant[64]
float out[WMAPRO_BLOCK_MAX_SIZE+WMAPRO_BLOCK_MAX_SIZE/2]
output buffer
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int buf_bit_size
buffer size in bits
#define FF_ARRAY_ELEMS(a)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
const FFCodec ff_xma1_decoder
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
uint8_t subframe_len_bits
number of bits used for the subframe length
For static VLCs, the number of bits can often be hardcoded at each get_vlc2() callsite.
static void decode_decorrelation_matrix(WMAProDecodeCtx *s, WMAProChannelGrp *chgroup)
Calculate a decorrelation matrix from the bitstream parameters.
frame specific decoder context for a single channel
@ AV_TX_FLOAT_MDCT
Standard MDCT with a sample data type of float, double or int32_t, respectively.
int * scale_factors
pointer to the scale factor values used for decoding
#define FF_CODEC_DECODE_CB(func)
int8_t skip_frame
skip output step
int16_t subwoofer_cutoffs[WMAPRO_BLOCK_SIZES]
subwoofer cutoff values
uint32_t decode_flags
used compression features
const FFCodec ff_xma2_decoder
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
uint8_t packet_loss
set in case of bitstream error
int av_channel_layout_from_mask(AVChannelLayout *channel_layout, uint64_t mask)
Initialize a native channel layout from a bitmask indicating which channels are present.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static void inverse_channel_transform(WMAProDecodeCtx *s)
Reconstruct the individual channel data.
static int get_sbits(GetBitContext *s, int n)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
WMAProDecodeCtx xma[XMA_MAX_STREAMS]
#define CODEC_LONG_NAME(str)
static int decode_coeffs(WMAProDecodeCtx *s, int c)
Extract the coefficients from the bitstream.
#define XMA_MAX_CHANNELS_STREAM
int16_t prev_block_len
length of the previous block
int8_t transmit_num_vec_coeffs
number of vector coded coefficients is part of the bitstream
int8_t channel_indexes_for_cur_subframe[WMAPRO_MAX_CHANNELS]
uint8_t grouped
channel is part of a group
int start_channel[XMA_MAX_STREAMS]
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static void wmapro_flush(AVCodecContext *avctx)
Clear decoder buffers (for seeking).
const float * windows[WMAPRO_BLOCK_SIZES]
windows for the different block sizes
AVAudioFifo * av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, int nb_samples)
Allocate an AVAudioFifo.
int8_t transform
transform on / off
static const uint8_t vec1_table[HUFF_VEC1_SIZE][2]
struct AVCodecInternal * internal
Private context used for internal data.
static unsigned int get_bits1(GetBitContext *s)
int8_t nb_channels
number of channels in stream (XMA1/2)
static void xma_flush(AVCodecContext *avctx)
#define WMAPRO_MAX_CHANNELS
current decoder limitations
channel group for channel transformations
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
static const uint8_t scale_rl_level[HUFF_SCALE_RL_SIZE]
uint8_t eof_done
set when EOF reached and extra subframe is written (XMA1/2)
uint32_t frame_num
current frame number (not used for decoding)
static int decode_packet(AVCodecContext *avctx, WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
float * coeffs
pointer to the subframe decode buffer
#define DECLARE_ALIGNED(n, t, v)
uint8_t len_prefix
frame is prefixed with its length
static const uint16_t critical_freq[]
frequencies to divide the frequency spectrum into scale factor bands
static const uint8_t scale_table[]
#define WMAPRO_BLOCK_SIZES
possible block sizes
enum AVSampleFormat sample_fmt
audio sample format
uint8_t frame_data[MAX_FRAMESIZE+AV_INPUT_BUFFER_PADDING_SIZE]
compressed frame data
int av_audio_fifo_read(AVAudioFifo *af, void *const *data, int nb_samples)
Read data from an AVAudioFifo.
static av_cold void decode_init_static(void)
int8_t scale_factor_idx
index for the transmitted scale factor values (used for resampling)
#define MAX_SUBFRAMES
max number of subframes per channel
AVAudioFifo * samples[2][XMA_MAX_STREAMS]
static const uint8_t *BS_FUNC() align(BSCTX *bc)
Skip bits to a byte boundary.
#define CODEC_SAMPLEFMTS(...)
int av_audio_fifo_size(AVAudioFifo *af)
Get the current number of samples in the AVAudioFifo available for reading.
int8_t transform_band[MAX_BANDS]
controls if the transform is enabled for a certain band
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
uint8_t max_num_subframes
int8_t reuse_sf
share scale factors between subframes
int next_packet_start
start offset of the next wma packet in the demuxer packet
#define i(width, name, range_min, range_max)
static int put_bits_count(PutBitContext *s)
uint8_t cur_subframe
current subframe number
static const uint8_t scale_rl_run[HUFF_SCALE_RL_SIZE]
uint16_t decoded_samples
number of already processed samples
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
static int xma_decode_packet(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
static const float coef1_level[HUFF_COEF1_SIZE]
av_tx_fn tx_fn[WMAPRO_BLOCK_SIZES]
#define MAX_BANDS
max number of scale factor bands
static const uint16_t coef1_run[HUFF_COEF1_SIZE]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
const char * name
Name of the codec implementation.
uint16_t trim_start
number of samples to skip at start
tables for wmapro decoding
GetBitContext pgb
bitstream reader context for the packet
int64_t frame_num
Frame counter, set by libavcodec.
int block_align
number of bytes per packet, if constant and known, or 0; used by some WAV-based audio codecs.
static void save_bits(WMAProDecodeCtx *s, GetBitContext *gb, int len, int append)
Fill the bit reservoir with a (partial) frame.
uint8_t num_channels
number of channels in the group
static int wmapro_decode_packet(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
Decode a single WMA packet.
static av_cold void dump_context(WMAProDecodeCtx *s)
helper function to print the most important members of the context
#define AV_INPUT_BUFFER_PADDING_SIZE
static int decode_frame(WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr)
Decode one WMA frame.
int8_t channels_for_cur_subframe
number of channels that contain the subframe
main external API structure.
av_cold int ff_wma_get_frame_len_bits(int sample_rate, int version, unsigned int decode_flags)
Get the samples per frame for this stream.
int8_t esc_len
length of escaped coefficients
uint8_t table_idx
index for the num_sfb, sfb_offsets, sf_offsets and subwoofer_cutoffs tables
int8_t num_sfb[WMAPRO_BLOCK_SIZES]
scale factor bands per block size
uint16_t subframe_len[MAX_SUBFRAMES]
subframe length in samples
void ff_init_ff_sine_windows(int index)
initialize the specified entry of ff_sine_windows
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
uint8_t bits_per_sample
integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1....
uint8_t max_subframe_len_bit
flag indicating that the subframe is of maximum size when the first subframe length bit is 1
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
uint8_t packet_offset
frame offset in the packet
uint8_t skip_packets
packets to skip to find next packet in a stream (XMA1/2)
float * channel_data[WMAPRO_MAX_CHANNELS]
transformation coefficients
const av_cold VLCElem * ff_vlc_init_tables_from_lengths(VLCInitState *state, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags)
AVTXContext * tx[WMAPRO_BLOCK_SIZES]
MDCT context per block size.
int saved_scale_factors[2][MAX_BANDS]
resampled and (previously) transmitted scale factor values
int frame_offset
frame offset in the bit reservoir
AVFrame * frames[XMA_MAX_STREAMS]
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
uint8_t packet_done
set when a packet is fully decoded
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
#define avpriv_request_sample(...)
static av_cold int xma_decode_end(AVCodecContext *avctx)
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
int8_t lfe_channel
lfe channel index
static const uint8_t scale_rl_table[HUFF_SCALE_RL_SIZE][2]
uint16_t trim_end
number of samples to skip at end
#define VLC_INIT_STATIC_TABLE_FROM_LENGTHS(vlc_table, nb_bits, nb_codes, lens, lens_wrap, syms, syms_wrap, syms_size, offset, flags)
static const uint16_t vec4_syms[HUFF_VEC4_SIZE]
int16_t * cur_sfb_offsets
sfb offsets for the current block
static void scale(int *out, const int *in, const int w, const int h, const int shift)
static int decode_channel_transform(WMAProDecodeCtx *s)
Decode channel transformation parameters.
int16_t subframe_len
current subframe length
#define VLC_INIT_STATE(_table)
int8_t parsed_all_subframes
all subframes decoded?
This structure stores compressed data.
static const uint8_t coef0_lens[HUFF_COEF0_SIZE]
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
static const float coef0_level[HUFF_COEF0_SIZE]
#define MAX_FRAMESIZE
maximum compressed frame size
uint8_t packet_sequence_number
current packet number
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
uint8_t dynamic_range_compression
frame contains DRC data
static int remaining_bits(WMAProDecodeCtx *s, GetBitContext *gb)
Calculate remaining input buffer length.
void av_audio_fifo_reset(AVAudioFifo *af)
Reset the AVAudioFifo buffer.
unsigned int ff_wma_get_large_val(GetBitContext *gb)
Decode an uncompressed coefficient.
#define FF_DEBUG_BITSTREAM
int num_saved_bits
saved number of bits
float decorrelation_matrix[WMAPRO_MAX_CHANNELS *WMAPRO_MAX_CHANNELS]
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
const FFCodec ff_wmapro_decoder
wmapro decoder
static const uint8_t coef1_table[HUFF_COEF1_SIZE][2]
static VLCElem sf_rl_vlc[1406]
scale factor run length vlc
#define WMAPRO_BLOCK_MIN_BITS
log2 of min block size