85 { -1, -1, -1, -1, 2, 4, 6, 8 },
86 { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
90 -1, -1, -1, 1, 4, 7, 10, 12,
94 8, 6, 4, 2, -1, -1, -1, -1,
95 -1, -1, -1, -1, 2, 4, 6, 8,
109 unsigned int min_channels = 1;
110 unsigned int max_channels = 2;
241 add = (delta * 2 + 1) * step;
245 if ((nibble & 8) == 0)
246 pred =
av_clip(pred + (add >> 3), -32767, 32767);
248 pred =
av_clip(pred - (add >> 3), -32767, 32767);
286 step_index =
av_clip(step_index, 0, 88);
293 diff = ((2 * delta + 1) * step) >>
shift;
295 if (sign) predictor -=
diff;
296 else predictor +=
diff;
312 step_index =
av_clip(step_index, 0, 88);
316 diff = (delta *
step) >> shift;
318 if (sign) predictor -=
diff;
319 else predictor +=
diff;
333 delta = step * (2 * nibble - 15);
353 step_index =
av_clip(step_index, 0, 60);
355 predictor = c->
predictor + step * nibble;
371 step_index =
av_clip(step_index, 0, 88);
373 sign = nibble & (1 <<
shift);
375 diff = ((2 * delta + 1) * step) >>
shift;
377 if (sign) predictor -=
diff;
378 else predictor +=
diff;
394 step_index =
av_clip(step_index, 0, 88);
397 if (nibble & 4) diff +=
step;
398 if (nibble & 2) diff += step >> 1;
399 if (nibble & 1) diff += step >> 2;
417 predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->
idelta;
423 if (c->
idelta > INT_MAX/768) {
437 step_index =
av_clip(step_index, 0, 48);
441 diff = ((2 * delta + 1) * step) >> 3;
443 if (sign) predictor -=
diff;
444 else predictor +=
diff;
462 diff = ((2 * delta + 1) * c->
step) >> 3;
477 sign = nibble & (1<<(size-1));
478 delta = nibble & ((1<<(size-1))-1);
485 if (delta >= (2*size - 3) && c->
step < 3)
487 else if (delta == 0 && c->
step > 0)
523 sample += lookup_sample;
525 sample += lookup_sample >> 1;
527 sample += lookup_sample >> 2;
529 sample += lookup_sample >> 3;
531 sample += lookup_sample >> 4;
533 sample += lookup_sample >> 5;
535 sample += lookup_sample >> 6;
560 out0 += sample_offset;
564 out1 += sample_offset;
567 shift = 12 - (in[4+i*2] & 15);
568 filter = in[4+i*2] >> 4;
587 s = t*(1<<
shift) + ((s_1*f0 + s_2*f1+32)>>6);
600 shift = 12 - (in[5+i*2] & 15);
601 filter = in[5+i*2] >> 4;
618 s = t*(1<<
shift) + ((s_1*f0 + s_2*f1+32)>>6);
644 int k0, signmask, nb_bits,
count;
645 int size = buf_size*8;
653 k0 = 1 << (nb_bits-2);
654 signmask = 1 << (nb_bits-1);
657 for (i = 0; i < avctx->
channels; i++) {
665 for (i = 0; i < avctx->
channels; i++) {
680 if (delta & signmask)
725 int buf_size,
int *coded_samples,
int *approx_nb_samples)
730 int has_coded_samples = 0;
734 *approx_nb_samples = 0;
742 if (buf_size < 76 * ch)
747 if (buf_size < 34 * ch)
764 nb_samples = buf_size * 2 / ch;
781 return (buf_size - header_size) * 2 / ch;
787 has_coded_samples = 1;
788 *coded_samples = bytestream2_get_le32u(gb);
789 nb_samples =
FFMIN((buf_size - 8) * 2, *coded_samples);
793 has_coded_samples = 1;
794 *coded_samples = bytestream2_get_le32(gb);
795 *coded_samples -= *coded_samples % 28;
796 nb_samples = (buf_size - 12) / 30 * 28;
799 has_coded_samples = 1;
800 *coded_samples = bytestream2_get_le32(gb);
801 nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
804 nb_samples = (buf_size - ch) / ch * 2;
811 has_coded_samples = 1;
814 header_size = 4 + 9 * ch;
815 *coded_samples = bytestream2_get_le32(gb);
818 header_size = 4 + 5 * ch;
819 *coded_samples = bytestream2_get_le32(gb);
822 header_size = 4 + 5 * ch;
823 *coded_samples = bytestream2_get_be32(gb);
826 *coded_samples -= *coded_samples % 28;
827 nb_samples = (buf_size - header_size) * 2 / ch;
828 nb_samples -= nb_samples % 28;
829 *approx_nb_samples = 1;
834 nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
839 if (buf_size < 4 * ch)
841 nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
846 nb_samples = (buf_size - 4 * ch) * 2 / ch;
854 if (buf_size < 4 * ch)
856 nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
862 nb_samples = (buf_size - 6 * ch) * 2 / ch;
867 nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
873 int samples_per_byte;
885 nb_samples += buf_size * samples_per_byte / ch;
890 int buf_bits = buf_size * 8 - 2;
891 int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
892 int block_hdr_size = 22 * ch;
893 int block_size = block_hdr_size + nbits * ch * 4095;
894 int nblocks = buf_bits / block_size;
895 int bits_left = buf_bits - nblocks * block_size;
896 nb_samples = nblocks * 4096;
897 if (bits_left >= block_hdr_size)
898 nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
904 nb_samples = buf_size * 14 / (8 * ch);
907 has_coded_samples = 1;
910 bytestream2_get_le32(gb) :
911 bytestream2_get_be32(gb);
912 buf_size -= 8 + 36 * ch;
914 nb_samples = buf_size / 8 * 14;
915 if (buf_size % 8 > 1)
916 nb_samples += (buf_size % 8 - 1) * 2;
917 *approx_nb_samples = 1;
920 nb_samples = buf_size / (9 * ch) * 16;
923 nb_samples = (buf_size / 128) * 224 / ch;
927 nb_samples = buf_size / (16 * ch) * 28;
933 nb_samples = buf_size / ch;
938 if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
945 int *got_frame_ptr,
AVPacket *avpkt)
949 int buf_size = avpkt->
size;
957 int nb_samples, coded_samples, approx_nb_samples,
ret;
961 nb_samples =
get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
962 if (nb_samples <= 0) {
971 samples = (int16_t *)frame->
data[0];
977 if (!approx_nb_samples && coded_samples != nb_samples)
979 frame->
nb_samples = nb_samples = coded_samples;
988 for (channel = 0; channel < avctx->
channels; channel++) {
995 predictor =
sign_extend(bytestream2_get_be16u(&gb), 16);
996 step_index = predictor & 0x7F;
1019 for (m = 0; m < 64; m += 2) {
1020 int byte = bytestream2_get_byteu(&gb);
1045 for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1046 for (i = 0; i < avctx->
channels; i++) {
1050 samples = &samples_p[
i][1 + n * samples_per_block];
1051 for (j = 0; j < block_size; j++) {
1053 (j % 4) + (j / 4) * (avctx->
channels * 4) + i * 4];
1058 for (m = 0; m < samples_per_block; m++) {
1066 for (n = 0; n < (nb_samples - 1) / 8; n++) {
1067 for (i = 0; i < avctx->
channels; i++) {
1069 samples = &samples_p[
i][1 + n * 8];
1070 for (m = 0; m < 8; m += 2) {
1071 int v = bytestream2_get_byteu(&gb);
1080 for (i = 0; i < avctx->
channels; i++)
1083 for (i = 0; i < avctx->
channels; i++) {
1092 for (i = 0; i < avctx->
channels; i++) {
1093 samples = (int16_t *)frame->
data[i];
1095 for (n = nb_samples >> 1; n > 0; n--) {
1096 int v = bytestream2_get_byteu(&gb);
1103 for (i = 0; i < avctx->
channels; i++)
1105 for (i = 0; i < avctx->
channels; i++)
1108 for (n = 0; n < nb_samples >> (1 - st); n++) {
1109 int v = bytestream2_get_byteu(&gb);
1116 int block_predictor;
1119 for (channel = 0; channel < avctx->
channels; channel++) {
1121 block_predictor = bytestream2_get_byteu(&gb);
1122 if (block_predictor > 6) {
1124 channel, block_predictor);
1134 for(n = (nb_samples - 2) >> 1; n > 0; n--) {
1135 int byte = bytestream2_get_byteu(&gb);
1141 block_predictor = bytestream2_get_byteu(&gb);
1142 if (block_predictor > 6) {
1150 block_predictor = bytestream2_get_byteu(&gb);
1151 if (block_predictor > 6) {
1173 for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1174 int byte = bytestream2_get_byteu(&gb);
1182 for (channel = 0; channel < avctx->
channels; channel+=2) {
1185 c->
status[channel + 1].
step = bytestream2_get_le16u(&gb) & 0x1f;
1190 for (n = 0; n < nb_samples; n+=2) {
1191 int v = bytestream2_get_byteu(&gb);
1195 for (n = 0; n < nb_samples; n+=2) {
1196 int v = bytestream2_get_byteu(&gb);
1203 for (channel = 0; channel < avctx->
channels; channel++) {
1213 for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1214 int v = bytestream2_get_byteu(&gb);
1223 int decode_top_nibble_next = 0;
1225 const int16_t *samples_end = samples + avctx->
channels * nb_samples;
1241 #define DK3_GET_NEXT_NIBBLE() \ 1242 if (decode_top_nibble_next) { \ 1243 nibble = last_byte >> 4; \ 1244 decode_top_nibble_next = 0; \ 1246 last_byte = bytestream2_get_byteu(&gb); \ 1247 nibble = last_byte & 0x0F; \ 1248 decode_top_nibble_next = 1; \ 1251 while (samples < samples_end) {
1284 for (channel = 0; channel < avctx->
channels; channel++) {
1295 for (n = nb_samples >> (1 - st); n > 0; n--) {
1297 int v = bytestream2_get_byteu(&gb);
1311 for (channel = 0; channel < avctx->
channels; channel++) {
1322 for (
int subframe = 0; subframe < nb_samples / 256; subframe++) {
1323 for (channel = 0; channel < avctx->
channels; channel++) {
1324 samples = samples_p[
channel] + 256 * subframe;
1325 for (n = 0; n < 256; n += 2) {
1326 int v = bytestream2_get_byteu(&gb);
1334 for (channel = 0; channel < avctx->
channels; channel++) {
1338 for (n = 0; n < nb_samples; n += 2) {
1339 int v = bytestream2_get_byteu(&gb);
1346 for (n = nb_samples >> (1 - st); n > 0; n--) {
1347 int v = bytestream2_get_byteu(&gb);
1353 for (n = nb_samples >> (1 - st); n > 0; n--) {
1354 int v = bytestream2_get_byteu(&gb);
1360 for (n = nb_samples / 2; n > 0; n--) {
1361 for (channel = 0; channel < avctx->
channels; channel++) {
1362 int v = bytestream2_get_byteu(&gb);
1370 for (n = nb_samples / 2; n > 0; n--) {
1371 for (channel = 0; channel < avctx->
channels; channel++) {
1372 int v = bytestream2_get_byteu(&gb);
1380 for (n = 0; n < nb_samples / 2; n++) {
1381 int v = bytestream2_get_byteu(&gb);
1387 for (n = nb_samples >> (1 - st); n > 0; n--) {
1388 int v = bytestream2_get_byteu(&gb);
1394 for (channel = 0; channel < avctx->
channels; channel++) {
1404 for (n = 0; n < nb_samples / 2; n++) {
1407 byte[0] = bytestream2_get_byteu(&gb);
1409 byte[1] = bytestream2_get_byteu(&gb);
1410 for(channel = 0; channel < avctx->
channels; channel++) {
1413 for(channel = 0; channel < avctx->
channels; channel++) {
1420 for (channel = 0; channel < avctx->
channels; channel++) {
1421 int16_t *smp = samples_p[
channel];
1423 for (n = nb_samples / 2; n > 0; n--) {
1424 int v = bytestream2_get_byteu(&gb);
1430 for (n = nb_samples / 2; n > 0; n--) {
1431 for (channel = 0; channel < avctx->
channels; channel++) {
1432 int v = bytestream2_get_byteu(&gb);
1443 int16_t *out0 = samples_p[0];
1444 int16_t *out1 = samples_p[1];
1445 int samples_per_block = 28 * (3 - avctx->
channels) * 4;
1446 int sample_offset = 0;
1447 int bytes_remaining;
1451 avctx->
channels, sample_offset)) < 0)
1454 sample_offset += samples_per_block;
1459 if (bytes_remaining > 0) {
1465 for (i=0; i<=st; i++) {
1473 for (i=0; i<=st; i++) {
1479 for (n = nb_samples >> (1 - st); n > 0; n--) {
1480 int byte = bytestream2_get_byteu(&gb);
1486 for (n = nb_samples >> (1 - st); n > 0; n--) {
1487 int byte = bytestream2_get_byteu(&gb);
1494 int previous_left_sample, previous_right_sample;
1495 int current_left_sample, current_right_sample;
1496 int next_left_sample, next_right_sample;
1497 int coeff1l, coeff2l, coeff1r, coeff2r;
1498 int shift_left, shift_right;
1506 current_left_sample =
sign_extend(bytestream2_get_le16u(&gb), 16);
1507 previous_left_sample =
sign_extend(bytestream2_get_le16u(&gb), 16);
1508 current_right_sample =
sign_extend(bytestream2_get_le16u(&gb), 16);
1509 previous_right_sample =
sign_extend(bytestream2_get_le16u(&gb), 16);
1511 for (count1 = 0; count1 < nb_samples / 28; count1++) {
1512 int byte = bytestream2_get_byteu(&gb);
1518 byte = bytestream2_get_byteu(&gb);
1519 shift_left = 20 - (byte >> 4);
1520 shift_right = 20 - (byte & 0x0F);
1522 for (count2 = 0; count2 < 28; count2++) {
1523 byte = bytestream2_get_byteu(&gb);
1524 next_left_sample =
sign_extend(byte >> 4, 4) * (1 << shift_left);
1525 next_right_sample =
sign_extend(byte, 4) * (1 << shift_right);
1527 next_left_sample = (next_left_sample +
1528 (current_left_sample * coeff1l) +
1529 (previous_left_sample * coeff2l) + 0x80) >> 8;
1530 next_right_sample = (next_right_sample +
1531 (current_right_sample * coeff1r) +
1532 (previous_right_sample * coeff2r) + 0x80) >> 8;
1534 previous_left_sample = current_left_sample;
1536 previous_right_sample = current_right_sample;
1538 *samples++ = current_left_sample;
1539 *samples++ = current_right_sample;
1551 for(channel = 0; channel < avctx->
channels; channel++) {
1552 int byte = bytestream2_get_byteu(&gb);
1555 shift[
channel] = 20 - (byte & 0x0F);
1557 for (count1 = 0; count1 < nb_samples / 2; count1++) {
1560 byte[0] = bytestream2_get_byteu(&gb);
1561 if (st) byte[1] = bytestream2_get_byteu(&gb);
1562 for(i = 4; i >= 0; i-=4) {
1563 for(channel = 0; channel < avctx->
channels; channel++) {
1585 int previous_sample, current_sample, next_sample;
1593 for (channel=0; channel<avctx->
channels; channel++)
1594 offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1595 bytestream2_get_le32(&gb)) +
1598 for (channel=0; channel<avctx->
channels; channel++) {
1600 samplesC = samples_p[
channel];
1603 current_sample =
sign_extend(bytestream2_get_le16(&gb), 16);
1604 previous_sample =
sign_extend(bytestream2_get_le16(&gb), 16);
1610 for (count1 = 0; count1 < nb_samples / 28; count1++) {
1611 int byte = bytestream2_get_byte(&gb);
1613 current_sample =
sign_extend(bytestream2_get_be16(&gb), 16);
1614 previous_sample =
sign_extend(bytestream2_get_be16(&gb), 16);
1616 for (count2=0; count2<28; count2++)
1617 *samplesC++ =
sign_extend(bytestream2_get_be16(&gb), 16);
1621 shift = 20 - (byte & 0x0F);
1623 for (count2=0; count2<28; count2++) {
1627 byte = bytestream2_get_byte(&gb);
1631 next_sample += (current_sample * coeff1) +
1632 (previous_sample * coeff2);
1635 previous_sample = current_sample;
1636 current_sample = next_sample;
1637 *samplesC++ = current_sample;
1643 }
else if (count != count1) {
1645 count =
FFMAX(count, count1);
1659 for (channel=0; channel<avctx->
channels; channel++) {
1662 for (n = 0; n < 4; n++, s += 32) {
1668 val =
sign_extend(bytestream2_get_le16u(&gb), 16);
1669 shift[n] = 20 - (val & 0x0F);
1673 for (m=2; m<32; m+=2) {
1675 for (n = 0; n < 4; n++, s += 32) {
1677 int byte = bytestream2_get_byteu(&gb);
1679 level =
sign_extend(byte >> 4, 4) * (1 << shift[n]);
1680 pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1684 pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1712 for (n = nb_samples >> 1; n > 0; n--) {
1713 int v = bytestream2_get_byteu(&gb);
1719 if (nb_samples & 1) {
1720 int v = bytestream2_get_byteu(&gb);
1731 for (i = 0; i < avctx->
channels; i++) {
1742 for (n = nb_samples >> (1 - st); n > 0; n--) {
1743 int v = bytestream2_get_byteu(&gb);
1750 for (n = nb_samples >> (1 - st); n > 0; n--) {
1751 int v = bytestream2_get_byteu(&gb);
1761 *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1763 *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1768 for (n = nb_samples >> (1 - st); n > 0; n--) {
1769 int byte = bytestream2_get_byteu(&gb);
1776 for (n = (nb_samples<<st) / 3; n > 0; n--) {
1777 int byte = bytestream2_get_byteu(&gb);
1781 (byte >> 2) & 0x07, 3, 0);
1786 for (n = nb_samples >> (2 - st); n > 0; n--) {
1787 int byte = bytestream2_get_byteu(&gb);
1791 (byte >> 4) & 0x03, 2, 2);
1793 (byte >> 2) & 0x03, 2, 2);
1804 for (n = nb_samples >> (1 - st); n > 0; n--) {
1805 int v = bytestream2_get_byteu(&gb);
1812 for (channel = 0; channel < avctx->
channels; channel++)
1816 for (channel = 0; channel < avctx->
channels; channel++) {
1818 for (n = nb_samples >> 1; n > 0; n--) {
1819 int v = bytestream2_get_byteu(&gb);
1827 int samples_per_block;
1831 samples_per_block = avctx->
extradata[0] / 16;
1832 blocks = nb_samples / avctx->
extradata[0];
1834 samples_per_block = nb_samples / 16;
1838 for (m = 0; m < blocks; m++) {
1839 for (channel = 0; channel < avctx->
channels; channel++) {
1843 samples = samples_p[
channel] + m * 16;
1845 for (i = 0; i < samples_per_block; i++) {
1846 int byte = bytestream2_get_byteu(&gb);
1847 int scale = 1 << (byte >> 4);
1848 int index = byte & 0xf;
1853 for (n = 0; n < 16; n++) {
1859 byte = bytestream2_get_byteu(&gb);
1863 sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1884 #define THP_GET16(g) \ 1886 avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \ 1887 bytestream2_get_le16u(&(g)) : \ 1888 bytestream2_get_be16u(&(g)), 16) 1898 for (i = 0; i < avctx->
channels; i++)
1899 for (n = 0; n < 16; n++)
1902 for (i = 0; i < avctx->
channels; i++)
1903 for (n = 0; n < 16; n++)
1908 for (i = 0; i < avctx->
channels; i++) {
1918 for (ch = 0; ch < avctx->
channels; ch++) {
1919 samples = samples_p[ch];
1922 for (i = 0; i < (nb_samples + 13) / 14; i++) {
1923 int byte = bytestream2_get_byteu(&gb);
1924 int index = (byte >> 4) & 7;
1925 unsigned int exp = byte & 0x0F;
1926 int64_t factor1 = table[ch][index * 2];
1927 int64_t factor2 = table[ch][index * 2 + 1];
1930 for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1936 byte = bytestream2_get_byteu(&gb);
1951 for (channel = 0; channel < avctx->
channels; channel++) {
1955 for (i = 0; i < nb_samples / 28; i++) {
1959 header = bytestream2_get_byteu(&gb);
1963 for (n = 0; n < 28; n++) {
1966 switch (header >> 4) {
1982 byte = bytestream2_get_byteu(&gb);
1988 sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
2001 for (channel = 0; channel < avctx->
channels; channel++) {
2002 samples = samples_p[
channel] +
block * nb_samples_per_block;
2006 for (i = 0; i < nb_samples_per_block / 28; i++) {
2009 filter = bytestream2_get_byteu(&gb);
2010 shift = filter & 0xf;
2011 filter = filter >> 4;
2014 flag = bytestream2_get_byteu(&gb);
2017 for (n = 0; n < 28; n++) {
2024 byte = bytestream2_get_byteu(&gb);
2028 scale = scale * (1 << 12);
2058 for (channel = 0; channel < avctx->
channels; channel++) {
2065 control = bytestream2_get_byteu(&gb);
2066 shift = (control >> 4) + 2;
2068 for (n = 0; n < 16; n++) {
2069 int sample = bytestream2_get_byteu(&gb);
2078 for (channel = 0; channel < avctx->
channels; channel++) {
2084 for (n = 0; n < nb_samples * avctx->
channels; n++) {
2085 int v = bytestream2_get_byteu(&gb);
2090 for (n = nb_samples / 2; n > 0; n--) {
2091 for (channel = 0; channel < avctx->
channels; channel++) {
2092 int v = bytestream2_get_byteu(&gb);
2133 #define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \ 2134 AVCodec ff_ ## name_ ## _decoder = { \ 2136 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ 2137 .type = AVMEDIA_TYPE_AUDIO, \ 2139 .priv_data_size = sizeof(ADPCMDecodeContext), \ 2140 .init = adpcm_decode_init, \ 2141 .decode = adpcm_decode_frame, \ 2142 .flush = adpcm_flush, \ 2143 .capabilities = AV_CODEC_CAP_DR1, \ 2144 .sample_fmts = sample_fmts_, \ 2145 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, \
const struct AVCodec * codec
static const int16_t ea_adpcm_table[]
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int shift(int a, int b)
This structure describes decoded (raw) audio or video data.
static int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
ptrdiff_t const GLvoid * data
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define avpriv_request_sample(...)
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
static enum AVSampleFormat sample_fmts_s16[]
#define FF_ARRAY_ELEMS(a)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
static int get_sbits(GetBitContext *s, int n)
static int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
const uint8_t ff_adpcm_AdaptCoeff1[]
Divided by 4 to fit in 8-bit integers.
enum AVSampleFormat sample_fmt
audio sample format
The exact code depends on how similar the blocks are and how related they are to the block
static const int offsets[]
static av_cold int adpcm_decode_init(AVCodecContext *avctx)
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
static void adpcm_flush(AVCodecContext *avctx)
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define u(width, name, range_min, range_max)
static int get_bits_count(const GetBitContext *s)
static const int8_t mtf_index_table[16]
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples)
Get the number of samples (per channel) that will be decoded from the packet.
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
static int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
bitstream reader API header.
static const uint8_t header[24]
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
static const uint16_t table[]
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
static int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
static const int8_t xa_adpcm_table[5][2]
const uint16_t ff_adpcm_afc_coeffs[2][16]
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
ADPCM encoder/decoder common header.
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
const int8_t *const ff_adpcm_index_tables[4]
const int16_t ff_adpcm_step_table[89]
This is the step table.
static int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
static int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
static int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
const int8_t ff_adpcm_index_table[16]
static const int8_t swf_index_tables[4][16]
const int16_t ff_adpcm_mtaf_stepsize[32][16]
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, const uint8_t *in, ADPCMChannelStatus *left, ADPCMChannelStatus *right, int channels, int sample_offset)
static void predictor(uint8_t *src, ptrdiff_t size)
const int8_t ff_adpcm_AdaptCoeff2[]
Divided by 4 to fit in 8-bit integers.
int vqa_version
VQA version.
static const uint8_t ff_adpcm_ima_block_sizes[4]
static enum AVSampleFormat sample_fmts_s16p[]
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
const int16_t ff_adpcm_oki_step_table[49]
static const float pred[4]
static const uint8_t ff_adpcm_ima_block_samples[4]
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static av_always_inline int bytestream2_tell(GetByteContext *g)
const int16_t ff_adpcm_AdaptationTable[]
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
main external API structure.
const int16_t ff_adpcm_ima_cunning_step_table[61]
static int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
#define DK3_GET_NEXT_NIBBLE()
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
static int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
static int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
static int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
ADPCMChannelStatus status[14]
static av_const int sign_extend(int val, unsigned bits)
static int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
static unsigned int get_bits_le(GetBitContext *s, int n)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const int8_t ff_adpcm_ima_cunning_index_table[9]
const int8_t ff_adpcm_yamaha_difflookup[]
common internal api header.
const int16_t ff_adpcm_yamaha_indexscale[]
static int adpcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
channel
Use these values when setting the channel map with ebur128_set_channel().
static const int8_t zork_index_table[8]
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define xf(width, name, var, range_min, range_max, subs,...)
int channels
number of audio channels
static const double coeff[2][5]
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
static int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
static float add(float src0, float src1)
static enum AVSampleFormat sample_fmts_both[]
In filters, the word “frame” indicates either a video frame or a group of audio samples.
In filters, the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, declare the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
uint8_t ** extended_data
pointers to the data planes/channels.
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
static double val(void *priv, double ch)
This structure stores compressed data.
int nb_samples
number of audio samples (per channel) described by this frame
Trying all byte sequences up to a megabyte in length and selecting the best-looking sequence would yield far too many cases to try. But a word about quality, which is also called distortion. Distortion can be quantified by almost any quality measurement one chooses: the sum of squared differences is commonly used, but more complex methods that consider psychovisual effects can be used as well. It makes no difference in this discussion. First step:
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_)