42 #if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
43 #define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
44 #elif CONFIG_VP7_DECODER
45 #define VPX(vp7, f) vp7_ ## f
46 #else // CONFIG_VP8_DECODER
47 #define VPX(vp7, f) vp8_ ## f
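/* VPX() picks the VP7 or VP8 flavour of a helper: token-pasted at compile
 * time when only one decoder is built, selected by the vp7 flag at run time
 * when both are. A hedged usage sketch (call site assumed, not from this
 * excerpt):
 *
 *     ret = VPX(is_vp7, decode_frame_header)(s, buf, buf_size);
 *     // expands to vp7_decode_frame_header() or vp8_decode_frame_header()
 */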
103 #if CONFIG_VP8_DECODER
152 for (i = 0; i < 5; i++)
173 #if CONFIG_VP8_VAAPI_HWACCEL
176 #if CONFIG_VP8_NVDEC_HWACCEL
270 for (i = 0; i < 4; i++)
273 for (i = 0; i < 4; i++)
277 for (i = 0; i < 3; i++)
286 for (i = 0; i < 4; i++) {
320 if (buf_size - size < 0)
368 for (i = 0; i < 4; i++) {
421 for (i = 0; i < 4; i++)
422 for (j = 0; j < 16; j++)
432 for (i = 0; i < 4; i++)
433 for (j = 0; j < 8; j++)
434 for (k = 0; k < 3; k++)
443 #define VP7_MVC_SIZE 17
444 #define VP8_MVC_SIZE 19
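/* VP7 codes 17 probabilities per MV component, VP8 codes 19 (matching
 * vp7_mv_default_prob[2][17] and vp8_mv_default_prob[2][19]); the shared
 * update loop below iterates over mvc_size entries accordingly. */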
453 for (i = 0; i < 4; i++)
456 for (i = 0; i < 3; i++)
460 for (i = 0; i < 2; i++)
461 for (j = 0; j < mvc_size; j++)
481 for (j = 1; j < 3; j++) {
482 for (i = 0; i < height / 2; i++)
489 const uint8_t *src, ptrdiff_t src_linesize,
494 for (j = 0; j < height; j++) {
495 for (i = 0; i < width; i++) {
496 uint8_t y = src[j * src_linesize + i];
497 dst[j * dst_linesize + i] = av_clip_uint8(y + ((y * beta) >> 8) + alpha);
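/* The fade is a linear luma ramp: out = clip_uint8(y + y * beta / 256 + alpha).
 * Worked example (values chosen for illustration only): alpha = 16,
 * beta = -64 maps y = 128 to clip(128 - 32 + 16) = 112. */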
508 if (!s->keyframe && (alpha || beta)) {
535 width, height, alpha, beta);
544 int part1_size, hscale, vscale, i, j, ret;
552 s->profile = (buf[0] >> 1) & 7;
560 part1_size = AV_RL24(buf) >> 4;
562 if (buf_size < 4 - s->profile + part1_size) {
576 buf_size -= part1_size;
584 if (hscale || vscale)
593 for (i = 0; i < 2; i++)
605 for (i = 0; i < 4; i++) {
610 for (j = 0; j < 3; j++)
615 for (j = 0; j < 4; j++)
673 for (i = 1; i < 16; i++)
700 int header_size, hscale, vscale, ret;
712 header_size = AV_RL24(buf) >> 5;
728 if (header_size > buf_size - 7 * s->keyframe) {
734 if (AV_RL24(buf) != 0x2a019d) {
736 "Invalid start code 0x%x\n", AV_RL24(buf));
739 width = AV_RL16(buf + 3) & 0x3fff;
740 height = AV_RL16(buf + 5) & 0x3fff;
741 hscale = buf[4] >> 6;
742 vscale = buf[6] >> 6;
746 if (hscale || vscale)
765 buf_size -= header_size;
840 dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
841 av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
842 dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
843 av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
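/* Double clamp: the source MV is clipped into the per-macroblock bounds
 * mv_min/mv_max, which are themselves clipped to int16 range first, so the
 * result always fits the 16-bit components of VP56mv. */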
856 for (i = 0; i < 3; i++)
858 for (i = (vp7 ? 7 : 9); i > 3; i--)
913 const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
923 top_mv = top_mb->bmv;
939 for (n = 0; n < num; n++) {
941 uint32_t left, above;
945 left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
947 left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
949 above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
951 above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
988 int xoffset, int yoffset, int boundary,
989 int *edge_x, int *edge_y)
991 int vwidth = mb_width + 1;
992 int new = (mb_y + yoffset) * vwidth + mb_x + xoffset;
993 if (new < boundary || new % vwidth == vwidth - 1)
995 *edge_y = new / vwidth;
996 *edge_x = new % vwidth;
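/* A candidate is rejected when it lies before the frame start
 * (new < boundary) or falls into the padding macroblock column that the
 * VP7 reference decoder appends to the right edge, i.e. when
 * new % vwidth == vwidth - 1 with vwidth = mb_width + 1. */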
1007 int mb_x, int mb_y, int layout)
1010 enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
1011 enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1035 if (AV_RN32A(&near_mv[CNT_NEAREST])) {
1036 if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
1038 } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
1039 if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
1047 AV_WN32A(&near_mv[CNT_NEAREST], mv);
1068 if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
1069 AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
1079 mb->bmv[0] = mb->mv;
1082 mb->mv = near_mv[CNT_NEAR];
1083 mb->bmv[0] = mb->mv;
1086 mb->mv = near_mv[CNT_NEAREST];
1087 mb->bmv[0] = mb->mv;
1092 mb->bmv[0] = mb->mv;
1098 int mb_x, int mb_y, int layout)
1103 enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
1104 enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1113 mb_edge[0] = mb + 2;
1114 mb_edge[2] = mb + 1;
1125 #define MV_EDGE_CHECK(n) \
1127 VP8Macroblock *edge = mb_edge[n]; \
1128 int edge_ref = edge->ref_frame; \
1129 if (edge_ref != VP56_FRAME_CURRENT) { \
1130 uint32_t mv = AV_RN32A(&edge->mv); \
1132 if (cur_sign_bias != sign_bias[edge_ref]) { \
1135 mv = ((mv & 0x7fff7fff) + \
1136 0x00010001) ^ (mv & 0x80008000); \
1138 if (!n || mv != AV_RN32A(&near_mv[idx])) \
1139 AV_WN32A(&near_mv[++idx], mv); \
1140 cnt[idx] += 1 + (n != 2); \
1142 cnt[CNT_ZERO] += 1 + (n != 2); \
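/* When the edge macroblock's reference frame has the opposite sign bias,
 * both 16-bit MV components are negated in one 32-bit operation: the
 * bitwise complement taken on the lines elided from this excerpt plus the
 * add/xor above form a SWAR negation. Per lane:
 *     -v == (((~v) & 0x7fff) + 1) ^ ((~v) & 0x8000)
 * and masking with 0x7fff7fff keeps the +0x00010001 carries from crossing
 * between the x and y lanes. */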
1155 if (cnt[CNT_SPLITMV] &&
1156 AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1157 cnt[CNT_NEAREST] += 1;
1160 if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1162 FFSWAP(VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1168 clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1179 mb->bmv[0] = mb->mv;
1182 clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAR]);
1183 mb->bmv[0] = mb->mv;
1186 clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAREST]);
1187 mb->bmv[0] = mb->mv;
1192 mb->bmv[0] = mb->mv;
1198 int mb_x, int keyframe, int layout)
1214 for (y = 0; y < 4; y++) {
1215 for (x = 0; x < 4; x++) {
1219 left[y] = top[x] = *intra4x4;
1225 for (i = 0; i < 16; i++)
1237 static const char *const vp7_feature_name[] = { "q-index",
1239 "partial-golden-update",
1244 for (i = 0; i < 4; i++) {
1250 "Feature %s present in macroblock (value 0x%x)\n",
1259 *segment = ref ? *ref : *segment;
1326 int i, uint8_t *token_prob, int16_t qmul[2],
1327 const uint8_t scan[16], int vp7)
1341 token_prob = probs[i][0];
1349 token_prob = probs[i + 1][1];
1369 int cat = (a << 1) + b;
1370 coeff = 3 + (8 << cat);
1374 token_prob = probs[i + 1][2];
1386 int16_t dc = block[0];
1395 block[0] = pred[0] = dc;
1400 block[0] = pred[0] = dc;
1414 token_prob, qmul, scan, IS_VP7);
1417 #ifndef vp8_decode_block_coeffs_internal
1445 int i, int zero_nhood, int16_t qmul[2],
1446 const uint8_t scan[16], int vp7)
1448 uint8_t *token_prob = probs[i][zero_nhood];
1452 token_prob, qmul, scan)
1462 int i, x, y, luma_start = 0, luma_ctx = 3;
1463 int nnz_pred, nnz, nnz_total = 0;
1468 nnz_pred = t_nnz[8] + l_nnz[8];
1474 l_nnz[8] = t_nnz[8] = !!nnz;
1494 for (y = 0; y < 4; y++)
1495 for (x = 0; x < 4; x++) {
1496 nnz_pred = l_nnz[y] + t_nnz[x];
1499 luma_start, nnz_pred,
1505 t_nnz[x] = l_nnz[y] = !!nnz;
1512 for (i = 4; i < 6; i++)
1513 for (y = 0; y < 2; y++)
1514 for (x = 0; x < 2; x++) {
1515 nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1521 t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
1535 ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
1537 AV_COPY128(top_border, src_y + 15 * linesize);
1539 AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1540 AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1546 uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x,
1547 int mb_y, int mb_width, int simple, int xchg)
1549 uint8_t *top_border_m1 = top_border - 32;
1551 src_cb -= uvlinesize;
1552 src_cr -= uvlinesize;
1554 #define XCHG(a, b, xchg) \
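/* The body of XCHG() is elided from this excerpt; a sketch of its likely
 * shape, inferred from the call sites below (assumed, not verbatim):
 *
 *     #define XCHG(a, b, xchg) \
 *         if (xchg)            \
 *             AV_SWAP64(a, b); \
 *         else                 \
 *             AV_COPY64(a, b);
 *
 * i.e. either swap or just copy 8 bytes between the saved top border and
 * the reconstruction buffer. */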
1562 XCHG(top_border_m1 + 8, src_y - 8, xchg);
1563 XCHG(top_border, src_y, xchg);
1564 XCHG(top_border + 8, src_y + 8, 1);
1565 if (mb_x < mb_width - 1)
1566 XCHG(top_border + 32, src_y + 16, 1);
1570 if (!simple || !mb_y) {
1571 XCHG(top_border_m1 + 16, src_cb - 8, xchg);
1572 XCHG(top_border_m1 + 24, src_cr - 8, xchg);
1573 XCHG(top_border + 16, src_cb, 1);
1574 XCHG(top_border + 24, src_cr, 1);
1624 int *copy_buf, int vp7)
1628 if (!mb_x && mb_y) {
1662 int x, y, mode, nnz;
1678 const uint8_t lo = is_vp7 ? 128 : 127;
1679 const uint8_t hi = is_vp7 ? 128 : 129;
1680 uint8_t tr_top[4] = { lo, lo, lo, lo };
1688 if (mb_y && mb_x == s->mb_width - 1) {
1689 tr = tr_right[-1] * 0x01010101u;
1696 for (y = 0; y < 4; y++) {
1698 for (x = 0; x < 4; x++) {
1704 if ((y == 0 || x == 3) && mb_y == 0) {
1707 topright = tr_right;
1710 mb_y + y, &copy, is_vp7);
1712 dst = copy_dst + 12;
1716 AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1722 copy_dst[3] = ptr[4 * x - s->linesize - 1];
1731 copy_dst[11] = ptr[4 * x - 1];
1732 copy_dst[19] = ptr[4 * x + s->linesize - 1];
1733 copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1734 copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1763 mb_x, mb_y, is_vp7);
1774 { 0, 1, 2, 1, 2, 1, 2, 1 },
1776 { 0, 3, 5, 3, 5, 3, 5, 3 },
1777 { 0, 2, 3, 2, 3, 2, 3, 2 },
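/* subpel_idx rows, indexed by the subpel phase mx/my (see vp8_mc_luma and
 * vp8_mc_chroma below): [0] = left/top margin the filter taps need (also
 * the mc_func index), [1] = total extra pixels to fetch, [2] = right/bottom
 * margin. Phase 0 is the full-pel case and needs no margin at all. */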
1799 int x_off, int y_off, int block_w, int block_h,
1806 ptrdiff_t src_linesize = linesize;
1808 int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
1809 int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];
1811 x_off += mv->x >> 2;
1812 y_off += mv->y >> 2;
1816 src += y_off * linesize + x_off;
1817 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1818 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1820 src - my_idx * linesize - mx_idx,
1822 block_w + subpel_idx[1][mx],
1823 block_h + subpel_idx[1][my],
1824 x_off - mx_idx, y_off - my_idx,
1829 mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1832 mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1833 linesize, block_h, 0, 0);
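/* If the motion vector, widened by the interpolation margins from
 * subpel_idx, would read outside the reference frame, the source block is
 * first copied into the decoder's edge_emu_buffer with its border samples
 * replicated (videodsp emulated_edge_mc), and the subpel filter then runs
 * on that temporary buffer instead of the frame itself. */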
1857 int x_off, int y_off, int block_w, int block_h,
1864 int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1865 int my = mv->y & 7, my_idx = subpel_idx[0][my];
1867 x_off += mv->x >> 3;
1868 y_off += mv->y >> 3;
1871 src1 += y_off * linesize + x_off;
1872 src2 += y_off * linesize + x_off;
1874 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1875 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1877 src1 - my_idx * linesize - mx_idx,
1879 block_w + subpel_idx[1][mx],
1880 block_h + subpel_idx[1][my],
1881 x_off - mx_idx, y_off - my_idx, width, height);
1886 src2 - my_idx * linesize - mx_idx,
1887 EDGE_EMU_LINESIZE, linesize,
1888 block_w + subpel_idx[1][mx],
1889 block_h + subpel_idx[1][my],
1890 x_off - mx_idx, y_off - my_idx, width, height);
1892 mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1894 mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1895 mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1899 mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1900 mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1907 int bx_off, int by_off, int block_w, int block_h,
1914 ref_frame, mv, x_off + bx_off, y_off + by_off,
1915 block_w, block_h, width, height, s->linesize,
1934 dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1935 &uvmv, x_off + bx_off, y_off + by_off,
1936 block_w, block_h, width, height, s->uvlinesize,
1947 if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1948 int x_off = mb_x << 4, y_off = mb_y << 4;
1949 int mx = (mb->mv.x >> 2) + x_off + 8;
1950 int my = (mb->mv.y >> 2) + y_off;
1952 int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1957 off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1969 int x_off = mb_x << 4, y_off = mb_y << 4;
1977 0, 0, 16, 16, width, height, &mb->mv);
1984 for (y = 0; y < 4; y++) {
1985 for (x = 0; x < 4; x++) {
1987 ref, &bmv[4 * y + x],
1988 4 * x + x_off, 4 * y + y_off, 4, 4,
1999 for (y = 0; y < 2; y++) {
2000 for (x = 0; x < 2; x++) {
2001 uvmv.x = mb->bmv[2 * y * 4 + 2 * x].x +
2002          mb->bmv[2 * y * 4 + 2 * x + 1].x +
2003          mb->bmv[(2 * y + 1) * 4 + 2 * x].x +
2004          mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
2005 uvmv.y = mb->bmv[2 * y * 4 + 2 * x].y +
2006          mb->bmv[2 * y * 4 + 2 * x + 1].y +
2007          mb->bmv[(2 * y + 1) * 4 + 2 * x].y +
2008          mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
2017 &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
2026 0, 0, 16, 8, width, height, &bmv[0]);
2028 0, 8, 16, 8, width, height, &bmv[1]);
2032 0, 0, 8, 16, width, height, &bmv[0]);
2034 8, 0, 8, 16, width, height, &bmv[1]);
2038 0, 0, 8, 8, width, height, &bmv[0]);
2040 8, 0, 8, 8, width, height, &bmv[1]);
2042 0, 8, 8, 8, width, height, &bmv[2]);
2044 8, 8, 8, 8, width, height, &bmv[3]);
2056 for (y = 0; y < 4; y++) {
2059 if (nnz4 & ~0x01010101) {
2060 for (x = 0; x < 4; x++) {
2081 for (ch = 0; ch < 2; ch++) {
2085 if (nnz4 & ~0x01010101) {
2086 for (y = 0; y < 2; y++) {
2087 for (x = 0; x < 2; x++) {
2090 td->block[4 + ch][(y << 1) + x],
2094 td->block[4 + ch][(y << 1) + x],
2098 goto chroma_idct_end;
2115 int interior_limit, filter_level;
2129 filter_level = av_clip_uintp2(filter_level, 6);
2131 interior_limit = filter_level;
2136 interior_limit = FFMAX(interior_limit, 1);
2146 int mb_x, int mb_y, int is_vp7)
2148 int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
2154 static const uint8_t hev_thresh_lut[2][64] = {
2155 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2156 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2157 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2159 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2160 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2161 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2169 bedge_lim_y = filter_level;
2170 bedge_lim_uv = filter_level * 2;
2171 mbedge_lim = filter_level + 2;
2174 bedge_lim_uv = filter_level * 2 + inner_limit;
2175 mbedge_lim = bedge_lim_y + 4;
2178 hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
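/* filter_level was clipped to 6 bits above, so the 64-entry rows cover
 * every possible level; the row is picked by frame type, with the
 * inter-frame row topping out at a high-edge-variance threshold of 3
 * versus 2 for key frames. */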
2182 mbedge_lim, inner_limit, hev_thresh);
2184 mbedge_lim, inner_limit, hev_thresh);
2187 #define H_LOOP_FILTER_16Y_INNER(cond) \
2188 if (cond && inner_filter) { \
2189 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
2190 bedge_lim_y, inner_limit, \
2192 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
2193 bedge_lim_y, inner_limit, \
2195 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
2196 bedge_lim_y, inner_limit, \
2198 s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
2199 uvlinesize, bedge_lim_uv, \
2200 inner_limit, hev_thresh); \
2207 mbedge_lim, inner_limit, hev_thresh);
2209 mbedge_lim, inner_limit, hev_thresh);
2214 linesize, bedge_lim_y,
2215 inner_limit, hev_thresh);
2217 linesize, bedge_lim_y,
2218 inner_limit, hev_thresh);
2220 linesize, bedge_lim_y,
2221 inner_limit, hev_thresh);
2223 dst[2] + 4 * uvlinesize,
2224 uvlinesize, bedge_lim_uv,
2225 inner_limit, hev_thresh);
2235 int mbedge_lim, bedge_lim;
2244 bedge_lim = 2 * filter_level + inner_limit;
2245 mbedge_lim = bedge_lim + 4;
2264 #define MARGIN (16 << 2)
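/* MV bounds are tracked in quarter-pel units, so (16 << 2) is one
 * macroblock's worth of margin around the visible frame. */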
2274 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2276 ((s->mb_width + 1) * (mb_y + 1) + 1);
2283 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2288 prev_frame && prev_frame->seg_map ?
2311 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2313 int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2314 if (atomic_load(&otd->thread_mb_pos) < tmp) { \
2315 pthread_mutex_lock(&otd->lock); \
2316 atomic_store(&td->wait_mb_pos, tmp); \
2318 if (atomic_load(&otd->thread_mb_pos) >= tmp) \
2320 pthread_cond_wait(&otd->cond, &otd->lock); \
2322 atomic_store(&td->wait_mb_pos, INT_MAX); \
2323 pthread_mutex_unlock(&otd->lock); \
2327 #define update_pos(td, mb_y, mb_x) \
2329 int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2330 int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2332 int is_null = !next_td || !prev_td; \
2333 int pos_check = (is_null) ? 1 : \
2334 (next_td != td && pos >= atomic_load(&next_td->wait_mb_pos)) || \
2335 (prev_td != td && pos >= atomic_load(&prev_td->wait_mb_pos)); \
2336 atomic_store(&td->thread_mb_pos, pos); \
2337 if (sliced_threading && pos_check) { \
2338 pthread_mutex_lock(&td->lock); \
2339 pthread_cond_broadcast(&td->cond); \
2340 pthread_mutex_unlock(&td->lock); \
2344 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
2345 #define update_pos(td, mb_y, mb_x) while(0)
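/* Sliced-threading handshake: progress is published as one atomic int
 * packing (mb_y << 16) | mb_x, so a single comparison orders positions
 * across rows and columns; e.g. row 2, column 5 packs to 0x00020005 and
 * compares greater than any position in rows 0 and 1. check_thread_pos()
 * sleeps on the owner's condvar until that thread has advanced past the
 * requested macroblock; update_pos() stores the new position and
 * broadcasts only when a neighbouring thread could actually be waiting.
 * The while(0) fallbacks compile the protocol away when thread support is
 * not built in. */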
2349 int jobnr, int threadnr, int is_vp7)
2354 int mb_x, mb_xy = mb_y * s->mb_width;
2371 prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2375 next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2385 memset(mb - 1, 0, sizeof(*mb));
2389 if (!is_vp7 || mb_y == 0)
2395 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2399 if (prev_td != td) {
2400 if (threadnr != 0) {
2402 mb_x + (is_vp7 ? 2 : 1),
2403 mb_y - (is_vp7 ? 2 : 1));
2406 mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2407 mb_y - (is_vp7 ? 2 : 1));
2414 dst[2] - dst[1], 2);
2418 prev_frame && prev_frame->seg_map ?
2419 prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2450 if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2477 int jobnr, int threadnr)
2483 int jobnr, int threadnr)
2489 int jobnr, int threadnr, int is_vp7)
2511 prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2515 next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2517 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2521 (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
2526 if (num_jobs == 1) {
2538 filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2548 int jobnr, int threadnr)
2554 int jobnr, int threadnr)
2561 int threadnr, int is_vp7)
2573 for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2595 int jobnr, int threadnr)
2601 int jobnr, int threadnr)
2611 int ret, i, referenced, num_jobs;
2651 for (i = 0; i < 5; i++)
2653 &s->frames[i] != prev_frame &&
2676 "Discarding interframe without a prior keyframe!\n");
2681 curframe->tf.f->key_frame = s->keyframe;
2721 s->linesize = curframe->tf.f->linesize[0];
2796 #if CONFIG_VP7_DECODER
2845 if (CONFIG_VP7_DECODER && is_vp7) {
2850 } else if (CONFIG_VP8_DECODER && !is_vp7) {
2868 #if CONFIG_VP7_DECODER
2880 #if CONFIG_VP8_DECODER
2897 #define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
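/* REBASE() translates a frame pointer from the source decoding context
 * into the same slot of this context's frames[] array: the two arrays are
 * parallel, so only the base address changes while the index is preserved.
 * Used below to migrate the framep[]/next_framep[] pointers across
 * update_thread_context(). */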
2913 s->prob[0] = s_src->prob[!s_src->update_probabilities];
2919 if (s_src->frames[i].tf.f->buf[0]) {
2920 int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2926 s->framep[0] = REBASE(s_src->next_framep[0]);
2927 s->framep[1] = REBASE(s_src->next_framep[1]);
2928 s->framep[2] = REBASE(s_src->next_framep[2]);
2929 s->framep[3] = REBASE(s_src->next_framep[3]);
2936 #if CONFIG_VP7_DECODER
2943 .
init = vp7_decode_init,
2945 .decode = vp7_decode_frame,
2951 #if CONFIG_VP8_DECODER
2967 #if CONFIG_VP8_VAAPI_HWACCEL
2970 #if CONFIG_VP8_NVDEC_HWACCEL