54 int16_t *
block,
int n,
int qscale)
56 int i,
level, nCoeffs;
57 const uint16_t *quant_matrix;
64 for(i=1;i<=nCoeffs;i++) {
70 level = (int)(level * qscale * quant_matrix[j]) >> 3;
71 level = (level - 1) | 1;
74 level = (int)(level * qscale * quant_matrix[j]) >> 3;
75 level = (level - 1) | 1;
83 int16_t *
block,
int n,
int qscale)
85 int i,
level, nCoeffs;
86 const uint16_t *quant_matrix;
91 for(i=0; i<=nCoeffs; i++) {
97 level = (((level << 1) + 1) * qscale *
98 ((int) (quant_matrix[j]))) >> 4;
99 level = (level - 1) | 1;
102 level = (((level << 1) + 1) * qscale *
103 ((int) (quant_matrix[j]))) >> 4;
104 level = (level - 1) | 1;
112 int16_t *
block,
int n,
int qscale)
114 int i,
level, nCoeffs;
115 const uint16_t *quant_matrix;
125 for(i=1;i<=nCoeffs;i++) {
131 level = (int)(level * qscale * quant_matrix[j]) >> 4;
134 level = (int)(level * qscale * quant_matrix[j]) >> 4;
142 int16_t *
block,
int n,
int qscale)
144 int i,
level, nCoeffs;
145 const uint16_t *quant_matrix;
157 for(i=1;i<=nCoeffs;i++) {
163 level = (int)(level * qscale * quant_matrix[j]) >> 4;
166 level = (int)(level * qscale * quant_matrix[j]) >> 4;
176 int16_t *
block,
int n,
int qscale)
178 int i,
level, nCoeffs;
179 const uint16_t *quant_matrix;
189 for(i=0; i<=nCoeffs; i++) {
195 level = (((level << 1) + 1) * qscale *
196 ((int) (quant_matrix[j]))) >> 5;
199 level = (((level << 1) + 1) * qscale *
200 ((int) (quant_matrix[j]))) >> 5;
210 int16_t *
block,
int n,
int qscale)
212 int i,
level, qmul, qadd;
221 qadd = (qscale - 1) | 1;
230 for(i=1; i<=nCoeffs; i++) {
234 level = level * qmul - qadd;
236 level = level * qmul + qadd;
244 int16_t *
block,
int n,
int qscale)
246 int i,
level, qmul, qadd;
251 qadd = (qscale - 1) | 1;
256 for(i=0; i<=nCoeffs; i++) {
260 level = level * qmul - qadd;
262 level = level * qmul + qadd;
273 memset(dst + h*linesize, 128, 16);
279 memset(dst + h*linesize, 128, 8);
293 for (i=0; i<4; i++) {
313 if (HAVE_INTRINSICS_NEON)
360 int yc_size = y_size + 2 * c_size;
380 2 * 64 *
sizeof(
int),
fail)
386 for (i = 0; i < 12; i++) {
397 yc_size *
sizeof(int16_t) * 16,
fail);
430 #define COPY(a) bak->a = src->a
431 COPY(sc.edge_emu_buffer);
434 COPY(sc.rd_scratchpad);
435 COPY(sc.b_scratchpad);
436 COPY(sc.obmc_scratchpad);
464 for (i = 0; i < 12; i++) {
475 "scratch buffers.\n");
504 if (
s1->context_initialized){
536 if (
s1->picture &&
s1->picture[i].f->buf[0] &&
541 #define UPDATE_PICTURE(pic)\
543 ff_mpeg_unref_picture(s->avctx, &s->pic);\
544 if (s1->pic.f && s1->pic.f->buf[0])\
545 ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
547 ret = ff_update_picture_tables(&s->pic, &s1->pic);\
556 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
557 ((pic && pic >= old_ctx->picture && \
558 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
559 &new_ctx->picture[pic - old_ctx->picture] : NULL)
572 (
char *) &
s1->pb_field_time +
sizeof(
s1->pb_field_time) -
573 (
char *) &
s1->last_time_base);
583 if (
s1->bitstream_buffer) {
584 if (
s1->bitstream_buffer_size +
588 s1->allocated_bitstream_buffer_size);
596 s1->bitstream_buffer_size);
605 &s->
sc,
s1->linesize) < 0) {
607 "scratch buffers.\n");
612 "be allocated due to unknown size.\n");
617 (
char *) &
s1->rtp_mode - (
char *) &
s1->progressive_sequence);
619 if (!
s1->first_field) {
621 if (
s1->current_picture_ptr)
679 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
703 yc_size = y_size + 2 * c_size;
737 mb_array_size *
sizeof(
float),
fail);
739 mb_array_size *
sizeof(
float),
fail);
746 for (i = 0; i < 2; i++) {
748 for (j = 0; j < 2; j++) {
749 for (k = 0; k < 2; k++) {
752 mv_table_size * 2 *
sizeof(int16_t),
781 for (i = 0; i < yc_size; i++)
843 for (i = 0; i < 2; i++) {
844 for (j = 0; j < 2; j++) {
845 for (k = 0; k < 2; k++) {
880 int nb_slices = (HAVE_THREADS &&
896 "decoding to AV_PIX_FMT_NONE is not supported.\n");
907 " reducing to %d\n", nb_slices, max_slices);
908 nb_slices = max_slices;
954 for (i = 0; i < nb_slices; i++) {
963 (s->
mb_height * (i) + nb_slices / 2) / nb_slices;
965 (s->
mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1004 for (i = 0; i < 2; i++) {
1005 for (j = 0; j < 2; j++) {
1006 for (k = 0; k < 2; k++) {
1082 if (nb_slices > 1) {
1083 for (i = 0; i < nb_slices; i++) {
1094 (s->
mb_height * (i) + nb_slices / 2) / nb_slices;
1096 (s->
mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1171 int i, h_chroma_shift, v_chroma_shift;
1175 for(i=0; i<frame->
height; i++)
1179 0x80, AV_CEIL_RSHIFT(frame->
width, h_chroma_shift));
1181 0x80, AV_CEIL_RSHIFT(frame->
width, h_chroma_shift));
1278 ff_dlog(s->
avctx,
"L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1288 int h_chroma_shift, v_chroma_shift;
1290 &h_chroma_shift, &v_chroma_shift);
1293 "allocating dummy last picture for B frame\n");
1296 "warning: first frame is no keyframe\n");
1299 "allocate dummy last picture for field based first keyframe\n");
1323 for(i=0; i<avctx->
height; i++)
1325 0x80, avctx->
width);
1329 0x80, AV_CEIL_RSHIFT(avctx->
width, h_chroma_shift));
1331 0x80, AV_CEIL_RSHIFT(avctx->
width, h_chroma_shift));
1336 for(i=0; i<avctx->
height; i++)
1366 #if 0 // BUFREF-FIXME
1388 for (i = 0; i < 4; i++) {
1431 static int clip_line(
int *sx,
int *sy,
int *ex,
int *ey,
int maxx)
1439 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
1446 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
1465 if (
clip_line(&sx, &sy, &ex, &ey, w - 1))
1467 if (
clip_line(&sy, &sx, &ey, &ex, h - 1))
1470 sx = av_clip(sx, 0, w - 1);
1471 sy = av_clip(sy, 0, h - 1);
1472 ex = av_clip(ex, 0, w - 1);
1473 ey = av_clip(ey, 0, h - 1);
1475 buf[sy * stride + sx] +=
color;
1484 f = ((ey - sy) << 16) / ex;
1485 for (x = 0; x <= ex; x++) {
1487 fr = (x * f) & 0xFFFF;
1488 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1489 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1499 f = ((ex - sx) << 16) / ey;
1502 for(y= 0; y <= ey; y++){
1504 fr = (y*f) & 0xFFFF;
1505 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1506 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1519 int ey,
int w,
int h,
int stride,
int color,
int tail,
int direction)
1528 sx = av_clip(sx, -100, w + 100);
1529 sy = av_clip(sy, -100, h + 100);
1530 ex = av_clip(ex, -100, w + 100);
1531 ey = av_clip(ey, -100, h + 100);
1536 if (dx * dx + dy * dy > 3 * 3) {
1550 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1551 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1553 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1558 int dst_x,
int dst_y,
1559 int motion_x,
int motion_y,
int motion_scale,
1569 mb->
src_x = dst_x + motion_x / motion_scale;
1570 mb->
src_y = dst_y + motion_y / motion_scale;
1571 mb->
source = direction ? 1 : -1;
1580 uint32_t *mbtype_table, int8_t *
qscale_table, int16_t (*motion_val[2])[2],
1582 int mb_width,
int mb_height,
int mb_stride,
int quarter_sample)
1585 const int shift = 1 + quarter_sample;
1586 const int scale = 1 <<
shift;
1588 const int mv_stride = (mb_width << mv_sample_log2) +
1590 int mb_x, mb_y, mbcount = 0;
1598 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1599 for (mb_x = 0; mb_x < mb_width; mb_x++) {
1600 int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
1601 for (direction = 0; direction < 2; direction++) {
1605 for (i = 0; i < 4; i++) {
1606 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1607 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1608 int xy = (mb_x * 2 + (i & 1) +
1609 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1610 int mx = motion_val[direction][xy][0];
1611 int my = motion_val[direction][xy][1];
1612 mbcount +=
add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, scale, direction);
1614 }
else if (
IS_16X8(mb_type)) {
1615 for (i = 0; i < 2; i++) {
1616 int sx = mb_x * 16 + 8;
1617 int sy = mb_y * 16 + 4 + 8 * i;
1618 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1619 int mx = motion_val[direction][xy][0];
1620 int my = motion_val[direction][xy][1];
1625 mbcount +=
add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, scale, direction);
1627 }
else if (
IS_8X16(mb_type)) {
1628 for (i = 0; i < 2; i++) {
1629 int sx = mb_x * 16 + 4 + 8 * i;
1630 int sy = mb_y * 16 + 8;
1631 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1632 int mx = motion_val[direction][xy][0];
1633 int my = motion_val[direction][xy][1];
1638 mbcount +=
add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, scale, direction);
1641 int sx = mb_x * 16 + 8;
1642 int sy = mb_y * 16 + 8;
1643 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1644 int mx = motion_val[direction][xy][0];
1645 int my = motion_val[direction][xy][1];
1646 mbcount +=
add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, scale, direction);
1668 if (avctx->
hwaccel || !mbtype_table
1681 for (y = 0; y < mb_height; y++) {
1682 for (x = 0; x < mb_width; x++) {
1684 int count = mbskip_table ? mbskip_table[x + y * mb_stride] : 0;
1691 qscale_table[x + y * mb_stride]);
1694 int mb_type = mbtype_table[x + y * mb_stride];
1710 else if (
IS_GMC(mb_type))
1750 int h_chroma_shift, v_chroma_shift, block_height;
1752 const int shift = 1 + quarter_sample;
1758 const int mv_stride = (mb_width << mv_sample_log2) +
1770 ptr = pict->
data[0];
1772 block_height = 16 >> v_chroma_shift;
1774 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1776 for (mb_x = 0; mb_x < mb_width; mb_x++) {
1777 const int mb_index = mb_x + mb_y * mb_stride;
1779 if ((avctx->
debug_mv) && motion_val[0]) {
1781 for (type = 0; type < 3; type++) {
1803 if (!
USES_LIST(mbtype_table[mb_index], direction))
1806 if (
IS_8X8(mbtype_table[mb_index])) {
1808 for (i = 0; i < 4; i++) {
1809 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1810 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1811 int xy = (mb_x * 2 + (i & 1) +
1812 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1813 int mx = (motion_val[direction][xy][0] >>
shift) + sx;
1814 int my = (motion_val[direction][xy][1] >>
shift) + sy;
1816 height, pict->
linesize[0], 100, 0, direction);
1818 }
else if (
IS_16X8(mbtype_table[mb_index])) {
1820 for (i = 0; i < 2; i++) {
1821 int sx = mb_x * 16 + 8;
1822 int sy = mb_y * 16 + 4 + 8 * i;
1823 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1824 int mx = (motion_val[direction][xy][0] >>
shift);
1825 int my = (motion_val[direction][xy][1] >>
shift);
1830 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1831 height, pict->
linesize[0], 100, 0, direction);
1833 }
else if (
IS_8X16(mbtype_table[mb_index])) {
1835 for (i = 0; i < 2; i++) {
1836 int sx = mb_x * 16 + 4 + 8 * i;
1837 int sy = mb_y * 16 + 8;
1838 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1839 int mx = motion_val[direction][xy][0] >>
shift;
1840 int my = motion_val[direction][xy][1] >>
shift;
1845 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1846 height, pict->
linesize[0], 100, 0, direction);
1849 int sx= mb_x * 16 + 8;
1850 int sy= mb_y * 16 + 8;
1851 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
1852 int mx= (motion_val[direction][xy][0]>>
shift) + sx;
1853 int my= (motion_val[direction][xy][1]>>
shift) + sy;
1854 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->
linesize[0], 100, 0, direction);
1860 uint64_t
c = (qscale_table[mb_index] * 128 / 31) *
1861 0x0101010101010101ULL;
1863 for (y = 0; y < block_height; y++) {
1864 *(uint64_t *)(pict->
data[1] + 8 * mb_x +
1865 (block_height * mb_y + y) *
1867 *(uint64_t *)(pict->
data[2] + 8 * mb_x +
1868 (block_height * mb_y + y) *
1874 int mb_type = mbtype_table[mb_index];
1877 #define COLOR(theta, r) \
1878 u = (int)(128 + r * cos(theta * M_PI / 180)); \
1879 v = (int)(128 + r * sin(theta * M_PI / 180));
1896 }
else if (
IS_GMC(mb_type)) {
1898 }
else if (
IS_SKIP(mb_type)) {
1909 u *= 0x0101010101010101ULL;
1910 v *= 0x0101010101010101ULL;
1911 for (y = 0; y < block_height; y++) {
1912 *(uint64_t *)(pict->
data[1] + 8 * mb_x +
1913 (block_height * mb_y + y) * pict->
linesize[1]) = u;
1914 *(uint64_t *)(pict->
data[2] + 8 * mb_x +
1915 (block_height * mb_y + y) * pict->
linesize[2]) = v;
1920 *(uint64_t *)(pict->
data[0] + 16 * mb_x + 0 +
1921 (16 * mb_y + 8) * pict->
linesize[0]) ^= 0x8080808080808080ULL;
1922 *(uint64_t *)(pict->
data[0] + 16 * mb_x + 8 +
1923 (16 * mb_y + 8) * pict->
linesize[0]) ^= 0x8080808080808080ULL;
1926 for (y = 0; y < 16; y++)
1927 pict->
data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1930 if (
IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1931 int dm = 1 << (mv_sample_log2 - 2);
1932 for (i = 0; i < 4; i++) {
1933 int sx = mb_x * 16 + 8 * (i & 1);
1934 int sy = mb_y * 16 + 8 * (i >> 1);
1935 int xy = (mb_x * 2 + (i & 1) +
1936 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1939 if (mv[0] != mv[dm] ||
1940 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1941 for (y = 0; y < 8; y++)
1942 pict->
data[0][sx + 4 + (sy + y) * pict->
linesize[0]] ^= 0x80;
1943 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1944 *(uint64_t *)(pict->
data[0] + sx + (sy + 4) *
1945 pict->
linesize[0]) ^= 0x8080808080808080ULL;
1955 mbskip_table[mb_index] = 0;
1982 int field_based,
int field_select,
1983 int src_x,
int src_y,
1985 int h_edge_pos,
int v_edge_pos,
1987 int motion_x,
int motion_y)
1990 const int op_index =
FFMIN(lowres, 3);
1991 const int s_mask = (2 <<
lowres) - 1;
2000 sx = motion_x & s_mask;
2001 sy = motion_y & s_mask;
2002 src_x += motion_x >> lowres + 1;
2003 src_y += motion_y >> lowres + 1;
2005 src += src_y * stride + src_x;
2007 if ((
unsigned)src_x >
FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2008 (unsigned)src_y >
FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2011 w + 1, (h + 1) << field_based,
2012 src_x, src_y << field_based,
2013 h_edge_pos, v_edge_pos);
2018 sx = (sx << 2) >>
lowres;
2019 sy = (sy << 2) >>
lowres;
2022 pix_op[op_index](dest,
src,
stride,
h, sx, sy);
2036 int motion_x,
int motion_y,
2039 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2040 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2044 const int block_s = 8>>
lowres;
2045 const int s_mask = (2 <<
lowres) - 1;
2058 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2061 sx = motion_x & s_mask;
2062 sy = motion_y & s_mask;
2063 src_x = s->
mb_x * 2 * block_s + (motion_x >> lowres + 1);
2064 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2067 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2068 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2069 uvsrc_x = src_x >> 1;
2070 uvsrc_y = src_y >> 1;
2075 uvsx = (2 * mx) & s_mask;
2076 uvsy = (2 * my) & s_mask;
2077 uvsrc_x = s->
mb_x * block_s + (mx >>
lowres);
2078 uvsrc_y = mb_y * block_s + (my >>
lowres);
2085 uvsrc_x = s->
mb_x * block_s + (mx >> lowres + 1);
2086 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2092 uvsy = motion_y & s_mask;
2094 uvsrc_x = s->
mb_x*block_s + (mx >> (lowres+1));
2097 uvsx = motion_x & s_mask;
2098 uvsy = motion_y & s_mask;
2105 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2106 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2107 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2109 if ((
unsigned) src_x >
FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2110 (
unsigned) src_y >
FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2112 linesize >> field_based, linesize >> field_based,
2113 17, 17 + field_based,
2114 src_x, src_y << field_based, h_edge_pos,
2121 uvlinesize >> field_based, uvlinesize >> field_based,
2123 uvsrc_x, uvsrc_y << field_based,
2124 h_edge_pos >> 1, v_edge_pos >> 1);
2126 uvlinesize >> field_based,uvlinesize >> field_based,
2128 uvsrc_x, uvsrc_y << field_based,
2129 h_edge_pos >> 1, v_edge_pos >> 1);
2148 sx = (sx << 2) >>
lowres;
2149 sy = (sy << 2) >>
lowres;
2150 pix_op[lowres - 1](dest_y, ptr_y,
linesize,
h, sx, sy);
2154 uvsx = (uvsx << 2) >>
lowres;
2155 uvsy = (uvsy << 2) >>
lowres;
2157 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2158 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2171 const int op_index =
FFMIN(lowres, 3);
2172 const int block_s = 8 >>
lowres;
2173 const int s_mask = (2 <<
lowres) - 1;
2174 const int h_edge_pos = s->
h_edge_pos >> lowres + 1;
2175 const int v_edge_pos = s->
v_edge_pos >> lowres + 1;
2176 int emu = 0, src_x, src_y, sx, sy;
2192 src_x = s->
mb_x * block_s + (mx >> lowres + 1);
2193 src_y = s->
mb_y * block_s + (my >> lowres + 1);
2196 ptr = ref_picture[1] +
offset;
2197 if ((
unsigned) src_x >
FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2198 (unsigned) src_y >
FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2202 src_x, src_y, h_edge_pos, v_edge_pos);
2206 sx = (sx << 2) >>
lowres;
2207 sy = (sy << 2) >>
lowres;
2208 pix_op[op_index](dest_cb, ptr, s->
uvlinesize, block_s, sx, sy);
2210 ptr = ref_picture[2] +
offset;
2215 src_x, src_y, h_edge_pos, v_edge_pos);
2218 pix_op[op_index](dest_cr, ptr, s->
uvlinesize, block_s, sx, sy);
2235 int dir,
uint8_t **ref_picture,
2241 const int block_s = 8 >>
lowres;
2250 ref_picture, pix_op,
2251 s->
mv[dir][0][0], s->
mv[dir][0][1],
2257 for (i = 0; i < 4; i++) {
2260 ref_picture[0], 0, 0,
2261 (2 * mb_x + (i & 1)) * block_s,
2262 (2 * mb_y + (i >> 1)) * block_s,
2265 block_s, block_s, pix_op,
2266 s->
mv[dir][i][0], s->
mv[dir][i][1]);
2268 mx += s->
mv[dir][i][0];
2269 my += s->
mv[dir][i][1];
2281 ref_picture, pix_op,
2282 s->
mv[dir][0][0], s->
mv[dir][0][1],
2287 ref_picture, pix_op,
2288 s->
mv[dir][1][0], s->
mv[dir][1][1],
2298 ref_picture, pix_op,
2300 s->
mv[dir][0][1], 2 * block_s, mb_y >> 1);
2304 for (i = 0; i < 2; i++) {
2309 ref2picture = ref_picture;
2316 ref2picture, pix_op,
2317 s->
mv[dir][i][0], s->
mv[dir][i][1] +
2318 2 * block_s * i, block_s, mb_y >> 1);
2320 dest_y += 2 * block_s * s->
linesize;
2327 for (i = 0; i < 2; i++) {
2329 for (j = 0; j < 2; j++) {
2332 ref_picture, pix_op,
2333 s->
mv[dir][2 * i + j][0],
2334 s->
mv[dir][2 * i + j][1],
2340 for (i = 0; i < 2; i++) {
2343 ref_picture, pix_op,
2344 s->
mv[dir][2 * i][0],s->
mv[dir][2 * i][1],
2345 2 * block_s, mb_y >> 1);
2368 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->
quarter_sample;
2369 int my, off, i, mvs;
2388 for (i = 0; i < mvs; i++) {
2389 my = s->
mv[dir][i][1];
2390 my_max =
FFMAX(my_max, my);
2391 my_min =
FFMIN(my_min, my);
2394 off = ((
FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
2403 int16_t *
block,
int i,
uint8_t *dest,
int line_size,
int qscale)
2419 int16_t *
block,
int i,
uint8_t *dest,
int line_size,
int qscale)
2441 memset(s->
ac_val[0][xy ], 0, 32 *
sizeof(int16_t));
2442 memset(s->
ac_val[0][xy + wrap], 0, 32 *
sizeof(int16_t));
2455 memset(s->
ac_val[1][xy], 0, 16 *
sizeof(int16_t));
2456 memset(s->
ac_val[2][xy], 0, 16 *
sizeof(int16_t));
2473 int lowres_flag,
int is_mpeg12)
2488 for(j=0; j<64; j++){
2515 uint8_t *dest_y, *dest_cb, *dest_cr;
2516 int dct_linesize, dct_offset;
2522 const int block_size= lowres_flag ? 8>>s->
avctx->
lowres : 8;
2541 dct_offset = s->
interlaced_dct ? linesize : linesize * block_size;
2545 dest_cb= s->
dest[1];
2546 dest_cr= s->
dest[2];
2629 add_dct(s, block[0], 0, dest_y , dct_linesize);
2630 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2631 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2632 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2636 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2637 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2641 dct_offset = s->
interlaced_dct ? uvlinesize : uvlinesize*block_size;
2643 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2644 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2645 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2646 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2648 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2649 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2650 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2651 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2656 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2663 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->
qscale);
2664 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->
qscale);
2665 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->
qscale);
2682 s->
idsp.
idct_put(dest_y + block_size, dct_linesize, block[1]);
2683 s->
idsp.
idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2684 s->
idsp.
idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2693 dct_offset = s->
interlaced_dct ? uvlinesize : uvlinesize*block_size;
2697 s->
idsp.
idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2698 s->
idsp.
idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2700 s->
idsp.
idct_put(dest_cb + block_size, dct_linesize, block[8]);
2701 s->
idsp.
idct_put(dest_cr + block_size, dct_linesize, block[9]);
2702 s->
idsp.
idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2703 s->
idsp.
idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2759 s->
dest[0] += s->
mb_y * linesize << mb_size;
2763 s->
dest[0] += (s->
mb_y>>1) * linesize << mb_size;
2806 else if (qscale > 31)
int bitstream_buffer_size
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
static int init_duplicate_context(MpegEncContext *s)
int ff_thread_can_start_frame(AVCodecContext *avctx)
const struct AVCodec * codec
int16_t(* b_bidir_back_mv_table_base)[2]
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
discard all frames except keyframes
void ff_init_block_index(MpegEncContext *s)
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
ScanTable intra_v_scantable
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
static int shift(int a, int b)
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
This structure describes decoded (raw) audio or video data.
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
#define MV_TYPE_FIELD
2 vectors, one per field
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
int coded_width
Bitstream width / height, may be different from width/height e.g.
static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define AV_LOG_WARNING
Something somehow does not look correct.
int16_t src_x
Absolute source position.
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
uint8_t * coded_block_base
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
h264_chroma_mc_func put_h264_chroma_pixels_tab[4]
void * opaque
for some private data of the user
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
static void gray_frame(AVFrame *frame)
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
uint8_t * bitstream_buffer
int field_picture
whether or not the picture was encoded in separate fields
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
int16_t(* p_mv_table_base)[2]
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
uint32_t * score_map
map to store the scores
static void free_duplicate_context(MpegEncContext *s)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
#define FF_DEBUG_VIS_MV_B_FOR
int mb_num
number of MBs of a picture
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
int h263_aic
Advanced INTRA Coding (AIC)
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
attribute_deprecated int8_t * qscale_table
QP table Not to be accessed directly from outside libavutil.
enum AVPictureType last_picture
int encoding
true if we are encoding (vs decoding)
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Macro definitions for various function/variable attributes.
int16_t(* b_back_mv_table_base)[2]
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
const uint8_t ff_mpeg2_non_linear_qscale[32]
struct AVHWAccel * hwaccel
Hardware accelerator in use.
#define USES_LIST(a, list)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
const uint8_t ff_mpeg1_dc_scale_table[128]
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
enum OutputFormat out_format
output format
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
int ff_mpv_common_frame_size_change(MpegEncContext *s)
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
#define FF_DEBUG_VIS_MV_B_BACK
uint8_t * pred_dir_table
used to store pred_dir for partitioned decoding
Multithreading support functions.
qpel_mc_func(* qpel_put)[16]
int16_t dst_x
Absolute destination position.
void ff_free_picture_tables(Picture *pic)
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
Picture current_picture
copy of the current picture structure.
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
void ff_mpv_common_init_ppc(MpegEncContext *s)
Structure to hold side data for an AVFrame.
#define PICT_BOTTOM_FIELD
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
int32_t source
Where the current macroblock comes from; negative value when it comes from the past, positive value when it comes from the future.
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
#define AV_CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
void(* decode_mb)(struct MpegEncContext *s)
Called for every Macroblock in a slice.
uint16_t pp_time
time distance between the last 2 p,s,i frames
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
int interlaced_frame
The content of the picture is interlaced.
av_cold void ff_mpv_idct_init(MpegEncContext *s)
int mb_height
number of MBs horizontally & vertically
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
static av_always_inline void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
high precision timer, useful to profile code
int16_t(*[2][2] p_field_mv_table_base)[2]
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
#define ROUNDED_DIV(a, b)
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
int intra_only
if true, only intra pictures are generated
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
int h263_plus
H.263+ headers.
int slice_context_count
number of used thread_contexts
int width
width and height of the video frame
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int last_dc[3]
last DC values for MPEG-1
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
int mb_skipped
MUST BE SET only during DECODING.
int partitioned_frame
is current frame partitioned
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
#define MAX_PICTURE_COUNT
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
int active_thread_type
Which multithreading methods are in use by the codec.
int last_lambda_for[5]
last lambda for a specific pict type
uint8_t w
Width and height of the block.
int capabilities
Codec capabilities.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int flags
AV_CODEC_FLAG_*.
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
simple assert() macros that are a bit more flexible than ISO C assert().
int overread_index
the index into ParseContext.buffer of the overread bytes
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
int quarter_sample
1->qpel, 0->half pel ME/MC
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color)
Draw a line from (ex, ey) -> (sx, sy).
int low_delay
no reordering needed / has no B-frames
uint8_t *[2][2] b_field_select_table
static const uint8_t offset[127][2]
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
void ff_mpv_common_end(MpegEncContext *s)
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
void ff_mpeg_flush(AVCodecContext *avctx)
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
uint8_t * error_status_table
const uint8_t ff_alternate_horizontal_scan[64]
int ff_mpeg_er_init(MpegEncContext *s)
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
common internal API header
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color, int tail, int direction)
Draw an arrow from (ex, ey) -> (sx, sy).
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
enum AVPictureType pict_type
Picture type of the frame.
#define UPDATE_PICTURE(pic)
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
#define FF_THREAD_FRAME
Decode more than one frame at once.
int overread
the number of bytes which were irreversibly read from the next frame
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed B-frames
int32_t motion_x
Motion vector: src_x = dst_x + motion_x / motion_scale; src_y = dst_y + motion_y / motion_scale.
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture new_picture
copy of the source picture structure for encoding.
int width
picture width / height.
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for B-frame encodin...
int16_t(*[2] motion_val)[2]
Picture * current_picture_ptr
pointer to the current picture
unsigned int allocated_bitstream_buffer_size
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
int16_t(* ac_val_base)[16]
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Motion vectors exported by some codecs (on demand through the export_mvs flag set in the libavcodec A...
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
int16_t(*[2][2][2] b_field_mv_table_base)[2]
int16_t(* b_forw_mv_table_base)[2]
int16_t(*[12] pblocks)[64]
int block_last_index[12]
last non zero coefficient in block
uint8_t idct_permutation[64]
IDCT input permutation.
av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
int mb_decision
macroblock decision mode
void(* idct_add)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
preferred ID for MPEG-1/2 video decoding
void ff_mpv_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
int block_index[6]
index to current MB in block based arrays with edges
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
int first_field
is 1 for the first field of a field picture, 0 otherwise
static const int8_t mv[256][2]
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
void(* idct_put)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
#define MV_TYPE_16X16
1 vector for the whole mb
static void clear_context(MpegEncContext *s)
AVBufferRef * qscale_table_buf
int16_t(* b_bidir_forw_mv_table_base)[2]
int coded_picture_number
picture number in bitstream order
uint16_t inter_matrix[64]
struct MpegEncContext * thread_context[MAX_THREADS]
Libavcodec external API header.
ptrdiff_t linesize
line size, in bytes, may be different from width
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
enum AVDiscard skip_idct
Skip IDCT/dequantization for selected frames.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
main external API structure.
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
ScanTable intra_scantable
uint8_t * data
The data buffer.
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
int height
picture size. must be a multiple of 16
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
static int add_mb(AVMotionVector *mb, uint32_t mb_type, int dst_x, int dst_y, int motion_x, int motion_y, int motion_scale, int direction)
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
uint32_t state
contains the last few bytes in MSB order
Picture * picture
main picture buffer
ScanTable intra_h_scantable
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
uint8_t * cbp_table
used to store cbp, ac_pred for partitioned decoding
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
int closed_gop
MPEG1/2 GOP is closed.
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
unsigned int avpriv_toupper4(unsigned int x)
#define FF_DEBUG_DCT_COEFF
#define FF_MB_DECISION_RD
rate distortion
const uint8_t ff_zigzag_direct[64]
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
static int ff_h263_round_chroma(int x)
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
int f_code
forward MV resolution
int max_b_frames
max number of B-frames for encoding
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int size
Size of data in bytes.
int h263_pred
use MPEG-4/H.263 ac/dc predictions
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
static int init_context_frame(MpegEncContext *s)
Initializes and allocates MpegEncContext fields dependent on the resolution.
uint8_t *[2] p_field_select_table
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
qpel_mc_func(* qpel_avg)[16]
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
struct AVCodecContext * avctx
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
A reference to a data buffer.
discard all non reference
GLint GLenum GLboolean GLsizei stride
uint64_t flags
Extra flag information.
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
static int ref[MAX_W *MAX_W]
const uint8_t ff_default_chroma_qscale_table[32]
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
static av_cold int dct_init(MpegEncContext *s)
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture last_picture
copy of the previous picture structure.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Picture * last_picture_ptr
pointer to the previous picture.
#define FF_DEBUG_VIS_MV_P_FOR
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
const uint8_t ff_alternate_vertical_scan[64]
uint32_t * map
map to avoid duplicate evaluations
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
H264ChromaContext h264chroma
int16_t(* blocks)[12][64]
h264_chroma_mc_func avg_h264_chroma_pixels_tab[4]
int slices
Number of slices.
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
int top_field_first
If the content is interlaced, is top field displayed first.
void ff_mpv_frame_end(MpegEncContext *s)
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
uint8_t * obmc_scratchpad
int16_t(* block)[64]
points to one of the following blocks
ParseContext parse_context
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Picture next_picture
copy of the next picture structure.
int key_frame
1 -> keyframe, 0 -> not
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
int flags2
AV_CODEC_FLAG2_*.
int chroma_qscale
chroma QP
#define AV_CODEC_FLAG2_EXPORT_MVS
Export motion vectors through frame side data.
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int frame_number
Frame counter, set by libavcodec.
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
static void free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution.
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
uint32_t * mb_type
types and macros are defined in mpegutils.h
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
#define av_malloc_array(a, b)
#define FFSWAP(type, a, b)
int debug_mv
debug Code outside libavcodec should access this field using AVOptions
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
int16_t(* b_direct_mv_table_base)[2]
int b_code
backward MV resolution for B-frames (MPEG-4)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
void ff_mpv_report_decode_progress(MpegEncContext *s)
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
#define AV_CEIL_RSHIFT(a, b)