53 int16_t *
block,
int n,
int qscale)
55 int i,
level, nCoeffs;
56 const uint16_t *quant_matrix;
63 for(i=1;i<=nCoeffs;i++) {
69 level = (int)(level * qscale * quant_matrix[j]) >> 3;
70 level = (level - 1) | 1;
73 level = (int)(level * qscale * quant_matrix[j]) >> 3;
74 level = (level - 1) | 1;
82 int16_t *
block,
int n,
int qscale)
84 int i,
level, nCoeffs;
85 const uint16_t *quant_matrix;
90 for(i=0; i<=nCoeffs; i++) {
96 level = (((level << 1) + 1) * qscale *
97 ((int) (quant_matrix[j]))) >> 4;
98 level = (level - 1) | 1;
101 level = (((level << 1) + 1) * qscale *
102 ((int) (quant_matrix[j]))) >> 4;
103 level = (level - 1) | 1;
111 int16_t *
block,
int n,
int qscale)
113 int i,
level, nCoeffs;
114 const uint16_t *quant_matrix;
121 for(i=1;i<=nCoeffs;i++) {
127 level = (int)(level * qscale * quant_matrix[j]) >> 3;
130 level = (int)(level * qscale * quant_matrix[j]) >> 3;
138 int16_t *
block,
int n,
int qscale)
140 int i,
level, nCoeffs;
141 const uint16_t *quant_matrix;
150 for(i=1;i<=nCoeffs;i++) {
156 level = (int)(level * qscale * quant_matrix[j]) >> 3;
159 level = (int)(level * qscale * quant_matrix[j]) >> 3;
169 int16_t *
block,
int n,
int qscale)
171 int i,
level, nCoeffs;
172 const uint16_t *quant_matrix;
179 for(i=0; i<=nCoeffs; i++) {
185 level = (((level << 1) + 1) * qscale *
186 ((int) (quant_matrix[j]))) >> 4;
189 level = (((level << 1) + 1) * qscale *
190 ((int) (quant_matrix[j]))) >> 4;
200 int16_t *
block,
int n,
int qscale)
202 int i,
level, qmul, qadd;
211 qadd = (qscale - 1) | 1;
220 for(i=1; i<=nCoeffs; i++) {
224 level = level * qmul - qadd;
226 level = level * qmul + qadd;
234 int16_t *
block,
int n,
int qscale)
236 int i,
level, qmul, qadd;
241 qadd = (qscale - 1) | 1;
246 for(i=0; i<=nCoeffs; i++) {
250 level = level * qmul - qadd;
252 level = level * qmul + qadd;
261 int mb_x,
int mb_y,
int mb_intra,
int mb_skipped)
271 memcpy(s->
mv,
mv,
sizeof(*
mv));
284 "Interlaced error concealment is not fully implemented\n");
291 memset(dst + h*linesize, 128, 16);
297 memset(dst + h*linesize, 128, 8);
311 for (i=0; i<4; i++) {
331 if (HAVE_INTRINSICS_NEON)
404 int chroma_x_shift,
int chroma_y_shift,
405 int linesize,
int uvlinesize)
428 if (r < 0 || !pic->f->buf[0]) {
436 for (i = 0; pic->
f->
data[i]; i++) {
451 av_log(avctx,
AV_LOG_ERROR,
"alloc_frame_buffer() failed (hwaccel private data allocation)\n");
458 if (linesize && (linesize != pic->
f->
linesize[0] ||
461 "get_buffer() failed (stride changed)\n");
468 "get_buffer() failed (uv stride mismatch)\n");
477 "get_buffer() failed to allocate context scratch buffers.\n");
499 for (i = 0; i < 2; i++) {
506 int mb_stride,
int mb_width,
int mb_height,
int b8_stride)
508 const int big_mb_num = mb_stride * (mb_height + 1) + 1;
509 const int mb_array_size = mb_stride * mb_height;
510 const int b8_array_size = b8_stride * mb_height * 2;
531 int mv_size = 2 * (b8_array_size + 4) *
sizeof(int16_t);
532 int ref_index_size = 4 * mb_array_size;
534 for (i = 0; mv_size && i < 2; i++) {
551 #define MAKE_WRITABLE(table) \
554 (ret = av_buffer_make_writable(&pic->table)) < 0)\
565 for (i = 0; i < 2; i++) {
587 int chroma_x_shift,
int chroma_y_shift,
int out_format,
588 int mb_stride,
int mb_width,
int mb_height,
int b8_stride,
589 ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
604 chroma_x_shift, chroma_y_shift,
605 *linesize, *uvlinesize) < 0)
614 mb_stride, mb_width, mb_height, b8_stride);
631 for (i = 0; i < 2; i++) {
667 memset((
uint8_t*)pic + off, 0,
sizeof(*pic) - off);
674 #define UPDATE_TABLE(table)\
677 (!dst->table || dst->table->buffer != src->table->buffer)) {\
678 av_buffer_unref(&dst->table);\
679 dst->table = av_buffer_ref(src->table);\
681 ff_free_picture_tables(dst);\
682 return AVERROR(ENOMEM);\
693 for (i = 0; i < 2; i++) {
704 for (i = 0; i < 2; i++) {
757 int yc_size = y_size + 2 * c_size;
777 2 * 64 *
sizeof(
int), fail)
783 for (i = 0; i < 12; i++) {
794 yc_size *
sizeof(int16_t) * 16, fail);
827 #define COPY(a) bak->a = src->a
828 COPY(sc.edge_emu_buffer);
831 COPY(sc.rd_scratchpad);
832 COPY(sc.b_scratchpad);
833 COPY(sc.obmc_scratchpad);
861 for (i = 0; i < 12; i++) {
872 "scratch buffers.\n");
901 if (
s1->context_initialized){
933 if (
s1->picture[i].f->buf[0] &&
938 #define UPDATE_PICTURE(pic)\
940 ff_mpeg_unref_picture(s->avctx, &s->pic);\
941 if (s1->pic.f && s1->pic.f->buf[0])\
942 ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
944 ret = update_picture_tables(&s->pic, &s1->pic);\
953 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
954 ((pic && pic >= old_ctx->picture && \
955 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
956 &new_ctx->picture[pic - old_ctx->picture] : NULL)
969 (
char *) &
s1->pb_field_time +
sizeof(
s1->pb_field_time) -
970 (
char *) &
s1->last_time_base);
980 if (
s1->bitstream_buffer) {
981 if (
s1->bitstream_buffer_size +
985 s1->allocated_bitstream_buffer_size);
993 s1->bitstream_buffer_size);
1002 &s->
sc,
s1->linesize) < 0) {
1004 "scratch buffers.\n");
1009 "be allocated due to unknown size.\n");
1014 (
char *) &
s1->rtp_mode - (
char *) &
s1->progressive_sequence);
1016 if (!
s1->first_field) {
1018 if (
s1->current_picture_ptr)
1112 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x,
y;
1136 yc_size = y_size + 2 * c_size;
1169 mb_array_size *
sizeof(
float), fail);
1171 mb_array_size *
sizeof(
float), fail);
1178 for (i = 0; i < 2; i++) {
1180 for (j = 0; j < 2; j++) {
1181 for (k = 0; k < 2; k++) {
1184 mv_table_size * 2 *
sizeof(int16_t),
1213 for (i = 0; i < yc_size; i++)
1237 int nb_slices = (HAVE_THREADS &&
1251 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1262 " reducing to %d\n", nb_slices, max_slices);
1263 nb_slices = max_slices;
1312 if (nb_slices > 1) {
1313 for (i = 0; i < nb_slices; i++) {
1322 (s->
mb_height * (i) + nb_slices / 2) / nb_slices;
1324 (s->
mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1363 for (i = 0; i < 2; i++) {
1364 for (j = 0; j < 2; j++) {
1365 for (k = 0; k < 2; k++) {
1441 if (nb_slices > 1) {
1442 for (i = 0; i < nb_slices; i++) {
1453 (s->
mb_height * (i) + nb_slices / 2) / nb_slices;
1455 (s->
mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1530 if (!picture[i].reference)
1537 if (!pic->
f->
buf[0])
1550 if (!picture[i].f->buf[0])
1561 "Internal error, picture buffer overflow\n");
1582 if (picture[ret].needs_realloc) {
1593 int i, h_chroma_shift, v_chroma_shift;
1597 for(i=0; i<frame->
height; i++)
1601 0x80, FF_CEIL_RSHIFT(frame->
width, h_chroma_shift));
1603 0x80, FF_CEIL_RSHIFT(frame->
width, h_chroma_shift));
1637 "releasing zombie picture\n");
1697 ff_dlog(s->
avctx,
"L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1707 int h_chroma_shift, v_chroma_shift;
1709 &h_chroma_shift, &v_chroma_shift);
1712 "allocating dummy last picture for B frame\n");
1715 "warning: first frame is no keyframe\n");
1718 "allocate dummy last picture for field based first keyframe\n");
1738 for(i=0; i<avctx->
height; i++)
1740 0x80, avctx->
width);
1744 0x80, FF_CEIL_RSHIFT(avctx->
width, h_chroma_shift));
1746 0x80, FF_CEIL_RSHIFT(avctx->
width, h_chroma_shift));
1751 for(i=0; i<avctx->
height; i++)
1781 #if 0 // BUFREF-FIXME
1805 for (i = 0; i < 4; i++) {
1848 static int clip_line(
int *sx,
int *sy,
int *ex,
int *ey,
int maxx)
1856 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
1863 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
1882 if (
clip_line(&sx, &sy, &ex, &ey, w - 1))
1884 if (
clip_line(&sy, &sx, &ey, &ex, h - 1))
1887 sx = av_clip(sx, 0, w - 1);
1888 sy = av_clip(sy, 0, h - 1);
1889 ex = av_clip(ex, 0, w - 1);
1890 ey = av_clip(ey, 0, h - 1);
1892 buf[sy * stride + sx] +=
color;
1901 f = ((ey - sy) << 16) / ex;
1902 for (x = 0; x <= ex; x++) {
1904 fr = (x * f) & 0xFFFF;
1905 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1906 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1916 f = ((ex - sx) << 16) / ey;
1919 for(y= 0; y <= ey; y++){
1921 fr = (y*f) & 0xFFFF;
1922 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1923 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1936 int ey,
int w,
int h,
int stride,
int color,
int tail,
int direction)
1945 sx = av_clip(sx, -100, w + 100);
1946 sy = av_clip(sy, -100, h + 100);
1947 ex = av_clip(ex, -100, w + 100);
1948 ey = av_clip(ey, -100, h + 100);
1953 if (dx * dx + dy * dy > 3 * 3) {
1967 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1968 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1970 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1975 int dst_x,
int dst_y,
1976 int src_x,
int src_y,
1985 mb->
source = direction ? 1 : -1;
1994 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
1996 int mb_width,
int mb_height,
int mb_stride,
int quarter_sample)
1999 const int shift = 1 + quarter_sample;
2001 const int mv_stride = (mb_width << mv_sample_log2) +
2003 int mb_x, mb_y, mbcount = 0;
2011 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2012 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2013 int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2014 for (direction = 0; direction < 2; direction++) {
2018 for (i = 0; i < 4; i++) {
2019 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2020 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2021 int xy = (mb_x * 2 + (i & 1) +
2022 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2023 int mx = (motion_val[direction][xy][0] >>
shift) + sx;
2024 int my = (motion_val[direction][xy][1] >>
shift) + sy;
2025 mbcount +=
add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2027 }
else if (
IS_16X8(mb_type)) {
2028 for (i = 0; i < 2; i++) {
2029 int sx = mb_x * 16 + 8;
2030 int sy = mb_y * 16 + 4 + 8 * i;
2031 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2032 int mx = (motion_val[direction][xy][0] >>
shift);
2033 int my = (motion_val[direction][xy][1] >>
shift);
2038 mbcount +=
add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2040 }
else if (
IS_8X16(mb_type)) {
2041 for (i = 0; i < 2; i++) {
2042 int sx = mb_x * 16 + 4 + 8 * i;
2043 int sy = mb_y * 16 + 8;
2044 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2045 int mx = motion_val[direction][xy][0] >>
shift;
2046 int my = motion_val[direction][xy][1] >>
shift;
2051 mbcount +=
add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2054 int sx = mb_x * 16 + 8;
2055 int sy = mb_y * 16 + 8;
2056 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2057 int mx = (motion_val[direction][xy][0]>>
shift) + sx;
2058 int my = (motion_val[direction][xy][1]>>
shift) + sy;
2059 mbcount +=
add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2081 if (avctx->
hwaccel || !mbtype_table
2091 for (y = 0; y < mb_height; y++) {
2092 for (x = 0; x < mb_width; x++) {
2094 int count = mbskip_table ? mbskip_table[x + y * mb_stride] : 0;
2101 qscale_table[x + y * mb_stride]);
2104 int mb_type = mbtype_table[x + y * mb_stride];
2120 else if (
IS_GMC(mb_type))
2160 int h_chroma_shift, v_chroma_shift, block_height;
2162 const int shift = 1 + quarter_sample;
2168 const int mv_stride = (mb_width << mv_sample_log2) +
2179 ptr = pict->
data[0];
2181 block_height = 16 >> v_chroma_shift;
2183 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2185 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2186 const int mb_index = mb_x + mb_y * mb_stride;
2188 if ((avctx->
debug_mv) && motion_val[0]) {
2190 for (type = 0; type < 3; type++) {
2212 if (!
USES_LIST(mbtype_table[mb_index], direction))
2215 if (
IS_8X8(mbtype_table[mb_index])) {
2217 for (i = 0; i < 4; i++) {
2218 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2219 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2220 int xy = (mb_x * 2 + (i & 1) +
2221 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2222 int mx = (motion_val[direction][xy][0] >>
shift) + sx;
2223 int my = (motion_val[direction][xy][1] >>
shift) + sy;
2225 height, pict->
linesize[0], 100, 0, direction);
2227 }
else if (
IS_16X8(mbtype_table[mb_index])) {
2229 for (i = 0; i < 2; i++) {
2230 int sx = mb_x * 16 + 8;
2231 int sy = mb_y * 16 + 4 + 8 * i;
2232 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2233 int mx = (motion_val[direction][xy][0] >>
shift);
2234 int my = (motion_val[direction][xy][1] >>
shift);
2239 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2240 height, pict->
linesize[0], 100, 0, direction);
2242 }
else if (
IS_8X16(mbtype_table[mb_index])) {
2244 for (i = 0; i < 2; i++) {
2245 int sx = mb_x * 16 + 4 + 8 * i;
2246 int sy = mb_y * 16 + 8;
2247 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2248 int mx = motion_val[direction][xy][0] >>
shift;
2249 int my = motion_val[direction][xy][1] >>
shift;
2254 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2255 height, pict->
linesize[0], 100, 0, direction);
2258 int sx= mb_x * 16 + 8;
2259 int sy= mb_y * 16 + 8;
2260 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2261 int mx= (motion_val[direction][xy][0]>>
shift) + sx;
2262 int my= (motion_val[direction][xy][1]>>
shift) + sy;
2263 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->
linesize[0], 100, 0, direction);
2269 uint64_t
c = (qscale_table[mb_index] * 128 / 31) *
2270 0x0101010101010101ULL;
2272 for (y = 0; y < block_height; y++) {
2273 *(uint64_t *)(pict->
data[1] + 8 * mb_x +
2274 (block_height * mb_y + y) *
2276 *(uint64_t *)(pict->
data[2] + 8 * mb_x +
2277 (block_height * mb_y + y) *
2283 int mb_type = mbtype_table[mb_index];
2286 #define COLOR(theta, r) \
2287 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2288 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2305 }
else if (
IS_GMC(mb_type)) {
2307 }
else if (
IS_SKIP(mb_type)) {
2318 u *= 0x0101010101010101ULL;
2319 v *= 0x0101010101010101ULL;
2320 for (y = 0; y < block_height; y++) {
2321 *(uint64_t *)(pict->
data[1] + 8 * mb_x +
2322 (block_height * mb_y + y) * pict->
linesize[1]) = u;
2323 *(uint64_t *)(pict->
data[2] + 8 * mb_x +
2324 (block_height * mb_y + y) * pict->
linesize[2]) = v;
2329 *(uint64_t *)(pict->
data[0] + 16 * mb_x + 0 +
2330 (16 * mb_y + 8) * pict->
linesize[0]) ^= 0x8080808080808080ULL;
2331 *(uint64_t *)(pict->
data[0] + 16 * mb_x + 8 +
2332 (16 * mb_y + 8) * pict->
linesize[0]) ^= 0x8080808080808080ULL;
2335 for (y = 0; y < 16; y++)
2336 pict->
data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2339 if (
IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2340 int dm = 1 << (mv_sample_log2 - 2);
2341 for (i = 0; i < 4; i++) {
2342 int sx = mb_x * 16 + 8 * (i & 1);
2343 int sy = mb_y * 16 + 8 * (i >> 1);
2344 int xy = (mb_x * 2 + (i & 1) +
2345 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2348 if (mv[0] != mv[dm] ||
2349 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2350 for (y = 0; y < 8; y++)
2351 pict->
data[0][sx + 4 + (sy + y) * pict->
linesize[0]] ^= 0x80;
2352 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2353 *(uint64_t *)(pict->
data[0] + sx + (sy + 4) *
2354 pict->
linesize[0]) ^= 0x8080808080808080ULL;
2364 mbskip_table[mb_index] = 0;
2391 int field_based,
int field_select,
2392 int src_x,
int src_y,
2394 int h_edge_pos,
int v_edge_pos,
2396 int motion_x,
int motion_y)
2399 const int op_index =
FFMIN(lowres, 3);
2400 const int s_mask = (2 <<
lowres) - 1;
2409 sx = motion_x & s_mask;
2410 sy = motion_y & s_mask;
2411 src_x += motion_x >> lowres + 1;
2412 src_y += motion_y >> lowres + 1;
2414 src += src_y * stride + src_x;
2416 if ((
unsigned)src_x >
FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2417 (unsigned)src_y >
FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2420 w + 1, (h + 1) << field_based,
2421 src_x, src_y << field_based,
2422 h_edge_pos, v_edge_pos);
2427 sx = (sx << 2) >>
lowres;
2428 sy = (sy << 2) >>
lowres;
2431 pix_op[op_index](dest,
src,
stride,
h, sx, sy);
2445 int motion_x,
int motion_y,
2448 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2449 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2450 ptrdiff_t uvlinesize, linesize;
2453 const int block_s = 8>>
lowres;
2454 const int s_mask = (2 <<
lowres) - 1;
2467 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2470 sx = motion_x & s_mask;
2471 sy = motion_y & s_mask;
2472 src_x = s->
mb_x * 2 * block_s + (motion_x >> lowres + 1);
2473 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2476 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2477 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2478 uvsrc_x = src_x >> 1;
2479 uvsrc_y = src_y >> 1;
2484 uvsx = (2 * mx) & s_mask;
2485 uvsy = (2 * my) & s_mask;
2486 uvsrc_x = s->
mb_x * block_s + (mx >>
lowres);
2487 uvsrc_y = mb_y * block_s + (my >>
lowres);
2494 uvsrc_x = s->
mb_x * block_s + (mx >> lowres + 1);
2495 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2501 uvsy = motion_y & s_mask;
2503 uvsrc_x = s->
mb_x*block_s + (mx >> (lowres+1));
2506 uvsx = motion_x & s_mask;
2507 uvsy = motion_y & s_mask;
2514 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2515 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2516 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2518 if ((
unsigned) src_x >
FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2519 (
unsigned) src_y >
FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2521 linesize >> field_based, linesize >> field_based,
2522 17, 17 + field_based,
2523 src_x, src_y << field_based, h_edge_pos,
2530 uvlinesize >> field_based, uvlinesize >> field_based,
2532 uvsrc_x, uvsrc_y << field_based,
2533 h_edge_pos >> 1, v_edge_pos >> 1);
2535 uvlinesize >> field_based,uvlinesize >> field_based,
2537 uvsrc_x, uvsrc_y << field_based,
2538 h_edge_pos >> 1, v_edge_pos >> 1);
2557 sx = (sx << 2) >>
lowres;
2558 sy = (sy << 2) >>
lowres;
2559 pix_op[lowres - 1](dest_y, ptr_y, linesize,
h, sx, sy);
2563 uvsx = (uvsx << 2) >>
lowres;
2564 uvsy = (uvsy << 2) >>
lowres;
2566 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2567 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2580 const int op_index =
FFMIN(lowres, 3);
2581 const int block_s = 8 >>
lowres;
2582 const int s_mask = (2 <<
lowres) - 1;
2583 const int h_edge_pos = s->
h_edge_pos >> lowres + 1;
2584 const int v_edge_pos = s->
v_edge_pos >> lowres + 1;
2585 int emu = 0, src_x, src_y, sx, sy;
2601 src_x = s->
mb_x * block_s + (mx >> lowres + 1);
2602 src_y = s->
mb_y * block_s + (my >> lowres + 1);
2605 ptr = ref_picture[1] +
offset;
2606 if ((
unsigned) src_x >
FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2607 (unsigned) src_y >
FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2611 src_x, src_y, h_edge_pos, v_edge_pos);
2615 sx = (sx << 2) >>
lowres;
2616 sy = (sy << 2) >>
lowres;
2617 pix_op[op_index](dest_cb, ptr, s->
uvlinesize, block_s, sx, sy);
2619 ptr = ref_picture[2] +
offset;
2624 src_x, src_y, h_edge_pos, v_edge_pos);
2627 pix_op[op_index](dest_cr, ptr, s->
uvlinesize, block_s, sx, sy);
2644 int dir,
uint8_t **ref_picture,
2650 const int block_s = 8 >>
lowres;
2659 ref_picture, pix_op,
2660 s->
mv[dir][0][0], s->
mv[dir][0][1],
2666 for (i = 0; i < 4; i++) {
2669 ref_picture[0], 0, 0,
2670 (2 * mb_x + (i & 1)) * block_s,
2671 (2 * mb_y + (i >> 1)) * block_s,
2674 block_s, block_s, pix_op,
2675 s->
mv[dir][i][0], s->
mv[dir][i][1]);
2677 mx += s->
mv[dir][i][0];
2678 my += s->
mv[dir][i][1];
2690 ref_picture, pix_op,
2691 s->
mv[dir][0][0], s->
mv[dir][0][1],
2696 ref_picture, pix_op,
2697 s->
mv[dir][1][0], s->
mv[dir][1][1],
2707 ref_picture, pix_op,
2709 s->
mv[dir][0][1], 2 * block_s, mb_y >> 1);
2713 for (i = 0; i < 2; i++) {
2718 ref2picture = ref_picture;
2725 ref2picture, pix_op,
2726 s->
mv[dir][i][0], s->
mv[dir][i][1] +
2727 2 * block_s * i, block_s, mb_y >> 1);
2729 dest_y += 2 * block_s * s->
linesize;
2736 for (i = 0; i < 2; i++) {
2738 for (j = 0; j < 2; j++) {
2741 ref_picture, pix_op,
2742 s->
mv[dir][2 * i + j][0],
2743 s->
mv[dir][2 * i + j][1],
2749 for (i = 0; i < 2; i++) {
2752 ref_picture, pix_op,
2753 s->
mv[dir][2 * i][0],s->
mv[dir][2 * i][1],
2754 2 * block_s, mb_y >> 1);
2777 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->
quarter_sample;
2778 int my, off, i, mvs;
2797 for (i = 0; i < mvs; i++) {
2798 my = s->
mv[dir][i][1];
2799 my_max =
FFMAX(my_max, my);
2800 my_min =
FFMIN(my_min, my);
2803 off = ((
FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
2812 int16_t *
block,
int i,
uint8_t *dest,
int line_size,
int qscale)
2828 int16_t *
block,
int i,
uint8_t *dest,
int line_size,
int qscale)
2850 memset(s->
ac_val[0][xy ], 0, 32 *
sizeof(int16_t));
2851 memset(s->
ac_val[0][xy + wrap], 0, 32 *
sizeof(int16_t));
2864 memset(s->
ac_val[1][xy], 0, 16 *
sizeof(int16_t));
2865 memset(s->
ac_val[2][xy], 0, 16 *
sizeof(int16_t));
2882 int lowres_flag,
int is_mpeg12)
2897 for(j=0; j<64; j++){
2924 uint8_t *dest_y, *dest_cb, *dest_cr;
2925 int dct_linesize, dct_offset;
2931 const int block_size= lowres_flag ? 8>>s->
avctx->
lowres : 8;
2950 dct_offset = s->
interlaced_dct ? linesize : linesize * block_size;
2954 dest_cb= s->
dest[1];
2955 dest_cr= s->
dest[2];
3038 add_dct(s, block[0], 0, dest_y , dct_linesize);
3039 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3040 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3041 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3045 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3046 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3050 dct_offset = s->
interlaced_dct ? uvlinesize : uvlinesize*block_size;
3052 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3053 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3054 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3055 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3057 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3058 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3059 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3060 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3065 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3072 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->
qscale);
3073 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->
qscale);
3074 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->
qscale);
3091 s->
idsp.
idct_put(dest_y + block_size, dct_linesize, block[1]);
3092 s->
idsp.
idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3093 s->
idsp.
idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3102 dct_offset = s->
interlaced_dct ? uvlinesize : uvlinesize*block_size;
3106 s->
idsp.
idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3107 s->
idsp.
idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3109 s->
idsp.
idct_put(dest_cb + block_size, dct_linesize, block[8]);
3110 s->
idsp.
idct_put(dest_cr + block_size, dct_linesize, block[9]);
3111 s->
idsp.
idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3112 s->
idsp.
idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
3168 s->
dest[0] += s->
mb_y * linesize << mb_size;
3172 s->
dest[0] += (s->
mb_y>>1) * linesize << mb_size;
3196 for(i=0; i<=last; i++){
3197 const int j= scantable[i];
3202 for(i=0; i<=last; i++){
3203 const int j= scantable[i];
3204 const int perm_j= permutation[j];
3205 block[perm_j]= temp[j];
3244 else if (qscale > 31)
int bitstream_buffer_size
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
#define FF_DEBUG_DCT_COEFF
static int init_duplicate_context(MpegEncContext *s)
int ff_thread_can_start_frame(AVCodecContext *avctx)
const struct AVCodec * codec
int16_t(* b_bidir_back_mv_table_base)[2]
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
discard all frames except keyframes
void ff_init_block_index(MpegEncContext *s)
#define MAX_PICTURE_COUNT
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
ScanTable intra_v_scantable
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
static int shift(int a, int b)
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
This structure describes decoded (raw) audio or video data.
#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
int16_t(* p_mv_table)[2]
MV table (1MV per MB) p-frame encoding.
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
#define MV_TYPE_FIELD
2 vectors, one per field
#define MAKE_WRITABLE(table)
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
uint8_t * mb_mean
Table for MB luminance.
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
int coded_width
Bitstream width / height, may be different from width/height e.g.
static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define AV_LOG_WARNING
Something somehow does not look correct.
int16_t src_x
Absolute source position.
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
uint8_t * coded_block_base
static int update_picture_tables(Picture *dst, Picture *src)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
uint16_t * mb_var
Table for MB variances.
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block.
int16_t(*[3] ac_val)[16]
used for mpeg4 AC prediction, all 3 arrays must be continuous
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
h264_chroma_mc_func put_h264_chroma_pixels_tab[4]
void * opaque
for some private data of the user
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
static void gray_frame(AVFrame *frame)
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
uint8_t * bitstream_buffer
static int find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
void(* clear_blocks)(int16_t *blocks)
int field_picture
whether or not the picture was encoded in separate fields
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced p-frame encoding.
int16_t(* p_mv_table_base)[2]
static int make_tables_writable(Picture *pic)
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
uint32_t * score_map
map to store the scores
#define FF_ARRAY_ELEMS(a)
#define FF_DEBUG_VIS_MV_B_BACK
static void free_duplicate_context(MpegEncContext *s)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
int padding_bug_score
used to detect the VERY common padding bug in MPEG4
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
int mb_num
number of MBs of a picture
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
int h263_aic
Advanced INTRA Coding (AIC)
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode b-frame encoding.
int encoding
true if we are encoding (vs decoding)
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Macro definitions for various function/variable attributes.
int16_t(* b_back_mv_table_base)[2]
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int av_codec_is_encoder(const AVCodec *codec)
int alloc_mb_width
mb_width used to allocate tables
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
struct AVHWAccel * hwaccel
Hardware accelerator in use.
#define USES_LIST(a, list)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
static int alloc_frame_buffer(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int chroma_x_shift, int chroma_y_shift, int linesize, int uvlinesize)
Allocate a frame buffer.
const uint8_t ff_mpeg1_dc_scale_table[128]
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
enum OutputFormat out_format
output format
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
int ff_mpv_common_frame_size_change(MpegEncContext *s)
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
uint8_t * pred_dir_table
used to store pred_dir for partitioned decoding
Multithreading support functions.
#define CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
Motion estimation context.
qpel_mc_func(* qpel_put)[16]
int16_t dst_x
Absolute destination position.
int no_rounding
apply no rounding to motion compensation (MPEG4, msmpeg4, ...) for b-frames rounding mode is always 0...
void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
Picture current_picture
copy of the current picture structure.
void ff_mpv_common_init_ppc(MpegEncContext *s)
Structure to hold side data for an AVFrame.
#define PICT_BOTTOM_FIELD
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
int32_t source
Where the current macroblock comes from; negative value when it comes from the past, positive value when it comes from the future.
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
static int alloc_picture_tables(AVCodecContext *avctx, Picture *pic, int encoding, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride)
void(* decode_mb)(struct MpegEncContext *s)
Called for every Macroblock in a slice.
uint16_t pp_time
time distance between the last 2 p,s,i frames
AVBufferRef * mb_type_buf
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
int interlaced_frame
The content of the picture is interlaced.
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
av_cold void ff_mpv_idct_init(MpegEncContext *s)
int mb_height
number of MBs horizontally & vertically
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
static av_always_inline void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
high precision timer, useful to profile code
int16_t (*p_field_mv_table_base[2][2])[2]
static void ff_update_block_index(MpegEncContext *s)
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
#define ROUNDED_DIV(a, b)
AVBufferRef * mb_mean_buf
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
int intra_only
if true, only intra pictures are generated
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be contiguous
int h263_plus
h263 plus headers
int slice_context_count
number of used thread_contexts
int width
width and height of the video frame
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int last_dc[3]
last DC values for MPEG1
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
int mb_skipped
MUST BE SET only during DECODING.
int partitioned_frame
is current frame partitioned
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
int frame_skip_threshold
frame skip threshold
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define CODEC_FLAG2_EXPORT_MVS
Export motion vectors through frame side data.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
int active_thread_type
Which multithreading methods are in use by the codec.
int last_lambda_for[5]
last lambda for a specific pict type
uint8_t w
Width and height of the block.
#define FF_DEBUG_VIS_MV_B_FOR
int capabilities
Codec capabilities.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
simple assert() macros that are a bit more flexible than ISO C assert().
int overread_index
the index into ParseContext.buffer of the overread bytes
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
int quarter_sample
1->qpel, 0->half pel ME/MC
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color)
Draw a line from (ex, ey) -> (sx, sy).
int low_delay
no reordering needed / has no b-frames
uint8_t *b_field_select_table[2][2]
static const uint8_t offset[127][2]
void ff_mpv_common_end(MpegEncContext *s)
Libavcodec external API header.
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
void ff_mpeg_flush(AVCodecContext *avctx)
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
uint8_t * error_status_table
const uint8_t ff_alternate_horizontal_scan[64]
AVBufferRef * hwaccel_priv_buf
common internal API header
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color, int tail, int direction)
Draw an arrow from (ex, ey) -> (sx, sy).
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
AVBufferRef * motion_val_buf[2]
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
enum AVPictureType pict_type
Picture type of the frame.
#define UPDATE_PICTURE(pic)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
int overread
the number of bytes which were irreversibly read from the next frame
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture new_picture
copy of the source picture structure for encoding.
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
int width
picture width / height.
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
int16_t (*motion_val[2])[2]
Picture * current_picture_ptr
pointer to the current picture
#define FF_CEIL_RSHIFT(a, b)
unsigned int allocated_bitstream_buffer_size
void * hwaccel_picture_private
hardware accelerator private data
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
int16_t(* ac_val_base)[16]
Motion vectors exported by some codecs (on demand through the export_mvs flag set in the libavcodec A...
int16_t (*b_field_mv_table_base[2][2][2])[2]
int16_t(* b_forw_mv_table_base)[2]
int16_t (*pblocks[12])[64]
int block_last_index[12]
last non zero coefficient in block
uint8_t idct_permutation[64]
IDCT input permutation.
int mb_decision
macroblock decision mode
void(* idct_add)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
preferred ID for MPEG-1/2 video decoding
void ff_mpv_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
int block_index[6]
index to current MB in block based arrays with edges
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
int first_field
is 1 for the first field of a field picture 0 otherwise
void * av_memdup(const void *p, size_t size)
Duplicate the buffer p.
int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static const int8_t mv[256][2]
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
void(* idct_put)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
#define MV_TYPE_16X16
1 vector for the whole mb
int frame_skip_factor
frame skip factor
uint16_t * mc_mb_var
Table for motion compensated MB variances.
AVBufferRef * qscale_table_buf
int16_t(* b_bidir_forw_mv_table_base)[2]
int coded_picture_number
picture number in bitstream order
uint16_t inter_matrix[64]
int alloc_mb_height
mb_height used to allocate tables
struct MpegEncContext * thread_context[MAX_THREADS]
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
void ff_free_picture_tables(Picture *pic)
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
ptrdiff_t linesize
line size, in bytes, may be different from width
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
enum AVDiscard skip_idct
Skip IDCT/dequantization for selected frames.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
main external API structure.
ScanTable intra_scantable
uint8_t * data
The data buffer.
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
int height
picture size. must be a multiple of 16
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
uint32_t state
contains the last few bytes in MSB order
Picture * picture
main picture buffer
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
BYTE int const BYTE int int int height
#define FF_THREAD_FRAME
Decode more than one frame at once.
ScanTable intra_h_scantable
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
int16_t (*b_field_mv_table[2][2][2])[2]
MV table (4MV per MB) interlaced b-frame encoding.
uint8_t * cbp_table
used to store cbp, ac_pred for partitioned decoding
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
int closed_gop
MPEG1/2 GOP is closed.
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
#define UPDATE_TABLE(table)
unsigned int avpriv_toupper4(unsigned int x)
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
const uint8_t ff_zigzag_direct[64]
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
static int ff_h263_round_chroma(int x)
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
static int add_mb(AVMotionVector *mb, uint32_t mb_type, int dst_x, int dst_y, int src_x, int src_y, int direction)
int f_code
forward MV resolution
int max_b_frames
max number of b-frames for encoding
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int size
Size of data in bytes.
int h263_pred
use mpeg4/h263 ac/dc predictions
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
static int init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
static int pic_is_unused(Picture *pic)
uint8_t *p_field_select_table[2]
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode b-frame encoding.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
qpel_mc_func(* qpel_avg)[16]
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode b-frame encoding.
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
int noise_reduction
noise reduction strength
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
struct AVCodecContext * avctx
A reference to a data buffer.
discard all non reference
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
GLint GLenum GLboolean GLsizei stride
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
uint64_t flags
Extra flag information.
#define FF_MB_DECISION_RD
rate distortion
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
AVBufferRef * mbskip_table_buf
const uint8_t ff_default_chroma_qscale_table[32]
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
static av_cold int dct_init(MpegEncContext *s)
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture last_picture
copy of the previous picture structure.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Picture * last_picture_ptr
pointer to the previous picture.
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (h263)
const uint8_t ff_alternate_vertical_scan[64]
static void release_unused_pictures(AVCodecContext *avctx, Picture *picture)
uint32_t * map
map to avoid duplicate evaluations
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
H264ChromaContext h264chroma
int16_t(* blocks)[12][64]
h264_chroma_mc_func avg_h264_chroma_pixels_tab[4]
int slices
Number of slices.
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
int top_field_first
If the content is interlaced, is top field displayed first.
void ff_mpv_frame_end(MpegEncContext *s)
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
uint8_t * obmc_scratchpad
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
int16_t(* block)[64]
points to one of the following blocks
ParseContext parse_context
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Picture next_picture
copy of the next picture structure.
AVBufferRef * mc_mb_var_buf
int key_frame
1 -> keyframe, 0-> not
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
static int init_er(MpegEncContext *s)
int chroma_qscale
chroma QP
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int frame_number
Frame counter, set by libavcodec.
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
static void free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution.
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
uint32_t * mb_type
types and macros are defined in mpegutils.h
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
#define av_malloc_array(a, b)
#define FFSWAP(type, a, b)
int debug_mv
debug Code outside libavcodec should access this field using AVOptions
#define MV_TYPE_8X8
4 vectors (h263, mpeg4 4MV)
#define FF_DEBUG_VIS_MV_P_FOR
int16_t(* b_direct_mv_table_base)[2]
int b_code
backward MV resolution for B Frames (mpeg4)
int64_t mb_var_sum
sum of MB variance for current frame
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
void ff_mpv_report_decode_progress(MpegEncContext *s)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
AVBufferRef * ref_index_buf[2]