Go to the documentation of this file.
28 #define UNCHECKED_BITSTREAM_READER 1
75 #define MB_TYPE_ZERO_MV 0x20000000
128 #define MAX_INDEX (64 - 1)
129 #define check_scantable_index(ctx, x) \
131 if ((x) > MAX_INDEX) { \
132 av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
133 ctx->mb_x, ctx->mb_y); \
134 return AVERROR_INVALIDDATA; \
139 int16_t *block, int n)
143 uint8_t *const scantable = s->intra_scantable.permutated;
144 const uint16_t *quant_matrix = s->inter_matrix;
145 const int qscale = s->qscale;
153 level = (3 * qscale * quant_matrix[0]) >> 5;
173 level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
188 } else if (level == 0) {
198 level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
202 level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
219 s->block_last_index[n] = i;
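The fragments above are the MPEG-1 non-intra reconstruction: each AC level is scaled as ((2*level + 1) * qscale * quant_matrix[j]) >> 5, the sign from the bitstream is applied, and the result is forced odd ("oddification", the MPEG-1 counterpart of MPEG-2 mismatch control). A minimal standalone sketch of that arithmetic; the function name, the explicit sign parameter and the saturation bound are illustrative, not taken from the decoder:

    #include <stdint.h>

    /* Illustrative sketch of MPEG-1 non-intra coefficient reconstruction:
     * 'level' is the decoded VLC magnitude (>= 1), 'sign' the decoded sign bit,
     * 'qscale' the quantiser scale, 'qm' the weight from the inter matrix. */
    static int16_t mpeg1_dequant_inter(int level, int sign, int qscale, uint16_t qm)
    {
        int v = ((level * 2 + 1) * qscale * (int)qm) >> 5;
        v = (v - 1) | 1;                 /* force the reconstructed value odd */
        if (v > 2047)
            v = 2047;                    /* saturate to the 12-bit coefficient range */
        return (int16_t)(sign ? -v : v);
    }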
228 int16_t *block, int n)
232 uint8_t *const scantable = s->intra_scantable.permutated;
233 const int qscale = s->qscale;
241 level = (3 * qscale) >> 1;
277 } else if (level == 0) {
308 s->block_last_index[n] = i;
313 int16_t *block, int n)
317 uint8_t *const scantable = s->intra_scantable.permutated;
318 const uint16_t *quant_matrix;
319 const int qscale = s->qscale;
328 quant_matrix = s->inter_matrix;
330 quant_matrix = s->chroma_inter_matrix;
335 level = (3 * qscale * quant_matrix[0]) >> 5;
356 level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
373 level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
376 level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
390 block[63] ^= (mismatch & 1);
394 s->block_last_index[n] = i;
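The MPEG-2 non-intra path above uses the same >> 5 scaling but replaces per-coefficient oddification with mismatch control: the parity of all reconstructed coefficients is accumulated in `mismatch`, and once the block is finished the last coefficient's LSB is flipped if the sum would otherwise be even (the block[63] ^= (mismatch & 1) line). A sketch that recomputes the parity instead of tracking it incrementally (illustrative helper, not the decoder's code):

    #include <stdint.h>

    /* Illustrative: MPEG-2 mismatch control. The sum of all 64 reconstructed
     * coefficients must be odd; if it is even, toggle the LSB of block[63]. */
    static void mpeg2_mismatch_control(int16_t block[64])
    {
        int sum = 0;
        for (int i = 0; i < 64; i++)
            sum += block[i];
        if (!(sum & 1))
            block[63] ^= 1;
    }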
403 int16_t *block, int n)
407 uint8_t *const scantable = s->intra_scantable.permutated;
408 const int qscale = s->qscale;
415 level = (3 * qscale) >> 1;
470 s->block_last_index[n] = i;
475 int16_t *block, int n)
480 uint8_t *const scantable = s->intra_scantable.permutated;
481 const uint16_t *quant_matrix;
482 const int qscale = s->qscale;
487 quant_matrix = s->intra_matrix;
490 quant_matrix = s->chroma_intra_matrix;
491 component = (n & 1) + 1;
496 dc = s->last_dc[component];
498 s->last_dc[component] = dc;
499 block[0] = dc * (1 << (3 - s->intra_dc_precision));
501 mismatch = block[0] ^ 1;
503 if (s->intra_vlc_format)
518 } else if (level != 0) {
523 level = (level * qscale * quant_matrix[j]) >> 4;
539 level = (-level * qscale * quant_matrix[j]) >> 4;
542 level = (level * qscale * quant_matrix[j]) >> 4;
551 block[63] ^= mismatch & 1;
555 s->block_last_index[n] = i;
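For intra blocks the DC coefficient is DPCM-coded against a per-component predictor (s->last_dc[component]) and then shifted up according to intra_dc_precision: with p extra bits of precision the reconstruction is dc << (3 - p), which is what block[0] = dc * (1 << (3 - s->intra_dc_precision)) does above. A small sketch with the predictor passed in explicitly (names illustrative):

    #include <stdint.h>

    /* Illustrative: rebuild the intra DC coefficient from its coded differential,
     * the per-component predictor and intra_dc_precision (0..3 for 8..11 bits). */
    static int16_t reconstruct_intra_dc(int dc_diff, int *last_dc, int intra_dc_precision)
    {
        int dc = *last_dc + dc_diff;   /* DPCM prediction from the previous block */
        *last_dc = dc;                 /* update the predictor for the next block */
        return (int16_t)(dc * (1 << (3 - intra_dc_precision)));
    }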
564 int16_t *block, int n)
569 uint8_t *const scantable = s->intra_scantable.permutated;
570 const uint16_t *quant_matrix;
571 const int qscale = s->qscale;
575 quant_matrix = s->intra_matrix;
578 quant_matrix = s->chroma_intra_matrix;
579 component = (n & 1) + 1;
584 dc = s->last_dc[component];
586 s->last_dc[component] = dc;
587 block[0] = dc * (1 << (3 - s->intra_dc_precision));
589 if (s->intra_vlc_format)
602 if (level >= 64 || i > 63) {
604 } else if (level != 0) {
607 level = (level * qscale * quant_matrix[j]) >> 4;
621 level = (-level * qscale * quant_matrix[j]) >> 4;
624 level = (level * qscale * quant_matrix[j]) >> 4;
635 s->block_last_index[n] = i;
658 int i, j, k, cbp, val, mb_type, motion_type;
659 const int mb_block_count = 4 + (1 << s->chroma_format);
662 ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
666 if (s->mb_skip_run-- != 0) {
669 s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
675 mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
678 mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
683 s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
686 if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
693 switch (s->pict_type) {
699 "Invalid mb type in I-frame at %d %d\n",
712 "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
721 "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
727 ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
730 s->bdsp.clear_blocks(s->block[0]);
732 if (!s->chroma_y_shift)
733 s->bdsp.clear_blocks(s->block[6]);
738 !s->frame_pred_frame_dct)
744 if (s->concealment_motion_vectors) {
750 s->last_mv[0][0][0] =
752 s->last_mv[0][0][0]);
754 s->last_mv[0][0][1] =
756 s->last_mv[0][0][1]);
758 check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
761 memset(s->last_mv, 0, sizeof(s->last_mv));
765 if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks)
770 for (i = 0; i < 6; i++)
773 for (i = 0; i < mb_block_count; i++)
778 for (i = 0; i < 6; i++) {
781 s->intra_scantable.permutated,
782 s->last_dc, *s->pblocks[i],
790 s->block_last_index[i] = ret;
800 && !s->frame_pred_frame_dct)
806 s->field_select[0][0] = s->picture_structure - 1;
812 s->last_mv[0][0][0] = 0;
813 s->last_mv[0][0][1] = 0;
814 s->last_mv[0][1][0] = 0;
815 s->last_mv[0][1][1] = 0;
822 if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
834 s->mv_dir = (mb_type >> 13) & 3;
835 ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
836 switch (motion_type) {
841 for (i = 0; i < 2; i++) {
845 s->last_mv[i][0][0] =
846 s->last_mv[i][1][0] =
848 s->last_mv[i][0][0]);
850 s->last_mv[i][0][1] =
851 s->last_mv[i][1][1] =
853 s->last_mv[i][0][1]);
855 if (s->full_pel[i]) {
864 for (i = 0; i < 2; i++) {
867 for (j = 0; j < 2; j++) {
869 for (k = 0; k < 2; k++) {
871 s->last_mv[i][j][k]);
872 s->last_mv[i][j][k] = val;
873 s->mv[i][j][k] = val;
884 for (i = 0; i < 2; i++) {
886 for (j = 0; j < 2; j++) {
889 s->last_mv[i][j][0]);
890 s->last_mv[i][j][0] = val;
891 s->mv[i][j][0] = val;
894 s->last_mv[i][j][1] >> 1);
895 s->last_mv[i][j][1] = 2 * val;
896 s->mv[i][j][1] = val;
904 for (i = 0; i < 2; i++) {
907 for (k = 0; k < 2; k++) {
909 s->last_mv[i][0][k]);
910 s->last_mv[i][0][k] = val;
911 s->last_mv[i][1][k] = val;
912 s->mv[i][0][k] = val;
919 if (s->progressive_sequence) {
924 for (i = 0; i < 2; i++) {
926 int dmx, dmy, mx, my, m;
927 const int my_shift = s->picture_structure == PICT_FRAME;
930 s->last_mv[i][0][0]);
931 s->last_mv[i][0][0] = mx;
932 s->last_mv[i][1][0] = mx;
935 s->last_mv[i][0][1] >> my_shift);
939 s->last_mv[i][0][1] = my * (1 << my_shift);
940 s->last_mv[i][1][1] = my * (1 << my_shift);
951 m = s->top_field_first ? 1 : 3;
954 s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
955 s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
957 s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
958 s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
962 s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
963 s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
974 "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
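The MV_TYPE_DMV branch above derives the extra dual-prime vectors from the transmitted vector (mx, my) and the small differential (dmx, dmy): the vector is scaled by m (1 or 3 in frame pictures, chosen by top_field_first), rounded with the (mx > 0) term, halved, and the differential added, plus a ±1 vertical correction between fields. A sketch of that expression for a single component; the function and parameter names are illustrative:

    /* Illustrative: one component of an MPEG-2 dual-prime predicted vector.
     * 'mv' is the decoded vector component, 'dmv' the differential component,
     * 'm' the temporal scale factor used in the listing (1 or 3). */
    static int dual_prime_component(int mv, int dmv, int m)
    {
        return ((mv * m + (mv > 0)) >> 1) + dmv;
    }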
981 s->bdsp.clear_blocks(s->block[0]);
984 if (mb_block_count > 6) {
985 cbp *= 1 << mb_block_count - 6;
986 cbp |= get_bits(&s->gb, mb_block_count - 6);
987 s->bdsp.clear_blocks(s->block[6]);
991 "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
996 if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks)
1001 for (i = 0; i < 6; i++) {
1005 s->block_last_index[i] = -1;
1009 cbp <<= 12 - mb_block_count;
1011 for (i = 0; i < mb_block_count; i++) {
1012 if (cbp & (1 << 11)) {
1016 s->block_last_index[i] = -1;
1023 for (i = 0; i < 6; i++) {
1027 s->block_last_index[i] = -1;
1031 for (i = 0; i < 6; i++) {
1036 s->block_last_index[i] = -1;
1043 for (i = 0; i < 12; i++)
1044 s->block_last_index[i] = -1;
1048 s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
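The coded block pattern read above is left-aligned with cbp <<= 12 - mb_block_count so that bit 11 always corresponds to the next block in transmission order; uncoded blocks simply get block_last_index[i] = -1. A sketch of that bit walk, with decode_block() standing in for the real per-block entropy decoder (illustrative callback):

    /* Illustrative: walk a left-aligned coded block pattern. mb_block_count is
     * 6, 8 or 12 depending on chroma format. */
    static void walk_cbp(unsigned cbp, int mb_block_count,
                         int (*decode_block)(int index), int block_last_index[12])
    {
        cbp <<= 12 - mb_block_count;          /* put the first block at bit 11 */
        for (int i = 0; i < mb_block_count; i++) {
            if (cbp & (1u << 11))
                block_last_index[i] = decode_block(i);
            else
                block_last_index[i] = -1;     /* block not coded in this MB */
            cbp += cbp;                       /* shift the next block into bit 11 */
        }
    }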
1065 s->mpeg_enc_ctx.avctx = avctx;
1073 s2->chroma_format = 1;
1074 s->mpeg_enc_ctx_allocated = 0;
1075 s->mpeg_enc_ctx.picture_number = 0;
1076 s->repeat_field = 0;
1077 s->mpeg_enc_ctx.codec_id = avctx->codec->id;
1083 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
1090 if (avctx == avctx_from ||
1091 !ctx_from->mpeg_enc_ctx_allocated ||
1092 !s1->context_initialized)
1099 if (!ctx->mpeg_enc_ctx_allocated)
1103 s->picture_number++;
1112 uint16_t temp_matrix[64];
1115 memcpy(temp_matrix, matrix, 64 * sizeof(uint16_t));
1117 for (i = 0; i < 64; i++)
1118 matrix[new_perm[i]] = temp_matrix[old_perm[i]];
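quant_matrix_rebuild() converts a quantisation matrix from one IDCT coefficient permutation to another: the value stored for scan position i under the old permutation is re-stored where the new permutation expects it, which is the matrix[new_perm[i]] = temp_matrix[old_perm[i]] line above. A standalone sketch (illustrative name):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative: re-permute a 64-entry quantisation matrix after the IDCT
     * (and therefore its coefficient permutation) has been switched. */
    static void rebuild_quant_matrix(uint16_t matrix[64],
                                     const uint8_t old_perm[64],
                                     const uint8_t new_perm[64])
    {
        uint16_t tmp[64];
        memcpy(tmp, matrix, sizeof(tmp));
        for (int i = 0; i < 64; i++)
            matrix[new_perm[i]] = tmp[old_perm[i]];
    }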
1122 #if CONFIG_MPEG1_NVDEC_HWACCEL
1125 #if CONFIG_MPEG1_XVMC_HWACCEL
1128 #if CONFIG_MPEG1_VDPAU_HWACCEL
1136 #if CONFIG_MPEG2_NVDEC_HWACCEL
1139 #if CONFIG_MPEG2_XVMC_HWACCEL
1142 #if CONFIG_MPEG2_VDPAU_HWACCEL
1145 #if CONFIG_MPEG2_DXVA2_HWACCEL
1148 #if CONFIG_MPEG2_D3D11VA_HWACCEL
1152 #if CONFIG_MPEG2_VAAPI_HWACCEL
1155 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
1181 if (s->chroma_format < 2)
1185 else if (s->chroma_format == 2)
1204 s->pack_pblocks = 1;
1223 if (s->aspect_ratio_info > 1) {
1227 s1->pan_scan.height }),
1234 if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
1237 s->avctx->sample_aspect_ratio =
1241 s->avctx->sample_aspect_ratio =
1243 (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
1248 ff_dlog(avctx, "aspect A %d/%d\n",
1251 ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
1252 s->avctx->sample_aspect_ratio.den);
1255 s->avctx->sample_aspect_ratio =
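The aspect-ratio handling above converts the bitstream's display aspect ratio (ff_mpeg2_aspect[aspect_ratio_info]) into a sample aspect ratio by dividing it by the width/height the display aspect ratio refers to: the pan-scan rectangle from the sequence display extension when it is valid, the coded size otherwise. A hedged sketch of that conversion using libavutil's rational helpers (wrapper name illustrative):

    #include <libavutil/rational.h>

    /* Illustrative: sample aspect ratio from a display aspect ratio and the
     * dimensions that display aspect ratio describes. */
    static AVRational sar_from_dar(AVRational dar, int disp_w, int disp_h)
    {
        return av_div_q(dar, (AVRational){ disp_w, disp_h });
    }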
1268 if ((s1->mpeg_enc_ctx_allocated == 0) ||
1271 s1->save_width != s->width ||
1272 s1->save_height != s->height ||
1273 av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
1274 (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
1276 if (s1->mpeg_enc_ctx_allocated) {
1278 s->parse_context.buffer = 0;
1280 s->parse_context = pc;
1281 s1->mpeg_enc_ctx_allocated = 0;
1291 (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
1294 s1->save_aspect = s->avctx->sample_aspect_ratio;
1295 s1->save_width = s->width;
1296 s1->save_height = s->height;
1297 s1->save_progressive_seq = s->progressive_sequence;
1312 &s->avctx->framerate.den,
1318 switch (s->chroma_format) {
1331 memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t));
1342 s1->mpeg_enc_ctx_allocated = 1;
1352 int ref, f_code, vbv_delay;
1358 if (s->pict_type == 0 || s->pict_type > 3)
1362 s->vbv_delay = vbv_delay;
1370 s->mpeg_f_code[0][0] = f_code;
1371 s->mpeg_f_code[0][1] = f_code;
1379 s->mpeg_f_code[1][0] = f_code;
1380 s->mpeg_f_code[1][1] = f_code;
1382 s->current_picture.f->pict_type = s->pict_type;
1387 "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1397 int horiz_size_ext, vert_size_ext;
1407 if (!s->chroma_format) {
1408 s->chroma_format = 1;
1414 s->width |= (horiz_size_ext << 12);
1415 s->height |= (vert_size_ext << 12);
1417 s->bit_rate += (bit_rate_ext << 18) * 400LL;
1419 s1->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
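The sequence extension widens several base header fields: horizontal/vertical size gain two extra high bits (<< 12), twelve more bit-rate bits are added above bit 18 in units of 400 bit/s, and eight extra vbv_buffer_size bits enlarge the rate-control buffer (the << 10 term above). A sketch of the size and bit-rate part only; the wrapper is illustrative:

    #include <stdint.h>

    /* Illustrative: fold MPEG-2 sequence-extension fields into the values read
     * from the base sequence header, mirroring the shifts in the listing. */
    static void apply_sequence_extension(int *width, int *height, int64_t *bit_rate,
                                         int horiz_size_ext, int vert_size_ext,
                                         int bit_rate_ext)
    {
        *width    |= horiz_size_ext << 12;                /* 2 extra high bits of width  */
        *height   |= vert_size_ext  << 12;                /* 2 extra high bits of height */
        *bit_rate += ((int64_t)bit_rate_ext << 18) * 400; /* extension in units of 400 bit/s */
    }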
1428 ff_dlog(s->avctx, "sequence extension\n");
1433 if (s->bit_rate != 0x3FFFF*400)
1439 "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1440 s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1441 s1->rc_buffer_size, s->bit_rate);
1447 int color_description, w, h;
1451 if (color_description) {
1452 s->avctx->color_primaries = get_bits(&s->gb, 8);
1461 s1->pan_scan.width = 16 * w;
1462 s1->pan_scan.height = 16 * h;
1474 if (s->progressive_sequence) {
1475 if (s->repeat_first_field) {
1477 if (s->top_field_first)
1483 if (s->repeat_first_field)
1487 for (i = 0; i < nofco; i++) {
1496 "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1497 s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1498 s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1499 s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1503 uint16_t matrix1[64], int intra)
1507 for (i = 0; i < 64; i++) {
1514 if (intra && i == 0 && v != 8) {
1515 av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1527 ff_dlog(s->avctx, "matrix extension\n");
1543 s->full_pel[0] = s->full_pel[1] = 0;
1548 s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1549 s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1550 s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1551 s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1552 if (!s->pict_type && s1->mpeg_enc_ctx_allocated) {
1554 "Missing picture start code, guessing missing values\n");
1555 if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1556 if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1562 s->current_picture.f->pict_type = s->pict_type;
1570 s->concealment_motion_vectors = get_bits1(&s->gb);
1578 if (s->alternate_scan) {
1587 ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1588 ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1589 ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1590 ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1591 ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1592 ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1593 ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1594 ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1595 ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1605 if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1610 if (s->first_field || s->picture_structure == PICT_FRAME) {
1619 s->current_picture_ptr->f->repeat_pict = 0;
1620 if (s->repeat_first_field) {
1621 if (s->progressive_sequence) {
1622 if (s->top_field_first)
1623 s->current_picture_ptr->f->repeat_pict = 4;
1625 s->current_picture_ptr->f->repeat_pict = 2;
1626 } else if (s->progressive_frame) {
1627 s->current_picture_ptr->f->repeat_pict = 1;
1633 sizeof(s1->pan_scan));
1636 memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1638 if (s1->a53_caption) {
1641 s1->a53_caption_size);
1643 memcpy(sd->data, s1->a53_caption, s1->a53_caption_size);
1647 if (s1->has_stereo3d) {
1652 *stereo = s1->stereo3d;
1653 s1->has_stereo3d = 0;
1672 if (!s->current_picture_ptr) {
1677 if (s->avctx->hwaccel) {
1678 if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) {
1680 "hardware accelerator failed to decode first field\n");
1685 for (i = 0; i < 4; i++) {
1686 s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
1688 s->current_picture.f->data[i] +=
1689 s->current_picture_ptr->f->linesize[i];
1701 #define DECODE_SLICE_ERROR -1
1702 #define DECODE_SLICE_OK 0
1711 const uint8_t **buf, int buf_size)
1714 const int lowres = s->avctx->lowres;
1715 const int field_pic = s->picture_structure != PICT_FRAME;
1719 s->resync_mb_y = -1;
1728 s->interlaced_dct = 0;
1732 if (s->qscale == 0) {
1743 if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1764 if (s->mb_x >= (unsigned) s->mb_width) {
1770 const uint8_t *buf_end, *buf_start = *buf - 4;
1773 if (buf_end < *buf + buf_size)
1782 s->resync_mb_x = s->mb_x;
1783 s->resync_mb_y = s->mb_y = mb_y;
1787 if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1790 "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1792 s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1793 s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1797 s->progressive_sequence ? "ps" : "",
1798 s->progressive_frame ? "pf" : "",
1799 s->alternate_scan ? "alt" : "",
1800 s->top_field_first ? "top" : "",
1801 s->intra_dc_precision, s->picture_structure,
1802 s->frame_pred_frame_dct, s->concealment_motion_vectors,
1803 s->q_scale_type, s->intra_vlc_format,
1804 s->repeat_first_field, s->chroma_420_type ? "420" : "");
1810 if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks)
1817 if (s->current_picture.motion_val[0] && !s->encoding) {
1818 const int wrap = s->b8_stride;
1819 int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1820 int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1821 int motion_x, motion_y, dir, i;
1823 for (i = 0; i < 2; i++) {
1824 for (dir = 0; dir < 2; dir++) {
1827 motion_x = motion_y = 0;
1830 motion_x = s->mv[dir][0][0];
1831 motion_y = s->mv[dir][0][1];
1833 motion_x = s->mv[dir][i][0];
1834 motion_y = s->mv[dir][i][1];
1837 s->current_picture.motion_val[dir][xy][0] = motion_x;
1838 s->current_picture.motion_val[dir][xy][1] = motion_y;
1839 s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
1840 s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
1841 s->current_picture.ref_index [dir][b8_xy] =
1842 s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1844 s->field_select[dir][i] == 1);
1852 s->dest[1] += (16 >> lowres) >> s->chroma_x_shift;
1853 s->dest[2] += (16 >> lowres) >> s->chroma_x_shift;
1857 if (++s->mb_x >= s->mb_width) {
1858 const int mb_size = 16 >> s->avctx->lowres;
1865 s->mb_y += 1 << field_pic;
1867 if (s->mb_y >= s->mb_height) {
1869 int is_d10 = s->chroma_format == 2 &&
1872 s->intra_dc_precision == 2 &&
1873 s->q_scale_type == 1 && s->alternate_scan == 0 &&
1874 s->progressive_frame == 0
1877 if (left >= 32 && !is_d10) {
1904 if (s->mb_y >= ((s->height + 15) >> 4) &&
1905 !s->progressive_sequence &&
1908 s->mb_skip_run == -1 &&
1916 if (s->mb_skip_run == -1) {
1928 s->mb_skip_run += 33;
1929 } else if (code == 35) {
1930 if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1938 s->mb_skip_run += code;
1942 if (s->mb_skip_run) {
1946 "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1952 for (i = 0; i < 12; i++)
1953 s->block_last_index[i] = -1;
1961 s->mv[0][0][0] = s->mv[0][0][1] = 0;
1962 s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1963 s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1964 s->field_select[0][0] = (s->picture_structure - 1) & 1;
1967 s->mv[0][0][0] = s->last_mv[0][0][0];
1968 s->mv[0][0][1] = s->last_mv[0][0][1];
1969 s->mv[1][0][0] = s->last_mv[1][0][0];
1970 s->mv[1][0][1] = s->last_mv[1][0][1];
1971 s->field_select[0][0] = (s->picture_structure - 1) & 1;
1972 s->field_select[1][0] = (s->picture_structure - 1) & 1;
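Between coded macroblocks the slice loop above accumulates the macroblock_address_increment VLC into mb_skip_run; an escape value adds 33 and reading continues, while the stuffing and end-of-slice codes visible in the listing (the code == 35 branch) are handled separately. A sketch of the accumulation loop only, with read_incr() standing in for the VLC reader (illustrative; stuffing and end-of-slice handling omitted):

    /* Illustrative: accumulate the macroblock address increment run. Values
     * below 33 are literal increments; 33 is treated here as the escape that
     * adds 33 and continues reading. */
    static int read_skip_run(int (*read_incr)(void *ctx), void *ctx)
    {
        int run = 0;
        for (;;) {
            int code = read_incr(ctx);
            if (code < 0)
                return -1;          /* damaged increment code */
            if (code == 33) {       /* escape: add 33 and keep reading */
                run += 33;
                continue;
            }
            return run + code;
        }
    }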
1983 ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1991 int mb_y = s->start_mb_y;
1992 const int field_pic = s->picture_structure != PICT_FRAME;
1994 s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
2002 ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
2003 ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
2004 s->start_mb_y, s->end_mb_y, s->er.error_count);
2008 if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
2014 s->mb_x - 1, s->mb_y,
2018 if (s->mb_y == s->end_mb_y)
2027 mb_y += (*buf&0xE0)<<2;
2031 if (mb_y >= s->end_mb_y)
2045 if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
2048 if (s->avctx->hwaccel) {
2049 int ret = s->avctx->hwaccel->end_frame(s->avctx);
2052 "hardware accelerator failed to decode picture\n");
2058 if (!s->first_field && !s1->first_slice) {
2073 s->picture_number++;
2076 if (s->last_picture_ptr) {
2092 const uint8_t *buf, int buf_size)
2105 "Invalid horizontal or vertical size value.\n");
2110 if (
s->aspect_ratio_info == 0) {
2116 if (
s->frame_rate_index == 0 ||
s->frame_rate_index > 13) {
2118 "frame_rate_index %d is invalid\n",
s->frame_rate_index);
2119 s->frame_rate_index = 1;
2126 s1->rc_buffer_size =
get_bits(&
s->gb, 10) * 1024 * 16;
2133 for (
i = 0;
i < 64;
i++) {
2134 j =
s->idsp.idct_permutation[
i];
2136 s->intra_matrix[j] = v;
2137 s->chroma_intra_matrix[j] = v;
2143 for (
i = 0;
i < 64;
i++) {
2144 int j =
s->idsp.idct_permutation[
i];
2146 s->inter_matrix[j] = v;
2147 s->chroma_inter_matrix[j] = v;
2160 s->progressive_sequence = 1;
2161 s->progressive_frame = 1;
2164 s->frame_pred_frame_dct = 1;
2165 s->chroma_format = 1;
2174 av_log(
s->avctx,
AV_LOG_DEBUG,
"vbv buffer: %d, bitrate:%"PRId64
", aspect_ratio_info: %d \n",
2175 s1->rc_buffer_size,
s->bit_rate,
s->aspect_ratio_info);
2188 if (
s1->mpeg_enc_ctx_allocated) {
2190 s1->mpeg_enc_ctx_allocated = 0;
2203 s1->mpeg_enc_ctx_allocated = 1;
2205 for (
i = 0;
i < 64;
i++) {
2206 int j =
s->idsp.idct_permutation[
i];
2208 s->intra_matrix[j] = v;
2209 s->chroma_intra_matrix[j] = v;
2212 s->inter_matrix[j] = v;
2213 s->chroma_inter_matrix[j] = v;
2216 s->progressive_sequence = 1;
2217 s->progressive_frame = 1;
2220 s->frame_pred_frame_dct = 1;
2221 s->chroma_format = 1;
2222 if (
s->codec_tag ==
AV_RL32(
"BW10")) {
2228 s1->save_width =
s->width;
2229 s1->save_height =
s->height;
2230 s1->save_progressive_seq =
s->progressive_sequence;
2235 const uint8_t *p, int buf_size)
2239 if (buf_size >= 6 &&
2240 p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
2241 p[4] == 3 && (p[5] & 0x40)) {
2243 int cc_count = p[5] & 0x1f;
2244 if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
2246 s1->a53_caption_size = cc_count * 3;
2248 if (!s1->a53_caption) {
2249 s1->a53_caption_size = 0;
2251 memcpy(s1->a53_caption, p + 7, s1->a53_caption_size);
2256 } else if (buf_size >= 2 &&
2257 p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
2267 s1->a53_caption_size = cc_count * 3;
2269 if (!s1->a53_caption) {
2270 s1->a53_caption_size = 0;
2283 cap[0] = cap[1] = cap[2] = 0x00;
2287 cap[0] = 0x04 | field;
2297 } else if (buf_size >= 11 &&
2298 p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
2328 for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2333 s1->a53_caption_size = cc_count * 6;
2335 if (!s1->a53_caption) {
2336 s1->a53_caption_size = 0;
2338 uint8_t field1 = !!(p[4] & 0x80);
2341 for (i = 0; i < cc_count; i++) {
2342 cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2345 cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
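The first branch above recognises ATSC A/53 captions in picture user data: the payload starts with 'G','A','9','4', a user_data_type_code of 3, a flag byte whose low five bits give cc_count, and then cc_count 3-byte CC packets beginning at offset 7. A hedged sketch of just that check and copy; the function name is illustrative and plain malloc() stands in for the decoder's allocator:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative: extract ATSC A/53 ("GA94") closed-caption packets from MPEG
     * user data. Returns the payload size copied into *out (caller frees) or 0. */
    static int extract_ga94_cc(const uint8_t *p, int buf_size, uint8_t **out)
    {
        if (buf_size >= 6 &&
            p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
            p[4] == 3 && (p[5] & 0x40)) {           /* process_cc_data_flag set */
            int cc_count = p[5] & 0x1f;             /* number of 3-byte packets */
            if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
                int size = cc_count * 3;
                uint8_t *buf = malloc(size);
                if (!buf)
                    return 0;
                memcpy(buf, p + 7, size);           /* packets start at offset 7 */
                *out = buf;
                return size;
            }
        }
        return 0;
    }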
2360 const uint8_t *p, int buf_size)
2363 const uint8_t *buf_end = p + buf_size;
2368 for (i = 0; !(!p[i-2] && !p[i-1] && p[i] == 1) && i < buf_size; i++) {
2377 if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2382 if (buf_end - p >= 5 &&
2383 p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2391 if (buf_end - p < 1)
2394 s1->afd = p[0] & 0x0f;
2396 } else if (buf_end - p >= 6 &&
2397 p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2400 const uint8_t S3D_video_format_type = p[5] & 0x7F;
2402 if (S3D_video_format_type == 0x03 ||
2403 S3D_video_format_type == 0x04 ||
2404 S3D_video_format_type == 0x08 ||
2405 S3D_video_format_type == 0x23) {
2407 s1->has_stereo3d = 1;
2409 switch (S3D_video_format_type) {
2430 const uint8_t *buf, int buf_size)
2441 #if FF_API_PRIVATE_OPT
2457 "GOP (%s) closed_gop=%d broken_link=%d\n",
2458 tcbuf,
s->closed_gop, broken_link);
2463 int *got_output,
const uint8_t *buf,
int buf_size)
2468 const uint8_t *buf_end = buf + buf_size;
2469 int ret, input_size;
2470 int last_code = 0, skip_frame = 0;
2471 int picture_start_code_seen = 0;
2486 &
s2->thread_context[0],
NULL,
2487 s->slice_count,
sizeof(
void *));
2488 for (
i = 0;
i <
s->slice_count;
i++)
2489 s2->er.error_count +=
s2->thread_context[
i]->er.error_count;
2506 return FFMAX(0, buf_ptr - buf -
s2->parse_context.last_index);
2509 input_size = buf_end - buf_ptr;
2518 if (last_code == 0) {
2524 "ignoring SEQ_START_CODE after %X\n", last_code);
2531 if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2537 picture_start_code_seen = 1;
2539 if (s2->width <= 0 || s2->height <= 0) {
2541 s2->width, s2->height);
2546 s2->intra_dc_precision= 3;
2547 s2->intra_matrix[0]= 1;
2550 !avctx->hwaccel && s->slice_count) {
2554 s2->thread_context, NULL,
2555 s->slice_count, sizeof(void *));
2556 for (i = 0; i < s->slice_count; i++)
2557 s2->er.error_count += s2->thread_context[i]->er.error_count;
2564 "mpeg_decode_postinit() failure\n");
2575 "ignoring pic after %X\n", last_code);
2585 if (last_code == 0) {
2589 "ignoring seq ext after %X\n", last_code);
2608 "ignoring pic cod ext after %X\n", last_code);
2619 if (last_code == 0) {
2620 s2->first_field = 0;
2625 "ignoring GOP_START_CODE after %X\n", last_code);
2633 if (s2->progressive_sequence && !s2->progressive_frame) {
2634 s2->progressive_frame = 1;
2636 "interlaced frame in progressive sequence, ignoring\n");
2639 if (s2->picture_structure == 0 ||
2640 (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2642 "picture_structure %d invalid, ignoring\n",
2643 s2->picture_structure);
2647 if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2651 s2->first_field = 0;
2652 s2->v_edge_pos = 16 * s2->mb_height;
2654 s2->first_field ^= 1;
2655 s2->v_edge_pos = 8 * s2->mb_height;
2656 memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2661 const int field_pic = s2->picture_structure != PICT_FRAME;
2665 mb_y += (*buf_ptr&0xE0)<<2;
2671 if (buf_end - buf_ptr < 2) {
2676 if (mb_y >= s2->mb_height) {
2678 "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2682 if (!s2->last_picture_ptr) {
2686 if (!s2->closed_gop) {
2689 "Skipping B slice due to open GOP\n");
2696 if (!s2->next_picture_ptr) {
2702 "Skipping P slice due to !sync\n");
2715 if (!s->mpeg_enc_ctx_allocated)
2719 if (mb_y < avctx->skip_top ||
2724 if (!s2->pict_type) {
2731 if (s->first_slice) {
2737 if (!s2->current_picture_ptr) {
2739 "current_picture not initialized\n");
2746 int threshold = (s2->mb_height * s->slice_count +
2747 s2->slice_context_count / 2) /
2748 s2->slice_context_count;
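With slice threading, the picture is split into slice_context_count bands of macroblock rows and a new thread context is started when an incoming slice's mb_y crosses the next band boundary; the rounded boundary is (mb_height * slice_count + slice_context_count / 2) / slice_context_count, as computed above. For example, with mb_height = 68 and four contexts the boundaries fall at rows 17, 34 and 51. A one-line sketch (illustrative helper):

    /* Illustrative: next mb_y at which a new slice thread context is started,
     * given 'slice_count' contexts already in use. */
    static int next_slice_threshold(int mb_height, int slice_count, int slice_context_count)
    {
        return (mb_height * slice_count + slice_context_count / 2) / slice_context_count;
    }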
2750 if (threshold <= mb_y) {
2755 if (s->slice_count) {
2756 s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2772 if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2774 s2->resync_mb_y, s2->mb_x, s2->mb_y,
2778 s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2793 int buf_size = avpkt->size;
2800 if (s2->low_delay == 0 && s2->next_picture_ptr) {
2805 s2->next_picture_ptr = NULL;
2817 (const uint8_t **) &buf, &buf_size) < 0)
2822 if (s->mpeg_enc_ctx_allocated == 0 && (s2->codec_tag == AV_RL32("VCR2")
2829 if (avctx->extradata && !s->extradata_decoded) {
2837 s->extradata_decoded = 1;
2839 s2->current_picture_ptr = NULL;
2845 if (ret < 0 || *got_output) {
2846 s2->current_picture_ptr = NULL;
2848 if (s2->timecode_frame_start != -1 && *got_output) {
2854 memcpy(tcside->data, &s2->timecode_frame_start, sizeof(int64_t));
2856 s2->timecode_frame_start = -1;
2876 if (s->mpeg_enc_ctx_allocated)
2883 .name = "mpeg1video",
2899 #if CONFIG_MPEG1_NVDEC_HWACCEL
2902 #if CONFIG_MPEG1_VDPAU_HWACCEL
2905 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2908 #if CONFIG_MPEG1_XVMC_HWACCEL
2916 .name = "mpeg2video",
2932 #if CONFIG_MPEG2_DXVA2_HWACCEL
2935 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2938 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2941 #if CONFIG_MPEG2_NVDEC_HWACCEL
2944 #if CONFIG_MPEG2_VAAPI_HWACCEL
2947 #if CONFIG_MPEG2_VDPAU_HWACCEL
2950 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2953 #if CONFIG_MPEG2_XVMC_HWACCEL
2962 .name = "mpegvideo",
static int vcr2_init_sequence(AVCodecContext *avctx)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
#define FF_ENABLE_DEPRECATION_WARNINGS
#define MV_TYPE_16X16
1 vector for the whole mb
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_TIMECODE_STR_SIZE
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
AVPixelFormat
Pixel format.
const AVRational ff_mpeg2_aspect[16]
static av_cold int init(AVCodecContext *avctx)
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
static int get_bits_left(GetBitContext *gb)
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
static int mpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt)
#define check_scantable_index(ctx, x)
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
static int mpeg_get_qscale(MpegEncContext *s)
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
static int get_bits_count(const GetBitContext *s)
static av_cold int end(AVCodecContext *avctx)
#define AV_CODEC_CAP_TRUNCATED
This structure describes decoded (raw) audio or video data.
av_cold void ff_mpeg12_common_init(MpegEncContext *s)
unsigned int avpriv_toupper4(unsigned int x)
#define HWACCEL_DXVA2(codec)
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
#define HWACCEL_D3D11VA2(codec)
const uint8_t ff_reverse[256]
#define HWACCEL_XVMC(codec)
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
static int mpeg2_fast_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
#define PICT_BOTTOM_FIELD
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
void ff_init_block_index(MpegEncContext *s)
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
#define UPDATE_CACHE(name, gb)
static int mpeg_decode_postinit(AVCodecContext *avctx)
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
#define SLICE_MAX_START_CODE
#define FF_DEBUG_PICT_INFO
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
#define GET_CACHE(name, gb)
static void skip_bits(GetBitContext *s, int n)
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
const struct AVCodec * codec
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
enum AVDiscard skip_frame
Skip decoding for selected frames.
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, uint8_t *const scantable, int last_dc[3], int16_t *block, int index, int qscale)
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
void ff_xvmc_init_block(MpegEncContext *s)
Initialize the block field of the MpegEncContext pointer passed as parameter after making sure that t...
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
#define PICTURE_START_CODE
#define USES_LIST(a, list)
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
static int slice_decode_thread(AVCodecContext *c, void *arg)
void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
Fill individual block pointers, so there are no gaps in the data_block array in case not all blocks i...
int flags
AV_CODEC_FLAG_*.
static double val(void *priv, double ch)
#define HWACCEL_VDPAU(codec)
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
#define AV_EF_BITSTREAM
detect bitstream specification deviations
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
void ff_mpv_common_end(MpegEncContext *s)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
AVCodec ff_mpeg2video_decoder
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
static void flush(AVCodecContext *avctx)
#define CLOSE_READER(name, gb)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
int has_b_frames
Size of the frame reordering buffer in the decoder.
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
const float ff_mpeg1_aspect[16]
#define FF_QSCALE_TYPE_MPEG2
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
int mpeg_enc_ctx_allocated
#define SHOW_SBITS(name, gb, num)
void ff_mpeg_er_frame_start(MpegEncContext *s)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static enum AVPixelFormat pix_fmts[]
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
static int get_sbits(GetBitContext *s, int n)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
#define SKIP_BITS(name, gb, num)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
int64_t rc_max_rate
maximum bitrate
This structure describes the bitrate properties of an encoded bitstream.
@ AVDISCARD_ALL
discard all
#define MB_PTYPE_VLC_BITS
#define PTRDIFF_SPECIFIER
enum AVColorRange color_range
MPEG vs JPEG YUV range.
av_cold void ff_mpv_idct_init(MpegEncContext *s)
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Rational number (pair of numerator and denominator).
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
int64_t bit_rate
the average bitrate
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
@ AV_PICTURE_TYPE_I
Intra.
static unsigned int get_bits1(GetBitContext *s)
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
@ AV_PIX_FMT_XVMC
XVideo Motion Acceleration via common packet passing.
#define LAST_SKIP_BITS(name, gb, num)
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
#define MB_BTYPE_VLC_BITS
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
#define AV_EF_EXPLODE
abort decoding on minor error detection
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, const uint8_t *new_perm)
int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s)
Find the end of the current frame in the bitstream.
@ AVDISCARD_NONKEY
discard all frames except keyframes
int flags2
AV_CODEC_FLAG2_*.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
#define SLICE_MIN_START_CODE
void ff_mpeg1_clean_buffers(MpegEncContext *s)
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
void ff_mpeg_flush(AVCodecContext *avctx)
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
const uint16_t ff_mpeg1_default_intra_matrix[256]
#define MB_TYPE_INTERLACED
#define OPEN_READER(name, gb)
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
int max_bitrate
Maximum bitrate of the stream, in bits per second.
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
#define HWACCEL_D3D11VA(codec)
#define MV_TYPE_FIELD
2 vectors, one per field
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
static void skip_bits1(GetBitContext *s)
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
#define HWACCEL_NVDEC(codec)
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
Combine the (truncated) bitstream to a complete frame.
#define FF_THREAD_FRAME
Decode more than one frame at once.
const AVProfile ff_mpeg2_video_profiles[]
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
static void mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx)
attribute_deprecated int64_t timecode_frame_start
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define i(width, name, range_min, range_max)
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
unsigned properties
Properties of the stream that gets decoded.
const uint8_t ff_alternate_vertical_scan[64]
static const uint32_t btype2mb_type[11]
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
av_cold void ff_mpeg12_init_vlcs(void)
#define FF_DEBUG_STARTCODE
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
AVCodec ff_mpegvideo_decoder
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
static const uint32_t ptype2mb_type[7]
const char * name
Name of the codec implementation.
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
void ff_mpv_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
void ff_mpv_frame_end(MpegEncContext *s)
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
const uint8_t ff_zigzag_direct[64]
const AVRational ff_mpeg12_frame_rate_tab[]
static const float pred[4]
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
static int mpeg1_fast_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
static const uint8_t * align_get_bits(GetBitContext *s)
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
static int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
static int skip_1stop_8data_bits(GetBitContext *gb)
main external API structure.
int active_thread_type
Which multithreading methods are in use by the codec.
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
static int decode_dc(GetBitContext *gb, int component)
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
#define SHOW_UBITS(name, gb, num)
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
AVCodec ff_mpeg1video_decoder
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
static av_const int sign_extend(int val, unsigned bits)
static int ref[MAX_W *MAX_W]
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
static const AVProfile profiles[]
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define AV_EF_AGGRESSIVE
consider things that a sane encoder should not do as an error
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
#define FF_DISABLE_DEPRECATION_WARNINGS
static int shift(int a, int b)
int coded_width
Bitstream width / height, may be different from width/height e.g.
static int get_dmv(MpegEncContext *s)
@ AV_PICTURE_TYPE_P
Predicted.
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Structure to hold side data for an AVFrame.
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
static av_always_inline int diff(const uint32_t a, const uint32_t b)
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
MpegEncContext mpeg_enc_ctx
This structure stores compressed data.
void ff_er_frame_end(ERContext *s)
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
#define HWACCEL_VAAPI(codec)
#define flags(name, subs,...)
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
AVRational frame_rate_ext
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
void ff_mpv_report_decode_progress(MpegEncContext *s)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
@ AVDISCARD_NONREF
discard all non reference
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
#define DECODE_SLICE_ERROR
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
VLC_TYPE(* table)[2]
code, bits
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
RL_VLC_ELEM * rl_vlc[32]
decoding only
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.