48 #define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))
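/* Editorial note (not part of the original source): UNI_MPEG4_ENC_INDEX()
 * linearizes a (last, run, level) triple into one index, matching the
 * 64*64*2*2-entry uni_mpeg4_*_rl_len/bits tables declared further down.
 * A small illustration, assuming 0 <= level < 128, 0 <= run < 64, last in {0,1}:
 *
 *     int idx = UNI_MPEG4_ENC_INDEX(1, 3, 5);          // 1*8192 + 3*128 + 5 = 8581
 *     uint8_t  len  = uni_mpeg4_intra_rl_len[idx];     // bit length of that event
 *     uint32_t code = uni_mpeg4_intra_rl_bits[idx];    // VLC bits for that event
 */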
65                            int block_last_index, uint8_t scantable[64])
71      for (j = 1; j <= block_last_index; j++) {
72          const int index = scantable[j];
76              if ((level & (~127)) == 0) {
77                  if (j < block_last_index)
82                  rate += s->ac_esc_length;
100                              const int dir[6], uint8_t *st[6],
101                              const int zigzag_last_index[6])
104      memcpy(s->block_last_index, zigzag_last_index, sizeof(int) * 6);
106      for (n = 0; n < 6; n++) {
107          int16_t *ac_val = &s->ac_val[0][0][0] + s->block_index[n] * 16;
109          st[n] = s->intra_scantable.permutated;
112              for (i = 1; i < 8; i++)
113                  block[n][s->idsp.idct_permutation[i]] = ac_val[i + 8];
116              for (i = 1; i < 8; i++)
117                  block[n][s->idsp.idct_permutation[i << 3]] = ac_val[i];
131                           const int dir[6], uint8_t *st[6],
132                           int zigzag_last_index[6])
136      int8_t *const qscale_table = s->current_picture.qscale_table;
138      memcpy(zigzag_last_index, s->block_last_index, sizeof(int) * 6);
140      for (n = 0; n < 6; n++) {
141          int16_t *ac_val, *ac_val1;
144                                    s->intra_scantable.permutated);
146          ac_val = &s->ac_val[0][0][0] + s->block_index[n] * 16;
149              const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride;
151              ac_val -= s->block_wrap[n] * 16;
152              if (s->mb_y == 0 || s->qscale == qscale_table[xy] || n == 2 || n == 3) {
154                  for (i = 1; i < 8; i++) {
157                      ac_val1[i] = block[n][s->idsp.idct_permutation[i << 3]];
162                  for (i = 1; i < 8; i++) {
165                      ac_val1[i] = block[n][s->idsp.idct_permutation[i << 3]];
169              st[n] = s->intra_h_scantable.permutated;
171              const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride;
174              if (s->mb_x == 0 || s->qscale == qscale_table[xy] || n == 1 || n == 3) {
176                  for (i = 1; i < 8; i++) {
177                      const int level = block[n][s->idsp.idct_permutation[i << 3]];
180                      ac_val1[i + 8] = block[n][s->idsp.idct_permutation[i]];
184                  for (i = 1; i < 8; i++) {
185                      const int level = block[n][s->idsp.idct_permutation[i << 3]];
188                      ac_val1[i + 8] = block[n][s->idsp.idct_permutation[i]];
191              st[n] = s->intra_v_scantable.permutated;
194          for (i = 63; i > 0; i--)
197          s->block_last_index[n] = i;
216      int8_t *const qscale_table = s->current_picture.qscale_table;
225          for (i = 0; i < s->mb_num; i++) {
226              int mb_xy = s->mb_index2xy[i];
227              odd += qscale_table[mb_xy] & 1;
230          if (2 * odd > s->mb_num)
235          for (i = 0; i < s->mb_num; i++) {
236              int mb_xy = s->mb_index2xy[i];
237              if ((qscale_table[mb_xy] & 1) != odd)
238                  qscale_table[mb_xy]++;
239              if (qscale_table[mb_xy] > 31)
240                  qscale_table[mb_xy] = 31;
243          for (i = 1; i < s->mb_num; i++) {
244              int mb_xy = s->mb_index2xy[i];
245              if (qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i - 1]] &&
283                                int16_t *block, int n, int intra_dc,
287      int i, last_non_zero;
290      const int last_index = s->block_last_index[n];
309      last_non_zero = i - 1;
310      for (; i < last_index; i++) {
313              int run = i - last_non_zero - 1;
315              if ((level & (~127)) == 0) {
320                           7 + 2 + 1 + 6 + 1 + 12 + 1,
321                           (3 << 23) + (3 << 21) + (0 << 20) + (run << 14) +
322                           (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
329              int run = i - last_non_zero - 1;
331              if ((level & (~127)) == 0) {
336                           7 + 2 + 1 + 6 + 1 + 12 + 1,
337                           (3 << 23) + (3 << 21) + (1 << 20) + (run << 14) +
338                           (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
345                                    int intra_dc, uint8_t *scan_table)
347      int i, last_non_zero;
349      const int last_index = s->block_last_index[n];
367      last_non_zero = i - 1;
368      for (; i < last_index; i++) {
371              int run = i - last_non_zero - 1;
373              if ((level & (~127)) == 0) {
377                  len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
384              int run = i - last_non_zero - 1;
386              if ((level & (~127)) == 0) {
390                  len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
398                                 int intra_dc[6], uint8_t **scan_table,
406          for (i = 0; i < 6; i++)
409                                             intra_dc[i], scan_table[i]));
412          for (i = 0; i < 6; i++)
414                                 intra_dc[i], scan_table[i], dc_pb, ac_pb);
418          for (i = 0; i < 6; i++)
421                                                     s->intra_scantable.permutated));
424          for (i = 0; i < 6; i++)
426                                 s->intra_scantable.permutated, dc_pb, ac_pb);
432                      int motion_x, int motion_y, int mb_type)
440          for (i = 0; i < 6; i++) {
441              if (s->coded_score[i] < 0) {
442                  score += s->coded_score[i];
449          if ((motion_x | motion_y | s->dquant | mb_type) == 0)
452          zero_score *= lambda;
453          if (zero_score <= score)
457      for (i = 0; i < 6; i++) {
458          if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
459              s->block_last_index[i] = -1;
460              s->bdsp.clear_block(s->block[i]);
464      for (i = 0; i < 6; i++) {
465          if (s->block_last_index[i] >= 0)
476                          int motion_x, int motion_y)
478      int cbpc, cbpy, pred_x, pred_y;
482      const int interleaved_stats = (s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;
489              static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
490              int mb_type = mb_type_table[s->mv_dir];
493                  for (i = 0; i < 2; i++)
494                      s->last_mv[i][0][0] =
495                      s->last_mv[i][0][1] =
496                      s->last_mv[i][1][0] =
497                      s->last_mv[i][1][1] = 0;
505              if (s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) {
512                      s->qscale -= s->dquant;
520              if ((cbp | motion_x | motion_y | mb_type) == 0) {
526                  if (interleaved_stats) {
540              if (cbp && mb_type) {
546                      s->qscale -= s->dquant;
548              if (!s->progressive_sequence) {
555              if (interleaved_stats)
568                                                s->mv[0][0][0] - s->last_mv[0][0][0],
569                                                s->mv[0][0][1] - s->last_mv[0][0][1],
571                  s->last_mv[0][0][0] =
572                  s->last_mv[0][1][0] = s->mv[0][0][0];
573                  s->last_mv[0][0][1] =
574                  s->last_mv[0][1][1] = s->mv[0][0][1];
579                                                s->mv[1][0][0] - s->last_mv[1][0][0],
580                                                s->mv[1][0][1] - s->last_mv[1][0][1],
582                  s->last_mv[1][0][0] =
583                  s->last_mv[1][1][0] = s->mv[1][0][0];
584                  s->last_mv[1][0][1] =
585                  s->last_mv[1][1][1] = s->mv[1][0][1];
598                  for (i = 0; i < 2; i++) {
600                                                    s->mv[0][i][0] - s->last_mv[0][i][0],
601                                                    s->mv[0][i][1] - s->last_mv[0][i][1] / 2,
603                      s->last_mv[0][i][0] = s->mv[0][i][0];
604                      s->last_mv[0][i][1] = s->mv[0][i][1] * 2;
609                  for (i = 0; i < 2; i++) {
611                                                    s->mv[1][i][0] - s->last_mv[1][i][0],
612                                                    s->mv[1][i][1] - s->last_mv[1][i][1] / 2,
614                      s->last_mv[1][i][0] = s->mv[1][i][0];
615                      s->last_mv[1][i][1] = s->mv[1][i][1] * 2;
622              if (interleaved_stats)
627              if (interleaved_stats)
632              if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
637                  if (s->max_b_frames > 0) {
646                          p_pic = s->new_picture.f->data[0] + offset;
649                      for (i = 0; i < s->max_b_frames; i++) {
652                          Picture *pic = s->reordered_input_picture[i + 1];
661                          if (x + 16 > s->width || y + 16 > s->height) {
663                              int xe = FFMIN(16, s->width - x);
664                              int ye = FFMIN(16, s->height - y);
666                              for (y1 = 0; y1 < ye; y1++) {
667                                  for (x1 = 0; x1 < xe; x1++) {
668                                      diff += FFABS(p_pic[x1 + y1 * s->linesize] - b_pic[x1 + y1 * s->linesize]);
673                              diff = s->mecc.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
675                          if (diff > s->qscale * 70) {
683              if (s->mb_skipped == 1) {
687              if (interleaved_stats) {
712              if (!s->progressive_sequence) {
718              if (interleaved_stats)
744              if (interleaved_stats)
755                                                s->mv[0][0][0] - pred_x,
756                                                s->mv[0][0][1] - pred_y,
759                                                s->mv[0][1][0] - pred_x,
760                                                s->mv[0][1][1] - pred_y,
769              if (!s->progressive_sequence && cbp)
772              if (interleaved_stats)
775              for (i = 0; i < 4; i++) {
780                                                    s->current_picture.motion_val[0][s->block_index[i]][0] - pred_x,
781                                                    s->current_picture.motion_val[0][s->block_index[i]][1] - pred_y,
786              if (interleaved_stats)
791              if (interleaved_stats)
800              int zigzag_last_index[6];
804              for (i = 0; i < 6; i++)
810              for (i = 0; i < 6; i++)
811                  scan_table[i] = s->intra_scantable.permutated;
816              for (i = 0; i < 6; i++)
817                  if (s->block_last_index[i] >= 1)
841              if (!s->progressive_sequence)
844              if (interleaved_stats)
849              if (interleaved_stats)
878      s->last_time_base = s->time_base;
879      s->time_base      = FFUDIV(s->time, s->avctx->time_base.den);
885      int64_t hours, minutes, seconds;
891      time = s->current_picture_ptr->f->pts;
892      if (s->reordered_input_picture[1])
893          time = FFMIN(time, s->reordered_input_picture[1]->f->pts);
894      time = time * s->avctx->time_base.num;
895      s->last_time_base = FFUDIV(time, s->avctx->time_base.den);
897      seconds = FFUDIV(time, s->avctx->time_base.den);
898      minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60);
899      hours   = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60);
900      hours   = FFUMOD(hours, 24);
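/* Editorial note: the split above is plain Euclidean division on the pts
 * expressed in time-base units. Worked example with made-up numbers,
 * assuming time_base = 1/25 and pts = 93150:
 *
 *     time    = 93150 * 1;        // pts * time_base.num
 *     seconds = time / 25;        // FFUDIV(time, den)        -> 3726
 *     minutes = seconds / 60;     // 62, then seconds %= 60   ->    6
 *     hours   = minutes / 60;     //  1, then minutes %= 60   ->    2
 *     hours  %= 24;               // GOP time code 01:02:06
 */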
915      int profile_and_level_indication;
919          profile_and_level_indication = s->avctx->profile << 4;
920      } else if (s->max_b_frames || s->quarter_sample) {
921          profile_and_level_indication = 0xF0;
923          profile_and_level_indication = 0x00;
927          profile_and_level_indication |= s->avctx->level;
929          profile_and_level_indication |= 1;
931      if (profile_and_level_indication >> 4 == 0xF)
941      put_bits(&s->pb, 8, profile_and_level_indication);
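/* Editorial note: profile_and_level_indication packs the profile in the high
 * nibble and the level in the low nibble of the byte written above. Sketch of
 * the default path, assuming no explicit profile/level was requested:
 *
 *     int pl = (s->max_b_frames || s->quarter_sample) ? 0xF0   // advanced simple
 *                                                     : 0x00;  // simple
 *     pl |= 1;                                 // level 1 when none was set
 *     // e.g. 0xF1 = advanced simple profile @ level 1
 */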
963      if (!CONFIG_MPEG4_ENCODER)
966      if (s->max_b_frames || s->quarter_sample) {
977      put_bits(&s->pb, 16, 0x120 + vol_number);
993          av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
994                     s->avctx->sample_aspect_ratio.num, s->avctx->sample_aspect_ratio.den, 255);
995          put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
996          put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den);
1011     put_bits(&s->pb, 16, s->avctx->time_base.den);
1012     if (s->time_increment_bits < 1)
1013         s->time_increment_bits = 1;
1021     put_bits(&s->pb, 1, s->progressive_sequence ? 0 : 1);
1031     if (s->mpeg_quant) {
1040     put_bits(&s->pb, 1, s->data_partitioning ? 1 : 0);
1041     if (s->data_partitioning)
1044     if (vo_ver_id != 1) {
1064     int64_t time_div, time_mod;
1083     time_div = FFUDIV(s->time, s->avctx->time_base.den);
1084     time_mod = FFUMOD(s->time, s->avctx->time_base.den);
1085     time_incr = time_div - s->last_time_base;
1088     if (time_incr > 3600) {
1098     put_bits(&s->pb, s->time_increment_bits, time_mod);
1105     if (!s->progressive_sequence) {
1106         put_bits(&s->pb, 1, s->current_picture_ptr->f->top_field_first);
1123     int level, uni_code, uni_len;
1179     int slevel, run, last;
1184     for (slevel = -64; slevel < 64; slevel++) {
1188         for (last = 0; last <= 1; last++) {
1190             int level = slevel < 0 ? -slevel : slevel;
1191             int sign  = slevel < 0 ? 1 : 0;
1195                 len_tab[index] = 100;
1257                 bits = bits * 4096 + (slevel & 0xfff);
1275     static int done = 0;
1277     if (avctx->width >= (1<<13) || avctx->height >= (1<<13)) {
1296     s->min_qcoeff = -2048;
1297     s->max_qcoeff = 2047;
1303     s->ac_esc_length = 7 + 2 + 1 + 6 + 1 + 12 + 1;
1309     if (!s->avctx->extradata)
1329     int pb_size  = (((intptr_t)start + size / 3) & (~3)) - (intptr_t)start;
1330     int tex_size = (size - 2 * pb_size) & (~3);
1345         s->misc_bits  += 19 + pb2_len + bits - s->last_bits;
1346         s->i_tex_bits += tex_pb_len;
1349         s->misc_bits  += 17 + pb2_len;
1350         s->mv_bits    += bits - s->last_bits;
1351         s->p_tex_bits += tex_pb_len;
1365     int mb_num_bits = av_log2(s->mb_num - 1) + 1;
1370     put_bits(&s->pb, mb_num_bits, s->mb_x + s->mb_y * s->mb_width);
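/* Editorial note: the video packet header addresses macroblocks with just
 * enough bits for the whole frame. Worked example, assuming a 720x576 frame
 * (45x36 = 1620 macroblocks):
 *
 *     int mb_num_bits = av_log2(1620 - 1) + 1;   // av_log2(1619) = 10 -> 11 bits
 *     // put_bits(&s->pb, 11, mb_x + mb_y * 45) at each resync point
 */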
1375 #define OFFSET(x) offsetof(MpegEncContext, x)
1376 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1378     { "data_partitioning", "Use data partitioning.",      OFFSET(data_partitioning), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
1379     { "alternate_scan",    "Enable alternate scantable.", OFFSET(alternate_scan),    AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
static int mpeg4_get_dc_length(int level, int n)
#define MV_TYPE_16X16
1 vector for the whole mb
static void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64], int intra_dc[6], uint8_t **scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb)
AVPixelFormat
Pixel format.
#define FF_ASPECT_EXTENDED
static void mpeg4_encode_vol_header(MpegEncContext *s, int vo_number, int vol_number)
#define VISUAL_OBJ_STARTCODE
static av_cold int init(AVCodecContext *avctx)
#define FF_COMPLIANCE_VERY_STRICT
Strictly conform to an older more strict version of the spec or reference software.
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
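In B-VOPs the per-macroblock quantizer can only move in steps of two, so the pass visible around source lines 225-240 above forces every qscale onto the majority parity. A condensed sketch of that pass, with the control flow the listing elides filled in as read here (not the verbatim source):
    int odd = 0;
    for (i = 0; i < s->mb_num; i++)
        odd += qscale_table[s->mb_index2xy[i]] & 1;   /* count odd qscales      */
    odd = (2 * odd > s->mb_num);                      /* majority parity        */
    for (i = 0; i < s->mb_num; i++) {
        int mb_xy = s->mb_index2xy[i];
        if ((qscale_table[mb_xy] & 1) != odd)
            qscale_table[mb_xy]++;                    /* nudge onto that parity */
        if (qscale_table[mb_xy] > 31)
            qscale_table[mb_xy] = 31;                 /* clamp to the legal max */
    }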
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
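Every header writer in this file follows the same pattern: point a PutBitContext at a buffer, stream fixed-width fields with put_bits(), and byte-align at the end. A minimal, self-contained usage sketch (buffer and field values are made up):
    uint8_t buf[64];                    /* hypothetical output buffer          */
    PutBitContext pb;
    init_put_bits(&pb, buf, sizeof(buf));
    put_bits(&pb, 16, 0x0120);          /* a 16-bit field, e.g. a start code   */
    put_bits(&pb, 1, 1);                /* a single flag bit                   */
    flush_put_bits(&pb);                /* zero-pad to the next byte boundary  */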
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
static av_cold int end(AVCodecContext *avctx)
#define UNI_AC_ENC_INDEX(run, level)
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Put the bit val n times.
void avpriv_put_string(PutBitContext *pb, const char *string, int terminate_string)
Put the string string in the bitstream.
#define ADV_SIMPLE_VO_TYPE
static av_cold void init_uni_dc_tab(void)
static void mpeg4_encode_visual_object_header(MpegEncContext *s)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static void mpeg4_encode_block(MpegEncContext *s, int16_t *block, int n, int intra_dc, uint8_t *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb)
Encode an 8x8 block.
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
static uint32_t uni_mpeg4_intra_rl_bits[64 *64 *2 *2]
static uint8_t uni_mpeg4_intra_rl_len[64 *64 *2 *2]
const uint8_t ff_mpeg4_DCtab_chrom[13][2]
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
static int get_block_rate(MpegEncContext *s, int16_t block[64], int block_last_index, uint8_t scantable[64])
Return the number of bits that encoding the 8x8 block in block would need.
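The fragment at source lines 71-82 shows the shape of this loop: walk the block in scan order and sum per-coefficient VLC lengths, charging s->ac_esc_length (the fixed 7+2+1+6+1+12+1 = 30-bit escape) for levels that fall outside the tables. A condensed sketch; the elided lookups are paraphrased with hypothetical helpers, not the actual table names:
    int rate = 0;
    for (int j = 1; j <= block_last_index; j++) {
        int level = block[scantable[j]];
        if (!level)
            continue;
        level += 64;                            /* bias into table range (assumption) */
        if ((level & ~127) == 0) {
            if (j < block_last_index)
                rate += vlc_len_non_last(level);    /* hypothetical helper */
            else
                rate += vlc_len_last(level);        /* hypothetical helper */
        } else {
            rate += s->ac_esc_length;           /* 30-bit escape code */
        }
    }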
#define CANDIDATE_MB_TYPE_BIDIR
static void restore_ac_coeffs(MpegEncContext *s, int16_t block[6][64], const int dir[6], uint8_t *st[6], const int zigzag_last_index[6])
Restore the ac coefficients in block that have been changed by decide_ac_pred().
#define FF_BUG_MS
Work around various bugs in Microsoft's broken decoders.
int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s)
static uint32_t uni_mpeg4_inter_rl_bits[64 *64 *2 *2]
static uint8_t uni_DCtab_chrom_len[512]
#define AV_CODEC_FLAG2_NO_OUTPUT
Skip bitstream encoding.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
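The VOL header above relies on it to make the sample aspect ratio fit two 8-bit fields (source lines 993-996): both terms are reduced so neither exceeds 255 before being written. A usage sketch with a made-up ratio:
    AVRational sar = { 1440, 1080 };            /* hypothetical sample aspect ratio */
    av_reduce(&sar.num, &sar.den, sar.num, sar.den, 255);
    /* sar is now 4:3; then put_bits(&s->pb, 8, sar.num); put_bits(&s->pb, 8, sar.den); */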
static const int dquant_code[5]
int n
number of entries of table_vlc minus 1
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int8_t * max_level[2]
encoding & decoding
static uint8_t uni_mpeg4_inter_rl_len[64 *64 *2 *2]
int ff_mpv_encode_init(AVCodecContext *avctx)
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
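Stuffing byte-aligns the bitstream with a single zero bit followed by ones, so a decoder can always tell padding from data. A minimal sketch of that pattern, assuming a PutBitContext (it mirrors the documented behaviour, not necessarily the verbatim body):
    static void stuffing_sketch(PutBitContext *pbc)
    {
        int length = 8 - (put_bits_count(pbc) & 7);       /* 1..8 bits to the boundary   */
        put_bits(pbc, length, (1 << (length - 1)) - 1);   /* '0' then (length - 1) ones  */
    }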
static int get_rl_index(const RLTable *rl, int last, int run, int level)
static void skip_put_bits(PutBitContext *s, int n)
Skip the given number of bits.
RLTable ff_mpeg4_rl_intra
static uint16_t uni_DCtab_chrom_bits[512]
#define FF_PROFILE_UNKNOWN
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
#define UNI_MPEG4_ENC_INDEX(last, run, level)
static enum AVPixelFormat pix_fmts[]
static uint16_t uni_DCtab_lum_bits[512]
static int get_p_cbp(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab)
static void mpeg4_encode_gop_header(MpegEncContext *s)
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
const uint8_t ff_mpeg4_DCtab_lum[13][2]
static int decide_ac_pred(MpegEncContext *s, int16_t block[6][64], const int dir[6], uint8_t *st[6], int zigzag_last_index[6])
Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
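The decision is a plain rate comparison: for each of the six blocks the first row or column is replaced by its prediction residual, the bit cost before and after is accumulated, and AC prediction is kept only if the total goes down; otherwise restore_ac_coeffs() puts the original coefficients back. A high-level sketch (helper names are placeholders, not the real code):
    int score = 0;
    for (int n = 0; n < 6; n++) {
        score -= block_bits(block[n]);                   /* cost without prediction (hypothetical helper) */
        subtract_predicted_row_or_col(block[n], dir[n]); /* hypothetical helper */
        score += block_bits(block[n]);                   /* cost with prediction */
    }
    int use_ac_pred = (score < 0);                       /* if 0, restore_ac_coeffs() undoes the edits */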
#define LIBAVUTIL_VERSION_INT
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Describe the class of an AVClass context structure.
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
const uint16_t(* table_vlc)[2]
#define ROUNDED_DIV(a, b)
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
void ff_mpeg4_init_partitions(MpegEncContext *s)
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
void ff_mpeg4_merge_partitions(MpegEncContext *s)
enum AVPictureType pict_type
Picture type of the frame.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
const uint8_t ff_mpeg4_y_dc_scale_table[32]
static int ff_mpeg4_pred_dc(MpegEncContext *s, int n, int level, int *dir_ptr, int encoding)
Predict the dc.
static int get_b_cbp(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y, int mb_type)
const uint8_t ff_h263_cbpy_tab[16][2]
uint8_t ff_mpeg4_static_rl_table_store[3][2][2 *MAX_RUN+MAX_LEVEL+3]
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
#define MV_TYPE_FIELD
2 vectors, one per field
const uint8_t ff_h263_inter_MCBPC_bits[28]
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
static int put_bits_count(PutBitContext *s)
int8_t * max_run[2]
encoding & decoding
static uint8_t uni_DCtab_lum_len[512]
static const AVClass mpeg4enc_class
static const AVOption options[]
int ff_mpv_encode_end(AVCodecContext *avctx)
const char * name
Name of the codec implementation.
av_const int ff_h263_aspect_to_info(AVRational aspect)
Return the 4 bit value that specifies the given aspect ratio.
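The VOL header uses this value as the 4-bit aspect_ratio_info field; ratios that do not match one of the predefined codes come back as FF_ASPECT_EXTENDED, which is what triggers the explicit num/den bytes at source lines 995-996. A usage sketch (field order as read from the listing, hedged):
    int aspect_info = ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);
    put_bits(&s->pb, 4, aspect_info);
    if (aspect_info == FF_ASPECT_EXTENDED) {
        /* reduce num/den to 8 bits each and write them (see source lines 993-996) */
    }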
#define CANDIDATE_MB_TYPE_DIRECT
static int get_bits_diff(MpegEncContext *s)
#define AV_CODEC_FLAG_CLOSED_GOP
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
static av_cold int encode_init(AVCodecContext *avctx)
#define FF_MPV_FLAG_CBP_RD
void ff_mpeg4_init_direct_mv(MpegEncContext *s)
void ff_set_mpeg4_time(MpegEncContext *s)
const uint8_t ff_h263_intra_MCBPC_bits[9]
main external API structure.
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
const uint8_t ff_h263_intra_MCBPC_code[9]
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
static void mpeg4_encode_dc(PutBitContext *s, int level, int n)
Encode the dc value.
av_cold int ff_rl_init(RLTable *rl, uint8_t static_store[2][2 *MAX_RUN+MAX_LEVEL+3])
static int mpeg4_get_block_length(MpegEncContext *s, int16_t *block, int n, int intra_dc, uint8_t *scan_table)
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
const uint8_t ff_h263_inter_MCBPC_code[28]
@ AV_PICTURE_TYPE_P
Predicted.
static void ff_h263_encode_motion_vector(MpegEncContext *s, int x, int y, int f_code)
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
const uint8_t ff_mpeg4_c_dc_scale_table[32]
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int width
picture width / height.
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
#define FF_MPV_COMMON_OPTS
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.