00030 #include "libavutil/intmath.h"
00031 #include "libavutil/mathematics.h"
00032 #include "libavutil/opt.h"
00033 #include "avcodec.h"
00034 #include "dsputil.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "h263.h"
00038 #include "mjpegenc.h"
00039 #include "msmpeg4.h"
00040 #include "faandct.h"
00041 #include "thread.h"
00042 #include "aandcttab.h"
00043 #include "flv.h"
00044 #include "mpeg4video.h"
00045 #include "internal.h"
00046 #include "bytestream.h"
00047 #include <limits.h>
00048 #include "sp5x.h"
00049
00050
00051
00052
00053 static int encode_picture(MpegEncContext *s, int picture_number);
00054 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
00055 static int sse_mb(MpegEncContext *s);
00056 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
00057 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
00058
00059
00060
00061
00062
00063
00064 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
00065 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
00066
00067 const AVOption ff_mpv_generic_options[] = {
00068 FF_MPV_COMMON_OPTS
00069 { NULL },
00070 };
00071
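/* Precompute, for every qscale in [qmin, qmax], fixed-point reciprocals of
 * qscale * quant_matrix[] so quantization becomes a multiply and shift rather
 * than a division.  qmat[] holds 32-bit factors (QMAT_SHIFT fractional bits)
 * for the C/trellis quantizer; qmat16[] holds 16-bit factor/bias pairs for
 * the SIMD quantizer.  The exact scaling depends on which forward DCT is in
 * use, because the ifast DCT leaves an extra ff_aanscales[] factor on its
 * output coefficients. */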
00072 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
00073 uint16_t (*qmat16)[2][64],
00074 const uint16_t *quant_matrix,
00075 int bias, int qmin, int qmax, int intra)
00076 {
00077 int qscale;
00078 int shift = 0;
00079
00080 for (qscale = qmin; qscale <= qmax; qscale++) {
00081 int i;
00082 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
00083 dsp->fdct == ff_jpeg_fdct_islow_10 ||
00084 dsp->fdct == ff_faandct) {
00085 for (i = 0; i < 64; i++) {
00086 const int j = dsp->idct_permutation[i];
00087
00088
00089
00090
00091
00092
00093 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
00094 (qscale * quant_matrix[j]));
00095 }
00096 } else if (dsp->fdct == ff_fdct_ifast) {
00097 for (i = 0; i < 64; i++) {
00098 const int j = dsp->idct_permutation[i];
/* the ifast FDCT scales each coefficient by ff_aanscales[i] / 2^14, so that
 * factor is folded into the divisor (hence the extra 14 in the shift) */
00104
00105 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
00106 (ff_aanscales[i] * qscale * quant_matrix[j]));
00107 }
00108 } else {
00109 for (i = 0; i < 64; i++) {
00110 const int j = dsp->idct_permutation[i];
/* default case: build both the 32-bit factor used by the C quantizer and the
 * 16-bit factor/bias pair used by the SIMD quantizer below */
00116 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
00117 (qscale * quant_matrix[j]));
00118
00119
00120 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
00121 (qscale * quant_matrix[j]);
00122
00123 if (qmat16[qscale][0][i] == 0 ||
00124 qmat16[qscale][0][i] == 128 * 256)
00125 qmat16[qscale][0][i] = 128 * 256 - 1;
00126 qmat16[qscale][1][i] =
00127 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
00128 qmat16[qscale][0][i]);
00129 }
00130 }
00131
00132 for (i = intra; i < 64; i++) {
00133 int64_t max = 8191;
00134 if (dsp->fdct == ff_fdct_ifast) {
00135 max = (8191LL * ff_aanscales[i]) >> 14;
00136 }
00137 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
00138 shift++;
00139 }
00140 }
00141 }
00142 if (shift) {
00143 av_log(NULL, AV_LOG_INFO,
00144 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
00145 QMAT_SHIFT - shift);
00146 }
00147 }
00148
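/* Map the current Lagrange multiplier back to a quantizer.  139 / 2^14 is
 * approximately 1 / FF_QP2LAMBDA, so this is essentially the rounded inverse
 * of lambda = qscale * FF_QP2LAMBDA, clipped to the user qmin/qmax range;
 * lambda2 caches lambda^2 for rate-distortion decisions. */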
00149 static inline void update_qscale(MpegEncContext *s)
00150 {
00151 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
00152 (FF_LAMBDA_SHIFT + 7);
00153 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
00154
00155 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
00156 FF_LAMBDA_SHIFT;
00157 }
00158
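/* Write an optional custom quantization matrix: a '1' load flag followed by
 * the 64 values in zigzag order, or a single '0' bit if none is present. */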
00159 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
00160 {
00161 int i;
00162
00163 if (matrix) {
00164 put_bits(pb, 1, 1);
00165 for (i = 0; i < 64; i++) {
00166 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
00167 }
00168 } else
00169 put_bits(pb, 1, 0);
00170 }
00171
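/* Fill the per-macroblock qscale table from the per-macroblock lambda table,
 * using the same lambda -> qscale mapping as update_qscale(). */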
00175 void ff_init_qscale_tab(MpegEncContext *s)
00176 {
00177 int8_t * const qscale_table = s->current_picture.f.qscale_table;
00178 int i;
00179
00180 for (i = 0; i < s->mb_num; i++) {
00181 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
00182 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
00183 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
00184 s->avctx->qmax);
00185 }
00186 }
00187
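/* Copy the user-visible frame properties (picture type, quality, timestamps,
 * interlacing flags) from the submitted frame to the internal copy; when
 * me_threshold is set, caller-provided motion vectors, macroblock types and
 * reference indices are copied as well so motion estimation can reuse them. */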
00188 static void copy_picture_attributes(MpegEncContext *s,
00189 AVFrame *dst,
00190 AVFrame *src)
00191 {
00192 int i;
00193
00194 dst->pict_type = src->pict_type;
00195 dst->quality = src->quality;
00196 dst->coded_picture_number = src->coded_picture_number;
00197 dst->display_picture_number = src->display_picture_number;
00198
00199 dst->pts = src->pts;
00200 dst->interlaced_frame = src->interlaced_frame;
00201 dst->top_field_first = src->top_field_first;
00202
00203 if (s->avctx->me_threshold) {
00204 if (!src->motion_val[0])
00205 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
00206 if (!src->mb_type)
00207 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
00208 if (!src->ref_index[0])
00209 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
00210 if (src->motion_subsample_log2 != dst->motion_subsample_log2)
00211 av_log(s->avctx, AV_LOG_ERROR,
00212 "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
00213 src->motion_subsample_log2, dst->motion_subsample_log2);
00214
00215 memcpy(dst->mb_type, src->mb_type,
00216 s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
00217
00218 for (i = 0; i < 2; i++) {
00219 int stride = ((16 * s->mb_width ) >>
00220 src->motion_subsample_log2) + 1;
00221 int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
00222
00223 if (src->motion_val[i] &&
00224 src->motion_val[i] != dst->motion_val[i]) {
00225 memcpy(dst->motion_val[i], src->motion_val[i],
00226 2 * stride * height * sizeof(int16_t));
00227 }
00228 if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
00229 memcpy(dst->ref_index[i], src->ref_index[i],
00230 s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
00231 }
00232 }
00233 }
00234 }
00235
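/* Propagate the per-frame state that slice-thread contexts need after motion
 * estimation from the source context to a duplicate context. */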
00236 static void update_duplicate_context_after_me(MpegEncContext *dst,
00237 MpegEncContext *src)
00238 {
00239 #define COPY(a) dst->a= src->a
00240 COPY(pict_type);
00241 COPY(current_picture);
00242 COPY(f_code);
00243 COPY(b_code);
00244 COPY(qscale);
00245 COPY(lambda);
00246 COPY(lambda2);
00247 COPY(picture_in_gop_number);
00248 COPY(gop_picture_number);
00249 COPY(frame_pred_frame_dct);
00250 COPY(progressive_frame);
00251 COPY(partitioned_frame);
00252 #undef COPY
00253 }
00254
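/* Encoder-specific defaults on top of ff_MPV_common_defaults(): an fcode
 * table that assigns fcode 1 to small motion vectors, plus the shared
 * motion-vector penalty table. */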
00259 static void MPV_encode_defaults(MpegEncContext *s)
00260 {
00261 int i;
00262 ff_MPV_common_defaults(s);
00263
00264 for (i = -16; i < 16; i++) {
00265 default_fcode_tab[i + MAX_MV] = 1;
00266 }
00267 s->me.mv_penalty = default_mv_penalty;
00268 s->fcode_tab = default_fcode_tab;
00269 }
00270
00271
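/* Encoder init: validate the user settings against what the selected codec
 * can actually represent, then set up the per-codec output format,
 * quantization matrices and rate control.  Returns -1 on any unsupported
 * combination. */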
00272 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
00273 {
00274 MpegEncContext *s = avctx->priv_data;
00275 int i;
00276 int chroma_h_shift, chroma_v_shift;
00277
00278 MPV_encode_defaults(s);
00279
00280 switch (avctx->codec_id) {
00281 case CODEC_ID_MPEG2VIDEO:
00282 if (avctx->pix_fmt != PIX_FMT_YUV420P &&
00283 avctx->pix_fmt != PIX_FMT_YUV422P) {
00284 av_log(avctx, AV_LOG_ERROR,
00285 "only YUV420 and YUV422 are supported\n");
00286 return -1;
00287 }
00288 break;
00289 case CODEC_ID_LJPEG:
00290 if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
00291 avctx->pix_fmt != PIX_FMT_YUVJ422P &&
00292 avctx->pix_fmt != PIX_FMT_YUVJ444P &&
00293 avctx->pix_fmt != PIX_FMT_BGR0 &&
00294 avctx->pix_fmt != PIX_FMT_BGRA &&
00295 avctx->pix_fmt != PIX_FMT_BGR24 &&
00296 ((avctx->pix_fmt != PIX_FMT_YUV420P &&
00297 avctx->pix_fmt != PIX_FMT_YUV422P &&
00298 avctx->pix_fmt != PIX_FMT_YUV444P) ||
00299 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
00300 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
00301 return -1;
00302 }
00303 break;
00304 case CODEC_ID_MJPEG:
00305 case CODEC_ID_AMV:
00306 if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
00307 avctx->pix_fmt != PIX_FMT_YUVJ422P &&
00308 ((avctx->pix_fmt != PIX_FMT_YUV420P &&
00309 avctx->pix_fmt != PIX_FMT_YUV422P) ||
00310 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
00311 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
00312 return -1;
00313 }
00314 break;
00315 default:
00316 if (avctx->pix_fmt != PIX_FMT_YUV420P) {
00317 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
00318 return -1;
00319 }
00320 }
00321
00322 switch (avctx->pix_fmt) {
00323 case PIX_FMT_YUVJ422P:
00324 case PIX_FMT_YUV422P:
00325 s->chroma_format = CHROMA_422;
00326 break;
00327 case PIX_FMT_YUVJ420P:
00328 case PIX_FMT_YUV420P:
00329 default:
00330 s->chroma_format = CHROMA_420;
00331 break;
00332 }
00333
00334 s->bit_rate = avctx->bit_rate;
00335 s->width = avctx->width;
00336 s->height = avctx->height;
00337 if (avctx->gop_size > 600 &&
00338 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
00339 av_log(avctx, AV_LOG_WARNING,
00340 "keyframe interval too large!, reducing it from %d to %d\n",
00341 avctx->gop_size, 600);
00342 avctx->gop_size = 600;
00343 }
00344 s->gop_size = avctx->gop_size;
00345 s->avctx = avctx;
00346 s->flags = avctx->flags;
00347 s->flags2 = avctx->flags2;
00348 s->max_b_frames = avctx->max_b_frames;
00349 s->codec_id = avctx->codec->id;
00350 #if FF_API_MPV_GLOBAL_OPTS
00351 if (avctx->luma_elim_threshold)
00352 s->luma_elim_threshold = avctx->luma_elim_threshold;
00353 if (avctx->chroma_elim_threshold)
00354 s->chroma_elim_threshold = avctx->chroma_elim_threshold;
00355 #endif
00356 s->strict_std_compliance = avctx->strict_std_compliance;
00357 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
00358 s->mpeg_quant = avctx->mpeg_quant;
00359 s->rtp_mode = !!avctx->rtp_payload_size;
00360 s->intra_dc_precision = avctx->intra_dc_precision;
00361 s->user_specified_pts = AV_NOPTS_VALUE;
00362
00363 if (s->gop_size <= 1) {
00364 s->intra_only = 1;
00365 s->gop_size = 12;
00366 } else {
00367 s->intra_only = 0;
00368 }
00369
00370 s->me_method = avctx->me_method;
00371
00372
00373 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
00374
00375 #if FF_API_MPV_GLOBAL_OPTS
00376 if (s->flags & CODEC_FLAG_QP_RD)
00377 s->mpv_flags |= FF_MPV_FLAG_QP_RD;
00378 #endif
00379
00380 s->adaptive_quant = (s->avctx->lumi_masking ||
00381 s->avctx->dark_masking ||
00382 s->avctx->temporal_cplx_masking ||
00383 s->avctx->spatial_cplx_masking ||
00384 s->avctx->p_masking ||
00385 s->avctx->border_masking ||
00386 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
00387 !s->fixed_qscale;
00388
00389 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
00390
00391 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
00392 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
00393 if (avctx->rc_max_rate && !avctx->rc_buffer_size)
00394 return -1;
00395 }
00396
00397 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
00398 av_log(avctx, AV_LOG_INFO,
00399 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
00400 }
00401
00402 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
00403 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
00404 return -1;
00405 }
00406
00407 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
00408 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
00409 return -1;
00410 }
00411
00412 if (avctx->rc_max_rate &&
00413 avctx->rc_max_rate == avctx->bit_rate &&
00414 avctx->rc_max_rate != avctx->rc_min_rate) {
00415 av_log(avctx, AV_LOG_INFO,
00416 "impossible bitrate constraints, this will fail\n");
00417 }
00418
00419 if (avctx->rc_buffer_size &&
00420 avctx->bit_rate * (int64_t)avctx->time_base.num >
00421 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
00422 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
00423 return -1;
00424 }
00425
00426 if (!s->fixed_qscale &&
00427 avctx->bit_rate * av_q2d(avctx->time_base) >
00428 avctx->bit_rate_tolerance) {
00429 av_log(avctx, AV_LOG_ERROR,
00430 "bitrate tolerance too small for bitrate\n");
00431 return -1;
00432 }
00433
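/* MPEG-1/2 store vbv_delay as a 16-bit field in 90 kHz units, so a CBR
 * stream whose buffer would take more than 0xFFFF ticks to drain cannot
 * signal its real delay; it is then written as 0xFFFF, which decoders treat
 * as VBR. */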
00434 if (s->avctx->rc_max_rate &&
00435 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
00436 (s->codec_id == CODEC_ID_MPEG1VIDEO ||
00437 s->codec_id == CODEC_ID_MPEG2VIDEO) &&
00438 90000LL * (avctx->rc_buffer_size - 1) >
00439 s->avctx->rc_max_rate * 0xFFFFLL) {
00440 av_log(avctx, AV_LOG_INFO,
00441 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
00442 "specified vbv buffer is too large for the given bitrate!\n");
00443 }
00444
00445 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4 &&
00446 s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P &&
00447 s->codec_id != CODEC_ID_FLV1) {
00448 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
00449 return -1;
00450 }
00451
00452 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
00453 av_log(avctx, AV_LOG_ERROR,
00454 "OBMC is only supported with simple mb decision\n");
00455 return -1;
00456 }
00457
00458 if (s->quarter_sample && s->codec_id != CODEC_ID_MPEG4) {
00459 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
00460 return -1;
00461 }
00462
00463 if (s->max_b_frames &&
00464 s->codec_id != CODEC_ID_MPEG4 &&
00465 s->codec_id != CODEC_ID_MPEG1VIDEO &&
00466 s->codec_id != CODEC_ID_MPEG2VIDEO) {
00467 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
00468 return -1;
00469 }
00470
00471 if ((s->codec_id == CODEC_ID_MPEG4 ||
00472 s->codec_id == CODEC_ID_H263 ||
00473 s->codec_id == CODEC_ID_H263P) &&
00474 (avctx->sample_aspect_ratio.num > 255 ||
00475 avctx->sample_aspect_ratio.den > 255)) {
00476 av_log(avctx, AV_LOG_WARNING,
00477 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
00478 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
00479 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
00480 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
00481 }
00482
00483 if ((s->codec_id == CODEC_ID_H263 ||
00484 s->codec_id == CODEC_ID_H263P) &&
00485 (avctx->width > 2048 ||
00486 avctx->height > 1152 )) {
00487 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
00488 return -1;
00489 }
00490 if ((s->codec_id == CODEC_ID_H263 ||
00491 s->codec_id == CODEC_ID_H263P) &&
00492 ((avctx->width &3) ||
00493 (avctx->height&3) )) {
00494 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
00495 return -1;
00496 }
00497
00498 if (s->codec_id == CODEC_ID_MPEG1VIDEO &&
00499 (avctx->width > 4095 ||
00500 avctx->height > 4095 )) {
00501 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
00502 return -1;
00503 }
00504
00505 if (s->codec_id == CODEC_ID_MPEG2VIDEO &&
00506 (avctx->width > 16383 ||
00507 avctx->height > 16383 )) {
00508 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
00509 return -1;
00510 }
00511
00512 if ((s->codec_id == CODEC_ID_WMV1 ||
00513 s->codec_id == CODEC_ID_WMV2) &&
00514 avctx->width & 1) {
00515 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
00516 return -1;
00517 }
00518
00519 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
00520 s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO) {
00521 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
00522 return -1;
00523 }
00524
00525
00526 if (s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4) {
00527 av_log(avctx, AV_LOG_ERROR,
00528 "mpeg2 style quantization not supported by codec\n");
00529 return -1;
00530 }
00531
00532 #if FF_API_MPV_GLOBAL_OPTS
00533 if (s->flags & CODEC_FLAG_CBP_RD)
00534 s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
00535 #endif
00536
00537 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
00538 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
00539 return -1;
00540 }
00541
00542 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
00543 s->avctx->mb_decision != FF_MB_DECISION_RD) {
00544 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
00545 return -1;
00546 }
00547
00548 if (s->avctx->scenechange_threshold < 1000000000 &&
00549 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
00550 av_log(avctx, AV_LOG_ERROR,
00551 "closed gop with scene change detection are not supported yet, "
00552 "set threshold to 1000000000\n");
00553 return -1;
00554 }
00555
00556 if (s->flags & CODEC_FLAG_LOW_DELAY) {
00557 if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
00558 av_log(avctx, AV_LOG_ERROR,
00559 "low delay forcing is only available for mpeg2\n");
00560 return -1;
00561 }
00562 if (s->max_b_frames != 0) {
00563 av_log(avctx, AV_LOG_ERROR,
00564 "b frames cannot be used with low delay\n");
00565 return -1;
00566 }
00567 }
00568
00569 if (s->q_scale_type == 1) {
00570 if (avctx->qmax > 12) {
00571 av_log(avctx, AV_LOG_ERROR,
00572 "non linear quant only supports qmax <= 12 currently\n");
00573 return -1;
00574 }
00575 }
00576
00577 if (s->avctx->thread_count > 1 &&
00578 s->codec_id != CODEC_ID_MPEG4 &&
00579 s->codec_id != CODEC_ID_MPEG1VIDEO &&
00580 s->codec_id != CODEC_ID_MPEG2VIDEO &&
00581 (s->codec_id != CODEC_ID_H263P)) {
00582 av_log(avctx, AV_LOG_ERROR,
00583 "multi threaded encoding not supported by codec\n");
00584 return -1;
00585 }
00586
00587 if (s->avctx->thread_count < 1) {
00588 av_log(avctx, AV_LOG_ERROR,
00589 "automatic thread number detection not supported by codec, "
00590 "patch welcome\n");
00591 return -1;
00592 }
00593
00594 if (s->avctx->thread_count > 1)
00595 s->rtp_mode = 1;
00596
00597 if (!avctx->time_base.den || !avctx->time_base.num) {
00598 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
00599 return -1;
00600 }
00601
00602 i = (INT_MAX / 2 + 128) >> 8;
00603 if (avctx->me_threshold >= i) {
00604 av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
00605 i - 1);
00606 return -1;
00607 }
00608 if (avctx->mb_threshold >= i) {
00609 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
00610 i - 1);
00611 return -1;
00612 }
00613
00614 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
00615 av_log(avctx, AV_LOG_INFO,
00616 "notice: b_frame_strategy only affects the first pass\n");
00617 avctx->b_frame_strategy = 0;
00618 }
00619
00620 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
00621 if (i > 1) {
00622 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
00623 avctx->time_base.den /= i;
00624 avctx->time_base.num /= i;
00625
00626 }
00627
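/* Default quantization rounding bias: MPEG-1/2, (M)JPEG and mpeg-style MPEG-4
 * quantization round intra coefficients up by roughly 3/8 of a step and
 * truncate inter coefficients, while the remaining codecs round inter
 * coefficients down by a quarter step; either value can be overridden by the
 * user options below. */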
00628 if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG1VIDEO ||
    s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG ||
    s->codec_id == CODEC_ID_AMV) {
00629
00630 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
00631 s->inter_quant_bias = 0;
00632 } else {
00633 s->intra_quant_bias = 0;
00634
00635 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
00636 }
00637
00638 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
00639 s->intra_quant_bias = avctx->intra_quant_bias;
00640 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
00641 s->inter_quant_bias = avctx->inter_quant_bias;
00642
00643 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",
    s->intra_quant_bias, s->inter_quant_bias);
00644
00645 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
00646 &chroma_v_shift);
00647
00648 if (avctx->codec_id == CODEC_ID_MPEG4 &&
00649 s->avctx->time_base.den > (1 << 16) - 1) {
00650 av_log(avctx, AV_LOG_ERROR,
00651 "timebase %d/%d not supported by MPEG 4 standard, "
00652 "the maximum admitted value for the timebase denominator "
00653 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
00654 (1 << 16) - 1);
00655 return -1;
00656 }
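/* number of bits needed to code an MPEG-4 time increment of
 * 0 .. time_base.den - 1 */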
00657 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
00658
00659 #if FF_API_MPV_GLOBAL_OPTS
00660 if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
00661 s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
00662 if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
00663 s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
00664 if (avctx->quantizer_noise_shaping)
00665 s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
00666 #endif
00667
00668 switch (avctx->codec->id) {
00669 case CODEC_ID_MPEG1VIDEO:
00670 s->out_format = FMT_MPEG1;
00671 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
00672 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
00673 break;
00674 case CODEC_ID_MPEG2VIDEO:
00675 s->out_format = FMT_MPEG1;
00676 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
00677 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
00678 s->rtp_mode = 1;
00679 break;
00680 case CODEC_ID_LJPEG:
00681 case CODEC_ID_MJPEG:
00682 case CODEC_ID_AMV:
00683 s->out_format = FMT_MJPEG;
00684 s->intra_only = 1;
00685 if (avctx->codec->id == CODEC_ID_LJPEG &&
00686 (avctx->pix_fmt == PIX_FMT_BGR0
00687 || s->avctx->pix_fmt == PIX_FMT_BGRA
00688 || s->avctx->pix_fmt == PIX_FMT_BGR24)) {
00689 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
00690 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
00691 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
00692 } else {
00693 s->mjpeg_vsample[0] = 2;
00694 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
00695 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
00696 s->mjpeg_hsample[0] = 2;
00697 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
00698 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
00699 }
00700 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
00701 ff_mjpeg_encode_init(s) < 0)
00702 return -1;
00703 avctx->delay = 0;
00704 s->low_delay = 1;
00705 break;
00706 case CODEC_ID_H261:
00707 if (!CONFIG_H261_ENCODER)
00708 return -1;
00709 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
00710 av_log(avctx, AV_LOG_ERROR,
00711 "The specified picture size of %dx%d is not valid for the "
00712 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
00713 s->width, s->height);
00714 return -1;
00715 }
00716 s->out_format = FMT_H261;
00717 avctx->delay = 0;
00718 s->low_delay = 1;
00719 break;
00720 case CODEC_ID_H263:
00721 if (!CONFIG_H263_ENCODER)
00722 return -1;
00723 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
00724 s->width, s->height) == 8) {
00725 av_log(avctx, AV_LOG_ERROR,
00726 "The specified picture size of %dx%d is not valid for "
00727 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
00728 "352x288, 704x576, and 1408x1152. "
00729 "Try H.263+.\n", s->width, s->height);
00730 return -1;
00731 }
00732 s->out_format = FMT_H263;
00733 avctx->delay = 0;
00734 s->low_delay = 1;
00735 break;
00736 case CODEC_ID_H263P:
00737 s->out_format = FMT_H263;
00738 s->h263_plus = 1;
00739
00740 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
00741 s->modified_quant = s->h263_aic;
00742 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
00743 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
00744
00745
00746
00747 avctx->delay = 0;
00748 s->low_delay = 1;
00749 break;
00750 case CODEC_ID_FLV1:
00751 s->out_format = FMT_H263;
00752 s->h263_flv = 2;
00753 s->unrestricted_mv = 1;
00754 s->rtp_mode = 0;
00755 avctx->delay = 0;
00756 s->low_delay = 1;
00757 break;
00758 case CODEC_ID_RV10:
00759 s->out_format = FMT_H263;
00760 avctx->delay = 0;
00761 s->low_delay = 1;
00762 break;
00763 case CODEC_ID_RV20:
00764 s->out_format = FMT_H263;
00765 avctx->delay = 0;
00766 s->low_delay = 1;
00767 s->modified_quant = 1;
00768 s->h263_aic = 1;
00769 s->h263_plus = 1;
00770 s->loop_filter = 1;
00771 s->unrestricted_mv = 0;
00772 break;
00773 case CODEC_ID_MPEG4:
00774 s->out_format = FMT_H263;
00775 s->h263_pred = 1;
00776 s->unrestricted_mv = 1;
00777 s->low_delay = s->max_b_frames ? 0 : 1;
00778 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
00779 break;
00780 case CODEC_ID_MSMPEG4V2:
00781 s->out_format = FMT_H263;
00782 s->h263_pred = 1;
00783 s->unrestricted_mv = 1;
00784 s->msmpeg4_version = 2;
00785 avctx->delay = 0;
00786 s->low_delay = 1;
00787 break;
00788 case CODEC_ID_MSMPEG4V3:
00789 s->out_format = FMT_H263;
00790 s->h263_pred = 1;
00791 s->unrestricted_mv = 1;
00792 s->msmpeg4_version = 3;
00793 s->flipflop_rounding = 1;
00794 avctx->delay = 0;
00795 s->low_delay = 1;
00796 break;
00797 case CODEC_ID_WMV1:
00798 s->out_format = FMT_H263;
00799 s->h263_pred = 1;
00800 s->unrestricted_mv = 1;
00801 s->msmpeg4_version = 4;
00802 s->flipflop_rounding = 1;
00803 avctx->delay = 0;
00804 s->low_delay = 1;
00805 break;
00806 case CODEC_ID_WMV2:
00807 s->out_format = FMT_H263;
00808 s->h263_pred = 1;
00809 s->unrestricted_mv = 1;
00810 s->msmpeg4_version = 5;
00811 s->flipflop_rounding = 1;
00812 avctx->delay = 0;
00813 s->low_delay = 1;
00814 break;
00815 default:
00816 return -1;
00817 }
00818
00819 avctx->has_b_frames = !s->low_delay;
00820
00821 s->encoding = 1;
00822
00823 s->progressive_frame =
00824 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
00825 CODEC_FLAG_INTERLACED_ME) ||
00826 s->alternate_scan);
00827
00828
00829 if (ff_MPV_common_init(s) < 0)
00830 return -1;
00831
00832 if (!s->dct_quantize)
00833 s->dct_quantize = ff_dct_quantize_c;
00834 if (!s->denoise_dct)
00835 s->denoise_dct = denoise_dct_c;
00836 s->fast_dct_quantize = s->dct_quantize;
00837 if (avctx->trellis)
00838 s->dct_quantize = dct_quantize_trellis_c;
00839
00840 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
00841 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
00842
00843 s->quant_precision = 5;
00844
00845 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
00846 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
00847
00848 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
00849 ff_h261_encode_init(s);
00850 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
00851 ff_h263_encode_init(s);
00852 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
00853 ff_msmpeg4_encode_init(s);
00854 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
00855 && s->out_format == FMT_MPEG1)
00856 ff_mpeg1_encode_init(s);
00857
00858
00859 for (i = 0; i < 64; i++) {
00860 int j = s->dsp.idct_permutation[i];
00861 if (CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4 &&
00862 s->mpeg_quant) {
00863 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
00864 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
00865 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
00866 s->intra_matrix[j] =
00867 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
00868 } else {
00869
00870 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
00871 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
00872 }
00873 if (s->avctx->intra_matrix)
00874 s->intra_matrix[j] = s->avctx->intra_matrix[i];
00875 if (s->avctx->inter_matrix)
00876 s->inter_matrix[j] = s->avctx->inter_matrix[i];
00877 }
00878
00879
00880
00881 if (s->out_format != FMT_MJPEG) {
00882 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
00883 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
00884 31, 1);
00885 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
00886 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
00887 31, 0);
00888 }
00889
00890 if (ff_rate_control_init(s) < 0)
00891 return -1;
00892
00893 return 0;
00894 }
00895
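/* Free everything the encoder allocated: rate-control state, the common MPV
 * context, the MJPEG tables if used, and any extradata. */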
00896 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
00897 {
00898 MpegEncContext *s = avctx->priv_data;
00899
00900 ff_rate_control_uninit(s);
00901
00902 ff_MPV_common_end(s);
00903 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
00904 s->out_format == FMT_MJPEG)
00905 ff_mjpeg_encode_close(s);
00906
00907 av_freep(&avctx->extradata);
00908
00909 return 0;
00910 }
00911
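/* Sum of absolute differences of a 16x16 block against a constant value,
 * used below as a cheap estimate of how well the block would code as intra. */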
00912 static int get_sae(uint8_t *src, int ref, int stride)
00913 {
00914 int x,y;
00915 int acc = 0;
00916
00917 for (y = 0; y < 16; y++) {
00918 for (x = 0; x < 16; x++) {
00919 acc += FFABS(src[x + y * stride] - ref);
00920 }
00921 }
00922
00923 return acc;
00924 }
00925
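/* Rough count of macroblocks that would likely end up intra coded: a block is
 * counted when coding it against its own mean (SAE plus a 500 margin) looks
 * cheaper than coding it against the reference frame (SAD).  Used by
 * b_frame_strategy 1 to score candidate B-frames. */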
00926 static int get_intra_count(MpegEncContext *s, uint8_t *src,
00927 uint8_t *ref, int stride)
00928 {
00929 int x, y, w, h;
00930 int acc = 0;
00931
00932 w = s->width & ~15;
00933 h = s->height & ~15;
00934
00935 for (y = 0; y < h; y += 16) {
00936 for (x = 0; x < w; x += 16) {
00937 int offset = x + y * stride;
00938 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
00939 16);
00940 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
00941 int sae = get_sae(src + offset, mean, stride);
00942
00943 acc += sae + 500 < sad;
00944 }
00945 }
00946 return acc;
00947 }
00948
00949
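/* Queue one user-supplied frame into the input_picture FIFO, validating and,
 * if necessary, inventing timestamps.  When the frame can be used in place
 * (matching strides and either no reordering delay or
 * CODEC_FLAG_INPUT_PRESERVED) only the data pointers are taken over;
 * otherwise the pixels are copied into an internal buffer. */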
00950 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
00951 {
00952 AVFrame *pic = NULL;
00953 int64_t pts;
00954 int i;
00955 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
00956 (s->low_delay ? 0 : 1);
00957 int direct = 1;
00958
00959 if (pic_arg) {
00960 pts = pic_arg->pts;
00961 pic_arg->display_picture_number = s->input_picture_number++;
00962
00963 if (pts != AV_NOPTS_VALUE) {
00964 if (s->user_specified_pts != AV_NOPTS_VALUE) {
00965 int64_t time = pts;
00966 int64_t last = s->user_specified_pts;
00967
00968 if (time <= last) {
00969 av_log(s->avctx, AV_LOG_ERROR,
00970 "Error, Invalid timestamp=%"PRId64", "
00971 "last=%"PRId64"\n", pts, s->user_specified_pts);
00972 return -1;
00973 }
00974
00975 if (!s->low_delay && pic_arg->display_picture_number == 1)
00976 s->dts_delta = time - last;
00977 }
00978 s->user_specified_pts = pts;
00979 } else {
00980 if (s->user_specified_pts != AV_NOPTS_VALUE) {
00981 s->user_specified_pts =
00982 pts = s->user_specified_pts + 1;
00983 av_log(s->avctx, AV_LOG_INFO,
00984 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
00985 pts);
00986 } else {
00987 pts = pic_arg->display_picture_number;
00988 }
00989 }
00990 }
00991
00992 if (pic_arg) {
00993 if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
00994 direct = 0;
00995 if (pic_arg->linesize[0] != s->linesize)
00996 direct = 0;
00997 if (pic_arg->linesize[1] != s->uvlinesize)
00998 direct = 0;
00999 if (pic_arg->linesize[2] != s->uvlinesize)
01000 direct = 0;
01001
01002
01003
01004
01005 if (direct) {
01006 i = ff_find_unused_picture(s, 1);
01007 if (i < 0)
01008 return i;
01009
01010 pic = &s->picture[i].f;
01011 pic->reference = 3;
01012
01013 for (i = 0; i < 4; i++) {
01014 pic->data[i] = pic_arg->data[i];
01015 pic->linesize[i] = pic_arg->linesize[i];
01016 }
01017 if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
01018 return -1;
01019 }
01020 } else {
01021 i = ff_find_unused_picture(s, 0);
01022 if (i < 0)
01023 return i;
01024
01025 pic = &s->picture[i].f;
01026 pic->reference = 3;
01027
01028 if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
01029 return -1;
01030 }
01031
01032 if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
01033 pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
01034 pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
01035
01036 } else {
01037 int h_chroma_shift, v_chroma_shift;
01038 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift,
01039 &v_chroma_shift);
01040
01041 for (i = 0; i < 3; i++) {
01042 int src_stride = pic_arg->linesize[i];
01043 int dst_stride = i ? s->uvlinesize : s->linesize;
01044 int h_shift = i ? h_chroma_shift : 0;
01045 int v_shift = i ? v_chroma_shift : 0;
01046 int w = s->width >> h_shift;
01047 int h = s->height >> v_shift;
01048 uint8_t *src = pic_arg->data[i];
01049 uint8_t *dst = pic->data[i];
01050
01051 if (s->codec_id == CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
01052 h = ((s->height + 15) / 16 * 16) >> v_shift;
01053 }
01054
01055 if (!s->avctx->rc_buffer_size)
01056 dst += INPLACE_OFFSET;
01057
01058 if (src_stride == dst_stride)
01059 memcpy(dst, src, src_stride * h);
01060 else {
01061 while (h--) {
01062 memcpy(dst, src, w);
01063 dst += dst_stride;
01064 src += src_stride;
01065 }
01066 }
01067 }
01068 }
01069 }
01070 copy_picture_attributes(s, pic, pic_arg);
01071 pic->pts = pts;
01072 }
01073
01074
01075 for (i = 1; i < MAX_PICTURE_COUNT ; i++)
01076 s->input_picture[i - 1] = s->input_picture[i];
01077
01078 s->input_picture[encoding_delay] = (Picture*) pic;
01079
01080 return 0;
01081 }
01082
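/* Decide whether the current frame is similar enough to the last coded frame
 * to be skipped.  Per-block differences are folded into one score according
 * to frame_skip_exp (maximum, sum of absolute values, sum of squares, higher
 * powers) and compared against frame_skip_threshold and
 * frame_skip_factor * lambda / 256. */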
01083 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
01084 {
01085 int x, y, plane;
01086 int score = 0;
01087 int64_t score64 = 0;
01088
01089 for (plane = 0; plane < 3; plane++) {
01090 const int stride = p->f.linesize[plane];
01091 const int bw = plane ? 1 : 2;
01092 for (y = 0; y < s->mb_height * bw; y++) {
01093 for (x = 0; x < s->mb_width * bw; x++) {
01094 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
01095 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
01096 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
01097 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
01098
01099 switch (s->avctx->frame_skip_exp) {
01100 case 0: score = FFMAX(score, v); break;
01101 case 1: score += FFABS(v); break;
01102 case 2: score += v * v; break;
01103 case 3: score64 += FFABS(v * v * (int64_t)v); break;
01104 case 4: score64 += v * v * (int64_t)(v * v); break;
01105 }
01106 }
01107 }
01108 }
01109
01110 if (score)
01111 score64 = score;
01112
01113 if (score64 < s->avctx->frame_skip_threshold)
01114 return 1;
01115 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
01116 return 1;
01117 return 0;
01118 }
01119
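/* b_frame_strategy 2: brute-force search for the best number of B-frames.
 * The queued input pictures are downscaled by 2^brd_scale and re-encoded with
 * a temporary encoder instance for every candidate B-frame count; the count
 * with the lowest bits * lambda2 + SSE cost wins. */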
01120 static int estimate_best_b_count(MpegEncContext *s)
01121 {
01122 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
01123 AVCodecContext *c = avcodec_alloc_context3(NULL);
01124 AVFrame input[FF_MAX_B_FRAMES + 2];
01125 const int scale = s->avctx->brd_scale;
01126 int i, j, out_size, p_lambda, b_lambda, lambda2;
01127 int outbuf_size = s->width * s->height;
01128 uint8_t *outbuf = av_malloc(outbuf_size);
01129 int64_t best_rd = INT64_MAX;
01130 int best_b_count = -1;
01131
01132 assert(scale >= 0 && scale <= 3);
01133
01134
01135
01136 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
01137
01138 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
01139 if (!b_lambda)
01140 b_lambda = p_lambda;
01141 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
01142 FF_LAMBDA_SHIFT;
01143
01144 c->width = s->width >> scale;
01145 c->height = s->height >> scale;
01146 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
01147 CODEC_FLAG_INPUT_PRESERVED ;
01148 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
01149 c->mb_decision = s->avctx->mb_decision;
01150 c->me_cmp = s->avctx->me_cmp;
01151 c->mb_cmp = s->avctx->mb_cmp;
01152 c->me_sub_cmp = s->avctx->me_sub_cmp;
01153 c->pix_fmt = PIX_FMT_YUV420P;
01154 c->time_base = s->avctx->time_base;
01155 c->max_b_frames = s->max_b_frames;
01156
01157 if (avcodec_open2(c, codec, NULL) < 0)
01158 return -1;
01159
01160 for (i = 0; i < s->max_b_frames + 2; i++) {
01161 int ysize = c->width * c->height;
01162 int csize = (c->width / 2) * (c->height / 2);
01163 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
01164 s->next_picture_ptr;
01165
01166 avcodec_get_frame_defaults(&input[i]);
01167 input[i].data[0] = av_malloc(ysize + 2 * csize);
01168 input[i].data[1] = input[i].data[0] + ysize;
01169 input[i].data[2] = input[i].data[1] + csize;
01170 input[i].linesize[0] = c->width;
01171 input[i].linesize[1] =
01172 input[i].linesize[2] = c->width / 2;
01173
01174 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
01175 pre_input = *pre_input_ptr;
01176
01177 if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
01178 pre_input.f.data[0] += INPLACE_OFFSET;
01179 pre_input.f.data[1] += INPLACE_OFFSET;
01180 pre_input.f.data[2] += INPLACE_OFFSET;
01181 }
01182
01183 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
01184 pre_input.f.data[0], pre_input.f.linesize[0],
01185 c->width, c->height);
01186 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
01187 pre_input.f.data[1], pre_input.f.linesize[1],
01188 c->width >> 1, c->height >> 1);
01189 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
01190 pre_input.f.data[2], pre_input.f.linesize[2],
01191 c->width >> 1, c->height >> 1);
01192 }
01193 }
01194
01195 for (j = 0; j < s->max_b_frames + 1; j++) {
01196 int64_t rd = 0;
01197
01198 if (!s->input_picture[j])
01199 break;
01200
01201 c->error[0] = c->error[1] = c->error[2] = 0;
01202
01203 input[0].pict_type = AV_PICTURE_TYPE_I;
01204 input[0].quality = 1 * FF_QP2LAMBDA;
01205 out_size = avcodec_encode_video(c, outbuf,
01206 outbuf_size, &input[0]);
01207
01208
01209 for (i = 0; i < s->max_b_frames + 1; i++) {
01210 int is_p = i % (j + 1) == j || i == s->max_b_frames;
01211
01212 input[i + 1].pict_type = is_p ?
01213 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
01214 input[i + 1].quality = is_p ? p_lambda : b_lambda;
01215 out_size = avcodec_encode_video(c, outbuf, outbuf_size,
01216 &input[i + 1]);
01217 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
01218 }
01219
01220
01221 while (out_size) {
01222 out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
01223 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
01224 }
01225
01226 rd += c->error[0] + c->error[1] + c->error[2];
01227
01228 if (rd < best_rd) {
01229 best_rd = rd;
01230 best_b_count = j;
01231 }
01232 }
01233
01234 av_freep(&outbuf);
01235 avcodec_close(c);
01236 av_freep(&c);
01237
01238 for (i = 0; i < s->max_b_frames + 2; i++) {
01239 av_freep(&input[i].data[0]);
01240 }
01241
01242 return best_b_count;
01243 }
01244
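/* Pick the next picture to code and build the coded-order queue: decide the
 * picture type, decide how many of the queued frames become B-frames
 * (depending on b_frame_strategy, GOP boundaries and two-pass data), and make
 * new_picture / current_picture point at the chosen frame. */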
01245 static int select_input_picture(MpegEncContext *s)
01246 {
01247 int i;
01248
01249 for (i = 1; i < MAX_PICTURE_COUNT; i++)
01250 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
01251 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
01252
01253
01254 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
01255 if (
01256 s->next_picture_ptr == NULL || s->intra_only) {
01257 s->reordered_input_picture[0] = s->input_picture[0];
01258 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
01259 s->reordered_input_picture[0]->f.coded_picture_number =
01260 s->coded_picture_number++;
01261 } else {
01262 int b_frames;
01263
01264 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
01265 if (s->picture_in_gop_number < s->gop_size &&
01266 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
01267
01268
01269
01270
01271
01272 if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
01273 for (i = 0; i < 4; i++)
01274 s->input_picture[0]->f.data[i] = NULL;
01275 s->input_picture[0]->f.type = 0;
01276 } else {
01277 assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
01278 s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
01279
01280 s->avctx->release_buffer(s->avctx,
01281 &s->input_picture[0]->f);
01282 }
01283
01284 emms_c();
01285 ff_vbv_update(s, 0);
01286
01287 goto no_output_pic;
01288 }
01289 }
01290
01291 if (s->flags & CODEC_FLAG_PASS2) {
01292 for (i = 0; i < s->max_b_frames + 1; i++) {
01293 int pict_num = s->input_picture[0]->f.display_picture_number + i;
01294
01295 if (pict_num >= s->rc_context.num_entries)
01296 break;
01297 if (!s->input_picture[i]) {
01298 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
01299 break;
01300 }
01301
01302 s->input_picture[i]->f.pict_type =
01303 s->rc_context.entry[pict_num].new_pict_type;
01304 }
01305 }
01306
01307 if (s->avctx->b_frame_strategy == 0) {
01308 b_frames = s->max_b_frames;
01309 while (b_frames && !s->input_picture[b_frames])
01310 b_frames--;
01311 } else if (s->avctx->b_frame_strategy == 1) {
01312 for (i = 1; i < s->max_b_frames + 1; i++) {
01313 if (s->input_picture[i] &&
01314 s->input_picture[i]->b_frame_score == 0) {
01315 s->input_picture[i]->b_frame_score =
01316 get_intra_count(s,
01317 s->input_picture[i ]->f.data[0],
01318 s->input_picture[i - 1]->f.data[0],
01319 s->linesize) + 1;
01320 }
01321 }
01322 for (i = 0; i < s->max_b_frames + 1; i++) {
01323 if (s->input_picture[i] == NULL ||
01324 s->input_picture[i]->b_frame_score - 1 >
01325 s->mb_num / s->avctx->b_sensitivity)
01326 break;
01327 }
01328
01329 b_frames = FFMAX(0, i - 1);
01330
01331
01332 for (i = 0; i < b_frames + 1; i++) {
01333 s->input_picture[i]->b_frame_score = 0;
01334 }
01335 } else if (s->avctx->b_frame_strategy == 2) {
01336 b_frames = estimate_best_b_count(s);
01337 } else {
01338 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
01339 b_frames = 0;
01340 }
01341
01342 emms_c();
01343
01344
01345
01346
01347 for (i = b_frames - 1; i >= 0; i--) {
01348 int type = s->input_picture[i]->f.pict_type;
01349 if (type && type != AV_PICTURE_TYPE_B)
01350 b_frames = i;
01351 }
01352 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
01353 b_frames == s->max_b_frames) {
01354 av_log(s->avctx, AV_LOG_ERROR,
01355 "warning, too many b frames in a row\n");
01356 }
01357
01358 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
01359 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
01360 s->gop_size > s->picture_in_gop_number) {
01361 b_frames = s->gop_size - s->picture_in_gop_number - 1;
01362 } else {
01363 if (s->flags & CODEC_FLAG_CLOSED_GOP)
01364 b_frames = 0;
01365 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
01366 }
01367 }
01368
01369 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
01370 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
01371 b_frames--;
01372
01373 s->reordered_input_picture[0] = s->input_picture[b_frames];
01374 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
01375 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
01376 s->reordered_input_picture[0]->f.coded_picture_number =
01377 s->coded_picture_number++;
01378 for (i = 0; i < b_frames; i++) {
01379 s->reordered_input_picture[i + 1] = s->input_picture[i];
01380 s->reordered_input_picture[i + 1]->f.pict_type =
01381 AV_PICTURE_TYPE_B;
01382 s->reordered_input_picture[i + 1]->f.coded_picture_number =
01383 s->coded_picture_number++;
01384 }
01385 }
01386 }
01387 no_output_pic:
01388 if (s->reordered_input_picture[0]) {
01389 s->reordered_input_picture[0]->f.reference =
01390 s->reordered_input_picture[0]->f.pict_type !=
01391 AV_PICTURE_TYPE_B ? 3 : 0;
01392
01393 ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
01394
01395 if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
01396 s->avctx->rc_buffer_size) {
01397
01398
01399
01400 Picture *pic;
01401 int i = ff_find_unused_picture(s, 0);
01402 if (i < 0)
01403 return i;
01404 pic = &s->picture[i];
01405
01406 pic->f.reference = s->reordered_input_picture[0]->f.reference;
01407 if (ff_alloc_picture(s, pic, 0) < 0) {
01408 return -1;
01409 }
01410
01411
01412 if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
01413 s->avctx->release_buffer(s->avctx,
01414 &s->reordered_input_picture[0]->f);
01415 for (i = 0; i < 4; i++)
01416 s->reordered_input_picture[0]->f.data[i] = NULL;
01417 s->reordered_input_picture[0]->f.type = 0;
01418
01419 copy_picture_attributes(s, &pic->f,
01420 &s->reordered_input_picture[0]->f);
01421
01422 s->current_picture_ptr = pic;
01423 } else {
01424
01425
01426 assert(s->reordered_input_picture[0]->f.type ==
01427 FF_BUFFER_TYPE_USER ||
01428 s->reordered_input_picture[0]->f.type ==
01429 FF_BUFFER_TYPE_INTERNAL);
01430
01431 s->current_picture_ptr = s->reordered_input_picture[0];
01432 for (i = 0; i < 4; i++) {
01433 s->new_picture.f.data[i] += INPLACE_OFFSET;
01434 }
01435 }
01436 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
01437
01438 s->picture_number = s->new_picture.f.display_picture_number;
01439
01440 } else {
01441 memset(&s->new_picture, 0, sizeof(Picture));
01442 }
01443 return 0;
01444 }
01445
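/* Main encoding entry point: load and reorder the input frame, encode it
 * (retrying with a coarser quantizer if the VBV buffer would overflow), add
 * stuffing, patch vbv_delay for CBR MPEG-1/2 and fill the output packet. */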
01446 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
01447 AVFrame *pic_arg, int *got_packet)
01448 {
01449 MpegEncContext *s = avctx->priv_data;
01450 int i, stuffing_count, ret;
01451 int context_count = s->slice_context_count;
01452
01453 s->picture_in_gop_number++;
01454
01455 if (load_input_picture(s, pic_arg) < 0)
01456 return -1;
01457
01458 if (select_input_picture(s) < 0) {
01459 return -1;
01460 }
01461
01462
01463 if (s->new_picture.f.data[0]) {
01464 if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
01465 return ret;
01466 if (s->mb_info) {
01467 s->mb_info_ptr = av_packet_new_side_data(pkt,
01468 AV_PKT_DATA_H263_MB_INFO,
01469 s->mb_width*s->mb_height*12);
01470 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
01471 }
01472
01473 for (i = 0; i < context_count; i++) {
01474 int start_y = s->thread_context[i]->start_mb_y;
01475 int end_y = s->thread_context[i]-> end_mb_y;
01476 int h = s->mb_height;
01477 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
01478 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
01479
01480 init_put_bits(&s->thread_context[i]->pb, start, end - start);
01481 }
01482
01483 s->pict_type = s->new_picture.f.pict_type;
01484
01485
01486
01487 ff_MPV_frame_start(s, avctx);
01488 vbv_retry:
01489 if (encode_picture(s, s->picture_number) < 0)
01490 return -1;
01491
01492 avctx->header_bits = s->header_bits;
01493 avctx->mv_bits = s->mv_bits;
01494 avctx->misc_bits = s->misc_bits;
01495 avctx->i_tex_bits = s->i_tex_bits;
01496 avctx->p_tex_bits = s->p_tex_bits;
01497 avctx->i_count = s->i_count;
01498
01499 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
01500 avctx->skip_count = s->skip_count;
01501
01502 ff_MPV_frame_end(s);
01503
01504 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
01505 ff_mjpeg_encode_picture_trailer(s);
01506
01507 if (avctx->rc_buffer_size) {
01508 RateControlContext *rcc = &s->rc_context;
01509 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
01510
01511 if (put_bits_count(&s->pb) > max_size &&
01512 s->lambda < s->avctx->lmax) {
01513 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
01514 (s->qscale + 1) / s->qscale);
01515 if (s->adaptive_quant) {
01516 int i;
01517 for (i = 0; i < s->mb_height * s->mb_stride; i++)
01518 s->lambda_table[i] =
01519 FFMAX(s->lambda_table[i] + 1,
01520 s->lambda_table[i] * (s->qscale + 1) /
01521 s->qscale);
01522 }
01523 s->mb_skipped = 0;
01524
01525 if (s->pict_type == AV_PICTURE_TYPE_P) {
01526 if (s->flipflop_rounding ||
01527 s->codec_id == CODEC_ID_H263P ||
01528 s->codec_id == CODEC_ID_MPEG4)
01529 s->no_rounding ^= 1;
01530 }
01531 if (s->pict_type != AV_PICTURE_TYPE_B) {
01532 s->time_base = s->last_time_base;
01533 s->last_non_b_time = s->time - s->pp_time;
01534 }
01535
01536 for (i = 0; i < context_count; i++) {
01537 PutBitContext *pb = &s->thread_context[i]->pb;
01538 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
01539 }
01540 goto vbv_retry;
01541 }
01542
01543 assert(s->avctx->rc_max_rate);
01544 }
01545
01546 if (s->flags & CODEC_FLAG_PASS1)
01547 ff_write_pass1_stats(s);
01548
01549 for (i = 0; i < 4; i++) {
01550 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
01551 avctx->error[i] += s->current_picture_ptr->f.error[i];
01552 }
01553
01554 if (s->flags & CODEC_FLAG_PASS1)
01555 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
01556 avctx->i_tex_bits + avctx->p_tex_bits ==
01557 put_bits_count(&s->pb));
01558 flush_put_bits(&s->pb);
01559 s->frame_bits = put_bits_count(&s->pb);
01560
01561 stuffing_count = ff_vbv_update(s, s->frame_bits);
01562 if (stuffing_count) {
01563 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
01564 stuffing_count + 50) {
01565 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
01566 return -1;
01567 }
01568
01569 switch (s->codec_id) {
01570 case CODEC_ID_MPEG1VIDEO:
01571 case CODEC_ID_MPEG2VIDEO:
01572 while (stuffing_count--) {
01573 put_bits(&s->pb, 8, 0);
01574 }
01575 break;
01576 case CODEC_ID_MPEG4:
01577 put_bits(&s->pb, 16, 0);
01578 put_bits(&s->pb, 16, 0x1C3);
01579 stuffing_count -= 4;
01580 while (stuffing_count--) {
01581 put_bits(&s->pb, 8, 0xFF);
01582 }
01583 break;
01584 default:
01585 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
01586 }
01587 flush_put_bits(&s->pb);
01588 s->frame_bits = put_bits_count(&s->pb);
01589 }
01590
01591
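/* For strictly CBR MPEG-1/2 the 16-bit vbv_delay field written by the header
 * code is only a placeholder; recompute it from the rate-control buffer state
 * and patch it into the already written bitstream (vbv_delay_ptr points at
 * the byte holding its top bits). */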
01592 if (s->avctx->rc_max_rate &&
01593 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
01594 s->out_format == FMT_MPEG1 &&
01595 90000LL * (avctx->rc_buffer_size - 1) <=
01596 s->avctx->rc_max_rate * 0xFFFFLL) {
01597 int vbv_delay, min_delay;
01598 double inbits = s->avctx->rc_max_rate *
01599 av_q2d(s->avctx->time_base);
01600 int minbits = s->frame_bits - 8 *
01601 (s->vbv_delay_ptr - s->pb.buf - 1);
01602 double bits = s->rc_context.buffer_index + minbits - inbits;
01603
01604 if (bits < 0)
01605 av_log(s->avctx, AV_LOG_ERROR,
01606 "Internal error, negative bits\n");
01607
01608 assert(s->repeat_first_field == 0);
01609
01610 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
01611 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
01612 s->avctx->rc_max_rate;
01613
01614 vbv_delay = FFMAX(vbv_delay, min_delay);
01615
01616 assert(vbv_delay < 0xFFFF);
01617
01618 s->vbv_delay_ptr[0] &= 0xF8;
01619 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
01620 s->vbv_delay_ptr[1] = vbv_delay >> 5;
01621 s->vbv_delay_ptr[2] &= 0x07;
01622 s->vbv_delay_ptr[2] |= vbv_delay << 3;
01623 avctx->vbv_delay = vbv_delay * 300;
01624 }
01625 s->total_bits += s->frame_bits;
01626 avctx->frame_bits = s->frame_bits;
01627
01628 pkt->pts = s->current_picture.f.pts;
01629 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
01630 if (!s->current_picture.f.coded_picture_number)
01631 pkt->dts = pkt->pts - s->dts_delta;
01632 else
01633 pkt->dts = s->reordered_pts;
01634 s->reordered_pts = pkt->pts;
01635 } else
01636 pkt->dts = pkt->pts;
01637 if (s->current_picture.f.key_frame)
01638 pkt->flags |= AV_PKT_FLAG_KEY;
01639 if (s->mb_info)
01640 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
01641 } else {
01642 assert((put_bits_ptr(&s->pb) == s->pb.buf));
01643 s->frame_bits = 0;
01644 }
01645 assert((s->frame_bits & 7) == 0);
01646
01647 pkt->size = s->frame_bits / 8;
01648 *got_packet = !!pkt->size;
01649 return 0;
01650 }
01651
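/* Zero out a block that only contains a few scattered +-1 coefficients when
 * their estimated coding cost (tab[] indexed by zero-run length) stays below
 * the threshold; a negative threshold additionally allows the DC coefficient
 * to be eliminated. */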
01652 static inline void dct_single_coeff_elimination(MpegEncContext *s,
01653 int n, int threshold)
01654 {
01655 static const char tab[64] = {
01656 3, 2, 2, 1, 1, 1, 1, 1,
01657 1, 1, 1, 1, 1, 1, 1, 1,
01658 1, 1, 1, 1, 1, 1, 1, 1,
01659 0, 0, 0, 0, 0, 0, 0, 0,
01660 0, 0, 0, 0, 0, 0, 0, 0,
01661 0, 0, 0, 0, 0, 0, 0, 0,
01662 0, 0, 0, 0, 0, 0, 0, 0,
01663 0, 0, 0, 0, 0, 0, 0, 0
01664 };
01665 int score = 0;
01666 int run = 0;
01667 int i;
01668 DCTELEM *block = s->block[n];
01669 const int last_index = s->block_last_index[n];
01670 int skip_dc;
01671
01672 if (threshold < 0) {
01673 skip_dc = 0;
01674 threshold = -threshold;
01675 } else
01676 skip_dc = 1;
01677
01678
01679 if (last_index <= skip_dc - 1)
01680 return;
01681
01682 for (i = 0; i <= last_index; i++) {
01683 const int j = s->intra_scantable.permutated[i];
01684 const int level = FFABS(block[j]);
01685 if (level == 1) {
01686 if (skip_dc && i == 0)
01687 continue;
01688 score += tab[run];
01689 run = 0;
01690 } else if (level > 1) {
01691 return;
01692 } else {
01693 run++;
01694 }
01695 }
01696 if (score >= threshold)
01697 return;
01698 for (i = skip_dc; i <= last_index; i++) {
01699 const int j = s->intra_scantable.permutated[i];
01700 block[j] = 0;
01701 }
01702 if (block[0])
01703 s->block_last_index[n] = 0;
01704 else
01705 s->block_last_index[n] = -1;
01706 }
01707
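/* Clamp the quantized coefficients to the range the entropy coder can
 * represent (min_qcoeff .. max_qcoeff) and warn if any had to be clipped. */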
01708 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
01709 int last_index)
01710 {
01711 int i;
01712 const int maxlevel = s->max_qcoeff;
01713 const int minlevel = s->min_qcoeff;
01714 int overflow = 0;
01715
01716 if (s->mb_intra) {
01717 i = 1;
01718 } else
01719 i = 0;
01720
01721 for (; i <= last_index; i++) {
01722 const int j = s->intra_scantable.permutated[i];
01723 int level = block[j];
01724
01725 if (level > maxlevel) {
01726 level = maxlevel;
01727 overflow++;
01728 } else if (level < minlevel) {
01729 level = minlevel;
01730 overflow++;
01731 }
01732
01733 block[j] = level;
01734 }
01735
01736 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
01737 av_log(s->avctx, AV_LOG_INFO,
01738 "warning, clipping %d dct coefficients to %d..%d\n",
01739 overflow, minlevel, maxlevel);
01740 }
01741
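/* Per-pixel visual weights used by the noise-shaping quantizer: each weight
 * is proportional to the standard deviation of the pixel's 3x3 neighbourhood,
 * i.e. a measure of local activity around that pixel. */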
01742 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
01743 {
01744 int x, y;
01745
01746 for (y = 0; y < 8; y++) {
01747 for (x = 0; x < 8; x++) {
01748 int x2, y2;
01749 int sum = 0;
01750 int sqr = 0;
01751 int count = 0;
01752
01753 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
01754 for (x2 = FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
01755 int v = ptr[x2 + y2 * stride];
01756 sum += v;
01757 sqr += v * v;
01758 count++;
01759 }
01760 }
01761 weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
01762 }
01763 }
01764 }
01765
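/* Encode one macroblock: apply adaptive quantization, fetch (and if needed
 * edge-pad) the source pixels, build the motion-compensated prediction for
 * inter blocks, choose between frame and field DCT, and flag blocks whose
 * residual is small enough that the DCT can be skipped entirely. */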
01766 static av_always_inline void encode_mb_internal(MpegEncContext *s,
01767 int motion_x, int motion_y,
01768 int mb_block_height,
01769 int mb_block_count)
01770 {
01771 int16_t weight[8][64];
01772 DCTELEM orig[8][64];
01773 const int mb_x = s->mb_x;
01774 const int mb_y = s->mb_y;
01775 int i;
01776 int skip_dct[8];
01777 int dct_offset = s->linesize * 8;
01778 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01779 int wrap_y, wrap_c;
01780
01781 for (i = 0; i < mb_block_count; i++)
01782 skip_dct[i] = s->skipdct;
01783
01784 if (s->adaptive_quant) {
01785 const int last_qp = s->qscale;
01786 const int mb_xy = mb_x + mb_y * s->mb_stride;
01787
01788 s->lambda = s->lambda_table[mb_xy];
01789 update_qscale(s);
01790
01791 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
01792 s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
01793 s->dquant = s->qscale - last_qp;
01794
01795 if (s->out_format == FMT_H263) {
01796 s->dquant = av_clip(s->dquant, -2, 2);
01797
01798 if (s->codec_id == CODEC_ID_MPEG4) {
01799 if (!s->mb_intra) {
01800 if (s->pict_type == AV_PICTURE_TYPE_B) {
01801 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
01802 s->dquant = 0;
01803 }
01804 if (s->mv_type == MV_TYPE_8X8)
01805 s->dquant = 0;
01806 }
01807 }
01808 }
01809 }
01810 ff_set_qscale(s, last_qp + s->dquant);
01811 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
01812 ff_set_qscale(s, s->qscale + s->dquant);
01813
01814 wrap_y = s->linesize;
01815 wrap_c = s->uvlinesize;
01816 ptr_y = s->new_picture.f.data[0] +
01817 (mb_y * 16 * wrap_y) + mb_x * 16;
01818 ptr_cb = s->new_picture.f.data[1] +
01819 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
01820 ptr_cr = s->new_picture.f.data[2] +
01821 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
01822
01823 if ((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != CODEC_ID_AMV) {
01824 uint8_t *ebuf = s->edge_emu_buffer + 32;
01825 s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
01826 mb_y * 16, s->width, s->height);
01827 ptr_y = ebuf;
01828 s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
01829 mb_block_height, mb_x * 8, mb_y * 8,
01830 (s->width+1) >> 1, (s->height+1) >> 1);
01831 ptr_cb = ebuf + 18 * wrap_y;
01832 s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
01833 mb_block_height, mb_x * 8, mb_y * 8,
01834 (s->width+1) >> 1, (s->height+1) >> 1);
01835 ptr_cr = ebuf + 18 * wrap_y + 8;
01836 }
01837
01838 if (s->mb_intra) {
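/* Frame vs. field DCT decision: compare an interlace metric of the block
 * coded progressively (two 8-line halves) against the same metric on the
 * de-interleaved fields; the -400 offset biases the choice towards
 * progressive coding.  Field DCT is selected by doubling wrap_y and pointing
 * the lower blocks at the second field via dct_offset. */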
01839 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
01840 int progressive_score, interlaced_score;
01841
01842 s->interlaced_dct = 0;
01843 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
01844 NULL, wrap_y, 8) +
01845 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
01846 NULL, wrap_y, 8) - 400;
01847
01848 if (progressive_score > 0) {
01849 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
01850 NULL, wrap_y * 2, 8) +
01851 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
01852 NULL, wrap_y * 2, 8);
01853 if (progressive_score > interlaced_score) {
01854 s->interlaced_dct = 1;
01855
01856 dct_offset = wrap_y;
01857 wrap_y <<= 1;
01858 if (s->chroma_format == CHROMA_422)
01859 wrap_c <<= 1;
01860 }
01861 }
01862 }
01863
01864 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
01865 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
01866 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
01867 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
01868
01869 if (s->flags & CODEC_FLAG_GRAY) {
01870 skip_dct[4] = 1;
01871 skip_dct[5] = 1;
01872 } else {
01873 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
01874 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
01875 if (!s->chroma_y_shift) {
01876 s->dsp.get_pixels(s->block[6],
01877 ptr_cb + (dct_offset >> 1), wrap_c);
01878 s->dsp.get_pixels(s->block[7],
01879 ptr_cr + (dct_offset >> 1), wrap_c);
01880 }
01881 }
01882 } else {
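      /* Inter path: run motion compensation into s->dest[] (forward prediction written first, a backward prediction averaged on top for bidirectional MBs) and DCT-code the residual. */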
01883 op_pixels_func (*op_pix)[4];
01884 qpel_mc_func (*op_qpix)[16];
01885 uint8_t *dest_y, *dest_cb, *dest_cr;
01886
01887 dest_y = s->dest[0];
01888 dest_cb = s->dest[1];
01889 dest_cr = s->dest[2];
01890
01891 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
01892 op_pix = s->dsp.put_pixels_tab;
01893 op_qpix = s->dsp.put_qpel_pixels_tab;
01894 } else {
01895 op_pix = s->dsp.put_no_rnd_pixels_tab;
01896 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
01897 }
01898
01899 if (s->mv_dir & MV_DIR_FORWARD) {
01900 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data,
01901 op_pix, op_qpix);
01902 op_pix = s->dsp.avg_pixels_tab;
01903 op_qpix = s->dsp.avg_qpel_pixels_tab;
01904 }
01905 if (s->mv_dir & MV_DIR_BACKWARD) {
01906 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data,
01907 op_pix, op_qpix);
01908 }
01909
01910 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
01911 int progressive_score, interlaced_score;
01912
01913 s->interlaced_dct = 0;
01914 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
01915 ptr_y, wrap_y,
01916 8) +
01917 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
01918 ptr_y + wrap_y * 8, wrap_y,
01919 8) - 400;
01920
01921 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
01922 progressive_score -= 400;
01923
01924 if (progressive_score > 0) {
01925 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
01926 ptr_y,
01927 wrap_y * 2, 8) +
01928 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
01929 ptr_y + wrap_y,
01930 wrap_y * 2, 8);
01931
01932 if (progressive_score > interlaced_score) {
01933 s->interlaced_dct = 1;
01934
01935 dct_offset = wrap_y;
01936 wrap_y <<= 1;
01937 if (s->chroma_format == CHROMA_422)
01938 wrap_c <<= 1;
01939 }
01940 }
01941 }
01942
01943 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
01944 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
01945 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
01946 dest_y + dct_offset, wrap_y);
01947 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
01948 dest_y + dct_offset + 8, wrap_y);
01949
01950 if (s->flags & CODEC_FLAG_GRAY) {
01951 skip_dct[4] = 1;
01952 skip_dct[5] = 1;
01953 } else {
01954 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
01955 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
01956 if (!s->chroma_y_shift) {
01957 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
01958 dest_cb + (dct_offset >> 1), wrap_c);
01959 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
01960 dest_cr + (dct_offset >> 1), wrap_c);
01961 }
01962 }
01963
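      /* If the MC residual of this MB is small (mc_mb_var below 2*qscale^2), mark 8x8 blocks whose SAD against the prediction is under 20*qscale as skippable. */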
01964 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
01965 2 * s->qscale * s->qscale) {
01966
01967 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
01968 wrap_y, 8) < 20 * s->qscale)
01969 skip_dct[0] = 1;
01970 if (s->dsp.sad[1](NULL, ptr_y + 8,
01971 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
01972 skip_dct[1] = 1;
01973 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
01974 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
01975 skip_dct[2] = 1;
01976 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
01977 dest_y + dct_offset + 8,
01978 wrap_y, 8) < 20 * s->qscale)
01979 skip_dct[3] = 1;
01980 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
01981 wrap_c, 8) < 20 * s->qscale)
01982 skip_dct[4] = 1;
01983 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
01984 wrap_c, 8) < 20 * s->qscale)
01985 skip_dct[5] = 1;
01986 if (!s->chroma_y_shift) {
01987 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
01988 dest_cb + (dct_offset >> 1),
01989 wrap_c, 8) < 20 * s->qscale)
01990 skip_dct[6] = 1;
01991 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
01992 dest_cr + (dct_offset >> 1),
01993 wrap_c, 8) < 20 * s->qscale)
01994 skip_dct[7] = 1;
01995 }
01996 }
01997 }
01998
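      /* Quantizer noise shaping: compute perceptual weights from the source pixels and keep a copy of the unquantized blocks for the later dct_quantize_refine() pass. */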
01999 if (s->quantizer_noise_shaping) {
02000 if (!skip_dct[0])
02001 get_visual_weight(weight[0], ptr_y , wrap_y);
02002 if (!skip_dct[1])
02003 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
02004 if (!skip_dct[2])
02005 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
02006 if (!skip_dct[3])
02007 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
02008 if (!skip_dct[4])
02009 get_visual_weight(weight[4], ptr_cb , wrap_c);
02010 if (!skip_dct[5])
02011 get_visual_weight(weight[5], ptr_cr , wrap_c);
02012 if (!s->chroma_y_shift) {
02013 if (!skip_dct[6])
02014 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
02015 wrap_c);
02016 if (!skip_dct[7])
02017 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
02018 wrap_c);
02019 }
02020 memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
02021 }
02022
02023
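      /* Forward DCT + quantization of every non-skipped block; coefficients are clipped if the quantizer reports an overflow, skipped blocks get last_index = -1. */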
02024 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
02025 {
02026 for (i = 0; i < mb_block_count; i++) {
02027 if (!skip_dct[i]) {
02028 int overflow;
02029 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
02030
02031
02032
02033
02034
02035 if (overflow)
02036 clip_coeffs(s, s->block[i], s->block_last_index[i]);
02037 } else
02038 s->block_last_index[i] = -1;
02039 }
02040 if (s->quantizer_noise_shaping) {
02041 for (i = 0; i < mb_block_count; i++) {
02042 if (!skip_dct[i]) {
02043 s->block_last_index[i] =
02044 dct_quantize_refine(s, s->block[i], weight[i],
02045 orig[i], i, s->qscale);
02046 }
02047 }
02048 }
02049
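      /* Optionally drop inter blocks that contain only an isolated small coefficient (luma/chroma single-coefficient elimination). */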
02050 if (s->luma_elim_threshold && !s->mb_intra)
02051 for (i = 0; i < 4; i++)
02052 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
02053 if (s->chroma_elim_threshold && !s->mb_intra)
02054 for (i = 4; i < mb_block_count; i++)
02055 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
02056
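      /* With CBP RD, give empty blocks a huge coded_score so the coded-block-pattern decision will not mark them as coded. */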
02057 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
02058 for (i = 0; i < mb_block_count; i++) {
02059 if (s->block_last_index[i] == -1)
02060 s->coded_score[i] = INT_MAX / 256;
02061 }
02062 }
02063 }
02064
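      /* Grey-only encoding of an intra MB: the chroma blocks still need a valid DC, so store the quantized mid-grey value (1024 = 128 * 8). */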
02065 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
02066 s->block_last_index[4] =
02067 s->block_last_index[5] = 0;
02068 s->block[4][0] =
02069 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
02070 }
02071
02072
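      /* Optimized dct_quantize implementations can return a wrong block_last_index with the alternate scan; recompute it by searching backwards in scan order. */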
02073 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
02074 for (i = 0; i < mb_block_count; i++) {
02075 int j;
02076 if (s->block_last_index[i] > 0) {
02077 for (j = 63; j > 0; j--) {
02078 if (s->block[i][s->intra_scantable.permutated[j]])
02079 break;
02080 }
02081 s->block_last_index[i] = j;
02082 }
02083 }
02084 }
02085
02086
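      /* Entropy-code the macroblock with the codec-specific routine. */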
02087 switch(s->codec_id){
02088 case CODEC_ID_MPEG1VIDEO:
02089 case CODEC_ID_MPEG2VIDEO:
02090 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
02091 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
02092 break;
02093 case CODEC_ID_MPEG4:
02094 if (CONFIG_MPEG4_ENCODER)
02095 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
02096 break;
02097 case CODEC_ID_MSMPEG4V2:
02098 case CODEC_ID_MSMPEG4V3:
02099 case CODEC_ID_WMV1:
02100 if (CONFIG_MSMPEG4_ENCODER)
02101 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
02102 break;
02103 case CODEC_ID_WMV2:
02104 if (CONFIG_WMV2_ENCODER)
02105 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
02106 break;
02107 case CODEC_ID_H261:
02108 if (CONFIG_H261_ENCODER)
02109 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
02110 break;
02111 case CODEC_ID_H263:
02112 case CODEC_ID_H263P:
02113 case CODEC_ID_FLV1:
02114 case CODEC_ID_RV10:
02115 case CODEC_ID_RV20:
02116 if (CONFIG_H263_ENCODER)
02117 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
02118 break;
02119 case CODEC_ID_MJPEG:
02120 case CODEC_ID_AMV:
02121 if (CONFIG_MJPEG_ENCODER)
02122 ff_mjpeg_encode_mb(s, s->block);
02123 break;
02124 default:
02125 assert(0);
02126 }
02127 }
02128
02129 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
02130 {
02131 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
02132 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
02133 }
02134
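      /* copy_context_before/after_encode save and restore the encoder state touched by a single macroblock, so several candidate codings can be tried and the best kept. */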
02135 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
02136 int i;
02137
02138 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
02139
02140
02141 d->mb_skip_run= s->mb_skip_run;
02142 for(i=0; i<3; i++)
02143 d->last_dc[i] = s->last_dc[i];
02144
02145
02146 d->mv_bits= s->mv_bits;
02147 d->i_tex_bits= s->i_tex_bits;
02148 d->p_tex_bits= s->p_tex_bits;
02149 d->i_count= s->i_count;
02150 d->f_count= s->f_count;
02151 d->b_count= s->b_count;
02152 d->skip_count= s->skip_count;
02153 d->misc_bits= s->misc_bits;
02154 d->last_bits= 0;
02155
02156 d->mb_skipped= 0;
02157 d->qscale= s->qscale;
02158 d->dquant= s->dquant;
02159
02160 d->esc3_level_length= s->esc3_level_length;
02161 }
02162
02163 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
02164 int i;
02165
02166 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
02167 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
02168
02169
02170 d->mb_skip_run= s->mb_skip_run;
02171 for(i=0; i<3; i++)
02172 d->last_dc[i] = s->last_dc[i];
02173
02174
02175 d->mv_bits= s->mv_bits;
02176 d->i_tex_bits= s->i_tex_bits;
02177 d->p_tex_bits= s->p_tex_bits;
02178 d->i_count= s->i_count;
02179 d->f_count= s->f_count;
02180 d->b_count= s->b_count;
02181 d->skip_count= s->skip_count;
02182 d->misc_bits= s->misc_bits;
02183
02184 d->mb_intra= s->mb_intra;
02185 d->mb_skipped= s->mb_skipped;
02186 d->mv_type= s->mv_type;
02187 d->mv_dir= s->mv_dir;
02188 d->pb= s->pb;
02189 if(s->data_partitioning){
02190 d->pb2= s->pb2;
02191 d->tex_pb= s->tex_pb;
02192 }
02193 d->block= s->block;
02194 for(i=0; i<8; i++)
02195 d->block_last_index[i]= s->block_last_index[i];
02196 d->interlaced_dct= s->interlaced_dct;
02197 d->qscale= s->qscale;
02198
02199 d->esc3_level_length= s->esc3_level_length;
02200 }
02201
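      /* Encode the MB with one candidate mode into a scratch bitstream, compute its rate (plus SSE distortion under FF_MB_DECISION_RD) and keep it if it beats the current best. */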
02202 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
02203 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
02204 int *dmin, int *next_block, int motion_x, int motion_y)
02205 {
02206 int score;
02207 uint8_t *dest_backup[3];
02208
02209 copy_context_before_encode(s, backup, type);
02210
02211 s->block= s->blocks[*next_block];
02212 s->pb= pb[*next_block];
02213 if(s->data_partitioning){
02214 s->pb2 = pb2 [*next_block];
02215 s->tex_pb= tex_pb[*next_block];
02216 }
02217
02218 if(*next_block){
02219 memcpy(dest_backup, s->dest, sizeof(s->dest));
02220 s->dest[0] = s->rd_scratchpad;
02221 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
02222 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
02223 assert(s->linesize >= 32);
02224 }
02225
02226 encode_mb(s, motion_x, motion_y);
02227
02228 score= put_bits_count(&s->pb);
02229 if(s->data_partitioning){
02230 score+= put_bits_count(&s->pb2);
02231 score+= put_bits_count(&s->tex_pb);
02232 }
02233
02234 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
02235 ff_MPV_decode_mb(s, s->block);
02236
02237 score *= s->lambda2;
02238 score += sse_mb(s) << FF_LAMBDA_SHIFT;
02239 }
02240
02241 if(*next_block){
02242 memcpy(s->dest, dest_backup, sizeof(s->dest));
02243 }
02244
02245 if(score<*dmin){
02246 *dmin= score;
02247 *next_block^=1;
02248
02249 copy_context_after_encode(best, s, type);
02250 }
02251 }
02252
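      /* Sum of squared errors between two rectangles; uses the DSP 16x16 / 8x8 kernels when the size matches, otherwise a scalar loop over the squares table. */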
02253 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
02254 uint32_t *sq = ff_squareTbl + 256;
02255 int acc=0;
02256 int x,y;
02257
02258 if(w==16 && h==16)
02259 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
02260 else if(w==8 && h==8)
02261 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
02262
02263 for(y=0; y<h; y++){
02264 for(x=0; x<w; x++){
02265 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
02266 }
02267 }
02268
02269 assert(acc>=0);
02270
02271 return acc;
02272 }
02273
02274 static int sse_mb(MpegEncContext *s){
02275 int w= 16;
02276 int h= 16;
02277
02278 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
02279 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
02280
02281 if(w==16 && h==16)
02282 if(s->avctx->mb_cmp == FF_CMP_NSSE){
02283 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
02284 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
02285 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
02286 }else{
02287 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
02288 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
02289 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
02290 }
02291 else
02292 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
02293 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
02294 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
02295 }
02296
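      /* Motion-estimation pre-pass (bottom-up macroblock scan), used when pre_me is enabled. */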
02297 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
02298 MpegEncContext *s= *(void**)arg;
02299
02300
02301 s->me.pre_pass=1;
02302 s->me.dia_size= s->avctx->pre_dia_size;
02303 s->first_slice_line=1;
02304 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
02305 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
02306 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
02307 }
02308 s->first_slice_line=0;
02309 }
02310
02311 s->me.pre_pass=0;
02312
02313 return 0;
02314 }
02315
02316 static int estimate_motion_thread(AVCodecContext *c, void *arg){
02317 MpegEncContext *s= *(void**)arg;
02318
02319 ff_check_alignment();
02320
02321 s->me.dia_size= s->avctx->dia_size;
02322 s->first_slice_line=1;
02323 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
02324 s->mb_x=0;
02325 ff_init_block_index(s);
02326 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
02327 s->block_index[0]+=2;
02328 s->block_index[1]+=2;
02329 s->block_index[2]+=2;
02330 s->block_index[3]+=2;
02331
02332
02333 if(s->pict_type==AV_PICTURE_TYPE_B)
02334 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
02335 else
02336 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
02337 }
02338 s->first_slice_line=0;
02339 }
02340 return 0;
02341 }
02342
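      /* Luma variance and mean of each source macroblock, used e.g. by rate control and adaptive quantization. */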
02343 static int mb_var_thread(AVCodecContext *c, void *arg){
02344 MpegEncContext *s= *(void**)arg;
02345 int mb_x, mb_y;
02346
02347 ff_check_alignment();
02348
02349 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
02350 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
02351 int xx = mb_x * 16;
02352 int yy = mb_y * 16;
02353 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
02354 int varc;
02355 int sum = s->dsp.pix_sum(pix, s->linesize);
02356
02357 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
02358
02359 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
02360 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
02361 s->me.mb_var_sum_temp += varc;
02362 }
02363 }
02364 return 0;
02365 }
02366
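      /* Finish the current slice: merge MPEG-4 data partitions, write stuffing bits and byte-align the bitstream. */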
02367 static void write_slice_end(MpegEncContext *s){
02368 if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4){
02369 if(s->partitioned_frame){
02370 ff_mpeg4_merge_partitions(s);
02371 }
02372
02373 ff_mpeg4_stuffing(&s->pb);
02374 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
02375 ff_mjpeg_encode_stuffing(&s->pb);
02376 }
02377
02378 avpriv_align_put_bits(&s->pb);
02379 flush_put_bits(&s->pb);
02380
02381 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
02382 s->misc_bits+= get_bits_diff(s);
02383 }
02384
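      /* Record per-macroblock info (bitstream offset, qscale, GOB number, MB address, MV predictor) at regular byte intervals; exported for H.263 packetization. */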
02385 static void write_mb_info(MpegEncContext *s)
02386 {
02387 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
02388 int offset = put_bits_count(&s->pb);
02389 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
02390 int gobn = s->mb_y / s->gob_index;
02391 int pred_x, pred_y;
02392 if (CONFIG_H263_ENCODER)
02393 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
02394 bytestream_put_le32(&ptr, offset);
02395 bytestream_put_byte(&ptr, s->qscale);
02396 bytestream_put_byte(&ptr, gobn);
02397 bytestream_put_le16(&ptr, mba);
02398 bytestream_put_byte(&ptr, pred_x);
02399 bytestream_put_byte(&ptr, pred_y);
02400
02401 bytestream_put_byte(&ptr, 0);
02402 bytestream_put_byte(&ptr, 0);
02403 }
02404
02405 static void update_mb_info(MpegEncContext *s, int startcode)
02406 {
02407 if (!s->mb_info)
02408 return;
02409 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
02410 s->mb_info_size += 12;
02411 s->prev_mb_info = s->last_mb_info;
02412 }
02413 if (startcode) {
02414 s->prev_mb_info = put_bits_count(&s->pb)/8;
02415
02416
02417
02418
02419 return;
02420 }
02421
02422 s->last_mb_info = put_bits_count(&s->pb)/8;
02423 if (!s->mb_info_size)
02424 s->mb_info_size += 12;
02425 write_mb_info(s);
02426 }
02427
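      /* Per-slice encoding loop: iterate over the macroblocks of this slice context, handle resync/GOB headers and packet size limits, do the mode decision and call encode_mb(). */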
02428 static int encode_thread(AVCodecContext *c, void *arg){
02429 MpegEncContext *s= *(void**)arg;
02430 int mb_x, mb_y, pdif = 0;
02431 int chr_h= 16>>s->chroma_y_shift;
02432 int i, j;
02433 MpegEncContext best_s, backup_s;
02434 uint8_t bit_buf[2][MAX_MB_BYTES];
02435 uint8_t bit_buf2[2][MAX_MB_BYTES];
02436 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
02437 PutBitContext pb[2], pb2[2], tex_pb[2];
02438
02439
02440 ff_check_alignment();
02441
02442 for(i=0; i<2; i++){
02443 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
02444 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
02445 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
02446 }
02447
02448 s->last_bits= put_bits_count(&s->pb);
02449 s->mv_bits=0;
02450 s->misc_bits=0;
02451 s->i_tex_bits=0;
02452 s->p_tex_bits=0;
02453 s->i_count=0;
02454 s->f_count=0;
02455 s->b_count=0;
02456 s->skip_count=0;
02457
02458 for(i=0; i<3; i++){
02459
02460
02461 s->last_dc[i] = 128 << s->intra_dc_precision;
02462
02463 s->current_picture.f.error[i] = 0;
02464 }
02465 if(s->codec_id==CODEC_ID_AMV){
02466 s->last_dc[0] = 128*8/13;
02467 s->last_dc[1] = 128*8/14;
02468 s->last_dc[2] = 128*8/14;
02469 }
02470 s->mb_skip_run = 0;
02471 memset(s->last_mv, 0, sizeof(s->last_mv));
02472
02473 s->last_mv_dir = 0;
02474
02475 switch(s->codec_id){
02476 case CODEC_ID_H263:
02477 case CODEC_ID_H263P:
02478 case CODEC_ID_FLV1:
02479 if (CONFIG_H263_ENCODER)
02480 s->gob_index = ff_h263_get_gob_height(s);
02481 break;
02482 case CODEC_ID_MPEG4:
02483 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
02484 ff_mpeg4_init_partitions(s);
02485 break;
02486 }
02487
02488 s->resync_mb_x=0;
02489 s->resync_mb_y=0;
02490 s->first_slice_line = 1;
02491 s->ptr_lastgob = s->pb.buf;
02492 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
02493
02494 s->mb_x=0;
02495 s->mb_y= mb_y;
02496
02497 ff_set_qscale(s, s->qscale);
02498 ff_init_block_index(s);
02499
02500 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
02501 int xy= mb_y*s->mb_stride + mb_x;
02502 int mb_type= s->mb_type[xy];
02503
02504 int dmin= INT_MAX;
02505 int dir;
02506
02507 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
02508 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
02509 return -1;
02510 }
02511 if(s->data_partitioning){
02512 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
02513 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
02514 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
02515 return -1;
02516 }
02517 }
02518
02519 s->mb_x = mb_x;
02520 s->mb_y = mb_y;
02521 ff_update_block_index(s);
02522
02523 if(CONFIG_H261_ENCODER && s->codec_id == CODEC_ID_H261){
02524 ff_h261_reorder_mb_index(s);
02525 xy= s->mb_y*s->mb_stride + s->mb_x;
02526 mb_type= s->mb_type[xy];
02527 }
02528
02529
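      /* Packetization / resync handling: decide whether a new GOB or slice starts at this MB, flush and report the previous packet, and write the codec-specific resync/slice header. */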
02530 if(s->rtp_mode){
02531 int current_packet_size, is_gob_start;
02532
02533 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
02534
02535 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
02536
02537 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
02538
02539 switch(s->codec_id){
02540 case CODEC_ID_H263:
02541 case CODEC_ID_H263P:
02542 if(!s->h263_slice_structured)
02543 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
02544 break;
02545 case CODEC_ID_MPEG2VIDEO:
02546 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
02547 case CODEC_ID_MPEG1VIDEO:
02548 if(s->mb_skip_run) is_gob_start=0;
02549 break;
02550 }
02551
02552 if(is_gob_start){
02553 if(s->start_mb_y != mb_y || mb_x!=0){
02554 write_slice_end(s);
02555
02556 if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
02557 ff_mpeg4_init_partitions(s);
02558 }
02559 }
02560
02561 assert((put_bits_count(&s->pb)&7) == 0);
02562 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
02563
02564 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
02565 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
02566 int d= 100 / s->avctx->error_rate;
02567 if(r % d == 0){
02568 current_packet_size=0;
02569 s->pb.buf_ptr= s->ptr_lastgob;
02570 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
02571 }
02572 }
02573
02574 if (s->avctx->rtp_callback){
02575 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
02576 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
02577 }
02578 update_mb_info(s, 1);
02579
02580 switch(s->codec_id){
02581 case CODEC_ID_MPEG4:
02582 if (CONFIG_MPEG4_ENCODER) {
02583 ff_mpeg4_encode_video_packet_header(s);
02584 ff_mpeg4_clean_buffers(s);
02585 }
02586 break;
02587 case CODEC_ID_MPEG1VIDEO:
02588 case CODEC_ID_MPEG2VIDEO:
02589 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
02590 ff_mpeg1_encode_slice_header(s);
02591 ff_mpeg1_clean_buffers(s);
02592 }
02593 break;
02594 case CODEC_ID_H263:
02595 case CODEC_ID_H263P:
02596 if (CONFIG_H263_ENCODER)
02597 ff_h263_encode_gob_header(s, mb_y);
02598 break;
02599 }
02600
02601 if(s->flags&CODEC_FLAG_PASS1){
02602 int bits= put_bits_count(&s->pb);
02603 s->misc_bits+= bits - s->last_bits;
02604 s->last_bits= bits;
02605 }
02606
02607 s->ptr_lastgob += current_packet_size;
02608 s->first_slice_line=1;
02609 s->resync_mb_x=mb_x;
02610 s->resync_mb_y=mb_y;
02611 }
02612 }
02613
02614 if( (s->resync_mb_x == s->mb_x)
02615 && s->resync_mb_y+1 == s->mb_y){
02616 s->first_slice_line=0;
02617 }
02618
02619 s->mb_skipped=0;
02620 s->dquant=0;
02621
02622 update_mb_info(s, 0);
02623
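      /* More than one candidate MB type survived motion estimation (or QP RD is enabled): try each candidate with encode_mb_hq() and keep the cheapest; the else branch encodes the single remaining candidate directly. */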
02624 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
02625 int next_block=0;
02626 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
02627
02628 copy_context_before_encode(&backup_s, s, -1);
02629 backup_s.pb= s->pb;
02630 best_s.data_partitioning= s->data_partitioning;
02631 best_s.partitioned_frame= s->partitioned_frame;
02632 if(s->data_partitioning){
02633 backup_s.pb2= s->pb2;
02634 backup_s.tex_pb= s->tex_pb;
02635 }
02636
02637 if(mb_type&CANDIDATE_MB_TYPE_INTER){
02638 s->mv_dir = MV_DIR_FORWARD;
02639 s->mv_type = MV_TYPE_16X16;
02640 s->mb_intra= 0;
02641 s->mv[0][0][0] = s->p_mv_table[xy][0];
02642 s->mv[0][0][1] = s->p_mv_table[xy][1];
02643 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
02644 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02645 }
02646 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
02647 s->mv_dir = MV_DIR_FORWARD;
02648 s->mv_type = MV_TYPE_FIELD;
02649 s->mb_intra= 0;
02650 for(i=0; i<2; i++){
02651 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
02652 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
02653 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
02654 }
02655 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
02656 &dmin, &next_block, 0, 0);
02657 }
02658 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
02659 s->mv_dir = MV_DIR_FORWARD;
02660 s->mv_type = MV_TYPE_16X16;
02661 s->mb_intra= 0;
02662 s->mv[0][0][0] = 0;
02663 s->mv[0][0][1] = 0;
02664 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
02665 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02666 }
02667 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
02668 s->mv_dir = MV_DIR_FORWARD;
02669 s->mv_type = MV_TYPE_8X8;
02670 s->mb_intra= 0;
02671 for(i=0; i<4; i++){
02672 s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
02673 s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
02674 }
02675 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
02676 &dmin, &next_block, 0, 0);
02677 }
02678 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
02679 s->mv_dir = MV_DIR_FORWARD;
02680 s->mv_type = MV_TYPE_16X16;
02681 s->mb_intra= 0;
02682 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
02683 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
02684 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
02685 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02686 }
02687 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
02688 s->mv_dir = MV_DIR_BACKWARD;
02689 s->mv_type = MV_TYPE_16X16;
02690 s->mb_intra= 0;
02691 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
02692 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
02693 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
02694 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
02695 }
02696 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
02697 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02698 s->mv_type = MV_TYPE_16X16;
02699 s->mb_intra= 0;
02700 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
02701 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
02702 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
02703 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
02704 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
02705 &dmin, &next_block, 0, 0);
02706 }
02707 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
02708 s->mv_dir = MV_DIR_FORWARD;
02709 s->mv_type = MV_TYPE_FIELD;
02710 s->mb_intra= 0;
02711 for(i=0; i<2; i++){
02712 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
02713 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
02714 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
02715 }
02716 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
02717 &dmin, &next_block, 0, 0);
02718 }
02719 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
02720 s->mv_dir = MV_DIR_BACKWARD;
02721 s->mv_type = MV_TYPE_FIELD;
02722 s->mb_intra= 0;
02723 for(i=0; i<2; i++){
02724 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
02725 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
02726 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
02727 }
02728 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
02729 &dmin, &next_block, 0, 0);
02730 }
02731 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
02732 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02733 s->mv_type = MV_TYPE_FIELD;
02734 s->mb_intra= 0;
02735 for(dir=0; dir<2; dir++){
02736 for(i=0; i<2; i++){
02737 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
02738 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
02739 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
02740 }
02741 }
02742 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
02743 &dmin, &next_block, 0, 0);
02744 }
02745 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
02746 s->mv_dir = 0;
02747 s->mv_type = MV_TYPE_16X16;
02748 s->mb_intra= 1;
02749 s->mv[0][0][0] = 0;
02750 s->mv[0][0][1] = 0;
02751 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
02752 &dmin, &next_block, 0, 0);
02753 if(s->h263_pred || s->h263_aic){
02754 if(best_s.mb_intra)
02755 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
02756 else
02757 ff_clean_intra_table_entries(s);
02758 }
02759 }
02760
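      /* QP RD: around the best 16x16 mode, additionally try dquant values of -1/+1/-2/+2 (B frames only try the even ones) and keep whichever quantizer gives the lower cost. */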
02761 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
02762 if(best_s.mv_type==MV_TYPE_16X16){
02763 const int last_qp= backup_s.qscale;
02764 int qpi, qp, dc[6];
02765 DCTELEM ac[6][16];
02766 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
02767 static const int dquant_tab[4]={-1,1,-2,2};
02768
02769 assert(backup_s.dquant == 0);
02770
02771
02772 s->mv_dir= best_s.mv_dir;
02773 s->mv_type = MV_TYPE_16X16;
02774 s->mb_intra= best_s.mb_intra;
02775 s->mv[0][0][0] = best_s.mv[0][0][0];
02776 s->mv[0][0][1] = best_s.mv[0][0][1];
02777 s->mv[1][0][0] = best_s.mv[1][0][0];
02778 s->mv[1][0][1] = best_s.mv[1][0][1];
02779
02780 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
02781 for(; qpi<4; qpi++){
02782 int dquant= dquant_tab[qpi];
02783 qp= last_qp + dquant;
02784 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
02785 continue;
02786 backup_s.dquant= dquant;
02787 if(s->mb_intra && s->dc_val[0]){
02788 for(i=0; i<6; i++){
02789 dc[i]= s->dc_val[0][ s->block_index[i] ];
02790 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
02791 }
02792 }
02793
02794 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
02795 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
02796 if(best_s.qscale != qp){
02797 if(s->mb_intra && s->dc_val[0]){
02798 for(i=0; i<6; i++){
02799 s->dc_val[0][ s->block_index[i] ]= dc[i];
02800 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
02801 }
02802 }
02803 }
02804 }
02805 }
02806 }
02807 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
02808 int mx= s->b_direct_mv_table[xy][0];
02809 int my= s->b_direct_mv_table[xy][1];
02810
02811 backup_s.dquant = 0;
02812 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
02813 s->mb_intra= 0;
02814 ff_mpeg4_set_direct_mv(s, mx, my);
02815 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
02816 &dmin, &next_block, mx, my);
02817 }
02818 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
02819 backup_s.dquant = 0;
02820 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
02821 s->mb_intra= 0;
02822 ff_mpeg4_set_direct_mv(s, 0, 0);
02823 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
02824 &dmin, &next_block, 0, 0);
02825 }
02826 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
02827 int coded=0;
02828 for(i=0; i<6; i++)
02829 coded |= s->block_last_index[i];
02830 if(coded){
02831 int mx,my;
02832 memcpy(s->mv, best_s.mv, sizeof(s->mv));
02833 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
02834 mx=my=0;
02835 ff_mpeg4_set_direct_mv(s, mx, my);
02836 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
02837 mx= s->mv[1][0][0];
02838 my= s->mv[1][0][1];
02839 }else{
02840 mx= s->mv[0][0][0];
02841 my= s->mv[0][0][1];
02842 }
02843
02844 s->mv_dir= best_s.mv_dir;
02845 s->mv_type = best_s.mv_type;
02846 s->mb_intra= 0;
02847
02848
02849
02850
02851 backup_s.dquant= 0;
02852 s->skipdct=1;
02853 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
02854 &dmin, &next_block, mx, my);
02855 s->skipdct=0;
02856 }
02857 }
02858
02859 s->current_picture.f.qscale_table[xy] = best_s.qscale;
02860
02861 copy_context_after_encode(s, &best_s, -1);
02862
02863 pb_bits_count= put_bits_count(&s->pb);
02864 flush_put_bits(&s->pb);
02865 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
02866 s->pb= backup_s.pb;
02867
02868 if(s->data_partitioning){
02869 pb2_bits_count= put_bits_count(&s->pb2);
02870 flush_put_bits(&s->pb2);
02871 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
02872 s->pb2= backup_s.pb2;
02873
02874 tex_pb_bits_count= put_bits_count(&s->tex_pb);
02875 flush_put_bits(&s->tex_pb);
02876 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
02877 s->tex_pb= backup_s.tex_pb;
02878 }
02879 s->last_bits= put_bits_count(&s->pb);
02880
02881 if (CONFIG_H263_ENCODER &&
02882 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
02883 ff_h263_update_motion_val(s);
02884
02885 if(next_block==0){
02886 s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
02887 s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
02888 s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
02889 }
02890
02891 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
02892 ff_MPV_decode_mb(s, s->block);
02893 } else {
02894 int motion_x = 0, motion_y = 0;
02895 s->mv_type=MV_TYPE_16X16;
02896
02897
02898 switch(mb_type){
02899 case CANDIDATE_MB_TYPE_INTRA:
02900 s->mv_dir = 0;
02901 s->mb_intra= 1;
02902 motion_x= s->mv[0][0][0] = 0;
02903 motion_y= s->mv[0][0][1] = 0;
02904 break;
02905 case CANDIDATE_MB_TYPE_INTER:
02906 s->mv_dir = MV_DIR_FORWARD;
02907 s->mb_intra= 0;
02908 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
02909 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
02910 break;
02911 case CANDIDATE_MB_TYPE_INTER_I:
02912 s->mv_dir = MV_DIR_FORWARD;
02913 s->mv_type = MV_TYPE_FIELD;
02914 s->mb_intra= 0;
02915 for(i=0; i<2; i++){
02916 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
02917 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
02918 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
02919 }
02920 break;
02921 case CANDIDATE_MB_TYPE_INTER4V:
02922 s->mv_dir = MV_DIR_FORWARD;
02923 s->mv_type = MV_TYPE_8X8;
02924 s->mb_intra= 0;
02925 for(i=0; i<4; i++){
02926 s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
02927 s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
02928 }
02929 break;
02930 case CANDIDATE_MB_TYPE_DIRECT:
02931 if (CONFIG_MPEG4_ENCODER) {
02932 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
02933 s->mb_intra= 0;
02934 motion_x=s->b_direct_mv_table[xy][0];
02935 motion_y=s->b_direct_mv_table[xy][1];
02936 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
02937 }
02938 break;
02939 case CANDIDATE_MB_TYPE_DIRECT0:
02940 if (CONFIG_MPEG4_ENCODER) {
02941 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
02942 s->mb_intra= 0;
02943 ff_mpeg4_set_direct_mv(s, 0, 0);
02944 }
02945 break;
02946 case CANDIDATE_MB_TYPE_BIDIR:
02947 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02948 s->mb_intra= 0;
02949 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
02950 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
02951 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
02952 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
02953 break;
02954 case CANDIDATE_MB_TYPE_BACKWARD:
02955 s->mv_dir = MV_DIR_BACKWARD;
02956 s->mb_intra= 0;
02957 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
02958 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
02959 break;
02960 case CANDIDATE_MB_TYPE_FORWARD:
02961 s->mv_dir = MV_DIR_FORWARD;
02962 s->mb_intra= 0;
02963 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
02964 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
02965
02966 break;
02967 case CANDIDATE_MB_TYPE_FORWARD_I:
02968 s->mv_dir = MV_DIR_FORWARD;
02969 s->mv_type = MV_TYPE_FIELD;
02970 s->mb_intra= 0;
02971 for(i=0; i<2; i++){
02972 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
02973 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
02974 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
02975 }
02976 break;
02977 case CANDIDATE_MB_TYPE_BACKWARD_I:
02978 s->mv_dir = MV_DIR_BACKWARD;
02979 s->mv_type = MV_TYPE_FIELD;
02980 s->mb_intra= 0;
02981 for(i=0; i<2; i++){
02982 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
02983 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
02984 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
02985 }
02986 break;
02987 case CANDIDATE_MB_TYPE_BIDIR_I:
02988 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02989 s->mv_type = MV_TYPE_FIELD;
02990 s->mb_intra= 0;
02991 for(dir=0; dir<2; dir++){
02992 for(i=0; i<2; i++){
02993 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
02994 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
02995 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
02996 }
02997 }
02998 break;
02999 default:
03000 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
03001 }
03002
03003 encode_mb(s, motion_x, motion_y);
03004
03005
03006 s->last_mv_dir = s->mv_dir;
03007
03008 if (CONFIG_H263_ENCODER &&
03009 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
03010 ff_h263_update_motion_val(s);
03011
03012 ff_MPV_decode_mb(s, s->block);
03013 }
03014
03015
03016 if(s->mb_intra ){
03017 s->p_mv_table[xy][0]=0;
03018 s->p_mv_table[xy][1]=0;
03019 }
03020
03021 if(s->flags&CODEC_FLAG_PSNR){
03022 int w= 16;
03023 int h= 16;
03024
03025 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
03026 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
03027
03028 s->current_picture.f.error[0] += sse(
03029 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
03030 s->dest[0], w, h, s->linesize);
03031 s->current_picture.f.error[1] += sse(
03032 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
03033 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
03034 s->current_picture.f.error[2] += sse(
03035 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
03036 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
03037 }
03038 if(s->loop_filter){
03039 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
03040 ff_h263_loop_filter(s);
03041 }
03042
03043 }
03044 }
03045
03046
03047 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
03048 ff_msmpeg4_encode_ext_header(s);
03049
03050 write_slice_end(s);
03051
03052
03053 if (s->avctx->rtp_callback) {
03054 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
03055 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
03056
03057 emms_c();
03058 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
03059 }
03060
03061 return 0;
03062 }
03063
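      /* Merge per-slice-context statistics back into the main context after motion estimation and after encoding. */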
03064 #define MERGE(field) dst->field += src->field; src->field=0
03065 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
03066 MERGE(me.scene_change_score);
03067 MERGE(me.mc_mb_var_sum_temp);
03068 MERGE(me.mb_var_sum_temp);
03069 }
03070
03071 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
03072 int i;
03073
03074 MERGE(dct_count[0]);
03075 MERGE(dct_count[1]);
03076 MERGE(mv_bits);
03077 MERGE(i_tex_bits);
03078 MERGE(p_tex_bits);
03079 MERGE(i_count);
03080 MERGE(f_count);
03081 MERGE(b_count);
03082 MERGE(skip_count);
03083 MERGE(misc_bits);
03084 MERGE(error_count);
03085 MERGE(padding_bug_score);
03086 MERGE(current_picture.f.error[0]);
03087 MERGE(current_picture.f.error[1]);
03088 MERGE(current_picture.f.error[2]);
03089
03090 if(dst->avctx->noise_reduction){
03091 for(i=0; i<64; i++){
03092 MERGE(dct_error_sum[0][i]);
03093 MERGE(dct_error_sum[1][i]);
03094 }
03095 }
03096
03097 assert(put_bits_count(&src->pb) % 8 ==0);
03098 assert(put_bits_count(&dst->pb) % 8 ==0);
03099 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
03100 flush_put_bits(&dst->pb);
03101 }
03102
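      /* Choose the frame-level quality: a queued next_lambda if present, otherwise the rate-control estimate; then clean up per-MB qscales for codecs with restricted dquant. */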
03103 static int estimate_qp(MpegEncContext *s, int dry_run){
03104 if (s->next_lambda){
03105 s->current_picture_ptr->f.quality =
03106 s->current_picture.f.quality = s->next_lambda;
03107 if(!dry_run) s->next_lambda= 0;
03108 } else if (!s->fixed_qscale) {
03109 s->current_picture_ptr->f.quality =
03110 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
03111 if (s->current_picture.f.quality < 0)
03112 return -1;
03113 }
03114
03115 if(s->adaptive_quant){
03116 switch(s->codec_id){
03117 case CODEC_ID_MPEG4:
03118 if (CONFIG_MPEG4_ENCODER)
03119 ff_clean_mpeg4_qscales(s);
03120 break;
03121 case CODEC_ID_H263:
03122 case CODEC_ID_H263P:
03123 case CODEC_ID_FLV1:
03124 if (CONFIG_H263_ENCODER)
03125 ff_clean_h263_qscales(s);
03126 break;
03127 default:
03128 ff_init_qscale_tab(s);
03129 }
03130
03131 s->lambda= s->lambda_table[0];
03132
03133 }else
03134 s->lambda = s->current_picture.f.quality;
03135
03136 update_qscale(s);
03137 return 0;
03138 }
03139
03140
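      /* Temporal distances between the current picture and the surrounding reference pictures (pp_time/pb_time), needed e.g. for MPEG-4 direct mode. */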
03141 static void set_frame_distances(MpegEncContext * s){
03142 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
03143 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
03144
03145 if(s->pict_type==AV_PICTURE_TYPE_B){
03146 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
03147 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
03148 }else{
03149 s->pp_time= s->time - s->last_non_b_time;
03150 s->last_non_b_time= s->time;
03151 assert(s->picture_number==0 || s->pp_time > 0);
03152 }
03153 }
03154
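      /* Top-level per-picture encoding: rounding control, quality estimation, motion estimation, scene-change handling, f_code selection, matrix/header setup and the threaded encode pass. */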
03155 static int encode_picture(MpegEncContext *s, int picture_number)
03156 {
03157 int i;
03158 int bits;
03159 int context_count = s->slice_context_count;
03160
03161 s->picture_number = picture_number;
03162
03163
03164 s->me.mb_var_sum_temp =
03165 s->me.mc_mb_var_sum_temp = 0;
03166
03167
03168
03169 if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
03170 set_frame_distances(s);
03171 if(CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4)
03172 ff_set_mpeg4_time(s);
03173
03174 s->me.scene_change_score=0;
03175
03176
03177
03178 if(s->pict_type==AV_PICTURE_TYPE_I){
03179 if(s->msmpeg4_version >= 3) s->no_rounding=1;
03180 else s->no_rounding=0;
03181 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
03182 if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
03183 s->no_rounding ^= 1;
03184 }
03185
03186 if(s->flags & CODEC_FLAG_PASS2){
03187 if (estimate_qp(s,1) < 0)
03188 return -1;
03189 ff_get_2pass_fcode(s);
03190 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
03191 if(s->pict_type==AV_PICTURE_TYPE_B)
03192 s->lambda= s->last_lambda_for[s->pict_type];
03193 else
03194 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
03195 update_qscale(s);
03196 }
03197
03198 if(s->codec_id != CODEC_ID_AMV){
03199 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
03200 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
03201 s->q_chroma_intra_matrix = s->q_intra_matrix;
03202 s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
03203 }
03204
03205 s->mb_intra=0;
03206 for(i=1; i<context_count; i++){
03207 ff_update_duplicate_context(s->thread_context[i], s);
03208 }
03209
03210 if(ff_init_me(s)<0)
03211 return -1;
03212
03213
03214 if(s->pict_type != AV_PICTURE_TYPE_I){
03215 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
03216 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
03217 if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
03218 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
03219 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
03220 }
03221 }
03222
03223 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
03224 }else {
03225
03226 for(i=0; i<s->mb_stride*s->mb_height; i++)
03227 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
03228
03229 if(!s->fixed_qscale){
03230
03231 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
03232 }
03233 }
03234 for(i=1; i<context_count; i++){
03235 merge_context_after_me(s, s->thread_context[i]);
03236 }
03237 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
03238 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
03239 emms_c();
03240
03241 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
03242 s->pict_type= AV_PICTURE_TYPE_I;
03243 for(i=0; i<s->mb_stride*s->mb_height; i++)
03244 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
03245
03246 }
03247
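      /* Select f_code/b_code from the motion-vector statistics and limit vectors that exceed the range representable with the chosen codes. */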
03248 if(!s->umvplus){
03249 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
03250 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
03251
03252 if(s->flags & CODEC_FLAG_INTERLACED_ME){
03253 int a,b;
03254 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I);
03255 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
03256 s->f_code= FFMAX3(s->f_code, a, b);
03257 }
03258
03259 ff_fix_long_p_mvs(s);
03260 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
03261 if(s->flags & CODEC_FLAG_INTERLACED_ME){
03262 int j;
03263 for(i=0; i<2; i++){
03264 for(j=0; j<2; j++)
03265 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
03266 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
03267 }
03268 }
03269 }
03270
03271 if(s->pict_type==AV_PICTURE_TYPE_B){
03272 int a, b;
03273
03274 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
03275 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
03276 s->f_code = FFMAX(a, b);
03277
03278 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
03279 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
03280 s->b_code = FFMAX(a, b);
03281
03282 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
03283 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
03284 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
03285 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
03286 if(s->flags & CODEC_FLAG_INTERLACED_ME){
03287 int dir, j;
03288 for(dir=0; dir<2; dir++){
03289 for(i=0; i<2; i++){
03290 for(j=0; j<2; j++){
03291 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
03292 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
03293 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
03294 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
03295 }
03296 }
03297 }
03298 }
03299 }
03300 }
03301
03302 if (estimate_qp(s, 0) < 0)
03303 return -1;
03304
03305 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
03306 s->qscale= 3;
03307
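      /* MJPEG (and AMV below) carry the rate entirely in the quantization matrix: bake qscale into the intra matrix, then encode with a fixed qscale of 8. */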
03308 if (s->out_format == FMT_MJPEG) {
03309
03310 for(i=1;i<64;i++){
03311 int j= s->dsp.idct_permutation[i];
03312
03313 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
03314 }
03315 s->y_dc_scale_table=
03316 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
03317 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
03318 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
03319 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
03320 s->qscale= 8;
03321 }
03322 if(s->codec_id == CODEC_ID_AMV){
03323 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
03324 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
03325 for(i=1;i<64;i++){
03326 int j= s->dsp.idct_permutation[ff_zigzag_direct[i]];
03327
03328 s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
03329 s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
03330 }
03331 s->y_dc_scale_table= y;
03332 s->c_dc_scale_table= c;
03333 s->intra_matrix[0] = 13;
03334 s->chroma_intra_matrix[0] = 14;
03335 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
03336 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
03337 ff_convert_matrix(&s->dsp, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
03338 s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
03339 s->qscale= 8;
03340 }
03341
03342
03343 s->current_picture_ptr->f.key_frame =
03344 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
03345 s->current_picture_ptr->f.pict_type =
03346 s->current_picture.f.pict_type = s->pict_type;
03347
03348 if (s->current_picture.f.key_frame)
03349 s->picture_in_gop_number=0;
03350
03351 s->last_bits= put_bits_count(&s->pb);
03352 switch(s->out_format) {
03353 case FMT_MJPEG:
03354 if (CONFIG_MJPEG_ENCODER)
03355 ff_mjpeg_encode_picture_header(s);
03356 break;
03357 case FMT_H261:
03358 if (CONFIG_H261_ENCODER)
03359 ff_h261_encode_picture_header(s, picture_number);
03360 break;
03361 case FMT_H263:
03362 if (CONFIG_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2)
03363 ff_wmv2_encode_picture_header(s, picture_number);
03364 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
03365 ff_msmpeg4_encode_picture_header(s, picture_number);
03366 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
03367 ff_mpeg4_encode_picture_header(s, picture_number);
03368 else if (CONFIG_RV10_ENCODER && s->codec_id == CODEC_ID_RV10)
03369 ff_rv10_encode_picture_header(s, picture_number);
03370 else if (CONFIG_RV20_ENCODER && s->codec_id == CODEC_ID_RV20)
03371 ff_rv20_encode_picture_header(s, picture_number);
03372 else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
03373 ff_flv_encode_picture_header(s, picture_number);
03374 else if (CONFIG_H263_ENCODER)
03375 ff_h263_encode_picture_header(s, picture_number);
03376 break;
03377 case FMT_MPEG1:
03378 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
03379 ff_mpeg1_encode_picture_header(s, picture_number);
03380 break;
03381 case FMT_H264:
03382 break;
03383 default:
03384 assert(0);
03385 }
03386 bits= put_bits_count(&s->pb);
03387 s->header_bits= bits - s->last_bits;
03388
03389 for(i=1; i<context_count; i++){
03390 update_duplicate_context_after_me(s->thread_context[i], s);
03391 }
03392 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
03393 for(i=1; i<context_count; i++){
03394 merge_context_after_encode(s, s->thread_context[i]);
03395 }
03396 emms_c();
03397 return 0;
03398 }
03399
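      /* DCT-domain noise reduction: accumulate per-coefficient error statistics and pull each coefficient towards zero by the running dct_offset. */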
03400 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
03401 const int intra= s->mb_intra;
03402 int i;
03403
03404 s->dct_count[intra]++;
03405
03406 for(i=0; i<64; i++){
03407 int level= block[i];
03408
03409 if(level){
03410 if(level>0){
03411 s->dct_error_sum[intra][i] += level;
03412 level -= s->dct_offset[intra][i];
03413 if(level<0) level=0;
03414 }else{
03415 s->dct_error_sum[intra][i] -= level;
03416 level += s->dct_offset[intra][i];
03417 if(level>0) level=0;
03418 }
03419 block[i]= level;
03420 }
03421 }
03422 }
03423
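      /* Trellis quantization: for each coefficient position, evaluate candidate levels and runs with a Viterbi-style search over survivor states, minimizing distortion + lambda * rate. */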
03424 static int dct_quantize_trellis_c(MpegEncContext *s,
03425 DCTELEM *block, int n,
03426 int qscale, int *overflow){
03427 const int *qmat;
03428 const uint8_t *scantable= s->intra_scantable.scantable;
03429 const uint8_t *perm_scantable= s->intra_scantable.permutated;
03430 int max=0;
03431 unsigned int threshold1, threshold2;
03432 int bias=0;
03433 int run_tab[65];
03434 int level_tab[65];
03435 int score_tab[65];
03436 int survivor[65];
03437 int survivor_count;
03438 int last_run=0;
03439 int last_level=0;
03440 int last_score= 0;
03441 int last_i;
03442 int coeff[2][64];
03443 int coeff_count[64];
03444 int qmul, qadd, start_i, last_non_zero, i, dc;
03445 const int esc_length= s->ac_esc_length;
03446 uint8_t * length;
03447 uint8_t * last_length;
03448 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
03449
03450 s->dsp.fdct (block);
03451
03452 if(s->dct_error_sum)
03453 s->denoise_dct(s, block);
03454 qmul= qscale*16;
03455 qadd= ((qscale-1)|1)*8;
03456
03457 if (s->mb_intra) {
03458 int q;
03459 if (!s->h263_aic) {
03460 if (n < 4)
03461 q = s->y_dc_scale;
03462 else
03463 q = s->c_dc_scale;
03464 q = q << 3;
03465 } else{
03466
03467 q = 1 << 3;
03468 qadd=0;
03469 }
03470
03471
03472 block[0] = (block[0] + (q >> 1)) / q;
03473 start_i = 1;
03474 last_non_zero = 0;
03475 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
03476 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
03477 bias= 1<<(QMAT_SHIFT-1);
03478 length = s->intra_ac_vlc_length;
03479 last_length= s->intra_ac_vlc_last_length;
03480 } else {
03481 start_i = 0;
03482 last_non_zero = -1;
03483 qmat = s->q_inter_matrix[qscale];
03484 length = s->inter_ac_vlc_length;
03485 last_length= s->inter_ac_vlc_last_length;
03486 }
03487 last_i= start_i;
03488
03489 threshold1= (1<<QMAT_SHIFT) - bias - 1;
03490 threshold2= (threshold1<<1);
03491
03492 for(i=63; i>=start_i; i--) {
03493 const int j = scantable[i];
03494 int level = block[j] * qmat[j];
03495
03496 if(((unsigned)(level+threshold1))>threshold2){
03497 last_non_zero = i;
03498 break;
03499 }
03500 }
03501
03502 for(i=start_i; i<=last_non_zero; i++) {
03503 const int j = scantable[i];
03504 int level = block[j] * qmat[j];
03505
03506
03507
03508 if(((unsigned)(level+threshold1))>threshold2){
03509 if(level>0){
03510 level= (bias + level)>>QMAT_SHIFT;
03511 coeff[0][i]= level;
03512 coeff[1][i]= level-1;
03513
03514 }else{
03515 level= (bias - level)>>QMAT_SHIFT;
03516 coeff[0][i]= -level;
03517 coeff[1][i]= -level+1;
03518
03519 }
03520 coeff_count[i]= FFMIN(level, 2);
03521 assert(coeff_count[i]);
03522 max |=level;
03523 }else{
03524 coeff[0][i]= (level>>31)|1;
03525 coeff_count[i]= 1;
03526 }
03527 }
03528
03529 *overflow= s->max_qcoeff < max;
03530
03531 if(last_non_zero < start_i){
03532 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
03533 return last_non_zero;
03534 }
03535
03536 score_tab[start_i]= 0;
03537 survivor[0]= start_i;
03538 survivor_count= 1;
03539
03540 for(i=start_i; i<=last_non_zero; i++){
03541 int level_index, j, zero_distortion;
03542 int dct_coeff= FFABS(block[ scantable[i] ]);
03543 int best_score=256*256*256*120;
03544
03545 if (s->dsp.fdct == ff_fdct_ifast)
03546 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
03547 zero_distortion= dct_coeff*dct_coeff;
03548
03549 for(level_index=0; level_index < coeff_count[i]; level_index++){
03550 int distortion;
03551 int level= coeff[level_index][i];
03552 const int alevel= FFABS(level);
03553 int unquant_coeff;
03554
03555 assert(level);
03556
03557 if(s->out_format == FMT_H263){
03558 unquant_coeff= alevel*qmul + qadd;
03559 }else{
03560 j= s->dsp.idct_permutation[ scantable[i] ];
03561 if(s->mb_intra){
03562 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
03563 unquant_coeff = (unquant_coeff - 1) | 1;
03564 }else{
03565 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
03566 unquant_coeff = (unquant_coeff - 1) | 1;
03567 }
03568 unquant_coeff<<= 3;
03569 }
03570
03571 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
03572 level+=64;
03573 if((level&(~127)) == 0){
03574 for(j=survivor_count-1; j>=0; j--){
03575 int run= i - survivor[j];
03576 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
03577 score += score_tab[i-run];
03578
03579 if(score < best_score){
03580 best_score= score;
03581 run_tab[i+1]= run;
03582 level_tab[i+1]= level-64;
03583 }
03584 }
03585
03586 if(s->out_format == FMT_H263){
03587 for(j=survivor_count-1; j>=0; j--){
03588 int run= i - survivor[j];
03589 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
03590 score += score_tab[i-run];
03591 if(score < last_score){
03592 last_score= score;
03593 last_run= run;
03594 last_level= level-64;
03595 last_i= i+1;
03596 }
03597 }
03598 }
03599 }else{
03600 distortion += esc_length*lambda;
03601 for(j=survivor_count-1; j>=0; j--){
03602 int run= i - survivor[j];
03603 int score= distortion + score_tab[i-run];
03604
03605 if(score < best_score){
03606 best_score= score;
03607 run_tab[i+1]= run;
03608 level_tab[i+1]= level-64;
03609 }
03610 }
03611
03612 if(s->out_format == FMT_H263){
03613 for(j=survivor_count-1; j>=0; j--){
03614 int run= i - survivor[j];
03615 int score= distortion + score_tab[i-run];
03616 if(score < last_score){
03617 last_score= score;
03618 last_run= run;
03619 last_level= level-64;
03620 last_i= i+1;
03621 }
03622 }
03623 }
03624 }
03625 }
03626
03627 score_tab[i+1]= best_score;
03628
03629
03630 if(last_non_zero <= 27){
03631 for(; survivor_count; survivor_count--){
03632 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
03633 break;
03634 }
03635 }else{
03636 for(; survivor_count; survivor_count--){
03637 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
03638 break;
03639 }
03640 }
03641
03642 survivor[ survivor_count++ ]= i+1;
03643 }
03644
03645 if(s->out_format != FMT_H263){
03646 last_score= 256*256*256*120;
03647 for(i= survivor[0]; i<=last_non_zero + 1; i++){
03648 int score= score_tab[i];
03649 if(i) score += lambda*2;
03650
03651 if(score < last_score){
03652 last_score= score;
03653 last_i= i;
03654 last_level= level_tab[i];
03655 last_run= run_tab[i];
03656 }
03657 }
03658 }
03659
03660 s->coded_score[n] = last_score;
03661
03662 dc= FFABS(block[0]);
03663 last_non_zero= last_i - 1;
03664 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
03665
03666 if(last_non_zero < start_i)
03667 return last_non_zero;
03668
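    /* Only the DC coefficient is left: pick its level by comparing each candidate's
     * distortion + rate against dropping the coefficient altogether. */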
03669 if(last_non_zero == 0 && start_i == 0){
03670 int best_level= 0;
03671 int best_score= dc * dc;
03672
03673 for(i=0; i<coeff_count[0]; i++){
03674 int level= coeff[i][0];
03675 int alevel= FFABS(level);
03676 int unquant_coeff, score, distortion;
03677
03678 if(s->out_format == FMT_H263){
03679 unquant_coeff= (alevel*qmul + qadd)>>3;
03680 }else{
03681 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
03682 unquant_coeff = (unquant_coeff - 1) | 1;
03683 }
03684 unquant_coeff = (unquant_coeff + 4) >> 3;
03685 unquant_coeff<<= 3 + 3;
03686
03687 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
03688 level+=64;
03689 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
03690 else score= distortion + esc_length*lambda;
03691
03692 if(score < best_score){
03693 best_score= score;
03694 best_level= level - 64;
03695 }
03696 }
03697 block[0]= best_level;
03698 s->coded_score[n] = best_score - dc*dc;
03699 if(best_level == 0) return -1;
03700 else return last_non_zero;
03701 }
03702
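    /* Backtrack through run_tab/level_tab and write the chosen levels back into the
     * block in coded scan order. */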
03703 i= last_i;
03704 assert(last_level);
03705
03706 block[ perm_scantable[last_non_zero] ]= last_level;
03707 i -= last_run + 1;
03708
03709 for(; i>start_i; i -= run_tab[i] + 1){
03710 block[ perm_scantable[i-1] ]= level_tab[i];
03711 }
03712
03713 return last_non_zero;
03714 }
03715
03716
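/* 8x8 DCT basis functions, scaled by (1 << BASIS_SHIFT) and stored under the IDCT
 * permutation; the refinement pass below uses them to update the spatial-domain
 * error incrementally when a single coefficient changes. */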
03717 static int16_t basis[64][64];
03718
03719 static void build_basis(uint8_t *perm){
03720 int i, j, x, y;
03721 emms_c();
03722 for(i=0; i<8; i++){
03723 for(j=0; j<8; j++){
03724 for(y=0; y<8; y++){
03725 for(x=0; x<8; x++){
03726 double s= 0.25*(1<<BASIS_SHIFT);
03727 int index= 8*i + j;
03728 int perm_index= perm[index];
03729 if(i==0) s*= sqrt(0.5);
03730 if(j==0) s*= sqrt(0.5);
03731 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
03732 }
03733 }
03734 }
03735 }
03736 }
03737
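/* Rate-distortion refinement ("quantizer noise shaping"): starting from the already
 * quantized block, repeatedly try +-1 changes to individual coefficients and keep
 * whichever change lowers distortion + lambda*rate, until no change helps. */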
03738 static int dct_quantize_refine(MpegEncContext *s,
03739 DCTELEM *block, int16_t *weight, DCTELEM *orig,
03740 int n, int qscale){
03741 int16_t rem[64];
03742 LOCAL_ALIGNED_16(DCTELEM, d1, [64]);
03743 const uint8_t *scantable= s->intra_scantable.scantable;
03744 const uint8_t *perm_scantable= s->intra_scantable.permutated;
03745
03746
03747 int run_tab[65];
03748 int prev_run=0;
03749 int prev_level=0;
03750 int qmul, qadd, start_i, last_non_zero, i, dc;
03751 uint8_t * length;
03752 uint8_t * last_length;
03753 int lambda;
03754 int rle_index, run, q = 1, sum;
03755 #ifdef REFINE_STATS
03756 static int count=0;
03757 static int after_last=0;
03758 static int to_zero=0;
03759 static int from_zero=0;
03760 static int raise=0;
03761 static int lower=0;
03762 static int messed_sign=0;
03763 #endif
03764
03765 if(basis[0][0] == 0)
03766 build_basis(s->dsp.idct_permutation);
03767
03768 qmul= qscale*2;
03769 qadd= (qscale-1)|1;
03770 if (s->mb_intra) {
03771 if (!s->h263_aic) {
03772 if (n < 4)
03773 q = s->y_dc_scale;
03774 else
03775 q = s->c_dc_scale;
03776 } else{
03777
03778 q = 1;
03779 qadd=0;
03780 }
03781 q <<= RECON_SHIFT-3;
03782
03783 dc= block[0]*q;
03784
03785 start_i = 1;
03786
03787
03788 length = s->intra_ac_vlc_length;
03789 last_length= s->intra_ac_vlc_last_length;
03790 } else {
03791 dc= 0;
03792 start_i = 0;
03793 length = s->inter_ac_vlc_length;
03794 last_length= s->inter_ac_vlc_last_length;
03795 }
03796 last_non_zero = s->block_last_index[n];
03797
03798 #ifdef REFINE_STATS
03799 {START_TIMER
03800 #endif
03801 dc += (1<<(RECON_SHIFT-1));
03802 for(i=0; i<64; i++){
03803 rem[i]= dc - (orig[i]<<RECON_SHIFT);
03804 }
03805 #ifdef REFINE_STATS
03806 STOP_TIMER("memset rem[]")}
03807 #endif
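    /* Map the per-coefficient error weights into a small range (asserted 1..63 below)
     * and accumulate their energy to scale the lambda used for the rate term. */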
03808 sum=0;
03809 for(i=0; i<64; i++){
03810 int one= 36;
03811 int qns=4;
03812 int w;
03813
03814 w= FFABS(weight[i]) + qns*one;
03815 w= 15 + (48*qns*one + w/2)/w;
03816
03817 weight[i] = w;
03818
03819
03820 assert(w>0);
03821 assert(w<(1<<6));
03822 sum += w*w;
03823 }
03824 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
03825 #ifdef REFINE_STATS
03826 {START_TIMER
03827 #endif
03828 run=0;
03829 rle_index=0;
03830 for(i=start_i; i<=last_non_zero; i++){
03831 int j= perm_scantable[i];
03832 const int level= block[j];
03833 int coeff;
03834
03835 if(level){
03836 if(level<0) coeff= qmul*level - qadd;
03837 else coeff= qmul*level + qadd;
03838 run_tab[rle_index++]=run;
03839 run=0;
03840
03841 s->dsp.add_8x8basis(rem, basis[j], coeff);
03842 }else{
03843 run++;
03844 }
03845 }
03846 #ifdef REFINE_STATS
03847 if(last_non_zero>0){
03848 STOP_TIMER("init rem[]")
03849 }
03850 }
03851
03852 {START_TIMER
03853 #endif
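    /* Main refinement loop: each pass scores every candidate single-coefficient change
     * and applies the best one; the loop exits when no change improves the score. */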
03854 for(;;){
03855 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
03856 int best_coeff=0;
03857 int best_change=0;
03858 int run2, best_unquant_change=0, analyze_gradient;
03859 #ifdef REFINE_STATS
03860 {START_TIMER
03861 #endif
03862 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
03863
03864 if(analyze_gradient){
03865 #ifdef REFINE_STATS
03866 {START_TIMER
03867 #endif
03868 for(i=0; i<64; i++){
03869 int w= weight[i];
03870
03871 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
03872 }
03873 #ifdef REFINE_STATS
03874 STOP_TIMER("rem*w*w")}
03875 {START_TIMER
03876 #endif
03877 s->dsp.fdct(d1);
03878 #ifdef REFINE_STATS
03879 STOP_TIMER("dct")}
03880 #endif
03881 }
03882
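        /* Intra blocks: also try +-1 on the DC coefficient. Its rate is not modelled
         * here (DC is coded separately), so only the distortion change is scored. */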
03883 if(start_i){
03884 const int level= block[0];
03885 int change, old_coeff;
03886
03887 assert(s->mb_intra);
03888
03889 old_coeff= q*level;
03890
03891 for(change=-1; change<=1; change+=2){
03892 int new_level= level + change;
03893 int score, new_coeff;
03894
03895 new_coeff= q*new_level;
03896 if(new_coeff >= 2048 || new_coeff < 0)
03897 continue;
03898
03899 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
03900 if(score<best_score){
03901 best_score= score;
03902 best_coeff= 0;
03903 best_change= change;
03904 best_unquant_change= new_coeff - old_coeff;
03905 }
03906 }
03907 }
03908
03909 run=0;
03910 rle_index=0;
03911 run2= run_tab[rle_index++];
03912 prev_level=0;
03913 prev_run=0;
03914
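        /* Try +-1 on every AC position. The rate delta comes from the run/level VLC
         * length tables, including the cost of splitting or merging runs when a zero
         * coefficient becomes non-zero or vice versa. */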
03915 for(i=start_i; i<64; i++){
03916 int j= perm_scantable[i];
03917 const int level= block[j];
03918 int change, old_coeff;
03919
03920 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
03921 break;
03922
03923 if(level){
03924 if(level<0) old_coeff= qmul*level - qadd;
03925 else old_coeff= qmul*level + qadd;
03926 run2= run_tab[rle_index++];
03927 }else{
03928 old_coeff=0;
03929 run2--;
03930 assert(run2>=0 || i >= last_non_zero );
03931 }
03932
03933 for(change=-1; change<=1; change+=2){
03934 int new_level= level + change;
03935 int score, new_coeff, unquant_change;
03936
03937 score=0;
03938 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
03939 continue;
03940
03941 if(new_level){
03942 if(new_level<0) new_coeff= qmul*new_level - qadd;
03943 else new_coeff= qmul*new_level + qadd;
03944 if(new_coeff >= 2048 || new_coeff <= -2048)
03945 continue;
03946
03947
03948 if(level){
03949 if(level < 63 && level > -63){
03950 if(i < last_non_zero)
03951 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
03952 - length[UNI_AC_ENC_INDEX(run, level+64)];
03953 else
03954 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
03955 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
03956 }
03957 }else{
03958 assert(FFABS(new_level)==1);
03959
03960 if(analyze_gradient){
03961 int g= d1[ scantable[i] ];
03962 if(g && (g^new_level) >= 0)
03963 continue;
03964 }
03965
03966 if(i < last_non_zero){
03967 int next_i= i + run2 + 1;
03968 int next_level= block[ perm_scantable[next_i] ] + 64;
03969
03970 if(next_level&(~127))
03971 next_level= 0;
03972
03973 if(next_i < last_non_zero)
03974 score += length[UNI_AC_ENC_INDEX(run, 65)]
03975 + length[UNI_AC_ENC_INDEX(run2, next_level)]
03976 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
03977 else
03978 score += length[UNI_AC_ENC_INDEX(run, 65)]
03979 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
03980 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
03981 }else{
03982 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
03983 if(prev_level){
03984 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
03985 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
03986 }
03987 }
03988 }
03989 }else{
03990 new_coeff=0;
03991 assert(FFABS(level)==1);
03992
03993 if(i < last_non_zero){
03994 int next_i= i + run2 + 1;
03995 int next_level= block[ perm_scantable[next_i] ] + 64;
03996
03997 if(next_level&(~127))
03998 next_level= 0;
03999
04000 if(next_i < last_non_zero)
04001 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
04002 - length[UNI_AC_ENC_INDEX(run2, next_level)]
04003 - length[UNI_AC_ENC_INDEX(run, 65)];
04004 else
04005 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
04006 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
04007 - length[UNI_AC_ENC_INDEX(run, 65)];
04008 }else{
04009 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
04010 if(prev_level){
04011 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
04012 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
04013 }
04014 }
04015 }
04016
04017 score *= lambda;
04018
04019 unquant_change= new_coeff - old_coeff;
04020 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
04021
04022 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
04023 if(score<best_score){
04024 best_score= score;
04025 best_coeff= i;
04026 best_change= change;
04027 best_unquant_change= unquant_change;
04028 }
04029 }
04030 if(level){
04031 prev_level= level + 64;
04032 if(prev_level&(~127))
04033 prev_level= 0;
04034 prev_run= run;
04035 run=0;
04036 }else{
04037 run++;
04038 }
04039 }
04040 #ifdef REFINE_STATS
04041 STOP_TIMER("iterative step")}
04042 #endif
04043
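        /* Apply the best change found in this pass, update last_non_zero and the
         * run-length table, and fold the change into the spatial error via add_8x8basis;
         * stop once no candidate improved the score. */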
04044 if(best_change){
04045 int j= perm_scantable[ best_coeff ];
04046
04047 block[j] += best_change;
04048
04049 if(best_coeff > last_non_zero){
04050 last_non_zero= best_coeff;
04051 assert(block[j]);
04052 #ifdef REFINE_STATS
04053 after_last++;
04054 #endif
04055 }else{
04056 #ifdef REFINE_STATS
04057 if(block[j]){
04058 if(block[j] - best_change){
04059 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
04060 raise++;
04061 }else{
04062 lower++;
04063 }
04064 }else{
04065 from_zero++;
04066 }
04067 }else{
04068 to_zero++;
04069 }
04070 #endif
04071 for(; last_non_zero>=start_i; last_non_zero--){
04072 if(block[perm_scantable[last_non_zero]])
04073 break;
04074 }
04075 }
04076 #ifdef REFINE_STATS
04077 count++;
04078 if(256*256*256*64 % count == 0){
04079 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
04080 }
04081 #endif
04082 run=0;
04083 rle_index=0;
04084 for(i=start_i; i<=last_non_zero; i++){
04085 int j= perm_scantable[i];
04086 const int level= block[j];
04087
04088 if(level){
04089 run_tab[rle_index++]=run;
04090 run=0;
04091 }else{
04092 run++;
04093 }
04094 }
04095
04096 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
04097 }else{
04098 break;
04099 }
04100 }
04101 #ifdef REFINE_STATS
04102 if(last_non_zero>0){
04103 STOP_TIMER("iterative search")
04104 }
04105 }
04106 #endif
04107
04108 return last_non_zero;
04109 }
04110
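/* Default (non-trellis) quantizer: forward DCT, optional denoising, then biased
 * division by the quantization matrix. The scan runs from the highest frequency
 * down to find the last non-zero coefficient first, so the tail can be skipped. */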
04111 int ff_dct_quantize_c(MpegEncContext *s,
04112 DCTELEM *block, int n,
04113 int qscale, int *overflow)
04114 {
04115 int i, j, level, last_non_zero, q, start_i;
04116 const int *qmat;
04117 const uint8_t *scantable= s->intra_scantable.scantable;
04118 int bias;
04119 int max=0;
04120 unsigned int threshold1, threshold2;
04121
04122 s->dsp.fdct (block);
04123
04124 if(s->dct_error_sum)
04125 s->denoise_dct(s, block);
04126
04127 if (s->mb_intra) {
04128 if (!s->h263_aic) {
04129 if (n < 4)
04130 q = s->y_dc_scale;
04131 else
04132 q = s->c_dc_scale;
04133 q = q << 3;
04134 } else
04135
04136 q = 1 << 3;
04137
04138
04139 block[0] = (block[0] + (q >> 1)) / q;
04140 start_i = 1;
04141 last_non_zero = 0;
04142 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
04143 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
04144 } else {
04145 start_i = 0;
04146 last_non_zero = -1;
04147 qmat = s->q_inter_matrix[qscale];
04148 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
04149 }
04150 threshold1= (1<<QMAT_SHIFT) - bias - 1;
04151 threshold2= (threshold1<<1);
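    /* Scan from the highest frequency downwards; the first coefficient whose scaled
     * value exceeds the dead-zone threshold becomes last_non_zero, and everything
     * above it in scan order is cleared. */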
04152 for(i=63;i>=start_i;i--) {
04153 j = scantable[i];
04154 level = block[j] * qmat[j];
04155
04156 if(((unsigned)(level+threshold1))>threshold2){
04157 last_non_zero = i;
04158 break;
04159 }else{
04160 block[j]=0;
04161 }
04162 }
04163 for(i=start_i; i<=last_non_zero; i++) {
04164 j = scantable[i];
04165 level = block[j] * qmat[j];
04166
04167
04168
04169 if(((unsigned)(level+threshold1))>threshold2){
04170 if(level>0){
04171 level= (bias + level)>>QMAT_SHIFT;
04172 block[j]= level;
04173 }else{
04174 level= (bias - level)>>QMAT_SHIFT;
04175 block[j]= -level;
04176 }
04177 max |=level;
04178 }else{
04179 block[j]=0;
04180 }
04181 }
04182 *overflow= s->max_qcoeff < max;
04183
04184
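    /* block[] was filled in natural order; permute the non-zero coefficients into the
     * order the selected IDCT expects, if it uses a non-trivial permutation. */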
04185 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
04186 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
04187
04188 return last_non_zero;
04189 }
04190
04191 #define OFFSET(x) offsetof(MpegEncContext, x)
04192 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
04193 static const AVOption h263_options[] = {
04194 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
04195 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE},
04196 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { 0 }, 0, INT_MAX, VE },
04197 FF_MPV_COMMON_OPTS
04198 { NULL },
04199 };
04200
04201 static const AVClass h263_class = {
04202 .class_name = "H.263 encoder",
04203 .item_name = av_default_item_name,
04204 .option = h263_options,
04205 .version = LIBAVUTIL_VERSION_INT,
04206 };
04207
04208 AVCodec ff_h263_encoder = {
04209 .name = "h263",
04210 .type = AVMEDIA_TYPE_VIDEO,
04211 .id = CODEC_ID_H263,
04212 .priv_data_size = sizeof(MpegEncContext),
04213 .init = ff_MPV_encode_init,
04214 .encode2 = ff_MPV_encode_picture,
04215 .close = ff_MPV_encode_end,
04216 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
04217 .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
04218 .priv_class = &h263_class,
04219 };
04220
04221 static const AVOption h263p_options[] = {
04222 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
04223 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
04224 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
04225 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE},
04226 FF_MPV_COMMON_OPTS
04227 { NULL },
04228 };
04229 static const AVClass h263p_class = {
04230 .class_name = "H.263p encoder",
04231 .item_name = av_default_item_name,
04232 .option = h263p_options,
04233 .version = LIBAVUTIL_VERSION_INT,
04234 };
04235
04236 AVCodec ff_h263p_encoder = {
04237 .name = "h263p",
04238 .type = AVMEDIA_TYPE_VIDEO,
04239 .id = CODEC_ID_H263P,
04240 .priv_data_size = sizeof(MpegEncContext),
04241 .init = ff_MPV_encode_init,
04242 .encode2 = ff_MPV_encode_picture,
04243 .close = ff_MPV_encode_end,
04244 .capabilities = CODEC_CAP_SLICE_THREADS,
04245 .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
04246 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
04247 .priv_class = &h263p_class,
04248 };
04249
04250 FF_MPV_GENERIC_CLASS(msmpeg4v2)
04251
04252 AVCodec ff_msmpeg4v2_encoder = {
04253 .name = "msmpeg4v2",
04254 .type = AVMEDIA_TYPE_VIDEO,
04255 .id = CODEC_ID_MSMPEG4V2,
04256 .priv_data_size = sizeof(MpegEncContext),
04257 .init = ff_MPV_encode_init,
04258 .encode2 = ff_MPV_encode_picture,
04259 .close = ff_MPV_encode_end,
04260 .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
04261 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
04262 .priv_class = &msmpeg4v2_class,
04263 };
04264
04265 FF_MPV_GENERIC_CLASS(msmpeg4v3)
04266
04267 AVCodec ff_msmpeg4v3_encoder = {
04268 .name = "msmpeg4",
04269 .type = AVMEDIA_TYPE_VIDEO,
04270 .id = CODEC_ID_MSMPEG4V3,
04271 .priv_data_size = sizeof(MpegEncContext),
04272 .init = ff_MPV_encode_init,
04273 .encode2 = ff_MPV_encode_picture,
04274 .close = ff_MPV_encode_end,
04275 .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
04276 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
04277 .priv_class = &msmpeg4v3_class,
04278 };
04279
04280 FF_MPV_GENERIC_CLASS(wmv1)
04281
04282 AVCodec ff_wmv1_encoder = {
04283 .name = "wmv1",
04284 .type = AVMEDIA_TYPE_VIDEO,
04285 .id = CODEC_ID_WMV1,
04286 .priv_data_size = sizeof(MpegEncContext),
04287 .init = ff_MPV_encode_init,
04288 .encode2 = ff_MPV_encode_picture,
04289 .close = ff_MPV_encode_end,
04290 .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
04291 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
04292 .priv_class = &wmv1_class,
04293 };