00030 #include "avcodec.h"
00031 #include "dsputil.h"
00032 #include "mpegvideo.h"
00033 #include "mpegvideo_common.h"
00034 #include "mjpegenc.h"
00035 #include "msmpeg4.h"
00036 #include "h263.h"
00037 #include "faandct.h"
00038 #include "aandcttab.h"
00039 #include <limits.h>
00040
00041
00042
00043
00044 static int encode_picture(MpegEncContext *s, int picture_number);
00045 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
00046 static int sse_mb(MpegEncContext *s);
00047
00048
00049
00050
00051
00052
00053 static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_MV*2+1];
00054 static uint8_t default_fcode_tab[MAX_MV*2+1];
00055
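/* Convert an 8x8 quantization matrix into per-qscale multiplier tables (qmat for
 * the generic quantizer, qmat16 for the 16-bit/SIMD one) so that dividing by the
 * quantizer becomes a multiply and a shift. The scaling differs per FDCT because
 * fdct_ifast leaves an extra ff_aanscales[] factor on its coefficients; a warning
 * is printed if the chosen QMAT_SHIFT could make the multiplications overflow. */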
00056 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
00057 const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
00058 {
00059 int qscale;
00060 int shift=0;
00061
00062 for(qscale=qmin; qscale<=qmax; qscale++){
00063 int i;
00064 if (dsp->fdct == ff_jpeg_fdct_islow
00065 #ifdef FAAN_POSTSCALE
00066 || dsp->fdct == ff_faandct
00067 #endif
00068 ) {
00069 for(i=0;i<64;i++) {
00070 const int j= dsp->idct_permutation[i];
/* the FDCT output is already correctly scaled here, so only the reciprocal of
   (qscale * quant_matrix[j]) is needed to turn quantization into a multiply */
00076 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
00077 (qscale * quant_matrix[j]));
00078 }
00079 } else if (dsp->fdct == fdct_ifast
00080 #ifndef FAAN_POSTSCALE
00081 || dsp->fdct == ff_faandct
00082 #endif
00083 ) {
00084 for(i=0;i<64;i++) {
00085 const int j= dsp->idct_permutation[i];
/* fdct_ifast leaves its coefficients scaled by ff_aanscales[] (a 2^14 fixed-point
   factor), so that factor is folded into the reciprocal as well */
00091 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
00092 (ff_aanscales[i] * qscale * quant_matrix[j]));
00093 }
00094 } else {
00095 for(i=0;i<64;i++) {
00096 const int j= dsp->idct_permutation[i];
/* generic case: build both the 32-bit reciprocal table and the 16-bit
   multiplier/bias pair used by the SIMD quantizer */
00102 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
00103
00104 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
00105
00106 if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
00107 qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
00108 }
00109 }
00110
00111 for(i=intra; i<64; i++){
00112 int64_t max= 8191;
00113 if (dsp->fdct == fdct_ifast
00114 #ifndef FAAN_POSTSCALE
00115 || dsp->fdct == ff_faandct
00116 #endif
00117 ) {
00118 max = (8191LL*ff_aanscales[i]) >> 14;
00119 }
00120 while(((max * qmat[qscale][i]) >> shift) > INT_MAX){
00121 shift++;
00122 }
00123 }
00124 }
00125 if(shift){
00126 av_log(NULL, AV_LOG_INFO, "Warning, QMAT_SHIFT is larger than %d, overflows possible\n", QMAT_SHIFT - shift);
00127 }
00128 }
00129
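/* Derive the quantizer from the current Lagrange multiplier (lambda is roughly
 * qscale * FF_QP2LAMBDA), clip it to the configured qmin..qmax range and cache
 * lambda^2 for the rate-distortion decisions. */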
00130 static inline void update_qscale(MpegEncContext *s){
00131 s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
00132 s->qscale= av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
00133
00134 s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
00135 }
00136
00137 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix){
00138 int i;
00139
00140 if(matrix){
00141 put_bits(pb, 1, 1);
00142 for(i=0;i<64;i++) {
00143 put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
00144 }
00145 }else
00146 put_bits(pb, 1, 0);
00147 }
00148
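/* Copy the frame metadata (type, quality, picture numbers, pts, interlacing flags)
 * from the user's frame into the encoder-owned picture; when me_threshold is set,
 * the caller-provided macroblock types and motion vectors are copied as well. */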
00149 static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
00150 int i;
00151
00152 dst->pict_type = src->pict_type;
00153 dst->quality = src->quality;
00154 dst->coded_picture_number = src->coded_picture_number;
00155 dst->display_picture_number = src->display_picture_number;
00156
00157 dst->pts = src->pts;
00158 dst->interlaced_frame = src->interlaced_frame;
00159 dst->top_field_first = src->top_field_first;
00160
00161 if(s->avctx->me_threshold){
00162 if(!src->motion_val[0])
00163 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
00164 if(!src->mb_type)
00165 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
00166 if(!src->ref_index[0])
00167 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
00168 if(src->motion_subsample_log2 != dst->motion_subsample_log2)
00169 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
00170 src->motion_subsample_log2, dst->motion_subsample_log2);
00171
00172 memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
00173
00174 for(i=0; i<2; i++){
00175 int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
00176 int height= ((16*s->mb_height)>>src->motion_subsample_log2);
00177
00178 if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
00179 memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
00180 }
00181 if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){
00182 memcpy(dst->ref_index[i], src->ref_index[i], s->b8_stride*2*s->mb_height*sizeof(int8_t));
00183 }
00184 }
00185 }
00186 }
00187
00188 static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
00189 #define COPY(a) dst->a= src->a
00190 COPY(pict_type);
00191 COPY(current_picture);
00192 COPY(f_code);
00193 COPY(b_code);
00194 COPY(qscale);
00195 COPY(lambda);
00196 COPY(lambda2);
00197 COPY(picture_in_gop_number);
00198 COPY(gop_picture_number);
00199 COPY(frame_pred_frame_dct);
00200 COPY(progressive_frame);
00201 COPY(partitioned_frame);
00202 #undef COPY
00203 }
00204
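/* Set the given MpegEncContext to encoding defaults; the fields set here do not
 * depend on the prior state of the context. */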
00209 static void MPV_encode_defaults(MpegEncContext *s){
00210 int i;
00211 MPV_common_defaults(s);
00212
00213 for(i=-16; i<16; i++){
00214 default_fcode_tab[i + MAX_MV]= 1;
00215 }
00216 s->me.mv_penalty= default_mv_penalty;
00217 s->fcode_tab= default_fcode_tab;
00218 }
00219
00220
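/* Initialize the video encoder: validate the user options against the selected
 * codec, pick the per-codec settings, and build the quantization tables. */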
00221 av_cold int MPV_encode_init(AVCodecContext *avctx)
00222 {
00223 MpegEncContext *s = avctx->priv_data;
00224 int i;
00225 int chroma_h_shift, chroma_v_shift;
00226
00227 MPV_encode_defaults(s);
00228
00229 switch (avctx->codec_id) {
00230 case CODEC_ID_MPEG2VIDEO:
00231 if(avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P){
00232 av_log(avctx, AV_LOG_ERROR, "only YUV420 and YUV422 are supported\n");
00233 return -1;
00234 }
00235 break;
00236 case CODEC_ID_LJPEG:
00237 case CODEC_ID_MJPEG:
00238 if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P && avctx->pix_fmt != PIX_FMT_RGB32 &&
00239 ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P) || avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL)){
00240 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
00241 return -1;
00242 }
00243 break;
00244 default:
00245 if(avctx->pix_fmt != PIX_FMT_YUV420P){
00246 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
00247 return -1;
00248 }
00249 }
00250
00251 switch (avctx->pix_fmt) {
00252 case PIX_FMT_YUVJ422P:
00253 case PIX_FMT_YUV422P:
00254 s->chroma_format = CHROMA_422;
00255 break;
00256 case PIX_FMT_YUVJ420P:
00257 case PIX_FMT_YUV420P:
00258 default:
00259 s->chroma_format = CHROMA_420;
00260 break;
00261 }
00262
00263 s->bit_rate = avctx->bit_rate;
00264 s->width = avctx->width;
00265 s->height = avctx->height;
00266 if(avctx->gop_size > 600 && avctx->strict_std_compliance>FF_COMPLIANCE_EXPERIMENTAL){
av_log(avctx, AV_LOG_ERROR, "keyframe interval too large, reducing it to 600\n");
00268 avctx->gop_size=600;
00269 }
00270 s->gop_size = avctx->gop_size;
00271 s->avctx = avctx;
00272 s->flags= avctx->flags;
00273 s->flags2= avctx->flags2;
00274 s->max_b_frames= avctx->max_b_frames;
00275 s->codec_id= avctx->codec->id;
00276 s->luma_elim_threshold = avctx->luma_elim_threshold;
00277 s->chroma_elim_threshold= avctx->chroma_elim_threshold;
00278 s->strict_std_compliance= avctx->strict_std_compliance;
00279 s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
00280 s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
00281 s->mpeg_quant= avctx->mpeg_quant;
00282 s->rtp_mode= !!avctx->rtp_payload_size;
00283 s->intra_dc_precision= avctx->intra_dc_precision;
00284 s->user_specified_pts = AV_NOPTS_VALUE;
00285
00286 if (s->gop_size <= 1) {
00287 s->intra_only = 1;
00288 s->gop_size = 12;
00289 } else {
00290 s->intra_only = 0;
00291 }
00292
00293 s->me_method = avctx->me_method;
00294
00295
00296 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
00297
00298 s->adaptive_quant= ( s->avctx->lumi_masking
00299 || s->avctx->dark_masking
00300 || s->avctx->temporal_cplx_masking
00301 || s->avctx->spatial_cplx_masking
00302 || s->avctx->p_masking
00303 || s->avctx->border_masking
00304 || (s->flags&CODEC_FLAG_QP_RD))
00305 && !s->fixed_qscale;
00306
00307 s->obmc= !!(s->flags & CODEC_FLAG_OBMC);
00308 s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER);
00309 s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN);
00310 s->intra_vlc_format= !!(s->flags2 & CODEC_FLAG2_INTRA_VLC);
00311 s->q_scale_type= !!(s->flags2 & CODEC_FLAG2_NON_LINEAR_QUANT);
00312
00313 if(avctx->rc_max_rate && !avctx->rc_buffer_size){
00314 av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
00315 return -1;
00316 }
00317
00318 if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
av_log(avctx, AV_LOG_INFO, "Warning: min_rate > 0 with min_rate != max_rate is not recommended!\n");
00320 }
00321
00322 if(avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate){
00323 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
00324 return -1;
00325 }
00326
00327 if(avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate){
av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
00329 return -1;
00330 }
00331
00332 if(avctx->rc_max_rate && avctx->rc_max_rate == avctx->bit_rate && avctx->rc_max_rate != avctx->rc_min_rate){
00333 av_log(avctx, AV_LOG_INFO, "impossible bitrate constraints, this will fail\n");
00334 }
00335
00336 if(avctx->rc_buffer_size && avctx->bit_rate*av_q2d(avctx->time_base) > avctx->rc_buffer_size){
00337 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
00338 return -1;
00339 }
00340
00341 if(avctx->bit_rate*av_q2d(avctx->time_base) > avctx->bit_rate_tolerance){
00342 av_log(avctx, AV_LOG_ERROR, "bitrate tolerance too small for bitrate\n");
00343 return -1;
00344 }
00345
00346 if( s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate
00347 && (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO)
00348 && 90000LL * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*0xFFFFLL){
00349
00350 av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n");
00351 }
00352
00353 if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
00354 && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P && s->codec_id != CODEC_ID_FLV1){
00355 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
00356 return -1;
00357 }
00358
00359 if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
00360 av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n");
00361 return -1;
00362 }
00363
00364 if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
00365 av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
00366 return -1;
00367 }
00368
00369 if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
00370 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
00371 return -1;
00372 }
00373
00374 if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
00375 av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
00376 return -1;
00377 }
00378
00379 if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
00380 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
00381 return -1;
00382 }
00383
00384 if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN))
00385 && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO){
00386 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
00387 return -1;
00388 }
00389
00390 if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){
00391 av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n");
00392 return -1;
00393 }
00394
00395 if((s->flags & CODEC_FLAG_CBP_RD) && !avctx->trellis){
00396 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
00397 return -1;
00398 }
00399
00400 if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
00401 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
00402 return -1;
00403 }
00404
00405 if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
av_log(avctx, AV_LOG_ERROR, "closed GOP together with scene change detection is not supported yet, set scenechange_threshold to 1000000000\n");
00407 return -1;
00408 }
00409
00410 if((s->flags2 & CODEC_FLAG2_INTRA_VLC) && s->codec_id != CODEC_ID_MPEG2VIDEO){
00411 av_log(avctx, AV_LOG_ERROR, "intra vlc table not supported by codec\n");
00412 return -1;
00413 }
00414
00415 if(s->flags & CODEC_FLAG_LOW_DELAY){
00416 if (s->codec_id != CODEC_ID_MPEG2VIDEO){
00417 av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg2\n");
00418 return -1;
00419 }
00420 if (s->max_b_frames != 0){
00421 av_log(avctx, AV_LOG_ERROR, "b frames cannot be used with low delay\n");
00422 return -1;
00423 }
00424 }
00425
00426 if(s->q_scale_type == 1){
00427 if(s->codec_id != CODEC_ID_MPEG2VIDEO){
00428 av_log(avctx, AV_LOG_ERROR, "non linear quant is only available for mpeg2\n");
00429 return -1;
00430 }
00431 if(avctx->qmax > 12){
00432 av_log(avctx, AV_LOG_ERROR, "non linear quant only supports qmax <= 12 currently\n");
00433 return -1;
00434 }
00435 }
00436
00437 if(s->avctx->thread_count > 1 && s->codec_id != CODEC_ID_MPEG4
00438 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO
00439 && (s->codec_id != CODEC_ID_H263P || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))){
00440 av_log(avctx, AV_LOG_ERROR, "multi threaded encoding not supported by codec\n");
00441 return -1;
00442 }
00443
00444 if(s->avctx->thread_count > 1)
00445 s->rtp_mode= 1;
00446
00447 if(!avctx->time_base.den || !avctx->time_base.num){
00448 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
00449 return -1;
00450 }
00451
00452 i= (INT_MAX/2+128)>>8;
00453 if(avctx->me_threshold >= i){
00454 av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n", i - 1);
00455 return -1;
00456 }
00457 if(avctx->mb_threshold >= i){
00458 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n", i - 1);
00459 return -1;
00460 }
00461
00462 if(avctx->b_frame_strategy && (avctx->flags&CODEC_FLAG_PASS2)){
00463 av_log(avctx, AV_LOG_INFO, "notice: b_frame_strategy only affects the first pass\n");
00464 avctx->b_frame_strategy = 0;
00465 }
00466
00467 i= av_gcd(avctx->time_base.den, avctx->time_base.num);
00468 if(i > 1){
00469 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
00470 avctx->time_base.den /= i;
00471 avctx->time_base.num /= i;
00472
00473 }
00474
00475 if(s->codec_id==CODEC_ID_MJPEG){
00476 s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1);
00477 s->inter_quant_bias= 0;
00478 }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
00479 s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3);
00480 s->inter_quant_bias= 0;
00481 }else{
00482 s->intra_quant_bias=0;
00483 s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2));
00484 }
00485
00486 if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
00487 s->intra_quant_bias= avctx->intra_quant_bias;
00488 if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
00489 s->inter_quant_bias= avctx->inter_quant_bias;
00490
00491 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
00492
00493 if(avctx->codec_id == CODEC_ID_MPEG4 && s->avctx->time_base.den > (1<<16)-1){
00494 av_log(avctx, AV_LOG_ERROR, "timebase not supported by mpeg 4 standard\n");
00495 return -1;
00496 }
00497 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
00498
00499 switch(avctx->codec->id) {
00500 case CODEC_ID_MPEG1VIDEO:
00501 s->out_format = FMT_MPEG1;
00502 s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY);
00503 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
00504 break;
00505 case CODEC_ID_MPEG2VIDEO:
00506 s->out_format = FMT_MPEG1;
00507 s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY);
00508 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
00509 s->rtp_mode= 1;
00510 break;
00511 case CODEC_ID_LJPEG:
00512 case CODEC_ID_MJPEG:
00513 s->out_format = FMT_MJPEG;
00514 s->intra_only = 1;
00515 s->mjpeg_vsample[0] = 2;
00516 s->mjpeg_vsample[1] = 2>>chroma_v_shift;
00517 s->mjpeg_vsample[2] = 2>>chroma_v_shift;
00518 s->mjpeg_hsample[0] = 2;
00519 s->mjpeg_hsample[1] = 2>>chroma_h_shift;
00520 s->mjpeg_hsample[2] = 2>>chroma_h_shift;
00521 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER)
00522 || ff_mjpeg_encode_init(s) < 0)
00523 return -1;
00524 avctx->delay=0;
00525 s->low_delay=1;
00526 break;
00527 case CODEC_ID_H261:
00528 if (!CONFIG_H261_ENCODER) return -1;
00529 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
00530 av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.261 codec.\nValid sizes are 176x144, 352x288\n", s->width, s->height);
00531 return -1;
00532 }
00533 s->out_format = FMT_H261;
00534 avctx->delay=0;
00535 s->low_delay=1;
00536 break;
00537 case CODEC_ID_H263:
00538 if (!CONFIG_H263_ENCODER) return -1;
00539 if (h263_get_picture_format(s->width, s->height) == 7) {
00540 av_log(avctx, AV_LOG_INFO, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. Try H.263+.\n", s->width, s->height);
00541 return -1;
00542 }
00543 s->out_format = FMT_H263;
00544 s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
00545 avctx->delay=0;
00546 s->low_delay=1;
00547 break;
00548 case CODEC_ID_H263P:
00549 s->out_format = FMT_H263;
00550 s->h263_plus = 1;
00551
00552 s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
00553 s->h263_aic= (avctx->flags & CODEC_FLAG_AC_PRED) ? 1:0;
00554 s->modified_quant= s->h263_aic;
00555 s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0;
00556 s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
00557 s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0;
00558 s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
00559 s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0;
00560
00561
00562
00563 avctx->delay=0;
00564 s->low_delay=1;
00565 break;
00566 case CODEC_ID_FLV1:
00567 s->out_format = FMT_H263;
00568 s->h263_flv = 2;
00569 s->unrestricted_mv = 1;
00570 s->rtp_mode=0;
00571 avctx->delay=0;
00572 s->low_delay=1;
00573 break;
00574 case CODEC_ID_RV10:
00575 s->out_format = FMT_H263;
00576 avctx->delay=0;
00577 s->low_delay=1;
00578 break;
00579 case CODEC_ID_RV20:
00580 s->out_format = FMT_H263;
00581 avctx->delay=0;
00582 s->low_delay=1;
00583 s->modified_quant=1;
00584 s->h263_aic=1;
00585 s->h263_plus=1;
00586 s->loop_filter=1;
00587 s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
00588 break;
00589 case CODEC_ID_MPEG4:
00590 s->out_format = FMT_H263;
00591 s->h263_pred = 1;
00592 s->unrestricted_mv = 1;
00593 s->low_delay= s->max_b_frames ? 0 : 1;
00594 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
00595 break;
00596 case CODEC_ID_MSMPEG4V1:
00597 s->out_format = FMT_H263;
00598 s->h263_msmpeg4 = 1;
00599 s->h263_pred = 1;
00600 s->unrestricted_mv = 1;
00601 s->msmpeg4_version= 1;
00602 avctx->delay=0;
00603 s->low_delay=1;
00604 break;
00605 case CODEC_ID_MSMPEG4V2:
00606 s->out_format = FMT_H263;
00607 s->h263_msmpeg4 = 1;
00608 s->h263_pred = 1;
00609 s->unrestricted_mv = 1;
00610 s->msmpeg4_version= 2;
00611 avctx->delay=0;
00612 s->low_delay=1;
00613 break;
00614 case CODEC_ID_MSMPEG4V3:
00615 s->out_format = FMT_H263;
00616 s->h263_msmpeg4 = 1;
00617 s->h263_pred = 1;
00618 s->unrestricted_mv = 1;
00619 s->msmpeg4_version= 3;
00620 s->flipflop_rounding=1;
00621 avctx->delay=0;
00622 s->low_delay=1;
00623 break;
00624 case CODEC_ID_WMV1:
00625 s->out_format = FMT_H263;
00626 s->h263_msmpeg4 = 1;
00627 s->h263_pred = 1;
00628 s->unrestricted_mv = 1;
00629 s->msmpeg4_version= 4;
00630 s->flipflop_rounding=1;
00631 avctx->delay=0;
00632 s->low_delay=1;
00633 break;
00634 case CODEC_ID_WMV2:
00635 s->out_format = FMT_H263;
00636 s->h263_msmpeg4 = 1;
00637 s->h263_pred = 1;
00638 s->unrestricted_mv = 1;
00639 s->msmpeg4_version= 5;
00640 s->flipflop_rounding=1;
00641 avctx->delay=0;
00642 s->low_delay=1;
00643 break;
00644 default:
00645 return -1;
00646 }
00647
00648 avctx->has_b_frames= !s->low_delay;
00649
00650 s->encoding = 1;
00651
00652
00653 if (MPV_common_init(s) < 0)
00654 return -1;
00655
00656 if(!s->dct_quantize)
00657 s->dct_quantize = dct_quantize_c;
00658 if(!s->denoise_dct)
00659 s->denoise_dct = denoise_dct_c;
00660 s->fast_dct_quantize = s->dct_quantize;
00661 if(avctx->trellis)
00662 s->dct_quantize = dct_quantize_trellis_c;
00663
00664 if((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
00665 s->chroma_qscale_table= ff_h263_chroma_qscale_table;
00666 s->progressive_frame=
00667 s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN));
00668 s->quant_precision=5;
00669
00670 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
00671 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
00672
00673 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
00674 ff_h261_encode_init(s);
00675 if (CONFIG_ANY_H263_ENCODER && s->out_format == FMT_H263)
00676 h263_encode_init(s);
00677 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
00678 ff_msmpeg4_encode_init(s);
00679 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
00680 && s->out_format == FMT_MPEG1)
00681 ff_mpeg1_encode_init(s);
00682
/* set up the default or user-supplied quantization matrices (stored in IDCT-permuted order) */
00684 for(i=0;i<64;i++) {
00685 int j= s->dsp.idct_permutation[i];
00686 if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
00687 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
00688 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
00689 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
00690 s->intra_matrix[j] =
00691 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
00692 }else
00693 {
00694 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
00695 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
00696 }
00697 if(s->avctx->intra_matrix)
00698 s->intra_matrix[j] = s->avctx->intra_matrix[i];
00699 if(s->avctx->inter_matrix)
00700 s->inter_matrix[j] = s->avctx->inter_matrix[i];
00701 }
00702
/* precompute the quantizer multiplier tables; MJPEG carries qscale inside the
   matrix itself, so it does not need them */
00705 if (s->out_format != FMT_MJPEG) {
00706 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
00707 s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1);
00708 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
00709 s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0);
00710 }
00711
00712 if(ff_rate_control_init(s) < 0)
00713 return -1;
00714
00715 return 0;
00716 }
00717
00718 av_cold int MPV_encode_end(AVCodecContext *avctx)
00719 {
00720 MpegEncContext *s = avctx->priv_data;
00721
00722 ff_rate_control_uninit(s);
00723
00724 MPV_common_end(s);
00725 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) && s->out_format == FMT_MJPEG)
00726 ff_mjpeg_encode_close(s);
00727
00728 av_freep(&avctx->extradata);
00729
00730 return 0;
00731 }
00732
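/* Sum of absolute differences between a 16x16 block and a constant reference value;
 * used to estimate how well a block is described by its mean alone. */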
00733 static int get_sae(uint8_t *src, int ref, int stride){
00734 int x,y;
00735 int acc=0;
00736
00737 for(y=0; y<16; y++){
00738 for(x=0; x<16; x++){
00739 acc+= FFABS(src[x+y*stride] - ref);
00740 }
00741 }
00742
00743 return acc;
00744 }
00745
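/* Count the macroblocks that are closer to their own mean than to the previous
 * frame, i.e. blocks that would likely be cheaper coded as intra; used by
 * b_frame_strategy 1 to score B-frame candidates. */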
00746 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
00747 int x, y, w, h;
00748 int acc=0;
00749
00750 w= s->width &~15;
00751 h= s->height&~15;
00752
00753 for(y=0; y<h; y+=16){
00754 for(x=0; x<w; x+=16){
00755 int offset= x + y*stride;
00756 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
00757 int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
00758 int sae = get_sae(src + offset, mean, stride);
00759
00760 acc+= sae + 500 < sad;
00761 }
00762 }
00763 return acc;
00764 }
00765
00766
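/* Queue one user-supplied frame for encoding: validate or guess its pts, then
 * either reference the user's buffer directly or copy it into an internal picture,
 * and push it onto the input_picture FIFO (delayed by max_b_frames). */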
00767 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
00768 AVFrame *pic=NULL;
00769 int64_t pts;
00770 int i;
00771 const int encoding_delay= s->max_b_frames;
00772 int direct=1;
00773
00774 if(pic_arg){
00775 pts= pic_arg->pts;
00776 pic_arg->display_picture_number= s->input_picture_number++;
00777
00778 if(pts != AV_NOPTS_VALUE){
00779 if(s->user_specified_pts != AV_NOPTS_VALUE){
00780 int64_t time= pts;
00781 int64_t last= s->user_specified_pts;
00782
00783 if(time <= last){
00784 av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%"PRId64", last=%"PRId64"\n", pts, s->user_specified_pts);
00785 return -1;
00786 }
00787 }
00788 s->user_specified_pts= pts;
00789 }else{
00790 if(s->user_specified_pts != AV_NOPTS_VALUE){
00791 s->user_specified_pts=
00792 pts= s->user_specified_pts + 1;
00793 av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n", pts);
00794 }else{
00795 pts= pic_arg->display_picture_number;
00796 }
00797 }
00798 }
00799
00800 if(pic_arg){
00801 if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
00802 if(pic_arg->linesize[0] != s->linesize) direct=0;
00803 if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
00804 if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
00805
00806
00807
00808 if(direct){
00809 i= ff_find_unused_picture(s, 1);
00810
00811 pic= (AVFrame*)&s->picture[i];
00812 pic->reference= 3;
00813
00814 for(i=0; i<4; i++){
00815 pic->data[i]= pic_arg->data[i];
00816 pic->linesize[i]= pic_arg->linesize[i];
00817 }
00818 alloc_picture(s, (Picture*)pic, 1);
00819 }else{
00820 i= ff_find_unused_picture(s, 0);
00821
00822 pic= (AVFrame*)&s->picture[i];
00823 pic->reference= 3;
00824
00825 alloc_picture(s, (Picture*)pic, 0);
00826
00827 if( pic->data[0] + INPLACE_OFFSET == pic_arg->data[0]
00828 && pic->data[1] + INPLACE_OFFSET == pic_arg->data[1]
00829 && pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]){
/* the user's buffer already lies at the right offset inside the new picture, nothing to copy */
00831 }else{
00832 int h_chroma_shift, v_chroma_shift;
00833 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
00834
00835 for(i=0; i<3; i++){
00836 int src_stride= pic_arg->linesize[i];
00837 int dst_stride= i ? s->uvlinesize : s->linesize;
00838 int h_shift= i ? h_chroma_shift : 0;
00839 int v_shift= i ? v_chroma_shift : 0;
00840 int w= s->width >>h_shift;
00841 int h= s->height>>v_shift;
00842 uint8_t *src= pic_arg->data[i];
00843 uint8_t *dst= pic->data[i];
00844
00845 if(!s->avctx->rc_buffer_size)
00846 dst +=INPLACE_OFFSET;
00847
00848 if(src_stride==dst_stride)
00849 memcpy(dst, src, src_stride*h);
00850 else{
00851 while(h--){
00852 memcpy(dst, src, w);
00853 dst += dst_stride;
00854 src += src_stride;
00855 }
00856 }
00857 }
00858 }
00859 }
00860 copy_picture_attributes(s, pic, pic_arg);
00861 pic->pts= pts;
00862 }
00863
/* shift the input picture FIFO */
00865 for(i=1; i<MAX_PICTURE_COUNT ; i++)
00866 s->input_picture[i-1]= s->input_picture[i];
00867
00868 s->input_picture[encoding_delay]= (Picture*)pic;
00869
00870 return 0;
00871 }
00872
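/* Decide whether the current frame may be dropped entirely: compare it block by
 * block against the last reference using frame_skip_cmp, accumulate the error
 * according to frame_skip_exp and test it against the skip threshold/factor. */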
00873 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
00874 int x, y, plane;
00875 int score=0;
00876 int64_t score64=0;
00877
00878 for(plane=0; plane<3; plane++){
00879 const int stride= p->linesize[plane];
00880 const int bw= plane ? 1 : 2;
00881 for(y=0; y<s->mb_height*bw; y++){
00882 for(x=0; x<s->mb_width*bw; x++){
00883 int off= p->type == FF_BUFFER_TYPE_SHARED ? 0: 16;
00884 int v= s->dsp.frame_skip_cmp[1](s, p->data[plane] + 8*(x + y*stride)+off, ref->data[plane] + 8*(x + y*stride), stride, 8);
00885
00886 switch(s->avctx->frame_skip_exp){
00887 case 0: score= FFMAX(score, v); break;
00888 case 1: score+= FFABS(v);break;
00889 case 2: score+= v*v;break;
00890 case 3: score64+= FFABS(v*v*(int64_t)v);break;
00891 case 4: score64+= v*v*(int64_t)(v*v);break;
00892 }
00893 }
00894 }
00895 }
00896
00897 if(score) score64= score;
00898
00899 if(score64 < s->avctx->frame_skip_threshold)
00900 return 1;
00901 if(score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda)>>8))
00902 return 1;
00903 return 0;
00904 }
00905
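/* b_frame_strategy 2: re-encode the queued frames at reduced resolution (brd_scale)
 * with every possible number of consecutive B frames and return the count that
 * gives the lowest rate-distortion score. */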
00906 static int estimate_best_b_count(MpegEncContext *s){
00907 AVCodec *codec= avcodec_find_encoder(s->avctx->codec_id);
00908 AVCodecContext *c= avcodec_alloc_context();
00909 AVFrame input[FF_MAX_B_FRAMES+2];
00910 const int scale= s->avctx->brd_scale;
00911 int i, j, out_size, p_lambda, b_lambda, lambda2;
00912 int outbuf_size= s->width * s->height;
00913 uint8_t *outbuf= av_malloc(outbuf_size);
00914 int64_t best_rd= INT64_MAX;
00915 int best_b_count= -1;
00916
00917 assert(scale>=0 && scale <=3);
00918
00919
00920 p_lambda= s->last_lambda_for[FF_P_TYPE];
00921 b_lambda= s->last_lambda_for[FF_B_TYPE];
00922 if(!b_lambda) b_lambda= p_lambda;
00923 lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT;
00924
00925 c->width = s->width >> scale;
00926 c->height= s->height>> scale;
00927 c->flags= CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR | CODEC_FLAG_INPUT_PRESERVED ;
00928 c->flags|= s->avctx->flags & CODEC_FLAG_QPEL;
00929 c->mb_decision= s->avctx->mb_decision;
00930 c->me_cmp= s->avctx->me_cmp;
00931 c->mb_cmp= s->avctx->mb_cmp;
00932 c->me_sub_cmp= s->avctx->me_sub_cmp;
00933 c->pix_fmt = PIX_FMT_YUV420P;
00934 c->time_base= s->avctx->time_base;
00935 c->max_b_frames= s->max_b_frames;
00936
if (avcodec_open(c, codec) < 0) {
    av_freep(&outbuf);
    av_freep(&c);
    return -1;
}
00939
00940 for(i=0; i<s->max_b_frames+2; i++){
00941 int ysize= c->width*c->height;
00942 int csize= (c->width/2)*(c->height/2);
00943 Picture pre_input, *pre_input_ptr= i ? s->input_picture[i-1] : s->next_picture_ptr;
00944
00945 avcodec_get_frame_defaults(&input[i]);
00946 input[i].data[0]= av_malloc(ysize + 2*csize);
00947 input[i].data[1]= input[i].data[0] + ysize;
00948 input[i].data[2]= input[i].data[1] + csize;
00949 input[i].linesize[0]= c->width;
00950 input[i].linesize[1]=
00951 input[i].linesize[2]= c->width/2;
00952
00953 if(pre_input_ptr && (!i || s->input_picture[i-1])) {
00954 pre_input= *pre_input_ptr;
00955
00956 if(pre_input.type != FF_BUFFER_TYPE_SHARED && i) {
00957 pre_input.data[0]+=INPLACE_OFFSET;
00958 pre_input.data[1]+=INPLACE_OFFSET;
00959 pre_input.data[2]+=INPLACE_OFFSET;
00960 }
00961
00962 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.data[0], pre_input.linesize[0], c->width, c->height);
00963 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.data[1], pre_input.linesize[1], c->width>>1, c->height>>1);
00964 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.data[2], pre_input.linesize[2], c->width>>1, c->height>>1);
00965 }
00966 }
00967
00968 for(j=0; j<s->max_b_frames+1; j++){
00969 int64_t rd=0;
00970
00971 if(!s->input_picture[j])
00972 break;
00973
00974 c->error[0]= c->error[1]= c->error[2]= 0;
00975
00976 input[0].pict_type= FF_I_TYPE;
00977 input[0].quality= 1 * FF_QP2LAMBDA;
00978 out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]);
00979
00980
00981 for(i=0; i<s->max_b_frames+1; i++){
00982 int is_p= i % (j+1) == j || i==s->max_b_frames;
00983
00984 input[i+1].pict_type= is_p ? FF_P_TYPE : FF_B_TYPE;
00985 input[i+1].quality= is_p ? p_lambda : b_lambda;
00986 out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]);
00987 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
00988 }
00989
/* flush the delayed frames out of the test encoder and charge their bits as well */
00991 while(out_size){
00992 out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
00993 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
00994 }
00995
00996 rd += c->error[0] + c->error[1] + c->error[2];
00997
00998 if(rd < best_rd){
00999 best_rd= rd;
01000 best_b_count= j;
01001 }
01002 }
01003
01004 av_freep(&outbuf);
01005 avcodec_close(c);
01006 av_freep(&c);
01007
01008 for(i=0; i<s->max_b_frames+2; i++){
01009 av_freep(&input[i].data[0]);
01010 }
01011
01012 return best_b_count;
01013 }
01014
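/* Pick the next picture to encode from the input FIFO: run frame-skip detection,
 * choose how many consecutive B frames to use according to b_frame_strategy,
 * respect GOP boundaries and the closed-GOP/strict-GOP flags, and finally set up
 * new_picture/current_picture for the encoder. */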
01015 static void select_input_picture(MpegEncContext *s){
01016 int i;
01017
01018 for(i=1; i<MAX_PICTURE_COUNT; i++)
01019 s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
01020 s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
01021
/* choose the type and the coded order of the next picture to encode */
01023 if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
01024 if( s->next_picture_ptr==NULL || s->intra_only){
01025 s->reordered_input_picture[0]= s->input_picture[0];
01026 s->reordered_input_picture[0]->pict_type= FF_I_TYPE;
01027 s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
01028 }else{
01029 int b_frames;
01030
01031 if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){
01032 if(s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)){
01033
01034
01035
01036 if(s->input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
01037 for(i=0; i<4; i++)
01038 s->input_picture[0]->data[i]= NULL;
01039 s->input_picture[0]->type= 0;
01040 }else{
01041 assert( s->input_picture[0]->type==FF_BUFFER_TYPE_USER
01042 || s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
01043
01044 s->avctx->release_buffer(s->avctx, (AVFrame*)s->input_picture[0]);
01045 }
01046
01047 emms_c();
01048 ff_vbv_update(s, 0);
01049
01050 goto no_output_pic;
01051 }
01052 }
01053
01054 if(s->flags&CODEC_FLAG_PASS2){
01055 for(i=0; i<s->max_b_frames+1; i++){
01056 int pict_num= s->input_picture[0]->display_picture_number + i;
01057
01058 if(pict_num >= s->rc_context.num_entries)
01059 break;
01060 if(!s->input_picture[i]){
01061 s->rc_context.entry[pict_num-1].new_pict_type = FF_P_TYPE;
01062 break;
01063 }
01064
01065 s->input_picture[i]->pict_type=
01066 s->rc_context.entry[pict_num].new_pict_type;
01067 }
01068 }
01069
01070 if(s->avctx->b_frame_strategy==0){
01071 b_frames= s->max_b_frames;
01072 while(b_frames && !s->input_picture[b_frames]) b_frames--;
01073 }else if(s->avctx->b_frame_strategy==1){
01074 for(i=1; i<s->max_b_frames+1; i++){
01075 if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
01076 s->input_picture[i]->b_frame_score=
01077 get_intra_count(s, s->input_picture[i ]->data[0],
01078 s->input_picture[i-1]->data[0], s->linesize) + 1;
01079 }
01080 }
01081 for(i=0; i<s->max_b_frames+1; i++){
01082 if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/s->avctx->b_sensitivity) break;
01083 }
01084
01085 b_frames= FFMAX(0, i-1);
01086
01087
01088 for(i=0; i<b_frames+1; i++){
01089 s->input_picture[i]->b_frame_score=0;
01090 }
01091 }else if(s->avctx->b_frame_strategy==2){
01092 b_frames= estimate_best_b_count(s);
01093 }else{
01094 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
01095 b_frames=0;
01096 }
01097
01098 emms_c();
01099
01100
01101
01102
01103 for(i= b_frames - 1; i>=0; i--){
01104 int type= s->input_picture[i]->pict_type;
01105 if(type && type != FF_B_TYPE)
01106 b_frames= i;
01107 }
01108 if(s->input_picture[b_frames]->pict_type == FF_B_TYPE && b_frames == s->max_b_frames){
01109 av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
01110 }
01111
01112 if(s->picture_in_gop_number + b_frames >= s->gop_size){
01113 if((s->flags2 & CODEC_FLAG2_STRICT_GOP) && s->gop_size > s->picture_in_gop_number){
01114 b_frames= s->gop_size - s->picture_in_gop_number - 1;
01115 }else{
01116 if(s->flags & CODEC_FLAG_CLOSED_GOP)
01117 b_frames=0;
01118 s->input_picture[b_frames]->pict_type= FF_I_TYPE;
01119 }
01120 }
01121
01122 if( (s->flags & CODEC_FLAG_CLOSED_GOP)
01123 && b_frames
01124 && s->input_picture[b_frames]->pict_type== FF_I_TYPE)
01125 b_frames--;
01126
01127 s->reordered_input_picture[0]= s->input_picture[b_frames];
01128 if(s->reordered_input_picture[0]->pict_type != FF_I_TYPE)
01129 s->reordered_input_picture[0]->pict_type= FF_P_TYPE;
01130 s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
01131 for(i=0; i<b_frames; i++){
01132 s->reordered_input_picture[i+1]= s->input_picture[i];
01133 s->reordered_input_picture[i+1]->pict_type= FF_B_TYPE;
01134 s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
01135 }
01136 }
01137 }
01138 no_output_pic:
01139 if(s->reordered_input_picture[0]){
01140 s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=FF_B_TYPE ? 3 : 0;
01141
01142 ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
01143
01144 if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size){
/* the input is a shared user buffer, or rate control may force a re-encode, so
   switch to a freshly allocated internal picture and release the input one */
01147 int i= ff_find_unused_picture(s, 0);
01148 Picture *pic= &s->picture[i];
01149
01150 pic->reference = s->reordered_input_picture[0]->reference;
01151 alloc_picture(s, pic, 0);
01152
01153
01154 if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_INTERNAL)
01155 s->avctx->release_buffer(s->avctx, (AVFrame*)s->reordered_input_picture[0]);
01156 for(i=0; i<4; i++)
01157 s->reordered_input_picture[0]->data[i]= NULL;
01158 s->reordered_input_picture[0]->type= 0;
01159
01160 copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
01161
01162 s->current_picture_ptr= pic;
01163 }else{
/* the input buffer is owned by the encoder and can be used directly as the current picture */
01166 assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
01167 || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
01168
01169 s->current_picture_ptr= s->reordered_input_picture[0];
01170 for(i=0; i<4; i++){
01171 s->new_picture.data[i]+= INPLACE_OFFSET;
01172 }
01173 }
01174 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
01175
01176 s->picture_number= s->new_picture.display_picture_number;
01177
01178 }else{
01179 memset(&s->new_picture, 0, sizeof(Picture));
01180 }
01181 }
01182
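/* Top-level per-frame entry point: load and reorder the input, encode the picture
 * (retrying with a larger quantizer if the VBV buffer would overflow), append
 * stuffing and patch vbv_delay where the standard requires it, and return the
 * number of bytes written. */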
01183 int MPV_encode_picture(AVCodecContext *avctx,
01184 unsigned char *buf, int buf_size, void *data)
01185 {
01186 MpegEncContext *s = avctx->priv_data;
01187 AVFrame *pic_arg = data;
01188 int i, stuffing_count;
01189
01190 for(i=0; i<avctx->thread_count; i++){
01191 int start_y= s->thread_context[i]->start_mb_y;
01192 int end_y= s->thread_context[i]-> end_mb_y;
01193 int h= s->mb_height;
01194 uint8_t *start= buf + (size_t)(((int64_t) buf_size)*start_y/h);
01195 uint8_t *end = buf + (size_t)(((int64_t) buf_size)* end_y/h);
01196
01197 init_put_bits(&s->thread_context[i]->pb, start, end - start);
01198 }
01199
01200 s->picture_in_gop_number++;
01201
01202 if(load_input_picture(s, pic_arg) < 0)
01203 return -1;
01204
01205 select_input_picture(s);
01206
01207
01208 if(s->new_picture.data[0]){
01209 s->pict_type= s->new_picture.pict_type;
01210
01211
01212 MPV_frame_start(s, avctx);
01213 vbv_retry:
01214 if (encode_picture(s, s->picture_number) < 0)
01215 return -1;
01216
01217 avctx->real_pict_num = s->picture_number;
01218 avctx->header_bits = s->header_bits;
01219 avctx->mv_bits = s->mv_bits;
01220 avctx->misc_bits = s->misc_bits;
01221 avctx->i_tex_bits = s->i_tex_bits;
01222 avctx->p_tex_bits = s->p_tex_bits;
01223 avctx->i_count = s->i_count;
01224 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
01225 avctx->skip_count = s->skip_count;
01226
01227 MPV_frame_end(s);
01228
01229 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
01230 ff_mjpeg_encode_picture_trailer(s);
01231
01232 if(avctx->rc_buffer_size){
01233 RateControlContext *rcc= &s->rc_context;
01234 int max_size= rcc->buffer_index * avctx->rc_max_available_vbv_use;
01235
01236 if(put_bits_count(&s->pb) > max_size && s->lambda < s->avctx->lmax){
01237 s->next_lambda= FFMAX(s->lambda+1, s->lambda*(s->qscale+1) / s->qscale);
01238 if(s->adaptive_quant){
01239 int i;
01240 for(i=0; i<s->mb_height*s->mb_stride; i++)
01241 s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale);
01242 }
01243 s->mb_skipped = 0;
01244 if(s->pict_type==FF_P_TYPE){
01245 if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
01246 s->no_rounding ^= 1;
01247 }
01248 if(s->pict_type!=FF_B_TYPE){
01249 s->time_base= s->last_time_base;
01250 s->last_non_b_time= s->time - s->pp_time;
01251 }
01252
01253 for(i=0; i<avctx->thread_count; i++){
01254 PutBitContext *pb= &s->thread_context[i]->pb;
01255 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
01256 }
01257 goto vbv_retry;
01258 }
01259
01260 assert(s->avctx->rc_max_rate);
01261 }
01262
01263 if(s->flags&CODEC_FLAG_PASS1)
01264 ff_write_pass1_stats(s);
01265
01266 for(i=0; i<4; i++){
01267 s->current_picture_ptr->error[i]= s->current_picture.error[i];
01268 avctx->error[i] += s->current_picture_ptr->error[i];
01269 }
01270
01271 if(s->flags&CODEC_FLAG_PASS1)
01272 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + avctx->i_tex_bits + avctx->p_tex_bits == put_bits_count(&s->pb));
01273 flush_put_bits(&s->pb);
01274 s->frame_bits = put_bits_count(&s->pb);
01275
01276 stuffing_count= ff_vbv_update(s, s->frame_bits);
01277 if(stuffing_count){
01278 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < stuffing_count + 50){
01279 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
01280 return -1;
01281 }
01282
01283 switch(s->codec_id){
01284 case CODEC_ID_MPEG1VIDEO:
01285 case CODEC_ID_MPEG2VIDEO:
01286 while(stuffing_count--){
01287 put_bits(&s->pb, 8, 0);
01288 }
01289 break;
01290 case CODEC_ID_MPEG4:
01291 put_bits(&s->pb, 16, 0);
01292 put_bits(&s->pb, 16, 0x1C3);
01293 stuffing_count -= 4;
01294 while(stuffing_count--){
01295 put_bits(&s->pb, 8, 0xFF);
01296 }
01297 break;
01298 default:
01299 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
01300 }
01301 flush_put_bits(&s->pb);
01302 s->frame_bits = put_bits_count(&s->pb);
01303 }
01304
01305
01306 if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1
01307 && 90000LL * (avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*0xFFFFLL){
01308 int vbv_delay;
01309
01310 assert(s->repeat_first_field==0);
01311
01312 vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate);
01313 assert(vbv_delay < 0xFFFF);
01314
01315 s->vbv_delay_ptr[0] &= 0xF8;
01316 s->vbv_delay_ptr[0] |= vbv_delay>>13;
01317 s->vbv_delay_ptr[1] = vbv_delay>>5;
01318 s->vbv_delay_ptr[2] &= 0x07;
01319 s->vbv_delay_ptr[2] |= vbv_delay<<3;
01320 }
01321 s->total_bits += s->frame_bits;
01322 avctx->frame_bits = s->frame_bits;
01323 }else{
01324 assert((pbBufPtr(&s->pb) == s->pb.buf));
01325 s->frame_bits=0;
01326 }
01327 assert((s->frame_bits&7)==0);
01328
01329 return s->frame_bits/8;
01330 }
01331
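/* If a block contains only a few isolated +-1 coefficients whose estimated coding
 * cost stays below 'threshold', zero the whole block; a negative threshold also
 * allows the DC coefficient to be eliminated. */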
01332 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
01333 {
01334 static const char tab[64]=
01335 {3,2,2,1,1,1,1,1,
01336 1,1,1,1,1,1,1,1,
01337 1,1,1,1,1,1,1,1,
01338 0,0,0,0,0,0,0,0,
01339 0,0,0,0,0,0,0,0,
01340 0,0,0,0,0,0,0,0,
01341 0,0,0,0,0,0,0,0,
01342 0,0,0,0,0,0,0,0};
01343 int score=0;
01344 int run=0;
01345 int i;
01346 DCTELEM *block= s->block[n];
01347 const int last_index= s->block_last_index[n];
01348 int skip_dc;
01349
01350 if(threshold<0){
01351 skip_dc=0;
01352 threshold= -threshold;
01353 }else
01354 skip_dc=1;
01355
01356
01357 if(last_index<=skip_dc - 1) return;
01358
01359 for(i=0; i<=last_index; i++){
01360 const int j = s->intra_scantable.permutated[i];
01361 const int level = FFABS(block[j]);
01362 if(level==1){
01363 if(skip_dc && i==0) continue;
01364 score+= tab[run];
01365 run=0;
01366 }else if(level>1){
01367 return;
01368 }else{
01369 run++;
01370 }
01371 }
01372 if(score >= threshold) return;
01373 for(i=skip_dc; i<=last_index; i++){
01374 const int j = s->intra_scantable.permutated[i];
01375 block[j]=0;
01376 }
01377 if(block[0]) s->block_last_index[n]= 0;
01378 else s->block_last_index[n]= -1;
01379 }
01380
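/* Clamp the quantized coefficients to the codec's legal range [min_qcoeff,
 * max_qcoeff]; overflows are only logged in simple macroblock-decision mode. */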
01381 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
01382 {
01383 int i;
01384 const int maxlevel= s->max_qcoeff;
01385 const int minlevel= s->min_qcoeff;
01386 int overflow=0;
01387
01388 if(s->mb_intra){
01389 i=1;
01390 }else
01391 i=0;
01392
01393 for(;i<=last_index; i++){
01394 const int j= s->intra_scantable.permutated[i];
01395 int level = block[j];
01396
01397 if (level>maxlevel){
01398 level=maxlevel;
01399 overflow++;
01400 }else if(level<minlevel){
01401 level=minlevel;
01402 overflow++;
01403 }
01404
01405 block[j]= level;
01406 }
01407
01408 if(overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
01409 av_log(s->avctx, AV_LOG_INFO, "warning, clipping %d dct coefficients to %d..%d\n", overflow, minlevel, maxlevel);
01410 }
01411
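/* Build an 8x8 weight map proportional to the local standard deviation of each
 * pixel's 3x3 neighbourhood; used by the quantizer noise shaping refinement. */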
01412 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride){
01413 int x, y;
01414
01415 for(y=0; y<8; y++){
01416 for(x=0; x<8; x++){
01417 int x2, y2;
01418 int sum=0;
01419 int sqr=0;
01420 int count=0;
01421
01422 for(y2= FFMAX(y-1, 0); y2 < FFMIN(8, y+2); y2++){
01423 for(x2= FFMAX(x-1, 0); x2 < FFMIN(8, x+2); x2++){
01424 int v= ptr[x2 + y2*stride];
01425 sum += v;
01426 sqr += v*v;
01427 count++;
01428 }
01429 }
01430 weight[x + 8*y]= (36*ff_sqrt(count*sqr - sum*sum)) / count;
01431 }
01432 }
01433 }
01434
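/* Encode one macroblock: apply adaptive quantization, fetch (and edge-extend) the
 * source pixels, compute intra pixels or motion-compensated residuals, optionally
 * switch to interlaced DCT, quantize every block and finally emit the bitstream
 * through the codec-specific macroblock writer. */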
01435 static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_count)
01436 {
01437 int16_t weight[8][64];
01438 DCTELEM orig[8][64];
01439 const int mb_x= s->mb_x;
01440 const int mb_y= s->mb_y;
01441 int i;
01442 int skip_dct[8];
01443 int dct_offset = s->linesize*8;
01444 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01445 int wrap_y, wrap_c;
01446
01447 for(i=0; i<mb_block_count; i++) skip_dct[i]=s->skipdct;
01448
01449 if(s->adaptive_quant){
01450 const int last_qp= s->qscale;
01451 const int mb_xy= mb_x + mb_y*s->mb_stride;
01452
01453 s->lambda= s->lambda_table[mb_xy];
01454 update_qscale(s);
01455
01456 if(!(s->flags&CODEC_FLAG_QP_RD)){
01457 s->qscale= s->current_picture_ptr->qscale_table[mb_xy];
01458 s->dquant= s->qscale - last_qp;
01459
01460 if(s->out_format==FMT_H263){
01461 s->dquant= av_clip(s->dquant, -2, 2);
01462
01463 if(s->codec_id==CODEC_ID_MPEG4){
01464 if(!s->mb_intra){
01465 if(s->pict_type == FF_B_TYPE){
01466 if(s->dquant&1 || s->mv_dir&MV_DIRECT)
01467 s->dquant= 0;
01468 }
01469 if(s->mv_type==MV_TYPE_8X8)
01470 s->dquant=0;
01471 }
01472 }
01473 }
01474 }
01475 ff_set_qscale(s, last_qp + s->dquant);
01476 }else if(s->flags&CODEC_FLAG_QP_RD)
01477 ff_set_qscale(s, s->qscale + s->dquant);
01478
01479 wrap_y = s->linesize;
01480 wrap_c = s->uvlinesize;
01481 ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
01482 ptr_cb = s->new_picture.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
01483 ptr_cr = s->new_picture.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
01484
01485 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
01486 uint8_t *ebuf= s->edge_emu_buffer + 32;
01487 ff_emulated_edge_mc(ebuf , ptr_y , wrap_y,16,16,mb_x*16,mb_y*16, s->width , s->height);
01488 ptr_y= ebuf;
01489 ff_emulated_edge_mc(ebuf+18*wrap_y , ptr_cb, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
01490 ptr_cb= ebuf+18*wrap_y;
01491 ff_emulated_edge_mc(ebuf+18*wrap_y+8, ptr_cr, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
01492 ptr_cr= ebuf+18*wrap_y+8;
01493 }
01494
01495 if (s->mb_intra) {
01496 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
01497 int progressive_score, interlaced_score;
01498
01499 s->interlaced_dct=0;
01500 progressive_score= s->dsp.ildct_cmp[4](s, ptr_y , NULL, wrap_y, 8)
01501 +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y*8, NULL, wrap_y, 8) - 400;
01502
01503 if(progressive_score > 0){
01504 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y , NULL, wrap_y*2, 8)
01505 +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y , NULL, wrap_y*2, 8);
01506 if(progressive_score > interlaced_score){
01507 s->interlaced_dct=1;
01508
01509 dct_offset= wrap_y;
01510 wrap_y<<=1;
01511 if (s->chroma_format == CHROMA_422)
01512 wrap_c<<=1;
01513 }
01514 }
01515 }
01516
01517 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
01518 s->dsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
01519 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
01520 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
01521
01522 if(s->flags&CODEC_FLAG_GRAY){
01523 skip_dct[4]= 1;
01524 skip_dct[5]= 1;
01525 }else{
01526 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
01527 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
01528 if(!s->chroma_y_shift){
01529 s->dsp.get_pixels(s->block[6], ptr_cb + (dct_offset>>1), wrap_c);
01530 s->dsp.get_pixels(s->block[7], ptr_cr + (dct_offset>>1), wrap_c);
01531 }
01532 }
01533 }else{
01534 op_pixels_func (*op_pix)[4];
01535 qpel_mc_func (*op_qpix)[16];
01536 uint8_t *dest_y, *dest_cb, *dest_cr;
01537
01538 dest_y = s->dest[0];
01539 dest_cb = s->dest[1];
01540 dest_cr = s->dest[2];
01541
01542 if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
01543 op_pix = s->dsp.put_pixels_tab;
01544 op_qpix= s->dsp.put_qpel_pixels_tab;
01545 }else{
01546 op_pix = s->dsp.put_no_rnd_pixels_tab;
01547 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
01548 }
01549
01550 if (s->mv_dir & MV_DIR_FORWARD) {
01551 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
01552 op_pix = s->dsp.avg_pixels_tab;
01553 op_qpix= s->dsp.avg_qpel_pixels_tab;
01554 }
01555 if (s->mv_dir & MV_DIR_BACKWARD) {
01556 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
01557 }
01558
01559 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
01560 int progressive_score, interlaced_score;
01561
01562 s->interlaced_dct=0;
01563 progressive_score= s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y, 8)
01564 +s->dsp.ildct_cmp[0](s, dest_y + wrap_y*8, ptr_y + wrap_y*8, wrap_y, 8) - 400;
01565
01566 if(s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400;
01567
01568 if(progressive_score>0){
01569 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y*2, 8)
01570 +s->dsp.ildct_cmp[0](s, dest_y + wrap_y , ptr_y + wrap_y , wrap_y*2, 8);
01571
01572 if(progressive_score > interlaced_score){
01573 s->interlaced_dct=1;
01574
01575 dct_offset= wrap_y;
01576 wrap_y<<=1;
01577 if (s->chroma_format == CHROMA_422)
01578 wrap_c<<=1;
01579 }
01580 }
01581 }
01582
01583 s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y);
01584 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
01585 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y);
01586 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
01587
01588 if(s->flags&CODEC_FLAG_GRAY){
01589 skip_dct[4]= 1;
01590 skip_dct[5]= 1;
01591 }else{
01592 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
01593 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
01594 if(!s->chroma_y_shift){
01595 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset>>1), dest_cb + (dct_offset>>1), wrap_c);
01596 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset>>1), dest_cr + (dct_offset>>1), wrap_c);
01597 }
01598 }
01599
01600 if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
01601
01602 if(s->dsp.sad[1](NULL, ptr_y , dest_y , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1;
01603 if(s->dsp.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1;
01604 if(s->dsp.sad[1](NULL, ptr_y +dct_offset , dest_y +dct_offset , wrap_y, 8) < 20*s->qscale) skip_dct[2]= 1;
01605 if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1;
01606 if(s->dsp.sad[1](NULL, ptr_cb , dest_cb , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1;
01607 if(s->dsp.sad[1](NULL, ptr_cr , dest_cr , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1;
01608 if(!s->chroma_y_shift){
01609 if(s->dsp.sad[1](NULL, ptr_cb +(dct_offset>>1), dest_cb +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[6]= 1;
01610 if(s->dsp.sad[1](NULL, ptr_cr +(dct_offset>>1), dest_cr +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[7]= 1;
01611 }
01612 }
01613 }
01614
01615 if(s->avctx->quantizer_noise_shaping){
01616 if(!skip_dct[0]) get_visual_weight(weight[0], ptr_y , wrap_y);
01617 if(!skip_dct[1]) get_visual_weight(weight[1], ptr_y + 8, wrap_y);
01618 if(!skip_dct[2]) get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
01619 if(!skip_dct[3]) get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
01620 if(!skip_dct[4]) get_visual_weight(weight[4], ptr_cb , wrap_c);
01621 if(!skip_dct[5]) get_visual_weight(weight[5], ptr_cr , wrap_c);
01622 if(!s->chroma_y_shift){
01623 if(!skip_dct[6]) get_visual_weight(weight[6], ptr_cb + (dct_offset>>1), wrap_c);
01624 if(!skip_dct[7]) get_visual_weight(weight[7], ptr_cr + (dct_offset>>1), wrap_c);
01625 }
01626 memcpy(orig[0], s->block[0], sizeof(DCTELEM)*64*mb_block_count);
01627 }
01628
/* DCT & quantize */
01630 assert(s->out_format!=FMT_MJPEG || s->qscale==8);
01631 {
01632 for(i=0;i<mb_block_count;i++) {
01633 if(!skip_dct[i]){
01634 int overflow;
01635 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
01636
01637
01638
01639 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
01640 }else
01641 s->block_last_index[i]= -1;
01642 }
01643 if(s->avctx->quantizer_noise_shaping){
01644 for(i=0;i<mb_block_count;i++) {
01645 if(!skip_dct[i]){
01646 s->block_last_index[i] = dct_quantize_refine(s, s->block[i], weight[i], orig[i], i, s->qscale);
01647 }
01648 }
01649 }
01650
01651 if(s->luma_elim_threshold && !s->mb_intra)
01652 for(i=0; i<4; i++)
01653 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
01654 if(s->chroma_elim_threshold && !s->mb_intra)
01655 for(i=4; i<mb_block_count; i++)
01656 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
01657
01658 if(s->flags & CODEC_FLAG_CBP_RD){
01659 for(i=0;i<mb_block_count;i++) {
01660 if(s->block_last_index[i] == -1)
01661 s->coded_score[i]= INT_MAX/256;
01662 }
01663 }
01664 }
01665
01666 if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
01667 s->block_last_index[4]=
01668 s->block_last_index[5]= 0;
01669 s->block[4][0]=
01670 s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
01671 }
/* optimized quantizers can report an inaccurate block_last_index with the
   alternate scan, so recompute it from the actual coefficients */
01674 if(s->alternate_scan && s->dct_quantize != dct_quantize_c){
01675 for(i=0; i<mb_block_count; i++){
01676 int j;
01677 if(s->block_last_index[i]>0){
01678 for(j=63; j>0; j--){
01679 if(s->block[i][ s->intra_scantable.permutated[j] ]) break;
01680 }
01681 s->block_last_index[i]= j;
01682 }
01683 }
01684 }
01685
01686
01687 switch(s->codec_id){
01688 case CODEC_ID_MPEG1VIDEO:
01689 case CODEC_ID_MPEG2VIDEO:
01690 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
01691 mpeg1_encode_mb(s, s->block, motion_x, motion_y);
01692 break;
01693 case CODEC_ID_MPEG4:
01694 if (CONFIG_MPEG4_ENCODER)
01695 mpeg4_encode_mb(s, s->block, motion_x, motion_y);
01696 break;
01697 case CODEC_ID_MSMPEG4V2:
01698 case CODEC_ID_MSMPEG4V3:
01699 case CODEC_ID_WMV1:
01700 if (CONFIG_MSMPEG4_ENCODER)
01701 msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
01702 break;
01703 case CODEC_ID_WMV2:
01704 if (CONFIG_WMV2_ENCODER)
01705 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
01706 break;
01707 case CODEC_ID_H261:
01708 if (CONFIG_H261_ENCODER)
01709 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
01710 break;
01711 case CODEC_ID_H263:
01712 case CODEC_ID_H263P:
01713 case CODEC_ID_FLV1:
01714 case CODEC_ID_RV10:
01715 case CODEC_ID_RV20:
01716 if (CONFIG_H263_ENCODER || CONFIG_H263P_ENCODER ||
01717 CONFIG_FLV_ENCODER || CONFIG_RV10_ENCODER || CONFIG_RV20_ENCODER)
01718 h263_encode_mb(s, s->block, motion_x, motion_y);
01719 break;
01720 case CODEC_ID_MJPEG:
01721 if (CONFIG_MJPEG_ENCODER)
01722 ff_mjpeg_encode_mb(s, s->block);
01723 break;
01724 default:
01725 assert(0);
01726 }
01727 }
01728
01729 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
01730 {
01731 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
01732 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
01733 }
01734
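/* State save/restore helpers for the rate-distortion macroblock-mode search:
 * copy the encoder state that encoding a macroblock consumes (before) and the
 * state it produces (after) between the trial and the best context. */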
01735 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
01736 int i;
01737
01738 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
01739
01740
01741 d->mb_skip_run= s->mb_skip_run;
01742 for(i=0; i<3; i++)
01743 d->last_dc[i]= s->last_dc[i];
01744
01745
01746 d->mv_bits= s->mv_bits;
01747 d->i_tex_bits= s->i_tex_bits;
01748 d->p_tex_bits= s->p_tex_bits;
01749 d->i_count= s->i_count;
01750 d->f_count= s->f_count;
01751 d->b_count= s->b_count;
01752 d->skip_count= s->skip_count;
01753 d->misc_bits= s->misc_bits;
01754 d->last_bits= 0;
01755
01756 d->mb_skipped= 0;
01757 d->qscale= s->qscale;
01758 d->dquant= s->dquant;
01759
01760 d->esc3_level_length= s->esc3_level_length;
01761 }
01762
01763 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
01764 int i;
01765
01766 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
01767 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
01768
01769
01770 d->mb_skip_run= s->mb_skip_run;
01771 for(i=0; i<3; i++)
01772 d->last_dc[i]= s->last_dc[i];
01773
01774
01775 d->mv_bits= s->mv_bits;
01776 d->i_tex_bits= s->i_tex_bits;
01777 d->p_tex_bits= s->p_tex_bits;
01778 d->i_count= s->i_count;
01779 d->f_count= s->f_count;
01780 d->b_count= s->b_count;
01781 d->skip_count= s->skip_count;
01782 d->misc_bits= s->misc_bits;
01783
01784 d->mb_intra= s->mb_intra;
01785 d->mb_skipped= s->mb_skipped;
01786 d->mv_type= s->mv_type;
01787 d->mv_dir= s->mv_dir;
01788 d->pb= s->pb;
01789 if(s->data_partitioning){
01790 d->pb2= s->pb2;
01791 d->tex_pb= s->tex_pb;
01792 }
01793 d->block= s->block;
01794 for(i=0; i<8; i++)
01795 d->block_last_index[i]= s->block_last_index[i];
01796 d->interlaced_dct= s->interlaced_dct;
01797 d->qscale= s->qscale;
01798
01799 d->esc3_level_length= s->esc3_level_length;
01800 }
01801
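/*
 * A note on the RD macroblock-mode search: encode_mb_hq() trial-encodes one
 * candidate mode into one of two scratch bit buffers (the pb/pb2/tex_pb
 * pairs) and, when *next_block is set, into rd_scratchpad instead of the real
 * destination, so the losing candidate can be discarded.  The score is
 *     bits                                     for FF_MB_DECISION_BITS
 *     bits*lambda2 + (SSE << FF_LAMBDA_SHIFT)  for FF_MB_DECISION_RD
 * and the best-scoring context is saved into *best via copy_context_after_encode().
 */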
01802 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
01803 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
01804 int *dmin, int *next_block, int motion_x, int motion_y)
01805 {
01806 int score;
01807 uint8_t *dest_backup[3];
01808
01809 copy_context_before_encode(s, backup, type);
01810
01811 s->block= s->blocks[*next_block];
01812 s->pb= pb[*next_block];
01813 if(s->data_partitioning){
01814 s->pb2 = pb2 [*next_block];
01815 s->tex_pb= tex_pb[*next_block];
01816 }
01817
01818 if(*next_block){
01819 memcpy(dest_backup, s->dest, sizeof(s->dest));
01820 s->dest[0] = s->rd_scratchpad;
01821 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
01822 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
01823 assert(s->linesize >= 32);
01824 }
01825
01826 encode_mb(s, motion_x, motion_y);
01827
01828 score= put_bits_count(&s->pb);
01829 if(s->data_partitioning){
01830 score+= put_bits_count(&s->pb2);
01831 score+= put_bits_count(&s->tex_pb);
01832 }
01833
01834 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
01835 MPV_decode_mb(s, s->block);
01836
01837 score *= s->lambda2;
01838 score += sse_mb(s) << FF_LAMBDA_SHIFT;
01839 }
01840
01841 if(*next_block){
01842 memcpy(s->dest, dest_backup, sizeof(s->dest));
01843 }
01844
01845 if(score<*dmin){
01846 *dmin= score;
01847 *next_block^=1;
01848
01849 copy_context_after_encode(best, s, type);
01850 }
01851 }
01852
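/*
 * sse() returns the plain sum of squared differences between two blocks:
 *     acc = sum over x,y of (src1[x,y] - src2[x,y])^2
 * using the DSP 16x16/8x8 kernels when possible and a scalar fallback (via
 * the ff_squareTbl lookup) for partial blocks at the right/bottom picture
 * edge; sse_mb() below builds the per-macroblock luma+chroma total from it.
 */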
01853 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
01854 uint32_t *sq = ff_squareTbl + 256;
01855 int acc=0;
01856 int x,y;
01857
01858 if(w==16 && h==16)
01859 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
01860 else if(w==8 && h==8)
01861 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
01862
01863 for(y=0; y<h; y++){
01864 for(x=0; x<w; x++){
01865 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
01866 }
01867 }
01868
01869 assert(acc>=0);
01870
01871 return acc;
01872 }
01873
01874 static int sse_mb(MpegEncContext *s){
01875 int w= 16;
01876 int h= 16;
01877
01878 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
01879 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
01880
01881 if(w==16 && h==16)
01882 if(s->avctx->mb_cmp == FF_CMP_NSSE){
01883 return s->dsp.nsse[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
01884 +s->dsp.nsse[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
01885 +s->dsp.nsse[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
01886 }else{
01887 return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
01888 +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
01889 +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
01890 }
01891 else
01892 return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
01893 +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
01894 +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
01895 }
01896
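/*
 * The *_thread() helpers below are slice workers run through avctx->execute():
 * pre_estimate_motion_thread() does the optional ME pre-pass (dia_size taken
 * from pre_dia_size, scanning the slice bottom-up), estimate_motion_thread()
 * runs the real P/B-frame motion search, and mb_var_thread() gathers per-MB
 * variance and mean for rate control.
 */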
01897 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
01898 MpegEncContext *s= *(void**)arg;
01899
01900
01901 s->me.pre_pass=1;
01902 s->me.dia_size= s->avctx->pre_dia_size;
01903 s->first_slice_line=1;
01904 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
01905 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
01906 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
01907 }
01908 s->first_slice_line=0;
01909 }
01910
01911 s->me.pre_pass=0;
01912
01913 return 0;
01914 }
01915
01916 static int estimate_motion_thread(AVCodecContext *c, void *arg){
01917 MpegEncContext *s= *(void**)arg;
01918
01919 ff_check_alignment();
01920
01921 s->me.dia_size= s->avctx->dia_size;
01922 s->first_slice_line=1;
01923 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
01924 s->mb_x=0;
01925 ff_init_block_index(s);
01926 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
01927 s->block_index[0]+=2;
01928 s->block_index[1]+=2;
01929 s->block_index[2]+=2;
01930 s->block_index[3]+=2;
01931
01932
01933 if(s->pict_type==FF_B_TYPE)
01934 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
01935 else
01936 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
01937 }
01938 s->first_slice_line=0;
01939 }
01940 return 0;
01941 }
01942
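/*
 * mb_var_thread() computes, per 16x16 luma block, an approximate variance
 *     varc ~= (sum(pix^2) - sum(pix)^2/256) / 256
 * (the extra +500+128 terms look like a small bias plus rounding) together
 * with the mean (sum+128)>>8; both feed adaptive quantization / rate control.
 */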
01943 static int mb_var_thread(AVCodecContext *c, void *arg){
01944 MpegEncContext *s= *(void**)arg;
01945 int mb_x, mb_y;
01946
01947 ff_check_alignment();
01948
01949 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
01950 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
01951 int xx = mb_x * 16;
01952 int yy = mb_y * 16;
01953 uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
01954 int varc;
01955 int sum = s->dsp.pix_sum(pix, s->linesize);
01956
01957 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
01958
01959 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
01960 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
01961 s->me.mb_var_sum_temp += varc;
01962 }
01963 }
01964 return 0;
01965 }
01966
01967 static void write_slice_end(MpegEncContext *s){
01968 if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4){
01969 if(s->partitioned_frame){
01970 ff_mpeg4_merge_partitions(s);
01971 }
01972
01973 ff_mpeg4_stuffing(&s->pb);
01974 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
01975 ff_mjpeg_encode_stuffing(&s->pb);
01976 }
01977
01978 align_put_bits(&s->pb);
01979 flush_put_bits(&s->pb);
01980
01981 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
01982 s->misc_bits+= get_bits_diff(s);
01983 }
01984
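/*
 * encode_thread() is the per-slice encoding worker: it walks the slice's
 * macroblocks, opens a new GOB / video packet / slice when the RTP payload
 * size is reached, and encodes each MB either directly (single candidate
 * type) or through the encode_mb_hq() RD search when several candidate types
 * or CODEC_FLAG_QP_RD are active.  The bit_buf*[2] scratch buffers alternate
 * between the "current try" and "best so far" bitstreams, and the winner is
 * copied back into the real PutBitContext once the decision is made.
 */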
01985 static int encode_thread(AVCodecContext *c, void *arg){
01986 MpegEncContext *s= *(void**)arg;
01987 int mb_x, mb_y, pdif = 0;
01988 int chr_h= 16>>s->chroma_y_shift;
01989 int i, j;
01990 MpegEncContext best_s, backup_s;
01991 uint8_t bit_buf[2][MAX_MB_BYTES];
01992 uint8_t bit_buf2[2][MAX_MB_BYTES];
01993 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
01994 PutBitContext pb[2], pb2[2], tex_pb[2];
01995
01996
01997 ff_check_alignment();
01998
01999 for(i=0; i<2; i++){
02000 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
02001 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
02002 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
02003 }
02004
02005 s->last_bits= put_bits_count(&s->pb);
02006 s->mv_bits=0;
02007 s->misc_bits=0;
02008 s->i_tex_bits=0;
02009 s->p_tex_bits=0;
02010 s->i_count=0;
02011 s->f_count=0;
02012 s->b_count=0;
02013 s->skip_count=0;
02014
02015 for(i=0; i<3; i++){
02016
02017 /* init last DC values (note: a quant matrix value of 8 is implied here) */
02018 s->last_dc[i] = 128 << s->intra_dc_precision;
02019
02020 s->current_picture.error[i] = 0;
02021 }
02022 s->mb_skip_run = 0;
02023 memset(s->last_mv, 0, sizeof(s->last_mv));
02024
02025 s->last_mv_dir = 0;
02026
02027 switch(s->codec_id){
02028 case CODEC_ID_H263:
02029 case CODEC_ID_H263P:
02030 case CODEC_ID_FLV1:
02031 if (CONFIG_H263_ENCODER || CONFIG_H263P_ENCODER || CONFIG_FLV_ENCODER)
02032 s->gob_index = ff_h263_get_gob_height(s);
02033 break;
02034 case CODEC_ID_MPEG4:
02035 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
02036 ff_mpeg4_init_partitions(s);
02037 break;
02038 }
02039
02040 s->resync_mb_x=0;
02041 s->resync_mb_y=0;
02042 s->first_slice_line = 1;
02043 s->ptr_lastgob = s->pb.buf;
02044 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
02045
02046 s->mb_x=0;
02047 s->mb_y= mb_y;
02048
02049 ff_set_qscale(s, s->qscale);
02050 ff_init_block_index(s);
02051
02052 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
02053 int xy= mb_y*s->mb_stride + mb_x;
02054 int mb_type= s->mb_type[xy];
02055
02056 int dmin= INT_MAX;
02057 int dir;
02058
02059 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
02060 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
02061 return -1;
02062 }
02063 if(s->data_partitioning){
02064 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
02065 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
02066 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
02067 return -1;
02068 }
02069 }
02070
02071 s->mb_x = mb_x;
02072 s->mb_y = mb_y;
02073 ff_update_block_index(s);
02074
02075 if(CONFIG_H261_ENCODER && s->codec_id == CODEC_ID_H261){
02076 ff_h261_reorder_mb_index(s);
02077 xy= s->mb_y*s->mb_stride + s->mb_x;
02078 mb_type= s->mb_type[xy];
02079 }
02080
02081 /* write GOB / video packet header where needed and handle resync */
02082 if(s->rtp_mode){
02083 int current_packet_size, is_gob_start;
02084
02085 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
02086
02087 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
02088
02089 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
02090
02091 switch(s->codec_id){
02092 case CODEC_ID_H263:
02093 case CODEC_ID_H263P:
02094 if(!s->h263_slice_structured)
02095 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
02096 break;
02097 case CODEC_ID_MPEG2VIDEO:
02098                 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1; /* fall through */
02099 case CODEC_ID_MPEG1VIDEO:
02100 if(s->mb_skip_run) is_gob_start=0;
02101 break;
02102 }
02103
02104 if(is_gob_start){
02105 if(s->start_mb_y != mb_y || mb_x!=0){
02106 write_slice_end(s);
02107
02108 if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
02109 ff_mpeg4_init_partitions(s);
02110 }
02111 }
02112
02113 assert((put_bits_count(&s->pb)&7) == 0);
02114 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
02115
02116 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
02117 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
02118 int d= 100 / s->avctx->error_rate;
02119 if(r % d == 0){
02120 current_packet_size=0;
02121 #ifndef ALT_BITSTREAM_WRITER
02122 s->pb.buf_ptr= s->ptr_lastgob;
02123 #endif
02124 assert(pbBufPtr(&s->pb) == s->ptr_lastgob);
02125 }
02126 }
02127
02128 if (s->avctx->rtp_callback){
02129 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
02130 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
02131 }
02132
02133 switch(s->codec_id){
02134 case CODEC_ID_MPEG4:
02135 if (CONFIG_MPEG4_ENCODER) {
02136 ff_mpeg4_encode_video_packet_header(s);
02137 ff_mpeg4_clean_buffers(s);
02138 }
02139 break;
02140 case CODEC_ID_MPEG1VIDEO:
02141 case CODEC_ID_MPEG2VIDEO:
02142 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
02143 ff_mpeg1_encode_slice_header(s);
02144 ff_mpeg1_clean_buffers(s);
02145 }
02146 break;
02147 case CODEC_ID_H263:
02148 case CODEC_ID_H263P:
02149 if (CONFIG_H263_ENCODER || CONFIG_H263P_ENCODER)
02150 h263_encode_gob_header(s, mb_y);
02151 break;
02152 }
02153
02154 if(s->flags&CODEC_FLAG_PASS1){
02155 int bits= put_bits_count(&s->pb);
02156 s->misc_bits+= bits - s->last_bits;
02157 s->last_bits= bits;
02158 }
02159
02160 s->ptr_lastgob += current_packet_size;
02161 s->first_slice_line=1;
02162 s->resync_mb_x=mb_x;
02163 s->resync_mb_y=mb_y;
02164 }
02165 }
02166
02167 if( (s->resync_mb_x == s->mb_x)
02168 && s->resync_mb_y+1 == s->mb_y){
02169 s->first_slice_line=0;
02170 }
02171
02172 s->mb_skipped=0;
02173 s->dquant=0;
02174
02175 if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){
02176 int next_block=0;
02177 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
02178
02179 copy_context_before_encode(&backup_s, s, -1);
02180 backup_s.pb= s->pb;
02181 best_s.data_partitioning= s->data_partitioning;
02182 best_s.partitioned_frame= s->partitioned_frame;
02183 if(s->data_partitioning){
02184 backup_s.pb2= s->pb2;
02185 backup_s.tex_pb= s->tex_pb;
02186 }
02187
02188 if(mb_type&CANDIDATE_MB_TYPE_INTER){
02189 s->mv_dir = MV_DIR_FORWARD;
02190 s->mv_type = MV_TYPE_16X16;
02191 s->mb_intra= 0;
02192 s->mv[0][0][0] = s->p_mv_table[xy][0];
02193 s->mv[0][0][1] = s->p_mv_table[xy][1];
02194 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
02195 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02196 }
02197 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
02198 s->mv_dir = MV_DIR_FORWARD;
02199 s->mv_type = MV_TYPE_FIELD;
02200 s->mb_intra= 0;
02201 for(i=0; i<2; i++){
02202 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
02203 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
02204 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
02205 }
02206 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
02207 &dmin, &next_block, 0, 0);
02208 }
02209 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
02210 s->mv_dir = MV_DIR_FORWARD;
02211 s->mv_type = MV_TYPE_16X16;
02212 s->mb_intra= 0;
02213 s->mv[0][0][0] = 0;
02214 s->mv[0][0][1] = 0;
02215 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
02216 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02217 }
02218 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
02219 s->mv_dir = MV_DIR_FORWARD;
02220 s->mv_type = MV_TYPE_8X8;
02221 s->mb_intra= 0;
02222 for(i=0; i<4; i++){
02223 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
02224 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
02225 }
02226 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
02227 &dmin, &next_block, 0, 0);
02228 }
02229 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
02230 s->mv_dir = MV_DIR_FORWARD;
02231 s->mv_type = MV_TYPE_16X16;
02232 s->mb_intra= 0;
02233 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
02234 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
02235 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
02236 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02237 }
02238 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
02239 s->mv_dir = MV_DIR_BACKWARD;
02240 s->mv_type = MV_TYPE_16X16;
02241 s->mb_intra= 0;
02242 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
02243 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
02244 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
02245 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
02246 }
02247 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
02248 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02249 s->mv_type = MV_TYPE_16X16;
02250 s->mb_intra= 0;
02251 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
02252 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
02253 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
02254 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
02255 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
02256 &dmin, &next_block, 0, 0);
02257 }
02258 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
02259 s->mv_dir = MV_DIR_FORWARD;
02260 s->mv_type = MV_TYPE_FIELD;
02261 s->mb_intra= 0;
02262 for(i=0; i<2; i++){
02263 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
02264 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
02265 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
02266 }
02267 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
02268 &dmin, &next_block, 0, 0);
02269 }
02270 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
02271 s->mv_dir = MV_DIR_BACKWARD;
02272 s->mv_type = MV_TYPE_FIELD;
02273 s->mb_intra= 0;
02274 for(i=0; i<2; i++){
02275 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
02276 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
02277 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
02278 }
02279 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
02280 &dmin, &next_block, 0, 0);
02281 }
02282 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
02283 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02284 s->mv_type = MV_TYPE_FIELD;
02285 s->mb_intra= 0;
02286 for(dir=0; dir<2; dir++){
02287 for(i=0; i<2; i++){
02288 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
02289 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
02290 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
02291 }
02292 }
02293 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
02294 &dmin, &next_block, 0, 0);
02295 }
02296 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
02297 s->mv_dir = 0;
02298 s->mv_type = MV_TYPE_16X16;
02299 s->mb_intra= 1;
02300 s->mv[0][0][0] = 0;
02301 s->mv[0][0][1] = 0;
02302 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
02303 &dmin, &next_block, 0, 0);
02304 if(s->h263_pred || s->h263_aic){
02305 if(best_s.mb_intra)
02306 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
02307 else
02308 ff_clean_intra_table_entries(s);
02309 }
02310 }
02311
02312 if((s->flags & CODEC_FLAG_QP_RD) && dmin < INT_MAX){
02313 if(best_s.mv_type==MV_TYPE_16X16){
02314 const int last_qp= backup_s.qscale;
02315 int qpi, qp, dc[6];
02316 DCTELEM ac[6][16];
02317 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
02318 static const int dquant_tab[4]={-1,1,-2,2};
02319
02320 assert(backup_s.dquant == 0);
02321
02322
02323 s->mv_dir= best_s.mv_dir;
02324 s->mv_type = MV_TYPE_16X16;
02325 s->mb_intra= best_s.mb_intra;
02326 s->mv[0][0][0] = best_s.mv[0][0][0];
02327 s->mv[0][0][1] = best_s.mv[0][0][1];
02328 s->mv[1][0][0] = best_s.mv[1][0][0];
02329 s->mv[1][0][1] = best_s.mv[1][0][1];
02330
02331 qpi = s->pict_type == FF_B_TYPE ? 2 : 0;
02332 for(; qpi<4; qpi++){
02333 int dquant= dquant_tab[qpi];
02334 qp= last_qp + dquant;
02335 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
02336 continue;
02337 backup_s.dquant= dquant;
02338 if(s->mb_intra && s->dc_val[0]){
02339 for(i=0; i<6; i++){
02340 dc[i]= s->dc_val[0][ s->block_index[i] ];
02341 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
02342 }
02343 }
02344
02345 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
02346 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
02347 if(best_s.qscale != qp){
02348 if(s->mb_intra && s->dc_val[0]){
02349 for(i=0; i<6; i++){
02350 s->dc_val[0][ s->block_index[i] ]= dc[i];
02351 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
02352 }
02353 }
02354 }
02355 }
02356 }
02357 }
02358 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
02359 int mx= s->b_direct_mv_table[xy][0];
02360 int my= s->b_direct_mv_table[xy][1];
02361
02362 backup_s.dquant = 0;
02363 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
02364 s->mb_intra= 0;
02365 ff_mpeg4_set_direct_mv(s, mx, my);
02366 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
02367 &dmin, &next_block, mx, my);
02368 }
02369 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
02370 backup_s.dquant = 0;
02371 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
02372 s->mb_intra= 0;
02373 ff_mpeg4_set_direct_mv(s, 0, 0);
02374 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
02375 &dmin, &next_block, 0, 0);
02376 }
02377 if(!best_s.mb_intra && s->flags2&CODEC_FLAG2_SKIP_RD){
02378 int coded=0;
02379 for(i=0; i<6; i++)
02380 coded |= s->block_last_index[i];
02381 if(coded){
02382 int mx,my;
02383 memcpy(s->mv, best_s.mv, sizeof(s->mv));
02384 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
02385 mx=my=0;
02386 ff_mpeg4_set_direct_mv(s, mx, my);
02387 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
02388 mx= s->mv[1][0][0];
02389 my= s->mv[1][0][1];
02390 }else{
02391 mx= s->mv[0][0][0];
02392 my= s->mv[0][0][1];
02393 }
02394
02395 s->mv_dir= best_s.mv_dir;
02396 s->mv_type = best_s.mv_type;
02397 s->mb_intra= 0;
02398
02399
02400
02401 /* re-try the best inter mode with the residual dropped (skipdct) to see whether skipping the coefficients wins in RD terms */
02402 backup_s.dquant= 0;
02403 s->skipdct=1;
02404 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
02405 &dmin, &next_block, mx, my);
02406 s->skipdct=0;
02407 }
02408 }
02409
02410 s->current_picture.qscale_table[xy]= best_s.qscale;
02411
02412 copy_context_after_encode(s, &best_s, -1);
02413
02414 pb_bits_count= put_bits_count(&s->pb);
02415 flush_put_bits(&s->pb);
02416 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
02417 s->pb= backup_s.pb;
02418
02419 if(s->data_partitioning){
02420 pb2_bits_count= put_bits_count(&s->pb2);
02421 flush_put_bits(&s->pb2);
02422 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
02423 s->pb2= backup_s.pb2;
02424
02425 tex_pb_bits_count= put_bits_count(&s->tex_pb);
02426 flush_put_bits(&s->tex_pb);
02427 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
02428 s->tex_pb= backup_s.tex_pb;
02429 }
02430 s->last_bits= put_bits_count(&s->pb);
02431
02432 if (CONFIG_ANY_H263_ENCODER &&
02433 s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE)
02434 ff_h263_update_motion_val(s);
02435
02436 if(next_block==0){
02437 s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
02438 s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
02439 s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
02440 }
02441
02442 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
02443 MPV_decode_mb(s, s->block);
02444 } else {
02445 int motion_x = 0, motion_y = 0;
02446 s->mv_type=MV_TYPE_16X16;
02447
02448 /* only one candidate MB type is set: encode it directly, no RD comparison needed */
02449 switch(mb_type){
02450 case CANDIDATE_MB_TYPE_INTRA:
02451 s->mv_dir = 0;
02452 s->mb_intra= 1;
02453 motion_x= s->mv[0][0][0] = 0;
02454 motion_y= s->mv[0][0][1] = 0;
02455 break;
02456 case CANDIDATE_MB_TYPE_INTER:
02457 s->mv_dir = MV_DIR_FORWARD;
02458 s->mb_intra= 0;
02459 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
02460 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
02461 break;
02462 case CANDIDATE_MB_TYPE_INTER_I:
02463 s->mv_dir = MV_DIR_FORWARD;
02464 s->mv_type = MV_TYPE_FIELD;
02465 s->mb_intra= 0;
02466 for(i=0; i<2; i++){
02467 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
02468 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
02469 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
02470 }
02471 break;
02472 case CANDIDATE_MB_TYPE_INTER4V:
02473 s->mv_dir = MV_DIR_FORWARD;
02474 s->mv_type = MV_TYPE_8X8;
02475 s->mb_intra= 0;
02476 for(i=0; i<4; i++){
02477 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
02478 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
02479 }
02480 break;
02481 case CANDIDATE_MB_TYPE_DIRECT:
02482 if (CONFIG_MPEG4_ENCODER) {
02483 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
02484 s->mb_intra= 0;
02485 motion_x=s->b_direct_mv_table[xy][0];
02486 motion_y=s->b_direct_mv_table[xy][1];
02487 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
02488 }
02489 break;
02490 case CANDIDATE_MB_TYPE_DIRECT0:
02491 if (CONFIG_MPEG4_ENCODER) {
02492 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
02493 s->mb_intra= 0;
02494 ff_mpeg4_set_direct_mv(s, 0, 0);
02495 }
02496 break;
02497 case CANDIDATE_MB_TYPE_BIDIR:
02498 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02499 s->mb_intra= 0;
02500 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
02501 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
02502 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
02503 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
02504 break;
02505 case CANDIDATE_MB_TYPE_BACKWARD:
02506 s->mv_dir = MV_DIR_BACKWARD;
02507 s->mb_intra= 0;
02508 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
02509 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
02510 break;
02511 case CANDIDATE_MB_TYPE_FORWARD:
02512 s->mv_dir = MV_DIR_FORWARD;
02513 s->mb_intra= 0;
02514 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
02515 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
02516
02517 break;
02518 case CANDIDATE_MB_TYPE_FORWARD_I:
02519 s->mv_dir = MV_DIR_FORWARD;
02520 s->mv_type = MV_TYPE_FIELD;
02521 s->mb_intra= 0;
02522 for(i=0; i<2; i++){
02523 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
02524 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
02525 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
02526 }
02527 break;
02528 case CANDIDATE_MB_TYPE_BACKWARD_I:
02529 s->mv_dir = MV_DIR_BACKWARD;
02530 s->mv_type = MV_TYPE_FIELD;
02531 s->mb_intra= 0;
02532 for(i=0; i<2; i++){
02533 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
02534 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
02535 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
02536 }
02537 break;
02538 case CANDIDATE_MB_TYPE_BIDIR_I:
02539 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02540 s->mv_type = MV_TYPE_FIELD;
02541 s->mb_intra= 0;
02542 for(dir=0; dir<2; dir++){
02543 for(i=0; i<2; i++){
02544 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
02545 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
02546 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
02547 }
02548 }
02549 break;
02550 default:
02551 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
02552 }
02553
02554 encode_mb(s, motion_x, motion_y);
02555
02556
02557 s->last_mv_dir = s->mv_dir;
02558
02559 if (CONFIG_ANY_H263_ENCODER &&
02560 s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE)
02561 ff_h263_update_motion_val(s);
02562
02563 MPV_decode_mb(s, s->block);
02564 }
02565
02566 /* clear the MV table entry for intra MBs so stale vectors are not reused (e.g. by direct mode in B-frames) */
02567 if(s->mb_intra ){
02568 s->p_mv_table[xy][0]=0;
02569 s->p_mv_table[xy][1]=0;
02570 }
02571
02572 if(s->flags&CODEC_FLAG_PSNR){
02573 int w= 16;
02574 int h= 16;
02575
02576 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
02577 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
02578
02579 s->current_picture.error[0] += sse(
02580 s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
02581 s->dest[0], w, h, s->linesize);
02582 s->current_picture.error[1] += sse(
02583 s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
02584 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
02585 s->current_picture.error[2] += sse(
02586 s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
02587 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
02588 }
02589 if(s->loop_filter){
02590 if(CONFIG_ANY_H263_ENCODER && s->out_format == FMT_H263)
02591 ff_h263_loop_filter(s);
02592 }
02593
02594 }
02595 }
02596
02597
02598 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == FF_I_TYPE)
02599 msmpeg4_encode_ext_header(s);
02600
02601 write_slice_end(s);
02602
02603
02604 if (s->avctx->rtp_callback) {
02605 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
02606 pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
02607
02608 emms_c();
02609 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
02610 }
02611
02612 return 0;
02613 }
02614
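/*
 * MERGE() folds a per-thread statistic into the main context and clears the
 * source so a later merge cannot count it twice; merge_context_after_me()
 * handles the motion-estimation sums and merge_context_after_encode()
 * additionally appends each worker's slice bitstream to the main
 * PutBitContext (hence the byte-alignment asserts).
 */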
02615 #define MERGE(field) dst->field += src->field; src->field=0
02616 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
02617 MERGE(me.scene_change_score);
02618 MERGE(me.mc_mb_var_sum_temp);
02619 MERGE(me.mb_var_sum_temp);
02620 }
02621
02622 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
02623 int i;
02624
02625 MERGE(dct_count[0]);
02626 MERGE(dct_count[1]);
02627 MERGE(mv_bits);
02628 MERGE(i_tex_bits);
02629 MERGE(p_tex_bits);
02630 MERGE(i_count);
02631 MERGE(f_count);
02632 MERGE(b_count);
02633 MERGE(skip_count);
02634 MERGE(misc_bits);
02635 MERGE(error_count);
02636 MERGE(padding_bug_score);
02637 MERGE(current_picture.error[0]);
02638 MERGE(current_picture.error[1]);
02639 MERGE(current_picture.error[2]);
02640
02641 if(dst->avctx->noise_reduction){
02642 for(i=0; i<64; i++){
02643 MERGE(dct_error_sum[0][i]);
02644 MERGE(dct_error_sum[1][i]);
02645 }
02646 }
02647
02648 assert(put_bits_count(&src->pb) % 8 ==0);
02649 assert(put_bits_count(&dst->pb) % 8 ==0);
02650 ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
02651 flush_put_bits(&dst->pb);
02652 }
02653
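/*
 * estimate_qp() picks the picture-level quality: a pending next_lambda, if
 * any, is used directly, otherwise the rate controller
 * (ff_rate_estimate_qscale) decides unless fixed_qscale is set.  With
 * adaptive quantization the per-MB qscales are smoothed by the codec-specific
 * ff_clean_*_qscales() helpers and lambda is taken from lambda_table[0].
 */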
02654 static int estimate_qp(MpegEncContext *s, int dry_run){
02655 if (s->next_lambda){
02656 s->current_picture_ptr->quality=
02657 s->current_picture.quality = s->next_lambda;
02658 if(!dry_run) s->next_lambda= 0;
02659 } else if (!s->fixed_qscale) {
02660 s->current_picture_ptr->quality=
02661 s->current_picture.quality = ff_rate_estimate_qscale(s, dry_run);
02662 if (s->current_picture.quality < 0)
02663 return -1;
02664 }
02665
02666 if(s->adaptive_quant){
02667 switch(s->codec_id){
02668 case CODEC_ID_MPEG4:
02669 if (CONFIG_MPEG4_ENCODER)
02670 ff_clean_mpeg4_qscales(s);
02671 break;
02672 case CODEC_ID_H263:
02673 case CODEC_ID_H263P:
02674 case CODEC_ID_FLV1:
02675 if (CONFIG_H263_ENCODER||CONFIG_H263P_ENCODER||CONFIG_FLV_ENCODER)
02676 ff_clean_h263_qscales(s);
02677 break;
02678 }
02679
02680 s->lambda= s->lambda_table[0];
02681
02682 }else
02683 s->lambda= s->current_picture.quality;
02684
02685 update_qscale(s);
02686 return 0;
02687 }
02688
02689
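/*
 * set_frame_distances() keeps the temporal bookkeeping used for B-frame
 * prediction: pp_time appears to be the distance (in time_base units) between
 * the two non-B pictures surrounding the current frame, and pb_time the
 * distance from the previous non-B picture to the current B picture.
 */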
02690 static void set_frame_distances(MpegEncContext * s){
02691 assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE);
02692 s->time= s->current_picture_ptr->pts*s->avctx->time_base.num;
02693
02694 if(s->pict_type==FF_B_TYPE){
02695 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
02696 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
02697 }else{
02698 s->pp_time= s->time - s->last_non_b_time;
02699 s->last_non_b_time= s->time;
02700 assert(s->picture_number==0 || s->pp_time > 0);
02701 }
02702 }
02703
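/*
 * encode_picture() is the per-frame driver: it sets frame distances and the
 * rounding mode, estimates the quantizer (a dry run first for two-pass),
 * runs the motion-estimation workers, may turn a P-frame into an I-frame on
 * a scene change, derives f_code/b_code from the MV tables and clamps overly
 * long vectors (ff_fix_long_*_mvs), writes the picture header for the target
 * format, and finally dispatches encode_thread() over all slice contexts and
 * merges their results.
 */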
02704 static int encode_picture(MpegEncContext *s, int picture_number)
02705 {
02706 int i;
02707 int bits;
02708
02709 s->picture_number = picture_number;
02710
02711
02712 s->me.mb_var_sum_temp =
02713 s->me.mc_mb_var_sum_temp = 0;
02714
02715
02716
02717 if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
02718 set_frame_distances(s);
02719 if(CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4)
02720 ff_set_mpeg4_time(s);
02721
02722 s->me.scene_change_score=0;
02723
02724
02725
02726 if(s->pict_type==FF_I_TYPE){
02727 if(s->msmpeg4_version >= 3) s->no_rounding=1;
02728 else s->no_rounding=0;
02729 }else if(s->pict_type!=FF_B_TYPE){
02730 if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
02731 s->no_rounding ^= 1;
02732 }
02733
02734 if(s->flags & CODEC_FLAG_PASS2){
02735 if (estimate_qp(s,1) < 0)
02736 return -1;
02737 ff_get_2pass_fcode(s);
02738 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
02739 if(s->pict_type==FF_B_TYPE)
02740 s->lambda= s->last_lambda_for[s->pict_type];
02741 else
02742 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
02743 update_qscale(s);
02744 }
02745
02746 s->mb_intra=0;
02747 for(i=1; i<s->avctx->thread_count; i++){
02748 ff_update_duplicate_context(s->thread_context[i], s);
02749 }
02750
02751 if(ff_init_me(s)<0)
02752 return -1;
02753
02754 /* estimate motion for every macroblock of P/B frames */
02755 if(s->pict_type != FF_I_TYPE){
02756 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
02757 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
02758 if(s->pict_type != FF_B_TYPE && s->avctx->me_threshold==0){
02759 if((s->avctx->pre_me && s->last_non_b_pict_type==FF_I_TYPE) || s->avctx->pre_me==2){
02760 s->avctx->execute(s->avctx, pre_estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count, sizeof(void*));
02761 }
02762 }
02763
02764 s->avctx->execute(s->avctx, estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count, sizeof(void*));
02765 }else {
02766 /* intra-only frame: mark every macroblock as intra */
02767 for(i=0; i<s->mb_stride*s->mb_height; i++)
02768 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
02769
02770 if(!s->fixed_qscale){
02771 /* measure spatial complexity for intra-frame rate control */
02772 s->avctx->execute(s->avctx, mb_var_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count, sizeof(void*));
02773 }
02774 }
02775 for(i=1; i<s->avctx->thread_count; i++){
02776 merge_context_after_me(s, s->thread_context[i]);
02777 }
02778 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
02779 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
02780 emms_c();
02781
02782 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == FF_P_TYPE){
02783 s->pict_type= FF_I_TYPE;
02784 for(i=0; i<s->mb_stride*s->mb_height; i++)
02785 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
02786
02787 }
02788
02789 if(!s->umvplus){
02790 if(s->pict_type==FF_P_TYPE || s->pict_type==FF_S_TYPE) {
02791 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
02792
02793 if(s->flags & CODEC_FLAG_INTERLACED_ME){
02794 int a,b;
02795 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I);
02796 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
02797 s->f_code= FFMAX3(s->f_code, a, b);
02798 }
02799
02800 ff_fix_long_p_mvs(s);
02801 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
02802 if(s->flags & CODEC_FLAG_INTERLACED_ME){
02803 int j;
02804 for(i=0; i<2; i++){
02805 for(j=0; j<2; j++)
02806 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
02807 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
02808 }
02809 }
02810 }
02811
02812 if(s->pict_type==FF_B_TYPE){
02813 int a, b;
02814
02815 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
02816 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
02817 s->f_code = FFMAX(a, b);
02818
02819 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
02820 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
02821 s->b_code = FFMAX(a, b);
02822
02823 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
02824 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
02825 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
02826 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
02827 if(s->flags & CODEC_FLAG_INTERLACED_ME){
02828 int dir, j;
02829 for(dir=0; dir<2; dir++){
02830 for(i=0; i<2; i++){
02831 for(j=0; j<2; j++){
02832 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
02833 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
02834 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
02835 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
02836 }
02837 }
02838 }
02839 }
02840 }
02841 }
02842
02843 if (estimate_qp(s, 0) < 0)
02844 return -1;
02845
02846 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==FF_I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
02847 s->qscale= 3;
02848
02849 if (s->out_format == FMT_MJPEG) {
02850 /* for MJPEG, qscale is folded into the quantization matrix itself */
02851 s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
02852 for(i=1;i<64;i++){
02853 int j= s->dsp.idct_permutation[i];
02854
02855 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
02856 }
02857 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
02858 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
02859 s->qscale= 8;
02860 }
02861
02862
02863 s->current_picture_ptr->key_frame=
02864 s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
02865 s->current_picture_ptr->pict_type=
02866 s->current_picture.pict_type= s->pict_type;
02867
02868 if(s->current_picture.key_frame)
02869 s->picture_in_gop_number=0;
02870
02871 s->last_bits= put_bits_count(&s->pb);
02872 switch(s->out_format) {
02873 case FMT_MJPEG:
02874 if (CONFIG_MJPEG_ENCODER)
02875 ff_mjpeg_encode_picture_header(s);
02876 break;
02877 case FMT_H261:
02878 if (CONFIG_H261_ENCODER)
02879 ff_h261_encode_picture_header(s, picture_number);
02880 break;
02881 case FMT_H263:
02882 if (CONFIG_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2)
02883 ff_wmv2_encode_picture_header(s, picture_number);
02884 else if (CONFIG_MSMPEG4_ENCODER && s->h263_msmpeg4)
02885 msmpeg4_encode_picture_header(s, picture_number);
02886 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
02887 mpeg4_encode_picture_header(s, picture_number);
02888 else if (CONFIG_RV10_ENCODER && s->codec_id == CODEC_ID_RV10)
02889 rv10_encode_picture_header(s, picture_number);
02890 else if (CONFIG_RV20_ENCODER && s->codec_id == CODEC_ID_RV20)
02891 rv20_encode_picture_header(s, picture_number);
02892 else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
02893 ff_flv_encode_picture_header(s, picture_number);
02894 else if (CONFIG_ANY_H263_ENCODER)
02895 h263_encode_picture_header(s, picture_number);
02896 break;
02897 case FMT_MPEG1:
02898 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
02899 mpeg1_encode_picture_header(s, picture_number);
02900 break;
02901 case FMT_H264:
02902 break;
02903 default:
02904 assert(0);
02905 }
02906 bits= put_bits_count(&s->pb);
02907 s->header_bits= bits - s->last_bits;
02908
02909 for(i=1; i<s->avctx->thread_count; i++){
02910 update_duplicate_context_after_me(s->thread_context[i], s);
02911 }
02912 s->avctx->execute(s->avctx, encode_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count, sizeof(void*));
02913 for(i=1; i<s->avctx->thread_count; i++){
02914 merge_context_after_encode(s, s->thread_context[i]);
02915 }
02916 emms_c();
02917 return 0;
02918 }
02919
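/*
 * denoise_dct_c() implements the DCT-domain noise reduction: it accumulates
 * per-position |coefficient| statistics in dct_error_sum and shrinks every
 * coefficient towards zero by dct_offset[] (derived elsewhere from those
 * statistics and the noise_reduction strength), clamping at zero so the sign
 * never flips.
 */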
02920 void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
02921 const int intra= s->mb_intra;
02922 int i;
02923
02924 s->dct_count[intra]++;
02925
02926 for(i=0; i<64; i++){
02927 int level= block[i];
02928
02929 if(level){
02930 if(level>0){
02931 s->dct_error_sum[intra][i] += level;
02932 level -= s->dct_offset[intra][i];
02933 if(level<0) level=0;
02934 }else{
02935 s->dct_error_sum[intra][i] -= level;
02936 level += s->dct_offset[intra][i];
02937 if(level>0) level=0;
02938 }
02939 block[i]= level;
02940 }
02941 }
02942 }
02943
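/*
 * dct_quantize_trellis_c() does rate-distortion optimal quantization along
 * the scan order.  For each coefficient it considers at most two candidate
 * levels (the straight quantization result and that value minus one) and
 * keeps a set of "survivor" end positions; a candidate's cost is roughly
 *     (dequant(level) - dct_coeff)^2 - dct_coeff^2 + lambda * bits(run, level)
 * with lambda = lambda2 >> (FF_LAMBDA_SHIFT - 6) and bits() taken from the
 * intra/inter AC VLC length tables (or the escape length).  The cheapest
 * path is then traced back through run_tab[]/level_tab[] into block[].
 */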
02944 int dct_quantize_trellis_c(MpegEncContext *s,
02945 DCTELEM *block, int n,
02946 int qscale, int *overflow){
02947 const int *qmat;
02948 const uint8_t *scantable= s->intra_scantable.scantable;
02949 const uint8_t *perm_scantable= s->intra_scantable.permutated;
02950 int max=0;
02951 unsigned int threshold1, threshold2;
02952 int bias=0;
02953 int run_tab[65];
02954 int level_tab[65];
02955 int score_tab[65];
02956 int survivor[65];
02957 int survivor_count;
02958 int last_run=0;
02959 int last_level=0;
02960 int last_score= 0;
02961 int last_i;
02962 int coeff[2][64];
02963 int coeff_count[64];
02964 int qmul, qadd, start_i, last_non_zero, i, dc;
02965 const int esc_length= s->ac_esc_length;
02966 uint8_t * length;
02967 uint8_t * last_length;
02968 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
02969
02970 s->dsp.fdct (block);
02971
02972 if(s->dct_error_sum)
02973 s->denoise_dct(s, block);
02974 qmul= qscale*16;
02975 qadd= ((qscale-1)|1)*8;
02976
02977 if (s->mb_intra) {
02978 int q;
02979 if (!s->h263_aic) {
02980 if (n < 4)
02981 q = s->y_dc_scale;
02982 else
02983 q = s->c_dc_scale;
02984 q = q << 3;
02985 } else{
02986 /* for AIC, intra DC quantization is skipped here (effective scale 1) */
02987 q = 1 << 3;
02988 qadd=0;
02989 }
02990
02991 /* note: block[0] is assumed to be positive */
02992 block[0] = (block[0] + (q >> 1)) / q;
02993 start_i = 1;
02994 last_non_zero = 0;
02995 qmat = s->q_intra_matrix[qscale];
02996 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
02997 bias= 1<<(QMAT_SHIFT-1);
02998 length = s->intra_ac_vlc_length;
02999 last_length= s->intra_ac_vlc_last_length;
03000 } else {
03001 start_i = 0;
03002 last_non_zero = -1;
03003 qmat = s->q_inter_matrix[qscale];
03004 length = s->inter_ac_vlc_length;
03005 last_length= s->inter_ac_vlc_last_length;
03006 }
03007 last_i= start_i;
03008
03009 threshold1= (1<<QMAT_SHIFT) - bias - 1;
03010 threshold2= (threshold1<<1);
03011
03012 for(i=63; i>=start_i; i--) {
03013 const int j = scantable[i];
03014 int level = block[j] * qmat[j];
03015
03016 if(((unsigned)(level+threshold1))>threshold2){
03017 last_non_zero = i;
03018 break;
03019 }
03020 }
03021
03022 for(i=start_i; i<=last_non_zero; i++) {
03023 const int j = scantable[i];
03024 int level = block[j] * qmat[j];
03025
03026
03027
03028 if(((unsigned)(level+threshold1))>threshold2){
03029 if(level>0){
03030 level= (bias + level)>>QMAT_SHIFT;
03031 coeff[0][i]= level;
03032 coeff[1][i]= level-1;
03033
03034 }else{
03035 level= (bias - level)>>QMAT_SHIFT;
03036 coeff[0][i]= -level;
03037 coeff[1][i]= -level+1;
03038
03039 }
03040 coeff_count[i]= FFMIN(level, 2);
03041 assert(coeff_count[i]);
03042 max |=level;
03043 }else{
03044 coeff[0][i]= (level>>31)|1;
03045 coeff_count[i]= 1;
03046 }
03047 }
03048
03049 *overflow= s->max_qcoeff < max;
03050
03051 if(last_non_zero < start_i){
03052 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
03053 return last_non_zero;
03054 }
03055
03056 score_tab[start_i]= 0;
03057 survivor[0]= start_i;
03058 survivor_count= 1;
03059
03060 for(i=start_i; i<=last_non_zero; i++){
03061 int level_index, j, zero_distortion;
03062 int dct_coeff= FFABS(block[ scantable[i] ]);
03063 int best_score=256*256*256*120;
03064
03065 if ( s->dsp.fdct == fdct_ifast
03066 #ifndef FAAN_POSTSCALE
03067 || s->dsp.fdct == ff_faandct
03068 #endif
03069 )
03070 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
03071 zero_distortion= dct_coeff*dct_coeff;
03072
03073 for(level_index=0; level_index < coeff_count[i]; level_index++){
03074 int distortion;
03075 int level= coeff[level_index][i];
03076 const int alevel= FFABS(level);
03077 int unquant_coeff;
03078
03079 assert(level);
03080
03081 if(s->out_format == FMT_H263){
03082 unquant_coeff= alevel*qmul + qadd;
03083 }else{
03084 j= s->dsp.idct_permutation[ scantable[i] ];
03085 if(s->mb_intra){
03086 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
03087 unquant_coeff = (unquant_coeff - 1) | 1;
03088 }else{
03089 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
03090 unquant_coeff = (unquant_coeff - 1) | 1;
03091 }
03092 unquant_coeff<<= 3;
03093 }
03094
03095 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
03096 level+=64;
03097 if((level&(~127)) == 0){
03098 for(j=survivor_count-1; j>=0; j--){
03099 int run= i - survivor[j];
03100 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
03101 score += score_tab[i-run];
03102
03103 if(score < best_score){
03104 best_score= score;
03105 run_tab[i+1]= run;
03106 level_tab[i+1]= level-64;
03107 }
03108 }
03109
03110 if(s->out_format == FMT_H263){
03111 for(j=survivor_count-1; j>=0; j--){
03112 int run= i - survivor[j];
03113 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
03114 score += score_tab[i-run];
03115 if(score < last_score){
03116 last_score= score;
03117 last_run= run;
03118 last_level= level-64;
03119 last_i= i+1;
03120 }
03121 }
03122 }
03123 }else{
03124 distortion += esc_length*lambda;
03125 for(j=survivor_count-1; j>=0; j--){
03126 int run= i - survivor[j];
03127 int score= distortion + score_tab[i-run];
03128
03129 if(score < best_score){
03130 best_score= score;
03131 run_tab[i+1]= run;
03132 level_tab[i+1]= level-64;
03133 }
03134 }
03135
03136 if(s->out_format == FMT_H263){
03137 for(j=survivor_count-1; j>=0; j--){
03138 int run= i - survivor[j];
03139 int score= distortion + score_tab[i-run];
03140 if(score < last_score){
03141 last_score= score;
03142 last_run= run;
03143 last_level= level-64;
03144 last_i= i+1;
03145 }
03146 }
03147 }
03148 }
03149 }
03150
03151 score_tab[i+1]= best_score;
03152
03153
03154 if(last_non_zero <= 27){
03155 for(; survivor_count; survivor_count--){
03156 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
03157 break;
03158 }
03159 }else{
03160 for(; survivor_count; survivor_count--){
03161 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
03162 break;
03163 }
03164 }
03165
03166 survivor[ survivor_count++ ]= i+1;
03167 }
03168
03169 if(s->out_format != FMT_H263){
03170 last_score= 256*256*256*120;
03171 for(i= survivor[0]; i<=last_non_zero + 1; i++){
03172 int score= score_tab[i];
03173 if(i) score += lambda*2;
03174
03175 if(score < last_score){
03176 last_score= score;
03177 last_i= i;
03178 last_level= level_tab[i];
03179 last_run= run_tab[i];
03180 }
03181 }
03182 }
03183
03184 s->coded_score[n] = last_score;
03185
03186 dc= FFABS(block[0]);
03187 last_non_zero= last_i - 1;
03188 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
03189
03190 if(last_non_zero < start_i)
03191 return last_non_zero;
03192
03193 if(last_non_zero == 0 && start_i == 0){
03194 int best_level= 0;
03195 int best_score= dc * dc;
03196
03197 for(i=0; i<coeff_count[0]; i++){
03198 int level= coeff[i][0];
03199 int alevel= FFABS(level);
03200 int unquant_coeff, score, distortion;
03201
03202 if(s->out_format == FMT_H263){
03203 unquant_coeff= (alevel*qmul + qadd)>>3;
03204 }else{
03205 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
03206 unquant_coeff = (unquant_coeff - 1) | 1;
03207 }
03208 unquant_coeff = (unquant_coeff + 4) >> 3;
03209 unquant_coeff<<= 3 + 3;
03210
03211 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
03212 level+=64;
03213 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
03214 else score= distortion + esc_length*lambda;
03215
03216 if(score < best_score){
03217 best_score= score;
03218 best_level= level - 64;
03219 }
03220 }
03221 block[0]= best_level;
03222 s->coded_score[n] = best_score - dc*dc;
03223 if(best_level == 0) return -1;
03224 else return last_non_zero;
03225 }
03226
03227 i= last_i;
03228 assert(last_level);
03229
03230 block[ perm_scantable[last_non_zero] ]= last_level;
03231 i -= last_run + 1;
03232
03233 for(; i>start_i; i -= run_tab[i] + 1){
03234 block[ perm_scantable[i-1] ]= level_tab[i];
03235 }
03236
03237 return last_non_zero;
03238 }
03239
03240
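/*
 * basis[] holds the 64 two-dimensional DCT basis functions, each sampled on
 * the 8x8 pixel grid and scaled by 2^BASIS_SHIFT:
 *     basis[(i,j)][(x,y)] = 2^BASIS_SHIFT * (1/4) * C(i)*C(j)
 *                           * cos(pi*i*(x+0.5)/8) * cos(pi*j*(y+0.5)/8)
 * with C(0)=1/sqrt(2) and C(k)=1 otherwise, stored in IDCT permutation order.
 * dsp.add_8x8basis()/try_8x8basis() use it to update or score the
 * spatial-domain residual incrementally when a single coefficient changes.
 */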
03241 static int16_t basis[64][64];
03242
03243 static void build_basis(uint8_t *perm){
03244 int i, j, x, y;
03245 emms_c();
03246 for(i=0; i<8; i++){
03247 for(j=0; j<8; j++){
03248 for(y=0; y<8; y++){
03249 for(x=0; x<8; x++){
03250 double s= 0.25*(1<<BASIS_SHIFT);
03251 int index= 8*i + j;
03252 int perm_index= perm[index];
03253 if(i==0) s*= sqrt(0.5);
03254 if(j==0) s*= sqrt(0.5);
03255 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
03256 }
03257 }
03258 }
03259 }
03260 }
03261
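/*
 * dct_quantize_refine() is the quantizer-noise-shaping refinement pass run
 * after the initial quantization: rem[] holds the spatial-domain
 * reconstruction error at RECON_SHIFT precision, and the loop repeatedly
 * tries changing one coefficient by +-1, scoring each change roughly as
 *     try_8x8basis(rem, weight, basis[coeff], delta_dequant)
 *     + lambda (scaled by the per-coefficient weights) * delta_bits
 * with delta_bits from the AC VLC length tables, including the run merges
 * and splits when a coefficient appears or disappears.  The best change is
 * applied until no change improves the score; the return value is the new
 * block_last_index.
 */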
03262 static int dct_quantize_refine(MpegEncContext *s,
03263 DCTELEM *block, int16_t *weight, DCTELEM *orig,
03264 int n, int qscale){
03265 int16_t rem[64];
03266 DECLARE_ALIGNED_16(DCTELEM, d1[64]);
03267 const int *qmat;
03268 const uint8_t *scantable= s->intra_scantable.scantable;
03269 const uint8_t *perm_scantable= s->intra_scantable.permutated;
03270
03271
03272 int run_tab[65];
03273 int prev_run=0;
03274 int prev_level=0;
03275 int qmul, qadd, start_i, last_non_zero, i, dc;
03276 uint8_t * length;
03277 uint8_t * last_length;
03278 int lambda;
03279 int rle_index, run, q = 1, sum;
03280 #ifdef REFINE_STATS
03281 static int count=0;
03282 static int after_last=0;
03283 static int to_zero=0;
03284 static int from_zero=0;
03285 static int raise=0;
03286 static int lower=0;
03287 static int messed_sign=0;
03288 #endif
03289
03290 if(basis[0][0] == 0)
03291 build_basis(s->dsp.idct_permutation);
03292
03293 qmul= qscale*2;
03294 qadd= (qscale-1)|1;
03295 if (s->mb_intra) {
03296 if (!s->h263_aic) {
03297 if (n < 4)
03298 q = s->y_dc_scale;
03299 else
03300 q = s->c_dc_scale;
03301 } else{
03302
03303 q = 1;
03304 qadd=0;
03305 }
03306 q <<= RECON_SHIFT-3;
03307
03308 dc= block[0]*q;
03309
03310 start_i = 1;
03311 qmat = s->q_intra_matrix[qscale];
03312
03313
03314 length = s->intra_ac_vlc_length;
03315 last_length= s->intra_ac_vlc_last_length;
03316 } else {
03317 dc= 0;
03318 start_i = 0;
03319 qmat = s->q_inter_matrix[qscale];
03320 length = s->inter_ac_vlc_length;
03321 last_length= s->inter_ac_vlc_last_length;
03322 }
03323 last_non_zero = s->block_last_index[n];
03324
03325 #ifdef REFINE_STATS
03326 {START_TIMER
03327 #endif
03328 dc += (1<<(RECON_SHIFT-1));
03329 for(i=0; i<64; i++){
03330 rem[i]= dc - (orig[i]<<RECON_SHIFT);
03331 }
03332 #ifdef REFINE_STATS
03333 STOP_TIMER("memset rem[]")}
03334 #endif
03335 sum=0;
03336 for(i=0; i<64; i++){
03337 int one= 36;
03338 int qns=4;
03339 int w;
03340
03341 w= FFABS(weight[i]) + qns*one;
03342 w= 15 + (48*qns*one + w/2)/w;
03343
03344 weight[i] = w;
03345
03346
03347 assert(w>0);
03348 assert(w<(1<<6));
03349 sum += w*w;
03350 }
03351 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
03352 #ifdef REFINE_STATS
03353 {START_TIMER
03354 #endif
03355 run=0;
03356 rle_index=0;
03357 for(i=start_i; i<=last_non_zero; i++){
03358 int j= perm_scantable[i];
03359 const int level= block[j];
03360 int coeff;
03361
03362 if(level){
03363 if(level<0) coeff= qmul*level - qadd;
03364 else coeff= qmul*level + qadd;
03365 run_tab[rle_index++]=run;
03366 run=0;
03367
03368 s->dsp.add_8x8basis(rem, basis[j], coeff);
03369 }else{
03370 run++;
03371 }
03372 }
03373 #ifdef REFINE_STATS
03374 if(last_non_zero>0){
03375 STOP_TIMER("init rem[]")
03376 }
03377 }
03378
03379 {START_TIMER
03380 #endif
03381 for(;;){
03382 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
03383 int best_coeff=0;
03384 int best_change=0;
03385 int run2, best_unquant_change=0, analyze_gradient;
03386 #ifdef REFINE_STATS
03387 {START_TIMER
03388 #endif
03389 analyze_gradient = last_non_zero > 2 || s->avctx->quantizer_noise_shaping >= 3;
03390
03391 if(analyze_gradient){
03392 #ifdef REFINE_STATS
03393 {START_TIMER
03394 #endif
03395 for(i=0; i<64; i++){
03396 int w= weight[i];
03397
03398 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
03399 }
03400 #ifdef REFINE_STATS
03401 STOP_TIMER("rem*w*w")}
03402 {START_TIMER
03403 #endif
03404 s->dsp.fdct(d1);
03405 #ifdef REFINE_STATS
03406 STOP_TIMER("dct")}
03407 #endif
03408 }
03409
03410 if(start_i){
03411 const int level= block[0];
03412 int change, old_coeff;
03413
03414 assert(s->mb_intra);
03415
03416 old_coeff= q*level;
03417
03418 for(change=-1; change<=1; change+=2){
03419 int new_level= level + change;
03420 int score, new_coeff;
03421
03422 new_coeff= q*new_level;
03423 if(new_coeff >= 2048 || new_coeff < 0)
03424 continue;
03425
03426 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
03427 if(score<best_score){
03428 best_score= score;
03429 best_coeff= 0;
03430 best_change= change;
03431 best_unquant_change= new_coeff - old_coeff;
03432 }
03433 }
03434 }
03435
03436 run=0;
03437 rle_index=0;
03438 run2= run_tab[rle_index++];
03439 prev_level=0;
03440 prev_run=0;
03441
03442 for(i=start_i; i<64; i++){
03443 int j= perm_scantable[i];
03444 const int level= block[j];
03445 int change, old_coeff;
03446
03447 if(s->avctx->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
03448 break;
03449
03450 if(level){
03451 if(level<0) old_coeff= qmul*level - qadd;
03452 else old_coeff= qmul*level + qadd;
03453 run2= run_tab[rle_index++];
03454 }else{
03455 old_coeff=0;
03456 run2--;
03457 assert(run2>=0 || i >= last_non_zero );
03458 }
03459
03460 for(change=-1; change<=1; change+=2){
03461 int new_level= level + change;
03462 int score, new_coeff, unquant_change;
03463
03464 score=0;
03465 if(s->avctx->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
03466 continue;
03467
03468 if(new_level){
03469 if(new_level<0) new_coeff= qmul*new_level - qadd;
03470 else new_coeff= qmul*new_level + qadd;
03471 if(new_coeff >= 2048 || new_coeff <= -2048)
03472 continue;
03473
03474
03475 if(level){
03476 if(level < 63 && level > -63){
03477 if(i < last_non_zero)
03478 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
03479 - length[UNI_AC_ENC_INDEX(run, level+64)];
03480 else
03481 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
03482 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
03483 }
03484 }else{
03485 assert(FFABS(new_level)==1);
03486
03487 if(analyze_gradient){
03488 int g= d1[ scantable[i] ];
03489 if(g && (g^new_level) >= 0)
03490 continue;
03491 }
03492
03493 if(i < last_non_zero){
03494 int next_i= i + run2 + 1;
03495 int next_level= block[ perm_scantable[next_i] ] + 64;
03496
03497 if(next_level&(~127))
03498 next_level= 0;
03499
03500 if(next_i < last_non_zero)
03501 score += length[UNI_AC_ENC_INDEX(run, 65)]
03502 + length[UNI_AC_ENC_INDEX(run2, next_level)]
03503 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
03504 else
03505 score += length[UNI_AC_ENC_INDEX(run, 65)]
03506 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
03507 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
03508 }else{
03509 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
03510 if(prev_level){
03511 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
03512 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
03513 }
03514 }
03515 }
03516 }else{
03517 new_coeff=0;
03518 assert(FFABS(level)==1);
03519
03520 if(i < last_non_zero){
03521 int next_i= i + run2 + 1;
03522 int next_level= block[ perm_scantable[next_i] ] + 64;
03523
03524 if(next_level&(~127))
03525 next_level= 0;
03526
03527 if(next_i < last_non_zero)
03528 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
03529 - length[UNI_AC_ENC_INDEX(run2, next_level)]
03530 - length[UNI_AC_ENC_INDEX(run, 65)];
03531 else
03532 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
03533 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
03534 - length[UNI_AC_ENC_INDEX(run, 65)];
03535 }else{
03536 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
03537 if(prev_level){
03538 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
03539 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
03540 }
03541 }
03542 }
03543
03544 score *= lambda;
03545
03546 unquant_change= new_coeff - old_coeff;
03547 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
03548
03549 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
03550 if(score<best_score){
03551 best_score= score;
03552 best_coeff= i;
03553 best_change= change;
03554 best_unquant_change= unquant_change;
03555 }
03556 }
03557 if(level){
03558 prev_level= level + 64;
03559 if(prev_level&(~127))
03560 prev_level= 0;
03561 prev_run= run;
03562 run=0;
03563 }else{
03564 run++;
03565 }
03566 }
03567 #ifdef REFINE_STATS
03568 STOP_TIMER("iterative step")}
03569 #endif
03570
03571 if(best_change){
03572 int j= perm_scantable[ best_coeff ];
03573
03574 block[j] += best_change;
03575
03576 if(best_coeff > last_non_zero){
03577 last_non_zero= best_coeff;
03578 assert(block[j]);
03579 #ifdef REFINE_STATS
03580 after_last++;
03581 #endif
03582 }else{
03583 #ifdef REFINE_STATS
03584 if(block[j]){
03585 if(block[j] - best_change){
03586 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
03587 raise++;
03588 }else{
03589 lower++;
03590 }
03591 }else{
03592 from_zero++;
03593 }
03594 }else{
03595 to_zero++;
03596 }
03597 #endif
03598 for(; last_non_zero>=start_i; last_non_zero--){
03599 if(block[perm_scantable[last_non_zero]])
03600 break;
03601 }
03602 }
03603 #ifdef REFINE_STATS
03604 count++;
03605 if(256*256*256*64 % count == 0){
03606 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
03607 }
03608 #endif
03609 run=0;
03610 rle_index=0;
03611 for(i=start_i; i<=last_non_zero; i++){
03612 int j= perm_scantable[i];
03613 const int level= block[j];
03614
03615 if(level){
03616 run_tab[rle_index++]=run;
03617 run=0;
03618 }else{
03619 run++;
03620 }
03621 }
03622
03623 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
03624 }else{
03625 break;
03626 }
03627 }
03628 #ifdef REFINE_STATS
03629 if(last_non_zero>0){
03630 STOP_TIMER("iterative search")
03631 }
03632 }
03633 #endif
03634
03635 return last_non_zero;
03636 }
03637
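/* Plain C quantizer, roughly: forward DCT, optional denoising, special-cased
 * intra DC, then biased division of the AC coefficients in scan order using
 * the precomputed qmat tables.  *overflow is set when the largest quantized
 * magnitude exceeds s->max_qcoeff (so the caller can clip the block), and the
 * result is permuted to match the IDCT ordering. */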
03638 int dct_quantize_c(MpegEncContext *s,
03639 DCTELEM *block, int n,
03640 int qscale, int *overflow)
03641 {
03642 int i, j, level, last_non_zero, q, start_i;
03643 const int *qmat;
03644 const uint8_t *scantable= s->intra_scantable.scantable;
03645 int bias;
03646 int max=0;
03647 unsigned int threshold1, threshold2;
03648
03649 s->dsp.fdct (block);
03650
03651 if(s->dct_error_sum)
03652 s->denoise_dct(s, block);
03653
03654 if (s->mb_intra) {
03655 if (!s->h263_aic) {
03656 if (n < 4)
03657 q = s->y_dc_scale;
03658 else
03659 q = s->c_dc_scale;
03660 q = q << 3;
03661 } else
03662 /* for AIC we skip quant/dequant of INTRADC */
03663 q = 1 << 3;
03664
03665 /* note: block[0] is assumed to be positive */
03666 block[0] = (block[0] + (q >> 1)) / q;
03667 start_i = 1;
03668 last_non_zero = 0;
03669 qmat = s->q_intra_matrix[qscale];
03670 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
03671 } else {
03672 start_i = 0;
03673 last_non_zero = -1;
03674 qmat = s->q_inter_matrix[qscale];
03675 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
03676 }
03677 threshold1= (1<<QMAT_SHIFT) - bias - 1;
03678 threshold2= (threshold1<<1);
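/* threshold1/threshold2 turn "would this coefficient quantize to zero?" into a
 * single unsigned compare: (unsigned)(level+threshold1) > threshold2 holds
 * exactly when |level| > threshold1, i.e. when bias + |level| reaches
 * 1<<QMAT_SHIFT and the quantized value is nonzero.  The backward scan below
 * uses this to find the last significant coefficient and zero the tail. */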
03679 for(i=63;i>=start_i;i--) {
03680 j = scantable[i];
03681 level = block[j] * qmat[j];
03682
03683 if(((unsigned)(level+threshold1))>threshold2){
03684 last_non_zero = i;
03685 break;
03686 }else{
03687 block[j]=0;
03688 }
03689 }
03690 for(i=start_i; i<=last_non_zero; i++) {
03691 j = scantable[i];
03692 level = block[j] * qmat[j];
03693
03694 // the unsigned compare below is equivalent to
03695 // bias+level >= (1<<QMAT_SHIFT) || bias-level >= (1<<QMAT_SHIFT)
03696 if(((unsigned)(level+threshold1))>threshold2){
03697 if(level>0){
03698 level= (bias + level)>>QMAT_SHIFT;
03699 block[j]= level;
03700 }else{
03701 level= (bias - level)>>QMAT_SHIFT;
03702 block[j]= -level;
03703 }
03704 max |=level;
03705 }else{
03706 block[j]=0;
03707 }
03708 }
03709 *overflow= s->max_qcoeff < max;
03710
03711 /* we need this permutation so that we correct the IDCT; we only permute the nonzero elements */
03712 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
03713 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
03714
03715 return last_non_zero;
03716 }
03717
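/* encoder registrations for the H.263 / MPEG-4 family; they share the generic
 * MPV_encode_init/MPV_encode_picture/MPV_encode_end entry points and differ
 * only in codec id, name, pixel formats and capabilities */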
03718 AVCodec h263_encoder = {
03719 "h263",
03720 CODEC_TYPE_VIDEO,
03721 CODEC_ID_H263,
03722 sizeof(MpegEncContext),
03723 MPV_encode_init,
03724 MPV_encode_picture,
03725 MPV_encode_end,
03726 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
03727 .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
03728 };
03729
03730 AVCodec h263p_encoder = {
03731 "h263p",
03732 CODEC_TYPE_VIDEO,
03733 CODEC_ID_H263P,
03734 sizeof(MpegEncContext),
03735 MPV_encode_init,
03736 MPV_encode_picture,
03737 MPV_encode_end,
03738 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
03739 .long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
03740 };
03741
03742 AVCodec flv_encoder = {
03743 "flv",
03744 CODEC_TYPE_VIDEO,
03745 CODEC_ID_FLV1,
03746 sizeof(MpegEncContext),
03747 MPV_encode_init,
03748 MPV_encode_picture,
03749 MPV_encode_end,
03750 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
03751 .long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV)"),
03752 };
03753
03754 AVCodec rv10_encoder = {
03755 "rv10",
03756 CODEC_TYPE_VIDEO,
03757 CODEC_ID_RV10,
03758 sizeof(MpegEncContext),
03759 MPV_encode_init,
03760 MPV_encode_picture,
03761 MPV_encode_end,
03762 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
03763 .long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
03764 };
03765
03766 AVCodec rv20_encoder = {
03767 "rv20",
03768 CODEC_TYPE_VIDEO,
03769 CODEC_ID_RV20,
03770 sizeof(MpegEncContext),
03771 MPV_encode_init,
03772 MPV_encode_picture,
03773 MPV_encode_end,
03774 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
03775 .long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
03776 };
03777
03778 AVCodec mpeg4_encoder = {
03779 "mpeg4",
03780 CODEC_TYPE_VIDEO,
03781 CODEC_ID_MPEG4,
03782 sizeof(MpegEncContext),
03783 MPV_encode_init,
03784 MPV_encode_picture,
03785 MPV_encode_end,
03786 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
03787 .capabilities= CODEC_CAP_DELAY,
03788 .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
03789 };
03790
03791 AVCodec msmpeg4v1_encoder = {
03792 "msmpeg4v1",
03793 CODEC_TYPE_VIDEO,
03794 CODEC_ID_MSMPEG4V1,
03795 sizeof(MpegEncContext),
03796 MPV_encode_init,
03797 MPV_encode_picture,
03798 MPV_encode_end,
03799 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
03800 .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"),
03801 };
03802
03803 AVCodec msmpeg4v2_encoder = {
03804 "msmpeg4v2",
03805 CODEC_TYPE_VIDEO,
03806 CODEC_ID_MSMPEG4V2,
03807 sizeof(MpegEncContext),
03808 MPV_encode_init,
03809 MPV_encode_picture,
03810 MPV_encode_end,
03811 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
03812 .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
03813 };
03814
03815 AVCodec msmpeg4v3_encoder = {
03816 "msmpeg4",
03817 CODEC_TYPE_VIDEO,
03818 CODEC_ID_MSMPEG4V3,
03819 sizeof(MpegEncContext),
03820 MPV_encode_init,
03821 MPV_encode_picture,
03822 MPV_encode_end,
03823 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
03824 .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
03825 };
03826
03827 AVCodec wmv1_encoder = {
03828 "wmv1",
03829 CODEC_TYPE_VIDEO,
03830 CODEC_ID_WMV1,
03831 sizeof(MpegEncContext),
03832 MPV_encode_init,
03833 MPV_encode_picture,
03834 MPV_encode_end,
03835 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
03836 .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
03837 };
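/* A minimal, illustrative sketch of driving one of these encoders through the
 * public libavcodec API of this era (avcodec_find_encoder_by_name(),
 * avcodec_open(), avcodec_encode_video()).  Not part of this file; outbuf,
 * outbuf_size and frame are assumed to be allocated by the caller and error
 * handling is omitted:
 *
 *     avcodec_register_all();
 *     AVCodec *codec = avcodec_find_encoder_by_name("mpeg4");
 *     AVCodecContext *c = avcodec_alloc_context();
 *     c->width     = 352;
 *     c->height    = 288;
 *     c->time_base = (AVRational){1, 25};
 *     c->pix_fmt   = PIX_FMT_YUV420P;
 *     c->bit_rate  = 400000;
 *     if (avcodec_open(c, codec) < 0)
 *         return -1;
 *     int size = avcodec_encode_video(c, outbuf, outbuf_size, frame);
 *     // write the "size" bytes, flush delayed frames (frame==NULL) for
 *     // codecs with CODEC_CAP_DELAY, then avcodec_close(c)
 */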