00001 
00002 
00003 
00004 
00005 
00006 
00007 
00008 
00009 
00010 
00011 
00012 
00013 
00014 
00015 
00016 
00017 
00018 
00019 
00020 
00021 
00022 
00023 
00024 
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043 
00044 
00045 
00046 
00047 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
00048                                    DCTELEM *block, int n, int qscale);
00049 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
00050                                    DCTELEM *block, int n, int qscale);
00051 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
00052                                    DCTELEM *block, int n, int qscale);
00053 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
00054                                    DCTELEM *block, int n, int qscale);
00055 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
00056                                    DCTELEM *block, int n, int qscale);
00057 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
00058                                   DCTELEM *block, int n, int qscale);
00059 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
00060                                   DCTELEM *block, int n, int qscale);
00061 
00062 
00063 
00064 
00065 
00066 
00067 
00068 
00069 static const uint8_t ff_default_chroma_qscale_table[32]={
00070 
00071     0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
00072 };
00073 
00074 const uint8_t ff_mpeg1_dc_scale_table[128]={
00075 
00076     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00077     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00078     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00079     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00080 };
00081 
00082 static const uint8_t mpeg2_dc_scale_table1[128]={
00083 
00084     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00085     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00086     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00087     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00088 };
00089 
00090 static const uint8_t mpeg2_dc_scale_table2[128]={
00091 
00092     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00093     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00094     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00095     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00096 };
00097 
00098 static const uint8_t mpeg2_dc_scale_table3[128]={
00099 
00100     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00101     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00102     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00103     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00104 };
00105 
00106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
00107     ff_mpeg1_dc_scale_table,
00108     mpeg2_dc_scale_table1,
00109     mpeg2_dc_scale_table2,
00110     mpeg2_dc_scale_table3,
00111 };
00112 
00113 const enum PixelFormat ff_pixfmt_list_420[] = {
00114     PIX_FMT_YUV420P,
00115     PIX_FMT_NONE
00116 };
00117 
00118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
00119     PIX_FMT_DXVA2_VLD,
00120     PIX_FMT_VAAPI_VLD,
00121     PIX_FMT_YUV420P,
00122     PIX_FMT_NONE
00123 };
00124 
00125 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
00126     int i;
00127 
00128     assert(p<=end);
00129     if(p>=end)
00130         return end;
00131 
00132     for(i=0; i<3; i++){
00133         uint32_t tmp= *state << 8;
00134         *state= tmp + *(p++);
00135         if(tmp == 0x100 || p==end)
00136             return p;
00137     }
00138 
00139     while(p<end){
00140         if     (p[-1] > 1      ) p+= 3;
00141         else if(p[-2]          ) p+= 2;
00142         else if(p[-3]|(p[-1]-1)) p++;
00143         else{
00144             p++;
00145             break;
00146         }
00147     }
00148 
00149     p= FFMIN(p, end)-4;
00150     *state= AV_RB32(p);
00151 
00152     return p+4;
00153 }
00154 
00155 
00156 av_cold int ff_dct_common_init(MpegEncContext *s)
00157 {
00158     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
00159     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
00160     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
00161     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
00162     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
00163     if(s->flags & CODEC_FLAG_BITEXACT)
00164         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
00165     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
00166 
00167 #if   HAVE_MMX
00168     MPV_common_init_mmx(s);
00169 #elif ARCH_ALPHA
00170     MPV_common_init_axp(s);
00171 #elif CONFIG_MLIB
00172     MPV_common_init_mlib(s);
00173 #elif HAVE_MMI
00174     MPV_common_init_mmi(s);
00175 #elif ARCH_ARM
00176     MPV_common_init_arm(s);
00177 #elif HAVE_ALTIVEC
00178     MPV_common_init_altivec(s);
00179 #elif ARCH_BFIN
00180     MPV_common_init_bfin(s);
00181 #endif
00182 
00183     
00184 
00185 
00186     if(s->alternate_scan){
00187         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
00188         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
00189     }else{
00190         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
00191         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
00192     }
00193     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
00194     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
00195 
00196     return 0;
00197 }
00198 
00199 void ff_copy_picture(Picture *dst, Picture *src){
00200     *dst = *src;
00201     dst->type= FF_BUFFER_TYPE_COPY;
00202 }
00203 
00207 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00208 {
00209     ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
00210     av_freep(&pic->hwaccel_picture_private);
00211 }
00212 
00216 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
00217 {
00218     int r;
00219 
00220     if (s->avctx->hwaccel) {
00221         assert(!pic->hwaccel_picture_private);
00222         if (s->avctx->hwaccel->priv_data_size) {
00223             pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
00224             if (!pic->hwaccel_picture_private) {
00225                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
00226                 return -1;
00227             }
00228         }
00229     }
00230 
00231     r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
00232 
00233     if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
00234         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
00235         av_freep(&pic->hwaccel_picture_private);
00236         return -1;
00237     }
00238 
00239     if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
00240         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
00241         free_frame_buffer(s, pic);
00242         return -1;
00243     }
00244 
00245     if (pic->linesize[1] != pic->linesize[2]) {
00246         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
00247         free_frame_buffer(s, pic);
00248         return -1;
00249     }
00250 
00251     return 0;
00252 }
00253 
00258 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
00259     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; 
00260     const int mb_array_size= s->mb_stride*s->mb_height;
00261     const int b8_array_size= s->b8_stride*s->mb_height*2;
00262     const int b4_array_size= s->b4_stride*s->mb_height*4;
00263     int i;
00264     int r= -1;
00265 
00266     if(shared){
00267         assert(pic->data[0]);
00268         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
00269         pic->type= FF_BUFFER_TYPE_SHARED;
00270     }else{
00271         assert(!pic->data[0]);
00272 
00273         if (alloc_frame_buffer(s, pic) < 0)
00274             return -1;
00275 
00276         s->linesize  = pic->linesize[0];
00277         s->uvlinesize= pic->linesize[1];
00278     }
00279 
00280     if(pic->qscale_table==NULL){
00281         if (s->encoding) {
00282             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
00283             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
00284             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
00285         }
00286 
00287         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) 
00288         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t)  , fail)
00289         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
00290         pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
00291         pic->qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
00292         if(s->out_format == FMT_H264){
00293             for(i=0; i<2; i++){
00294                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
00295                 pic->motion_val[i]= pic->motion_val_base[i]+4;
00296                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00297             }
00298             pic->motion_subsample_log2= 2;
00299         }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
00300             for(i=0; i<2; i++){
00301                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
00302                 pic->motion_val[i]= pic->motion_val_base[i]+4;
00303                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00304             }
00305             pic->motion_subsample_log2= 3;
00306         }
00307         if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00308             FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
00309         }
00310         pic->qstride= s->mb_stride;
00311         FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
00312     }
00313 
00314     
00315 
00316     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
00317     s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
00318     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
00319         pic->age= INT_MAX; 
00320     pic->owner2 = NULL;
00321 
00322     return 0;
00323 fail: 
00324     if(r>=0)
00325         free_frame_buffer(s, pic);
00326     return -1;
00327 }
00328 
00332 static void free_picture(MpegEncContext *s, Picture *pic){
00333     int i;
00334 
00335     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
00336         free_frame_buffer(s, pic);
00337     }
00338 
00339     av_freep(&pic->mb_var);
00340     av_freep(&pic->mc_mb_var);
00341     av_freep(&pic->mb_mean);
00342     av_freep(&pic->mbskip_table);
00343     av_freep(&pic->qscale_table_base);
00344     av_freep(&pic->mb_type_base);
00345     av_freep(&pic->dct_coeff);
00346     av_freep(&pic->pan_scan);
00347     pic->mb_type= NULL;
00348     for(i=0; i<2; i++){
00349         av_freep(&pic->motion_val_base[i]);
00350         av_freep(&pic->ref_index[i]);
00351     }
00352 
00353     if(pic->type == FF_BUFFER_TYPE_SHARED){
00354         for(i=0; i<4; i++){
00355             pic->base[i]=
00356             pic->data[i]= NULL;
00357         }
00358         pic->type= 0;
00359     }
00360 }
00361 
00362 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
00363     int y_size = s->b8_stride * (2 * s->mb_height + 1);
00364     int c_size = s->mb_stride * (s->mb_height + 1);
00365     int yc_size = y_size + 2 * c_size;
00366     int i;
00367 
00368     
00369     FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, (s->width+64)*2*21*2*2, fail); 
00370 
00371 
00372      
00373     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
00374     s->me.temp=         s->me.scratchpad;
00375     s->rd_scratchpad=   s->me.scratchpad;
00376     s->b_scratchpad=    s->me.scratchpad;
00377     s->obmc_scratchpad= s->me.scratchpad + 16;
00378     if (s->encoding) {
00379         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
00380         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
00381         if(s->avctx->noise_reduction){
00382             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
00383         }
00384     }
00385     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
00386     s->block= s->blocks[0];
00387 
00388     for(i=0;i<12;i++){
00389         s->pblocks[i] = &s->block[i];
00390     }
00391 
00392     if (s->out_format == FMT_H263) {
00393         
00394         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
00395         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
00396         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
00397         s->ac_val[2] = s->ac_val[1] + c_size;
00398     }
00399 
00400     return 0;
00401 fail:
00402     return -1; 
00403 }
00404 
00405 static void free_duplicate_context(MpegEncContext *s){
00406     if(s==NULL) return;
00407 
00408     av_freep(&s->edge_emu_buffer);
00409     av_freep(&s->me.scratchpad);
00410     s->me.temp=
00411     s->rd_scratchpad=
00412     s->b_scratchpad=
00413     s->obmc_scratchpad= NULL;
00414 
00415     av_freep(&s->dct_error_sum);
00416     av_freep(&s->me.map);
00417     av_freep(&s->me.score_map);
00418     av_freep(&s->blocks);
00419     av_freep(&s->ac_val_base);
00420     s->block= NULL;
00421 }
00422 
00423 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
00424 #define COPY(a) bak->a= src->a
00425     COPY(edge_emu_buffer);
00426     COPY(me.scratchpad);
00427     COPY(me.temp);
00428     COPY(rd_scratchpad);
00429     COPY(b_scratchpad);
00430     COPY(obmc_scratchpad);
00431     COPY(me.map);
00432     COPY(me.score_map);
00433     COPY(blocks);
00434     COPY(block);
00435     COPY(start_mb_y);
00436     COPY(end_mb_y);
00437     COPY(me.map_generation);
00438     COPY(pb);
00439     COPY(dct_error_sum);
00440     COPY(dct_count[0]);
00441     COPY(dct_count[1]);
00442     COPY(ac_val_base);
00443     COPY(ac_val[0]);
00444     COPY(ac_val[1]);
00445     COPY(ac_val[2]);
00446 #undef COPY
00447 }
00448 
00449 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
00450     MpegEncContext bak;
00451     int i;
00452     
00453 
00454     backup_duplicate_context(&bak, dst);
00455     memcpy(dst, src, sizeof(MpegEncContext));
00456     backup_duplicate_context(dst, &bak);
00457     for(i=0;i<12;i++){
00458         dst->pblocks[i] = &dst->block[i];
00459     }
00460 
00461 }
00462 
00463 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
00464 {
00465     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
00466 
00467     if(dst == src || !s1->context_initialized) return 0;
00468 
00469     
00470     if(!s->context_initialized){
00471         memcpy(s, s1, sizeof(MpegEncContext));
00472 
00473         s->avctx                 = dst;
00474         s->picture_range_start  += MAX_PICTURE_COUNT;
00475         s->picture_range_end    += MAX_PICTURE_COUNT;
00476         s->bitstream_buffer      = NULL;
00477         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
00478 
00479         MPV_common_init(s);
00480     }
00481 
00482     s->avctx->coded_height  = s1->avctx->coded_height;
00483     s->avctx->coded_width   = s1->avctx->coded_width;
00484     s->avctx->width         = s1->avctx->width;
00485     s->avctx->height        = s1->avctx->height;
00486 
00487     s->coded_picture_number = s1->coded_picture_number;
00488     s->picture_number       = s1->picture_number;
00489     s->input_picture_number = s1->input_picture_number;
00490 
00491     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
00492     memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
00493 
00494     s->last_picture_ptr     = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
00495     s->current_picture_ptr  = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
00496     s->next_picture_ptr     = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
00497 
00498     memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
00499 
00500     
00501     s->next_p_frame_damaged = s1->next_p_frame_damaged;
00502     s->workaround_bugs      = s1->workaround_bugs;
00503 
00504     
00505     memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
00506 
00507     
00508     s->max_b_frames         = s1->max_b_frames;
00509     s->low_delay            = s1->low_delay;
00510     s->dropable             = s1->dropable;
00511 
00512     
00513     s->divx_packed          = s1->divx_packed;
00514 
00515     if(s1->bitstream_buffer){
00516         if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
00517             av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
00518         s->bitstream_buffer_size  = s1->bitstream_buffer_size;
00519         memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
00520         memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
00521     }
00522 
00523     
00524     memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
00525 
00526     if(!s1->first_field){
00527         s->last_pict_type= s1->pict_type;
00528         if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
00529 
00530         if(s1->pict_type!=FF_B_TYPE){
00531             s->last_non_b_pict_type= s1->pict_type;
00532         }
00533     }
00534 
00535     return 0;
00536 }
00537 
00542 void MPV_common_defaults(MpegEncContext *s){
00543     s->y_dc_scale_table=
00544     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
00545     s->chroma_qscale_table= ff_default_chroma_qscale_table;
00546     s->progressive_frame= 1;
00547     s->progressive_sequence= 1;
00548     s->picture_structure= PICT_FRAME;
00549 
00550     s->coded_picture_number = 0;
00551     s->picture_number = 0;
00552     s->input_picture_number = 0;
00553 
00554     s->picture_in_gop_number = 0;
00555 
00556     s->f_code = 1;
00557     s->b_code = 1;
00558 
00559     s->picture_range_start = 0;
00560     s->picture_range_end = MAX_PICTURE_COUNT;
00561 }
00562 
00567 void MPV_decode_defaults(MpegEncContext *s){
00568     MPV_common_defaults(s);
00569 }
00570 
00575 av_cold int MPV_common_init(MpegEncContext *s)
00576 {
00577     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
00578 
00579     if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
00580         s->mb_height = (s->height + 31) / 32 * 2;
00581     else if (s->codec_id != CODEC_ID_H264)
00582         s->mb_height = (s->height + 15) / 16;
00583 
00584     if(s->avctx->pix_fmt == PIX_FMT_NONE){
00585         av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
00586         return -1;
00587     }
00588 
00589     if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
00590        (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
00591         av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
00592         return -1;
00593     }
00594 
00595     if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
00596         return -1;
00597 
00598     dsputil_init(&s->dsp, s->avctx);
00599     ff_dct_common_init(s);
00600 
00601     s->flags= s->avctx->flags;
00602     s->flags2= s->avctx->flags2;
00603 
00604     s->mb_width  = (s->width  + 15) / 16;
00605     s->mb_stride = s->mb_width + 1;
00606     s->b8_stride = s->mb_width*2 + 1;
00607     s->b4_stride = s->mb_width*4 + 1;
00608     mb_array_size= s->mb_height * s->mb_stride;
00609     mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
00610 
00611     
00612     avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
00613                                                     &(s->chroma_y_shift) );
00614 
00615     
00616     s->h_edge_pos= s->mb_width*16;
00617     s->v_edge_pos= s->mb_height*16;
00618 
00619     s->mb_num = s->mb_width * s->mb_height;
00620 
00621     s->block_wrap[0]=
00622     s->block_wrap[1]=
00623     s->block_wrap[2]=
00624     s->block_wrap[3]= s->b8_stride;
00625     s->block_wrap[4]=
00626     s->block_wrap[5]= s->mb_stride;
00627 
00628     y_size = s->b8_stride * (2 * s->mb_height + 1);
00629     c_size = s->mb_stride * (s->mb_height + 1);
00630     yc_size = y_size + 2 * c_size;
00631 
00632     
00633     s->codec_tag = ff_toupper4(s->avctx->codec_tag);
00634 
00635     s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
00636 
00637     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
00638 
00639     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) 
00640     for(y=0; y<s->mb_height; y++){
00641         for(x=0; x<s->mb_width; x++){
00642             s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
00643         }
00644     }
00645     s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; 
00646 
00647     if (s->encoding) {
00648         
00649         FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
00650         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00651         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00652         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00653         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00654         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
00655         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
00656         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
00657         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
00658         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
00659         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
00660         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
00661 
00662         if(s->msmpeg4_version){
00663             FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
00664         }
00665         FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
00666 
00667         
00668         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) 
00669 
00670         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
00671 
00672         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
00673         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
00674         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00675         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00676         FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00677         FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00678 
00679         if(s->avctx->noise_reduction){
00680             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
00681         }
00682     }
00683 
00684     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
00685     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
00686     for(i = 0; i < s->picture_count; i++) {
00687         avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
00688     }
00689 
00690     FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
00691 
00692     if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
00693         
00694             for(i=0; i<2; i++){
00695                 int j, k;
00696                 for(j=0; j<2; j++){
00697                     for(k=0; k<2; k++){
00698                         FF_ALLOCZ_OR_GOTO(s->avctx,    s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
00699                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
00700                     }
00701                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
00702                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
00703                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
00704                 }
00705                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
00706             }
00707     }
00708     if (s->out_format == FMT_H263) {
00709         
00710         FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
00711         s->coded_block= s->coded_block_base + s->b8_stride + 1;
00712 
00713         
00714         FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
00715         FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
00716     }
00717 
00718     if (s->h263_pred || s->h263_plus || !s->encoding) {
00719         
00720         
00721         FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
00722         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
00723         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
00724         s->dc_val[2] = s->dc_val[1] + c_size;
00725         for(i=0;i<yc_size;i++)
00726             s->dc_val_base[i] = 1024;
00727     }
00728 
00729     
00730     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
00731     memset(s->mbintra_table, 1, mb_array_size);
00732 
00733     
00734     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
00735     
00736     FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
00737 
00738     s->parse_context.state= -1;
00739     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
00740        s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00741        s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00742        s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00743     }
00744 
00745     s->context_initialized = 1;
00746     s->thread_context[0]= s;
00747 
00748     if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
00749         threads = s->avctx->thread_count;
00750 
00751         for(i=1; i<threads; i++){
00752             s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
00753             memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
00754         }
00755 
00756         for(i=0; i<threads; i++){
00757             if(init_duplicate_context(s->thread_context[i], s) < 0)
00758                 goto fail;
00759             s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
00760             s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
00761         }
00762     } else {
00763         if(init_duplicate_context(s, s) < 0) goto fail;
00764         s->start_mb_y = 0;
00765         s->end_mb_y   = s->mb_height;
00766 
00767     }
00768 
00769     return 0;
00770  fail:
00771     MPV_common_end(s);
00772     return -1;
00773 }
00774 
00775 
00776 void MPV_common_end(MpegEncContext *s)
00777 {
00778     int i, j, k;
00779 
00780     if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
00781         for(i=0; i<s->avctx->thread_count; i++){
00782             free_duplicate_context(s->thread_context[i]);
00783         }
00784         for(i=1; i<s->avctx->thread_count; i++){
00785             av_freep(&s->thread_context[i]);
00786         }
00787     } else free_duplicate_context(s);
00788 
00789     av_freep(&s->parse_context.buffer);
00790     s->parse_context.buffer_size=0;
00791 
00792     av_freep(&s->mb_type);
00793     av_freep(&s->p_mv_table_base);
00794     av_freep(&s->b_forw_mv_table_base);
00795     av_freep(&s->b_back_mv_table_base);
00796     av_freep(&s->b_bidir_forw_mv_table_base);
00797     av_freep(&s->b_bidir_back_mv_table_base);
00798     av_freep(&s->b_direct_mv_table_base);
00799     s->p_mv_table= NULL;
00800     s->b_forw_mv_table= NULL;
00801     s->b_back_mv_table= NULL;
00802     s->b_bidir_forw_mv_table= NULL;
00803     s->b_bidir_back_mv_table= NULL;
00804     s->b_direct_mv_table= NULL;
00805     for(i=0; i<2; i++){
00806         for(j=0; j<2; j++){
00807             for(k=0; k<2; k++){
00808                 av_freep(&s->b_field_mv_table_base[i][j][k]);
00809                 s->b_field_mv_table[i][j][k]=NULL;
00810             }
00811             av_freep(&s->b_field_select_table[i][j]);
00812             av_freep(&s->p_field_mv_table_base[i][j]);
00813             s->p_field_mv_table[i][j]=NULL;
00814         }
00815         av_freep(&s->p_field_select_table[i]);
00816     }
00817 
00818     av_freep(&s->dc_val_base);
00819     av_freep(&s->coded_block_base);
00820     av_freep(&s->mbintra_table);
00821     av_freep(&s->cbp_table);
00822     av_freep(&s->pred_dir_table);
00823 
00824     av_freep(&s->mbskip_table);
00825     av_freep(&s->prev_pict_types);
00826     av_freep(&s->bitstream_buffer);
00827     s->allocated_bitstream_buffer_size=0;
00828 
00829     av_freep(&s->avctx->stats_out);
00830     av_freep(&s->ac_stats);
00831     av_freep(&s->error_status_table);
00832     av_freep(&s->mb_index2xy);
00833     av_freep(&s->lambda_table);
00834     av_freep(&s->q_intra_matrix);
00835     av_freep(&s->q_inter_matrix);
00836     av_freep(&s->q_intra_matrix16);
00837     av_freep(&s->q_inter_matrix16);
00838     av_freep(&s->input_picture);
00839     av_freep(&s->reordered_input_picture);
00840     av_freep(&s->dct_offset);
00841 
00842     if(s->picture && !s->avctx->is_copy){
00843         for(i=0; i<s->picture_count; i++){
00844             free_picture(s, &s->picture[i]);
00845         }
00846     }
00847     av_freep(&s->picture);
00848     s->context_initialized = 0;
00849     s->last_picture_ptr=
00850     s->next_picture_ptr=
00851     s->current_picture_ptr= NULL;
00852     s->linesize= s->uvlinesize= 0;
00853 
00854     for(i=0; i<3; i++)
00855         av_freep(&s->visualization_buffer[i]);
00856 
00857     if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
00858         avcodec_default_free_buffers(s->avctx);
00859 }
00860 
00861 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
00862 {
00863     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
00864     uint8_t index_run[MAX_RUN+1];
00865     int last, run, level, start, end, i;
00866 
00867     
00868     if(static_store && rl->max_level[0])
00869         return;
00870 
00871     
00872     for(last=0;last<2;last++) {
00873         if (last == 0) {
00874             start = 0;
00875             end = rl->last;
00876         } else {
00877             start = rl->last;
00878             end = rl->n;
00879         }
00880 
00881         memset(max_level, 0, MAX_RUN + 1);
00882         memset(max_run, 0, MAX_LEVEL + 1);
00883         memset(index_run, rl->n, MAX_RUN + 1);
00884         for(i=start;i<end;i++) {
00885             run = rl->table_run[i];
00886             level = rl->table_level[i];
00887             if (index_run[run] == rl->n)
00888                 index_run[run] = i;
00889             if (level > max_level[run])
00890                 max_level[run] = level;
00891             if (run > max_run[level])
00892                 max_run[level] = run;
00893         }
00894         if(static_store)
00895             rl->max_level[last] = static_store[last];
00896         else
00897             rl->max_level[last] = av_malloc(MAX_RUN + 1);
00898         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
00899         if(static_store)
00900             rl->max_run[last] = static_store[last] + MAX_RUN + 1;
00901         else
00902             rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
00903         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
00904         if(static_store)
00905             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
00906         else
00907             rl->index_run[last] = av_malloc(MAX_RUN + 1);
00908         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
00909     }
00910 }
00911 
00912 void init_vlc_rl(RLTable *rl)
00913 {
00914     int i, q;
00915 
00916     for(q=0; q<32; q++){
00917         int qmul= q*2;
00918         int qadd= (q-1)|1;
00919 
00920         if(q==0){
00921             qmul=1;
00922             qadd=0;
00923         }
00924         for(i=0; i<rl->vlc.table_size; i++){
00925             int code= rl->vlc.table[i][0];
00926             int len = rl->vlc.table[i][1];
00927             int level, run;
00928 
00929             if(len==0){ 
00930                 run= 66;
00931                 level= MAX_LEVEL;
00932             }else if(len<0){ 
00933                 run= 0;
00934                 level= code;
00935             }else{
00936                 if(code==rl->n){ 
00937                     run= 66;
00938                     level= 0;
00939                 }else{
00940                     run=   rl->table_run  [code] + 1;
00941                     level= rl->table_level[code] * qmul + qadd;
00942                     if(code >= rl->last) run+=192;
00943                 }
00944             }
00945             rl->rl_vlc[q][i].len= len;
00946             rl->rl_vlc[q][i].level= level;
00947             rl->rl_vlc[q][i].run= run;
00948         }
00949     }
00950 }
00951 
00952 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
00953 {
00954     int i;
00955 
00956     
00957     for(i=0; i<s->picture_count; i++){
00958         if(s->picture[i].data[0] && !s->picture[i].reference
00959            && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
00960            && (remove_current || &s->picture[i] != s->current_picture_ptr)
00961            ){
00962             free_frame_buffer(s, &s->picture[i]);
00963         }
00964     }
00965 }
00966 
00967 int ff_find_unused_picture(MpegEncContext *s, int shared){
00968     int i;
00969 
00970     if(shared){
00971         for(i=s->picture_range_start; i<s->picture_range_end; i++){
00972             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
00973         }
00974     }else{
00975         for(i=s->picture_range_start; i<s->picture_range_end; i++){
00976             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; 
00977         }
00978         for(i=s->picture_range_start; i<s->picture_range_end; i++){
00979             if(s->picture[i].data[0]==NULL) return i;
00980         }
00981     }
00982 
00983     av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
00984     
00985 
00986 
00987 
00988 
00989 
00990 
00991 
00992 
00993 
00994 
00995     abort();
00996     return -1;
00997 }
00998 
00999 static void update_noise_reduction(MpegEncContext *s){
01000     int intra, i;
01001 
01002     for(intra=0; intra<2; intra++){
01003         if(s->dct_count[intra] > (1<<16)){
01004             for(i=0; i<64; i++){
01005                 s->dct_error_sum[intra][i] >>=1;
01006             }
01007             s->dct_count[intra] >>= 1;
01008         }
01009 
01010         for(i=0; i<64; i++){
01011             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
01012         }
01013     }
01014 }
01015 
01019 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
01020 {
01021     int i;
01022     Picture *pic;
01023     s->mb_skipped = 0;
01024 
01025     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
01026 
01027     
01028     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
01029       if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
01030           free_frame_buffer(s, s->last_picture_ptr);
01031 
01032         
01033         
01034         if(!s->encoding){
01035             for(i=0; i<s->picture_count; i++){
01036                 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
01037                     av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
01038                     free_frame_buffer(s, &s->picture[i]);
01039                 }
01040             }
01041         }
01042       }
01043     }
01044 
01045     if(!s->encoding){
01046         ff_release_unused_pictures(s, 1);
01047 
01048         if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
01049             pic= s->current_picture_ptr; 
01050         else{
01051             i= ff_find_unused_picture(s, 0);
01052             pic= &s->picture[i];
01053         }
01054 
01055         pic->reference= 0;
01056         if (!s->dropable){
01057             if (s->codec_id == CODEC_ID_H264)
01058                 pic->reference = s->picture_structure;
01059             else if (s->pict_type != AV_PICTURE_TYPE_B)
01060                 pic->reference = 3;
01061         }
01062 
01063         pic->coded_picture_number= s->coded_picture_number++;
01064 
01065         if(ff_alloc_picture(s, pic, 0) < 0)
01066             return -1;
01067 
01068         s->current_picture_ptr= pic;
01069         
01070         s->current_picture_ptr->top_field_first= s->top_field_first;
01071         if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
01072             if(s->picture_structure != PICT_FRAME)
01073                 s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
01074         }
01075         s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
01076         s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
01077     }
01078 
01079     s->current_picture_ptr->pict_type= s->pict_type;
01080 
01081   
01082     s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;
01083 
01084     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
01085 
01086     if (s->pict_type != AV_PICTURE_TYPE_B) {
01087         s->last_picture_ptr= s->next_picture_ptr;
01088         if(!s->dropable)
01089             s->next_picture_ptr= s->current_picture_ptr;
01090     }
01091 
01092 
01093 
01094 
01095 
01096 
01097     if(s->codec_id != CODEC_ID_H264){
01098         if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
01099            (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
01100             if (s->pict_type != AV_PICTURE_TYPE_I)
01101                 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
01102             else if (s->picture_structure != PICT_FRAME)
01103                 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
01104 
01105             
01106             i= ff_find_unused_picture(s, 0);
01107             s->last_picture_ptr= &s->picture[i];
01108             if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
01109                 return -1;
01110             ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
01111             ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
01112         }
01113         if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
01114             
01115             i= ff_find_unused_picture(s, 0);
01116             s->next_picture_ptr= &s->picture[i];
01117             if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
01118                 return -1;
01119             ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
01120             ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
01121         }
01122     }
01123 
01124     if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
01125     if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
01126 
01127     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
01128 
01129     if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
01130         int i;
01131         for(i=0; i<4; i++){
01132             if(s->picture_structure == PICT_BOTTOM_FIELD){
01133                  s->current_picture.data[i] += s->current_picture.linesize[i];
01134             }
01135             s->current_picture.linesize[i] *= 2;
01136             s->last_picture.linesize[i] *=2;
01137             s->next_picture.linesize[i] *=2;
01138         }
01139     }
01140 
01141     s->error_recognition= avctx->error_recognition;
01142 
01143     
01144 
01145     if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
01146         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
01147         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
01148     }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
01149         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
01150         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
01151     }else{
01152         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
01153         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
01154     }
01155 
01156     if(s->dct_error_sum){
01157         assert(s->avctx->noise_reduction && s->encoding);
01158 
01159         update_noise_reduction(s);
01160     }
01161 
01162     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
01163         return ff_xvmc_field_start(s, avctx);
01164 
01165     return 0;
01166 }
01167 
01168 
01169 void MPV_frame_end(MpegEncContext *s)
01170 {
01171     int i;
01172     
01173     
01174     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
01175         ff_xvmc_field_end(s);
01176    }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
01177        && !s->avctx->hwaccel
01178        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
01179        && s->unrestricted_mv
01180        && s->current_picture.reference
01181        && !s->intra_only
01182        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
01183             int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
01184             int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
01185             s->dsp.draw_edges(s->current_picture.data[0], s->linesize  ,
01186                               s->h_edge_pos             , s->v_edge_pos,
01187                               EDGE_WIDTH        , EDGE_WIDTH        , EDGE_TOP | EDGE_BOTTOM);
01188             s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
01189                               s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
01190                               EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
01191             s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
01192                               s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
01193                               EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
01194     }
01195 
01196     emms_c();
01197 
01198     s->last_pict_type    = s->pict_type;
01199     s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
01200     if(s->pict_type!=AV_PICTURE_TYPE_B){
01201         s->last_non_b_pict_type= s->pict_type;
01202     }
01203 #if 0
01204         
01205     for(i=0; i<MAX_PICTURE_COUNT; i++){
01206         if(s->picture[i].data[0] == s->current_picture.data[0]){
01207             s->picture[i]= s->current_picture;
01208             break;
01209         }
01210     }
01211     assert(i<MAX_PICTURE_COUNT);
01212 #endif
01213 
01214     if(s->encoding){
01215         
01216         for(i=0; i<s->picture_count; i++){
01217             if(s->picture[i].data[0] && !s->picture[i].reference ){
01218                 free_frame_buffer(s, &s->picture[i]);
01219             }
01220         }
01221     }
01222     
01223 #if 0
01224     memset(&s->last_picture, 0, sizeof(Picture));
01225     memset(&s->next_picture, 0, sizeof(Picture));
01226     memset(&s->current_picture, 0, sizeof(Picture));
01227 #endif
01228     s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
01229 
01230     if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
01231         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
01232     }
01233 }
01234 
01242 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01243     int x, y, fr, f;
01244 
01245     sx= av_clip(sx, 0, w-1);
01246     sy= av_clip(sy, 0, h-1);
01247     ex= av_clip(ex, 0, w-1);
01248     ey= av_clip(ey, 0, h-1);
01249 
01250     buf[sy*stride + sx]+= color;
01251 
01252     if(FFABS(ex - sx) > FFABS(ey - sy)){
01253         if(sx > ex){
01254             FFSWAP(int, sx, ex);
01255             FFSWAP(int, sy, ey);
01256         }
01257         buf+= sx + sy*stride;
01258         ex-= sx;
01259         f= ((ey-sy)<<16)/ex;
01260         for(x= 0; x <= ex; x++){
01261             y = (x*f)>>16;
01262             fr= (x*f)&0xFFFF;
01263             buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
01264             buf[(y+1)*stride + x]+= (color*         fr )>>16;
01265         }
01266     }else{
01267         if(sy > ey){
01268             FFSWAP(int, sx, ex);
01269             FFSWAP(int, sy, ey);
01270         }
01271         buf+= sx + sy*stride;
01272         ey-= sy;
01273         if(ey) f= ((ex-sx)<<16)/ey;
01274         else   f= 0;
01275         for(y= 0; y <= ey; y++){
01276             x = (y*f)>>16;
01277             fr= (y*f)&0xFFFF;
01278             buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
01279             buf[y*stride + x+1]+= (color*         fr )>>16;
01280         }
01281     }
01282 }
01283 
01291 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01292     int dx,dy;
01293 
01294     sx= av_clip(sx, -100, w+100);
01295     sy= av_clip(sy, -100, h+100);
01296     ex= av_clip(ex, -100, w+100);
01297     ey= av_clip(ey, -100, h+100);
01298 
01299     dx= ex - sx;
01300     dy= ey - sy;
01301 
01302     if(dx*dx + dy*dy > 3*3){
01303         int rx=  dx + dy;
01304         int ry= -dx + dy;
01305         int length= ff_sqrt((rx*rx + ry*ry)<<8);
01306 
01307         
01308         rx= ROUNDED_DIV(rx*3<<4, length);
01309         ry= ROUNDED_DIV(ry*3<<4, length);
01310 
01311         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01312         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01313     }
01314     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01315 }
01316 
01320 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
01321 
01322     if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
01323 
01324     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
01325         int x,y;
01326 
01327         av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
01328                av_get_picture_type_char(pict->pict_type));
01329         for(y=0; y<s->mb_height; y++){
01330             for(x=0; x<s->mb_width; x++){
01331                 if(s->avctx->debug&FF_DEBUG_SKIP){
01332                     int count= s->mbskip_table[x + y*s->mb_stride];
01333                     if(count>9) count=9;
01334                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01335                 }
01336                 if(s->avctx->debug&FF_DEBUG_QP){
01337                     av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
01338                 }
01339                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
01340                     int mb_type= pict->mb_type[x + y*s->mb_stride];
01341                     
01342                     if(IS_PCM(mb_type))
01343                         av_log(s->avctx, AV_LOG_DEBUG, "P");
01344                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01345                         av_log(s->avctx, AV_LOG_DEBUG, "A");
01346                     else if(IS_INTRA4x4(mb_type))
01347                         av_log(s->avctx, AV_LOG_DEBUG, "i");
01348                     else if(IS_INTRA16x16(mb_type))
01349                         av_log(s->avctx, AV_LOG_DEBUG, "I");
01350                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01351                         av_log(s->avctx, AV_LOG_DEBUG, "d");
01352                     else if(IS_DIRECT(mb_type))
01353                         av_log(s->avctx, AV_LOG_DEBUG, "D");
01354                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
01355                         av_log(s->avctx, AV_LOG_DEBUG, "g");
01356                     else if(IS_GMC(mb_type))
01357                         av_log(s->avctx, AV_LOG_DEBUG, "G");
01358                     else if(IS_SKIP(mb_type))
01359                         av_log(s->avctx, AV_LOG_DEBUG, "S");
01360                     else if(!USES_LIST(mb_type, 1))
01361                         av_log(s->avctx, AV_LOG_DEBUG, ">");
01362                     else if(!USES_LIST(mb_type, 0))
01363                         av_log(s->avctx, AV_LOG_DEBUG, "<");
01364                     else{
01365                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01366                         av_log(s->avctx, AV_LOG_DEBUG, "X");
01367                     }
01368 
01369                     
01370                     if(IS_8X8(mb_type))
01371                         av_log(s->avctx, AV_LOG_DEBUG, "+");
01372                     else if(IS_16X8(mb_type))
01373                         av_log(s->avctx, AV_LOG_DEBUG, "-");
01374                     else if(IS_8X16(mb_type))
01375                         av_log(s->avctx, AV_LOG_DEBUG, "|");
01376                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
01377                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01378                     else
01379                         av_log(s->avctx, AV_LOG_DEBUG, "?");
01380 
01381 
01382                     if(IS_INTERLACED(mb_type))
01383                         av_log(s->avctx, AV_LOG_DEBUG, "=");
01384                     else
01385                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01386                 }
01387 
01388             }
01389             av_log(s->avctx, AV_LOG_DEBUG, "\n");
01390         }
01391     }
01392 
01393     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
01394         const int shift= 1 + s->quarter_sample;
01395         int mb_y;
01396         uint8_t *ptr;
01397         int i;
01398         int h_chroma_shift, v_chroma_shift, block_height;
01399         const int width = s->avctx->width;
01400         const int height= s->avctx->height;
01401         const int mv_sample_log2= 4 - pict->motion_subsample_log2;
01402         const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01403         s->low_delay=0; 
01404 
01405         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
01406         for(i=0; i<3; i++){
01407             memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
01408             pict->data[i]= s->visualization_buffer[i];
01409         }
01410         pict->type= FF_BUFFER_TYPE_COPY;
01411         pict->opaque= NULL;
01412         ptr= pict->data[0];
01413         block_height = 16>>v_chroma_shift;
01414 
01415         for(mb_y=0; mb_y<s->mb_height; mb_y++){
01416             int mb_x;
01417             for(mb_x=0; mb_x<s->mb_width; mb_x++){
01418                 const int mb_index= mb_x + mb_y*s->mb_stride;
01419                 if((s->avctx->debug_mv) && pict->motion_val){
01420                   int type;
01421                   for(type=0; type<3; type++){
01422                     int direction = 0;
01423                     switch (type) {
01424                       case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
01425                                 continue;
01426                               direction = 0;
01427                               break;
01428                       case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
01429                                 continue;
01430                               direction = 0;
01431                               break;
01432                       case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
01433                                 continue;
01434                               direction = 1;
01435                               break;
01436                     }
01437                     if(!USES_LIST(pict->mb_type[mb_index], direction))
01438                         continue;
01439 
01440                     if(IS_8X8(pict->mb_type[mb_index])){
01441                       int i;
01442                       for(i=0; i<4; i++){
01443                         int sx= mb_x*16 + 4 + 8*(i&1);
01444                         int sy= mb_y*16 + 4 + 8*(i>>1);
01445                         int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01446                         int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01447                         int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01448                         draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01449                       }
01450                     }else if(IS_16X8(pict->mb_type[mb_index])){
01451                       int i;
01452                       for(i=0; i<2; i++){
01453                         int sx=mb_x*16 + 8;
01454                         int sy=mb_y*16 + 4 + 8*i;
01455                         int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
01456                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01457                         int my=(pict->motion_val[direction][xy][1]>>shift);
01458 
01459                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01460                             my*=2;
01461 
01462                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01463                       }
01464                     }else if(IS_8X16(pict->mb_type[mb_index])){
01465                       int i;
01466                       for(i=0; i<2; i++){
01467                         int sx=mb_x*16 + 4 + 8*i;
01468                         int sy=mb_y*16 + 8;
01469                         int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
01470                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01471                         int my=(pict->motion_val[direction][xy][1]>>shift);
01472 
01473                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01474                             my*=2;
01475 
01476                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01477                       }
01478                     }else{
01479                       int sx= mb_x*16 + 8;
01480                       int sy= mb_y*16 + 8;
01481                       int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
01482                       int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01483                       int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01484                       draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01485                     }
01486                   }
01487                 }
01488                 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
01489                     uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
01490                     int y;
01491                     for(y=0; y<block_height; y++){
01492                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
01493                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
01494                     }
01495                 }
01496                 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
01497                     int mb_type= pict->mb_type[mb_index];
01498                     uint64_t u,v;
01499                     int y;
01500 #define COLOR(theta, r)\
01501 u= (int)(128 + r*cos(theta*3.141592/180));\
01502 v= (int)(128 + r*sin(theta*3.141592/180));
01503 
01504 
01505                     u=v=128;
01506                     if(IS_PCM(mb_type)){
01507                         COLOR(120,48)
01508                     }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
01509                         COLOR(30,48)
01510                     }else if(IS_INTRA4x4(mb_type)){
01511                         COLOR(90,48)
01512                     }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
01513 
01514                     }else if(IS_DIRECT(mb_type)){
01515                         COLOR(150,48)
01516                     }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
01517                         COLOR(170,48)
01518                     }else if(IS_GMC(mb_type)){
01519                         COLOR(190,48)
01520                     }else if(IS_SKIP(mb_type)){
01521 
01522                     }else if(!USES_LIST(mb_type, 1)){
01523                         COLOR(240,48)
01524                     }else if(!USES_LIST(mb_type, 0)){
01525                         COLOR(0,48)
01526                     }else{
01527                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01528                         COLOR(300,48)
01529                     }
01530 
01531                     u*= 0x0101010101010101ULL;
01532                     v*= 0x0101010101010101ULL;
01533                     for(y=0; y<block_height; y++){
01534                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
01535                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
01536                     }
01537 
01538                     // mark the sub-macroblock segmentation (partition boundaries) in luma
01539                     if(IS_8X8(mb_type) || IS_16X8(mb_type)){
01540                         *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01541                         *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01542                     }
01543                     if(IS_8X8(mb_type) || IS_8X16(mb_type)){
01544                         for(y=0; y<16; y++)
01545                             pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
01546                     }
01547                     if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
01548                         int dm= 1 << (mv_sample_log2-2);
01549                         for(i=0; i<4; i++){
01550                             int sx= mb_x*16 + 8*(i&1);
01551                             int sy= mb_y*16 + 8*(i>>1);
01552                             int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01553                             
01554                             int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
01555                             if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
01556                                 for(y=0; y<8; y++)
01557                                     pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
01558                             if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
01559                                 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
01560                         }
01561                     }
01562 
01563                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
01564                         
01565                     }
01566                 }
01567                 s->mbskip_table[mb_index]=0;
01568             }
01569         }
01570     }
01571 }
01572 
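/* Half-pel motion compensation of one block on a lowres-scaled plane.
 * Returns 1 if the edge emulation buffer had to be used, 0 otherwise. */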
01573 static inline int hpel_motion_lowres(MpegEncContext *s,
01574                                   uint8_t *dest, uint8_t *src,
01575                                   int field_based, int field_select,
01576                                   int src_x, int src_y,
01577                                   int width, int height, int stride,
01578                                   int h_edge_pos, int v_edge_pos,
01579                                   int w, int h, h264_chroma_mc_func *pix_op,
01580                                   int motion_x, int motion_y)
01581 {
01582     const int lowres= s->avctx->lowres;
01583     const int op_index= FFMIN(lowres, 2);
01584     const int s_mask= (2<<lowres)-1;
01585     int emu=0;
01586     int sx, sy;
01587 
01588     if(s->quarter_sample){
01589         motion_x/=2;
01590         motion_y/=2;
01591     }
01592 
01593     sx= motion_x & s_mask;
01594     sy= motion_y & s_mask;
01595     src_x += motion_x >> (lowres+1);
01596     src_y += motion_y >> (lowres+1);
01597 
01598     src += src_y * stride + src_x;
01599 
01600     if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
01601        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
01602         s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
01603                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
01604         src= s->edge_emu_buffer;
01605         emu=1;
01606     }
01607 
01608     sx= (sx << 2) >> lowres;
01609     sy= (sy << 2) >> lowres;
01610     if(field_select)
01611         src += s->linesize;
01612     pix_op[op_index](dest, src, stride, h, sx, sy);
01613     return emu;
01614 }
01615 
01616 
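/* MPEG-style luma + chroma motion compensation of one macroblock on
 * lowres-scaled planes; supports frame- and field-based prediction and
 * the 4:2:0 / 4:2:2 / 4:4:4 chroma layouts. */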
01617 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
01618                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
01619                                int field_based, int bottom_field, int field_select,
01620                                uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
01621                                int motion_x, int motion_y, int h, int mb_y)
01622 {
01623     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01624     int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
01625     const int lowres= s->avctx->lowres;
01626     const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
01627     const int block_s= 8>>lowres;
01628     const int s_mask= (2<<lowres)-1;
01629     const int h_edge_pos = s->h_edge_pos >> lowres;
01630     const int v_edge_pos = s->v_edge_pos >> lowres;
01631     linesize   = s->current_picture.linesize[0] << field_based;
01632     uvlinesize = s->current_picture.linesize[1] << field_based;
01633 
01634     if(s->quarter_sample){ 
01635         motion_x/=2;
01636         motion_y/=2;
01637     }
01638 
01639     if(field_based){
01640         motion_y += (bottom_field - field_select)*((1<<lowres)-1);
01641     }
01642 
01643     sx= motion_x & s_mask;
01644     sy= motion_y & s_mask;
01645     src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
01646     src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
01647 
01648     if (s->out_format == FMT_H263) {
01649         uvsx = ((motion_x>>1) & s_mask) | (sx&1);
01650         uvsy = ((motion_y>>1) & s_mask) | (sy&1);
01651         uvsrc_x = src_x>>1;
01652         uvsrc_y = src_y>>1;
01653     }else if(s->out_format == FMT_H261){
01654         mx = motion_x / 4;
01655         my = motion_y / 4;
01656         uvsx = (2*mx) & s_mask;
01657         uvsy = (2*my) & s_mask;
01658         uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
01659         uvsrc_y =    mb_y*block_s               + (my >> lowres);
01660     } else {
01661         if(s->chroma_y_shift){
01662             mx = motion_x / 2;
01663             my = motion_y / 2;
01664             uvsx = mx & s_mask;
01665             uvsy = my & s_mask;
01666             uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
01667             uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
01668         } else {
01669             if(s->chroma_x_shift){
01670                 // 4:2:2 chroma: only the horizontal vector is halved
01671                 mx = motion_x / 2;
01672                 uvsx = mx & s_mask;
01673                 uvsy = motion_y & s_mask;
01674                 uvsrc_y = src_y;
01675                 uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
01676             } else {
01677                 // 4:4:4 chroma: the luma vectors are used unchanged
01678                 uvsx = motion_x & s_mask;
01679                 uvsy = motion_y & s_mask;
01680                 uvsrc_x = src_x;
01681                 uvsrc_y = src_y;
01682             }
01683         }
01684     }
01685 
01686     ptr_y  = ref_picture[0] + src_y * linesize + src_x;
01687     ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
01688     ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
01689 
01690     if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
01691        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
01692             s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
01693                              src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
01694             ptr_y = s->edge_emu_buffer;
01695             if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01696                 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
01697                 s->dsp.emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
01698                                  uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
01699                 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
01700                                  uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
01701                 ptr_cb= uvbuf;
01702                 ptr_cr= uvbuf+16;
01703             }
01704     }
01705 
01706     if(bottom_field){ 
01707         dest_y += s->linesize;
01708         dest_cb+= s->uvlinesize;
01709         dest_cr+= s->uvlinesize;
01710     }
01711 
01712     if(field_select){
01713         ptr_y += s->linesize;
01714         ptr_cb+= s->uvlinesize;
01715         ptr_cr+= s->uvlinesize;
01716     }
01717 
01718     sx= (sx << 2) >> lowres;
01719     sy= (sy << 2) >> lowres;
01720     pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
01721 
01722     if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01723         uvsx= (uvsx << 2) >> lowres;
01724         uvsy= (uvsy << 2) >> lowres;
01725         if(h >> s->chroma_y_shift){
01726             pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
01727             pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
01728         }
01729     }
01730     
01731 }
01732 
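/* Chroma motion compensation for 4MV (8x8) macroblocks at lowres: a single
 * chroma vector, derived from the four luma vectors with H.263 rounding,
 * is applied to both chroma planes. */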
01733 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01734                                      uint8_t *dest_cb, uint8_t *dest_cr,
01735                                      uint8_t **ref_picture,
01736                                      h264_chroma_mc_func *pix_op,
01737                                      int mx, int my){
01738     const int lowres= s->avctx->lowres;
01739     const int op_index= FFMIN(lowres, 2);
01740     const int block_s= 8>>lowres;
01741     const int s_mask= (2<<lowres)-1;
01742     const int h_edge_pos = s->h_edge_pos >> (lowres+1);
01743     const int v_edge_pos = s->v_edge_pos >> (lowres+1);
01744     int emu=0, src_x, src_y, offset, sx, sy;
01745     uint8_t *ptr;
01746 
01747     if(s->quarter_sample){
01748         mx/=2;
01749         my/=2;
01750     }
01751 
01752     /* for 8x8 blocks a single chroma motion vector is built with special rounding */
01753 
01754     mx= ff_h263_round_chroma(mx);
01755     my= ff_h263_round_chroma(my);
01756 
01757     sx= mx & s_mask;
01758     sy= my & s_mask;
01759     src_x = s->mb_x*block_s + (mx >> (lowres+1));
01760     src_y = s->mb_y*block_s + (my >> (lowres+1));
01761 
01762     offset = src_y * s->uvlinesize + src_x;
01763     ptr = ref_picture[1] + offset;
01764     if(s->flags&CODEC_FLAG_EMU_EDGE){
01765         if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
01766            || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
01767             s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01768             ptr= s->edge_emu_buffer;
01769             emu=1;
01770         }
01771     }
01772     sx= (sx << 2) >> lowres;
01773     sy= (sy << 2) >> lowres;
01774     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
01775 
01776     ptr = ref_picture[2] + offset;
01777     if(emu){
01778         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01779         ptr= s->edge_emu_buffer;
01780     }
01781     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
01782 }
01783 
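/* Motion compensation of a single macroblock at reduced (lowres) resolution.
 * Dispatches on s->mv_type (16x16, 8x8, field, 16x8, dual prime) and writes
 * the prediction into dest_y/dest_cb/dest_cr. */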
01795 static inline void MPV_motion_lowres(MpegEncContext *s,
01796                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
01797                               int dir, uint8_t **ref_picture,
01798                               h264_chroma_mc_func *pix_op)
01799 {
01800     int mx, my;
01801     int mb_x, mb_y, i;
01802     const int lowres= s->avctx->lowres;
01803     const int block_s= 8>>lowres;
01804 
01805     mb_x = s->mb_x;
01806     mb_y = s->mb_y;
01807 
01808     switch(s->mv_type) {
01809     case MV_TYPE_16X16:
01810         mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01811                     0, 0, 0,
01812                     ref_picture, pix_op,
01813                     s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
01814         break;
01815     case MV_TYPE_8X8:
01816         mx = 0;
01817         my = 0;
01818         for(i=0;i<4;i++) {
01819             hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
01820                         ref_picture[0], 0, 0,
01821                         (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
01822                         s->width, s->height, s->linesize,
01823                         s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
01824                         block_s, block_s, pix_op,
01825                         s->mv[dir][i][0], s->mv[dir][i][1]);
01826 
01827             mx += s->mv[dir][i][0];
01828             my += s->mv[dir][i][1];
01829         }
01830 
01831         if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
01832             chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
01833         break;
01834     case MV_TYPE_FIELD:
01835         if (s->picture_structure == PICT_FRAME) {
01836             // top field
01837             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01838                         1, 0, s->field_select[dir][0],
01839                         ref_picture, pix_op,
01840                         s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
01841             // bottom field
01842             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01843                         1, 1, s->field_select[dir][1],
01844                         ref_picture, pix_op,
01845                         s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
01846         } else {
01847             if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
01848                 ref_picture= s->current_picture_ptr->data;
01849             }
01850 
01851             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01852                         0, 0, s->field_select[dir][0],
01853                         ref_picture, pix_op,
01854                         s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
01855         }
01856         break;
01857     case MV_TYPE_16X8:
01858         for(i=0; i<2; i++){
01859             uint8_t ** ref2picture;
01860 
01861             if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
01862                 ref2picture= ref_picture;
01863             }else{
01864                 ref2picture= s->current_picture_ptr->data;
01865             }
01866 
01867             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01868                         0, 0, s->field_select[dir][i],
01869                         ref2picture, pix_op,
01870                         s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
01871 
01872             dest_y += 2*block_s*s->linesize;
01873             dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
01874             dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
01875         }
01876         break;
01877     case MV_TYPE_DMV:
01878         if(s->picture_structure == PICT_FRAME){
01879             for(i=0; i<2; i++){
01880                 int j;
01881                 for(j=0; j<2; j++){
01882                     mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01883                                 1, j, j^i,
01884                                 ref_picture, pix_op,
01885                                 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
01886                 }
01887                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
01888             }
01889         }else{
01890             for(i=0; i<2; i++){
01891                 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01892                             0, 0, s->picture_structure != i+1,
01893                             ref_picture, pix_op,
01894                             s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
01895 
01896                 // after put we make avg of the last and the current frame
01897                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
01898 
01899                 // opposite parity is always in the same frame if this is the second field
01900                 if(!s->first_field){
01901                     ref_picture = s->current_picture_ptr->data;
01902                 }
01903             }
01904         }
01905     break;
01906     default: assert(0);
01907     }
01908 }
01909 
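/* Return the lowest macroblock row of the reference picture that the current
 * macroblock's motion vectors (in direction dir) can reach; used by
 * frame-threaded decoding to wait until the reference is decoded far enough. */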
01913 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
01914 {
01915     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
01916     int my, off, i, mvs;
01917 
01918     if (s->picture_structure != PICT_FRAME) goto unhandled;
01919 
01920     switch (s->mv_type) {
01921         case MV_TYPE_16X16:
01922             mvs = 1;
01923             break;
01924         case MV_TYPE_16X8:
01925             mvs = 2;
01926             break;
01927         case MV_TYPE_8X8:
01928             mvs = 4;
01929             break;
01930         default:
01931             goto unhandled;
01932     }
01933 
01934     for (i = 0; i < mvs; i++) {
01935         my = s->mv[dir][i][1]<<qpel_shift;
01936         my_max = FFMAX(my_max, my);
01937         my_min = FFMIN(my_min, my);
01938     }
01939 
01940     off = (FFMAX(-my_min, my_max) + 63) >> 6;
01941 
01942     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
01943 unhandled:
01944     return s->mb_height-1;
01945 }
01946 
01947 
01948 static inline void put_dct(MpegEncContext *s,
01949                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01950 {
01951     s->dct_unquantize_intra(s, block, i, qscale);
01952     s->dsp.idct_put (dest, line_size, block);
01953 }
01954 
01955 
01956 static inline void add_dct(MpegEncContext *s,
01957                            DCTELEM *block, int i, uint8_t *dest, int line_size)
01958 {
01959     if (s->block_last_index[i] >= 0) {
01960         s->dsp.idct_add (dest, line_size, block);
01961     }
01962 }
01963 
01964 static inline void add_dequant_dct(MpegEncContext *s,
01965                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01966 {
01967     if (s->block_last_index[i] >= 0) {
01968         s->dct_unquantize_inter(s, block, i, qscale);
01969 
01970         s->dsp.idct_add (dest, line_size, block);
01971     }
01972 }
01973 
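/* Reset the DC (to 1024) and AC prediction values of the current macroblock,
 * plus the coded_block flags for MSMPEG4v3+, and clear its mbintra_table entry. */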
01977 void ff_clean_intra_table_entries(MpegEncContext *s)
01978 {
01979     int wrap = s->b8_stride;
01980     int xy = s->block_index[0];
01981 
01982     s->dc_val[0][xy           ] =
01983     s->dc_val[0][xy + 1       ] =
01984     s->dc_val[0][xy     + wrap] =
01985     s->dc_val[0][xy + 1 + wrap] = 1024;
01986     /* reset luma AC prediction values */
01987     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
01988     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
01989     if (s->msmpeg4_version>=3) {
01990         s->coded_block[xy           ] =
01991         s->coded_block[xy + 1       ] =
01992         s->coded_block[xy     + wrap] =
01993         s->coded_block[xy + 1 + wrap] = 0;
01994     }
01995     /* chroma */
01996     wrap = s->mb_stride;
01997     xy = s->mb_x + s->mb_y * wrap;
01998     s->dc_val[1][xy] =
01999     s->dc_val[2][xy] = 1024;
02000     /* reset chroma AC prediction values */
02001     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02002     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02003 
02004     s->mbintra_table[xy]= 0;
02005 }
02006 
02007 
02008 
02009 
02010 
02011 
02012 
02013 
02014 
02015 
02016 
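/* Reconstruct one macroblock: motion compensation (when not intra), then
 * dequantization and inverse DCT of the coefficient blocks.  The lowres_flag
 * and is_mpeg12 parameters let the compiler build specialized variants. */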
02017 static av_always_inline
02018 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
02019                             int lowres_flag, int is_mpeg12)
02020 {
02021     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
02022     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
02023         ff_xvmc_decode_mb(s);
02024         return;
02025     }
02026 
02027     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
02028        
02029        int i,j;
02030        DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
02031        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
02032        for(i=0; i<6; i++){
02033            for(j=0; j<64; j++){
02034                *dct++ = block[i][s->dsp.idct_permutation[j]];
02035                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
02036            }
02037            av_log(s->avctx, AV_LOG_DEBUG, "\n");
02038        }
02039     }
02040 
02041     s->current_picture.qscale_table[mb_xy]= s->qscale;
02042 
02043     /* update DC predictors for P macroblocks */
02044     if (!s->mb_intra) {
02045         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
02046             if(s->mbintra_table[mb_xy])
02047                 ff_clean_intra_table_entries(s);
02048         } else {
02049             s->last_dc[0] =
02050             s->last_dc[1] =
02051             s->last_dc[2] = 128 << s->intra_dc_precision;
02052         }
02053     }
02054     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
02055         s->mbintra_table[mb_xy]=1;
02056 
02057     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { 
02058         uint8_t *dest_y, *dest_cb, *dest_cr;
02059         int dct_linesize, dct_offset;
02060         op_pixels_func (*op_pix)[4];
02061         qpel_mc_func (*op_qpix)[16];
02062         const int linesize= s->current_picture.linesize[0]; // not s->linesize as this would be wrong for field pictures
02063         const int uvlinesize= s->current_picture.linesize[1];
02064         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
02065         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
02066 
02067         /* avoid the copy if the macroblock was skipped in the last frame too */
02068         /* skip only during decoding as we might trash the buffers during encoding a bit */
02069         if(!s->encoding){
02070             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
02071             const int age= s->current_picture.age;
02072 
02073             assert(age);
02074 
02075             if (s->mb_skipped) {
02076                 s->mb_skipped= 0;
02077                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
02078 
02079                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
02080                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
02081 
02082                 /* if the previous MB was skipped too, then nothing to do! */
02083                 if (*mbskip_ptr >= age && s->current_picture.reference){
02084                     return;
02085                 }
02086             } else if(!s->current_picture.reference){
02087                 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
02088                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
02089             } else{
02090                 *mbskip_ptr = 0; /* not skipped */
02091             }
02092         }
02093 
02094         dct_linesize = linesize << s->interlaced_dct;
02095         dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
02096 
02097         if(readable){
02098             dest_y=  s->dest[0];
02099             dest_cb= s->dest[1];
02100             dest_cr= s->dest[2];
02101         }else{
02102             dest_y = s->b_scratchpad;
02103             dest_cb= s->b_scratchpad+16*linesize;
02104             dest_cr= s->b_scratchpad+32*linesize;
02105         }
02106 
02107         if (!s->mb_intra) {
02108             /* motion handling */
02109             /* decoding or more than one reference frame */
02110             if(!s->encoding){
02111 
02112                 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
02113                     if (s->mv_dir & MV_DIR_FORWARD) {
02114                         ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
02115                     }
02116                     if (s->mv_dir & MV_DIR_BACKWARD) {
02117                         ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
02118                     }
02119                 }
02120 
02121                 if(lowres_flag){
02122                     h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
02123 
02124                     if (s->mv_dir & MV_DIR_FORWARD) {
02125                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
02126                         op_pix = s->dsp.avg_h264_chroma_pixels_tab;
02127                     }
02128                     if (s->mv_dir & MV_DIR_BACKWARD) {
02129                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
02130                     }
02131                 }else{
02132                     op_qpix= s->me.qpel_put;
02133                     if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
02134                         op_pix = s->dsp.put_pixels_tab;
02135                     }else{
02136                         op_pix = s->dsp.put_no_rnd_pixels_tab;
02137                     }
02138                     if (s->mv_dir & MV_DIR_FORWARD) {
02139                         MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
02140                         op_pix = s->dsp.avg_pixels_tab;
02141                         op_qpix= s->me.qpel_avg;
02142                     }
02143                     if (s->mv_dir & MV_DIR_BACKWARD) {
02144                         MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
02145                     }
02146                 }
02147             }
02148 
02149             /* skip dequant / idct if we are really late ;) */
02150             if(s->avctx->skip_idct){
02151                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
02152                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
02153                    || s->avctx->skip_idct >= AVDISCARD_ALL)
02154                     goto skip_idct;
02155             }
02156 
02157             /* add dct residue */
02158             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
02159                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
02160                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02161                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02162                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02163                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02164 
02165                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02166                     if (s->chroma_y_shift){
02167                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02168                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02169                     }else{
02170                         dct_linesize >>= 1;
02171                         dct_offset >>=1;
02172                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02173                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02174                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02175                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02176                     }
02177                 }
02178             } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
02179                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
02180                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
02181                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
02182                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
02183 
02184                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02185                     if(s->chroma_y_shift){
02186                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
02187                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
02188                     }else{
02189                         /* 4:2:2 / 4:4:4 chroma */
02190                         dct_linesize = uvlinesize << s->interlaced_dct;
02191                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
02192 
02193                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
02194                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
02195                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
02196                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
02197                         if(!s->chroma_x_shift){
02198                             add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
02199                             add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
02200                             add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
02201                             add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
02202                         }
02203                     }
02204                 }
02205             }
02206             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02207                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02208             }
02209         } else {
02210             /* dct only in intra block */
02211             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02212                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02213                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02214                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02215                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02216 
02217                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02218                     if(s->chroma_y_shift){
02219                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02220                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02221                     }else{
02222                         dct_offset >>=1;
02223                         dct_linesize >>=1;
02224                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02225                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02226                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02227                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02228                     }
02229                 }
02230             }else{
02231                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
02232                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
02233                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
02234                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02235 
02236                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02237                     if(s->chroma_y_shift){
02238                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02239                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02240                     }else{
02241                         /* 4:2:2 / 4:4:4 chroma */
02242                         dct_linesize = uvlinesize << s->interlaced_dct;
02243                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
02244 
02245                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
02246                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
02247                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02248                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02249                         if(!s->chroma_x_shift){
02250                             s->dsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
02251                             s->dsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
02252                             s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
02253                             s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
02254                         }
02255                     }
02256                 }
02257             }
02258         }
02259 skip_idct:
02260         if(!readable){
02261             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
02262             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02263             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02264         }
02265     }
02266 }
02267 
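/* Thin wrapper that picks the appropriate MPV_decode_mb_internal()
 * specialization (lowres and, unless CONFIG_SMALL, MPEG-1/2). */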
02268 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02269 #if !CONFIG_SMALL
02270     if(s->out_format == FMT_MPEG1) {
02271         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02272         else                 MPV_decode_mb_internal(s, block, 0, 1);
02273     } else
02274 #endif
02275     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02276     else                  MPV_decode_mb_internal(s, block, 0, 0);
02277 }
02278 
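/* Called when a horizontal band of the picture is complete: pads the picture
 * edges where unrestricted motion vectors may reach and invokes the user's
 * draw_horiz_band callback if one is set. */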
02283 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02284     const int field_pic= s->picture_structure != PICT_FRAME;
02285     if(field_pic){
02286         h <<= 1;
02287         y <<= 1;
02288     }
02289 
02290     if (!s->avctx->hwaccel
02291        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
02292        && s->unrestricted_mv
02293        && s->current_picture.reference
02294        && !s->intra_only
02295        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
02296         int sides = 0, edge_h;
02297         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
02298         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
02299         if (y==0) sides |= EDGE_TOP;
02300         if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
02301 
02302         edge_h= FFMIN(h, s->v_edge_pos - y);
02303 
02304         s->dsp.draw_edges(s->current_picture_ptr->data[0] +  y         *s->linesize,
02305                           s->linesize,           s->h_edge_pos,         edge_h,
02306                           EDGE_WIDTH,            EDGE_WIDTH,            sides);
02307         s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>vshift)*s->uvlinesize,
02308                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
02309                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
02310         s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>vshift)*s->uvlinesize,
02311                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
02312                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
02313     }
02314 
02315     h= FFMIN(h, s->avctx->height - y);
02316 
02317     if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
02318 
02319     if (s->avctx->draw_horiz_band) {
02320         AVFrame *src;
02321         int offset[4];
02322 
02323         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02324             src= (AVFrame*)s->current_picture_ptr;
02325         else if(s->last_picture_ptr)
02326             src= (AVFrame*)s->last_picture_ptr;
02327         else
02328             return;
02329 
02330         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02331             offset[0]=
02332             offset[1]=
02333             offset[2]=
02334             offset[3]= 0;
02335         }else{
02336             offset[0]= y * s->linesize;
02337             offset[1]=
02338             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02339             offset[3]= 0;
02340         }
02341 
02342         emms_c();
02343 
02344         s->avctx->draw_horiz_band(s->avctx, src, offset,
02345                                   y, s->picture_structure, h);
02346     }
02347 }
02348 
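/* Initialize s->block_index[] and the s->dest[] pointers for the macroblock
 * at (s->mb_x, s->mb_y) before it is processed. */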
02349 void ff_init_block_index(MpegEncContext *s){ 
02350     const int linesize= s->current_picture.linesize[0]; // not s->linesize as this would be wrong for field pictures
02351     const int uvlinesize= s->current_picture.linesize[1];
02352     const int mb_size= 4 - s->avctx->lowres;
02353 
02354     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
02355     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
02356     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02357     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02358     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02359     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02360     // block_index is not used by MPEG-2, so it is not affected by chroma_format
02361 
02362     s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
02363     s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02364     s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02365 
02366     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02367     {
02368         if(s->picture_structure==PICT_FRAME){
02369             s->dest[0] += s->mb_y *   linesize << mb_size;
02370             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02371             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02372         }else{
02373             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
02374             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02375             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02376             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02377         }
02378     }
02379 }
02380 
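/* Flush the decoder: release all internally held picture buffers and reset
 * the parse context and bitstream buffer (used e.g. when seeking). */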
02381 void ff_mpeg_flush(AVCodecContext *avctx){
02382     int i;
02383     MpegEncContext *s = avctx->priv_data;
02384 
02385     if(s==NULL || s->picture==NULL)
02386         return;
02387 
02388     for(i=0; i<s->picture_count; i++){
02389        if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
02390                                     || s->picture[i].type == FF_BUFFER_TYPE_USER))
02391         free_frame_buffer(s, &s->picture[i]);
02392     }
02393     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02394 
02395     s->mb_x= s->mb_y= 0;
02396     s->closed_gop= 0;
02397 
02398     s->parse_context.state= -1;
02399     s->parse_context.frame_start_found= 0;
02400     s->parse_context.overread= 0;
02401     s->parse_context.overread_index= 0;
02402     s->parse_context.index= 0;
02403     s->parse_context.last_index= 0;
02404     s->bitstream_buffer_size=0;
02405     s->pp_time=0;
02406 }
02407 
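/* The dct_unquantize_* functions below implement inverse quantization for the
 * MPEG-1, MPEG-2 and H.263/MPEG-4 style codecs; they are reached through the
 * s->dct_unquantize_intra / s->dct_unquantize_inter function pointers used above. */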
02408 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02409                                    DCTELEM *block, int n, int qscale)
02410 {
02411     int i, level, nCoeffs;
02412     const uint16_t *quant_matrix;
02413 
02414     nCoeffs= s->block_last_index[n];
02415 
02416     if (n < 4)
02417         block[0] = block[0] * s->y_dc_scale;
02418     else
02419         block[0] = block[0] * s->c_dc_scale;
02420     
02421     quant_matrix = s->intra_matrix;
02422     for(i=1;i<=nCoeffs;i++) {
02423         int j= s->intra_scantable.permutated[i];
02424         level = block[j];
02425         if (level) {
02426             if (level < 0) {
02427                 level = -level;
02428                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02429                 level = (level - 1) | 1;
02430                 level = -level;
02431             } else {
02432                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02433                 level = (level - 1) | 1;
02434             }
02435             block[j] = level;
02436         }
02437     }
02438 }
02439 
02440 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02441                                    DCTELEM *block, int n, int qscale)
02442 {
02443     int i, level, nCoeffs;
02444     const uint16_t *quant_matrix;
02445 
02446     nCoeffs= s->block_last_index[n];
02447 
02448     quant_matrix = s->inter_matrix;
02449     for(i=0; i<=nCoeffs; i++) {
02450         int j= s->intra_scantable.permutated[i];
02451         level = block[j];
02452         if (level) {
02453             if (level < 0) {
02454                 level = -level;
02455                 level = (((level << 1) + 1) * qscale *
02456                          ((int) (quant_matrix[j]))) >> 4;
02457                 level = (level - 1) | 1;
02458                 level = -level;
02459             } else {
02460                 level = (((level << 1) + 1) * qscale *
02461                          ((int) (quant_matrix[j]))) >> 4;
02462                 level = (level - 1) | 1;
02463             }
02464             block[j] = level;
02465         }
02466     }
02467 }
02468 
02469 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02470                                    DCTELEM *block, int n, int qscale)
02471 {
02472     int i, level, nCoeffs;
02473     const uint16_t *quant_matrix;
02474 
02475     if(s->alternate_scan) nCoeffs= 63;
02476     else nCoeffs= s->block_last_index[n];
02477 
02478     if (n < 4)
02479         block[0] = block[0] * s->y_dc_scale;
02480     else
02481         block[0] = block[0] * s->c_dc_scale;
02482     quant_matrix = s->intra_matrix;
02483     for(i=1;i<=nCoeffs;i++) {
02484         int j= s->intra_scantable.permutated[i];
02485         level = block[j];
02486         if (level) {
02487             if (level < 0) {
02488                 level = -level;
02489                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02490                 level = -level;
02491             } else {
02492                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02493             }
02494             block[j] = level;
02495         }
02496     }
02497 }
02498 
02499 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02500                                    DCTELEM *block, int n, int qscale)
02501 {
02502     int i, level, nCoeffs;
02503     const uint16_t *quant_matrix;
02504     int sum=-1;
02505 
02506     if(s->alternate_scan) nCoeffs= 63;
02507     else nCoeffs= s->block_last_index[n];
02508 
02509     if (n < 4)
02510         block[0] = block[0] * s->y_dc_scale;
02511     else
02512         block[0] = block[0] * s->c_dc_scale;
02513     quant_matrix = s->intra_matrix;
02514     for(i=1;i<=nCoeffs;i++) {
02515         int j= s->intra_scantable.permutated[i];
02516         level = block[j];
02517         if (level) {
02518             if (level < 0) {
02519                 level = -level;
02520                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02521                 level = -level;
02522             } else {
02523                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02524             }
02525             block[j] = level;
02526             sum+=level;
02527         }
02528     }
02529     block[63]^=sum&1;
02530 }
02531 
02532 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02533                                    DCTELEM *block, int n, int qscale)
02534 {
02535     int i, level, nCoeffs;
02536     const uint16_t *quant_matrix;
02537     int sum=-1;
02538 
02539     if(s->alternate_scan) nCoeffs= 63;
02540     else nCoeffs= s->block_last_index[n];
02541 
02542     quant_matrix = s->inter_matrix;
02543     for(i=0; i<=nCoeffs; i++) {
02544         int j= s->intra_scantable.permutated[i];
02545         level = block[j];
02546         if (level) {
02547             if (level < 0) {
02548                 level = -level;
02549                 level = (((level << 1) + 1) * qscale *
02550                          ((int) (quant_matrix[j]))) >> 4;
02551                 level = -level;
02552             } else {
02553                 level = (((level << 1) + 1) * qscale *
02554                          ((int) (quant_matrix[j]))) >> 4;
02555             }
02556             block[j] = level;
02557             sum+=level;
02558         }
02559     }
02560     block[63]^=sum&1;
02561 }
02562 
02563 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02564                                   DCTELEM *block, int n, int qscale)
02565 {
02566     int i, level, qmul, qadd;
02567     int nCoeffs;
02568 
02569     assert(s->block_last_index[n]>=0);
02570 
02571     qmul = qscale << 1;
02572 
02573     if (!s->h263_aic) {
02574         if (n < 4)
02575             block[0] = block[0] * s->y_dc_scale;
02576         else
02577             block[0] = block[0] * s->c_dc_scale;
02578         qadd = (qscale - 1) | 1;
02579     }else{
02580         qadd = 0;
02581     }
02582     if(s->ac_pred)
02583         nCoeffs=63;
02584     else
02585         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02586 
02587     for(i=1; i<=nCoeffs; i++) {
02588         level = block[i];
02589         if (level) {
02590             if (level < 0) {
02591                 level = level * qmul - qadd;
02592             } else {
02593                 level = level * qmul + qadd;
02594             }
02595             block[i] = level;
02596         }
02597     }
02598 }
02599 
02600 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02601                                   DCTELEM *block, int n, int qscale)
02602 {
02603     int i, level, qmul, qadd;
02604     int nCoeffs;
02605 
02606     assert(s->block_last_index[n]>=0);
02607 
02608     qadd = (qscale - 1) | 1;
02609     qmul = qscale << 1;
02610 
02611     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02612 
02613     for(i=0; i<=nCoeffs; i++) {
02614         level = block[i];
02615         if (level) {
02616             if (level < 0) {
02617                 level = level * qmul - qadd;
02618             } else {
02619                 level = level * qmul + qadd;
02620             }
02621             block[i] = level;
02622         }
02623     }
02624 }
02625 
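/* Set a new qscale (clipped to 1..31) and update the derived chroma qscale
 * and the luma/chroma DC scale factors. */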
02629 void ff_set_qscale(MpegEncContext * s, int qscale)
02630 {
02631     if (qscale < 1)
02632         qscale = 1;
02633     else if (qscale > 31)
02634         qscale = 31;
02635 
02636     s->qscale = qscale;
02637     s->chroma_qscale= s->chroma_qscale_table[qscale];
02638 
02639     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02640     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02641 }
02642 
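/* Report to the frame-threading code how many macroblock rows of the current
 * picture have been decoded; skipped for B-frames, partitioned frames and
 * after a decoding error. */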
02643 void MPV_report_decode_progress(MpegEncContext *s)
02644 {
02645     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
02646         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02647 }