00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mjpegenc.h"
00037 #include "msmpeg4.h"
00038 #include "xvmc_internal.h"
00039 #include "thread.h"
00040 #include <limits.h>
00041
00042
00043
00044
/* Forward declarations of the portable C reference implementations of the
 * per-standard inverse quantizers.  Pointers to these are installed into
 * MpegEncContext by ff_dct_common_init() and may subsequently be replaced
 * by arch-optimized versions (x86/ARM/Altivec/...). */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
00059
00060
00061
00062
00063
00064
00065
00066
/* Default luma->chroma qscale mapping: the identity (chroma quantizer equals
 * luma quantizer for every qscale value 0..31). */
static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
00072
/* DC coefficient scale, indexed by qscale (0..127): a constant 8 for the
 * MPEG-1 style coding path, independent of the quantizer. */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
00084
/* MPEG-2 DC scale table, constant 4 for all qscale values — entry 1 of
 * ff_mpeg2_dc_scale_table[] (presumably selected by intra_dc_precision;
 * each successive table halves the scale — TODO confirm against the
 * MPEG-2 header parser). */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
00096
/* MPEG-2 DC scale table, constant 2 — entry 2 of ff_mpeg2_dc_scale_table[]. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
00108
/* MPEG-2 DC scale table, constant 1 (no scaling) — entry 3 of
 * ff_mpeg2_dc_scale_table[]. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
00120
/* Selector over the four DC scale tables above (scales 8, 4, 2, 1);
 * index 0 reuses the MPEG-1 constant-8 table. */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
00127
/* PIX_FMT_NONE-terminated list: plain software YUV 4:2:0 only. */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00132
/* PIX_FMT_NONE-terminated list of 4:2:0 formats with hwaccel variants
 * first (DXVA2/VAAPI/VDA), software YUV420P as the fallback. */
const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_VDA_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00140
/**
 * Scan [p, end) for an MPEG start code (the byte sequence 00 00 01 xx),
 * carrying partial-match state across calls in *state.
 *
 * @param p     current read position
 * @param end   one past the last readable byte
 * @param state in/out: the last four bytes seen, as a big-endian uint32;
 *              lets a start code straddling two buffers be detected
 * @return pointer just past the start code, or @p end if none was found
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *av_restrict p,
                                          const uint8_t *end,
                                          uint32_t *av_restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* First feed up to 3 bytes through *state so a start code that was
     * split across the previous buffer boundary is still caught:
     * tmp == 0x100 means the previous state's low 24 bits were 00 00 01. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Skip ahead based on the last bytes read: if p[-1] > 1 none of the
     * three byte positions ending here can be the '01' of a start code,
     * so 3 bytes can be skipped at once; similar reasoning for the other
     * branches.  The final else means p[-3..-1] == 00 00 01: found. */
    while (p < end) {
        if (p[-1] > 1 ) p += 3;
        else if (p[-2] ) p += 2;
        else if (p[-3]|(p[-1]-1)) p++;
        else {
            p++;
            break;
        }
    }

    /* Reload *state with the 4 bytes ending at the (clamped) position so
     * the next call resumes correctly even when no code was found. */
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
00173
00174
00175 av_cold int ff_dct_common_init(MpegEncContext *s)
00176 {
00177 ff_dsputil_init(&s->dsp, s->avctx);
00178
00179 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
00180 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
00181 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
00182 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
00183 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
00184 if (s->flags & CODEC_FLAG_BITEXACT)
00185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
00186 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
00187
00188 #if ARCH_X86
00189 ff_MPV_common_init_x86(s);
00190 #elif ARCH_ALPHA
00191 ff_MPV_common_init_axp(s);
00192 #elif HAVE_MMI
00193 ff_MPV_common_init_mmi(s);
00194 #elif ARCH_ARM
00195 ff_MPV_common_init_arm(s);
00196 #elif HAVE_ALTIVEC
00197 ff_MPV_common_init_altivec(s);
00198 #elif ARCH_BFIN
00199 ff_MPV_common_init_bfin(s);
00200 #endif
00201
00202
00203
00204
00205 if (s->alternate_scan) {
00206 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
00207 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
00208 } else {
00209 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
00210 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
00211 }
00212 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
00213 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
00214
00215 return 0;
00216 }
00217
00218 void ff_copy_picture(Picture *dst, Picture *src)
00219 {
00220 *dst = *src;
00221 dst->f.type = FF_BUFFER_TYPE_COPY;
00222 }
00223
00227 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00228 {
00229
00230
00231 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
00232 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
00233 s->codec_id != AV_CODEC_ID_MSS2)
00234 ff_thread_release_buffer(s->avctx, &pic->f);
00235 else
00236 avcodec_default_release_buffer(s->avctx, &pic->f);
00237 av_freep(&pic->f.hwaccel_picture_private);
00238 }
00239
/**
 * Allocate a frame buffer for @p pic through the get_buffer mechanism and
 * validate the strides the callback returned.
 *
 * On failure, any hwaccel private data allocated here is freed again and a
 * successfully obtained buffer is released.
 * @return 0 on success, -1 on failure
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    /* WMV3IMAGE/VC1IMAGE/MSS2 must use the default (non frame-threaded)
     * buffer callbacks; everything else goes through the thread wrapper. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE &&
        s->codec_id != AV_CODEC_ID_MSS2)
        r = ff_thread_get_buffer(s->avctx, &pic->f);
    else
        r = avcodec_default_get_buffer(s->avctx, &pic->f);

    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
               r, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    /* The decoder assumes the strides chosen for the first frame stay
     * constant for the whole sequence; reject a buffer that differs. */
    if (s->linesize && (s->linesize != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* U and V must share one stride. */
    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
00289
00294 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
00295 {
00296 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
00297
00298
00299
00300 const int mb_array_size = s->mb_stride * s->mb_height;
00301 const int b8_array_size = s->b8_stride * s->mb_height * 2;
00302 const int b4_array_size = s->b4_stride * s->mb_height * 4;
00303 int i;
00304 int r = -1;
00305
00306 if (shared) {
00307 assert(pic->f.data[0]);
00308 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
00309 pic->f.type = FF_BUFFER_TYPE_SHARED;
00310 } else {
00311 assert(!pic->f.data[0]);
00312
00313 if (alloc_frame_buffer(s, pic) < 0)
00314 return -1;
00315
00316 s->linesize = pic->f.linesize[0];
00317 s->uvlinesize = pic->f.linesize[1];
00318 }
00319
00320 if (pic->f.qscale_table == NULL) {
00321 if (s->encoding) {
00322 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
00323 mb_array_size * sizeof(int16_t), fail)
00324 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
00325 mb_array_size * sizeof(int16_t), fail)
00326 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
00327 mb_array_size * sizeof(int8_t ), fail)
00328 }
00329
00330 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
00331 mb_array_size * sizeof(uint8_t) + 2, fail)
00332 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
00333 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
00334 fail)
00335 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
00336 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
00337 fail)
00338 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
00339 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
00340 if (s->out_format == FMT_H264) {
00341 for (i = 0; i < 2; i++) {
00342 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00343 2 * (b4_array_size + 4) * sizeof(int16_t),
00344 fail)
00345 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00346 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00347 4 * mb_array_size * sizeof(uint8_t), fail)
00348 }
00349 pic->f.motion_subsample_log2 = 2;
00350 } else if (s->out_format == FMT_H263 || s->encoding ||
00351 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
00352 for (i = 0; i < 2; i++) {
00353 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00354 2 * (b8_array_size + 4) * sizeof(int16_t),
00355 fail)
00356 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00357 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00358 4 * mb_array_size * sizeof(uint8_t), fail)
00359 }
00360 pic->f.motion_subsample_log2 = 3;
00361 }
00362 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00363 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
00364 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
00365 }
00366 pic->f.qstride = s->mb_stride;
00367 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
00368 1 * sizeof(AVPanScan), fail)
00369 }
00370
00371 pic->owner2 = s;
00372
00373 return 0;
00374 fail:
00375 if (r >= 0)
00376 free_frame_buffer(s, pic);
00377 return -1;
00378 }
00379
00383 static void free_picture(MpegEncContext *s, Picture *pic)
00384 {
00385 int i;
00386
00387 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
00388 free_frame_buffer(s, pic);
00389 }
00390
00391 av_freep(&pic->mb_var);
00392 av_freep(&pic->mc_mb_var);
00393 av_freep(&pic->mb_mean);
00394 av_freep(&pic->f.mbskip_table);
00395 av_freep(&pic->qscale_table_base);
00396 pic->f.qscale_table = NULL;
00397 av_freep(&pic->mb_type_base);
00398 pic->f.mb_type = NULL;
00399 av_freep(&pic->f.dct_coeff);
00400 av_freep(&pic->f.pan_scan);
00401 pic->f.mb_type = NULL;
00402 for (i = 0; i < 2; i++) {
00403 av_freep(&pic->motion_val_base[i]);
00404 av_freep(&pic->f.ref_index[i]);
00405 pic->f.motion_val[i] = NULL;
00406 }
00407
00408 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
00409 for (i = 0; i < 4; i++) {
00410 pic->f.base[i] =
00411 pic->f.data[i] = NULL;
00412 }
00413 pic->f.type = 0;
00414 }
00415 }
00416
/**
 * Allocate the per-slice/per-thread scratch buffers of a context
 * (edge emulation buffer, ME scratchpads, DCT blocks, H.263 AC values).
 * (The @p base parameter is not used by this implementation.)
 * @return 0 on success, -1 if any allocation failed
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    /* NOTE(review): the size formulas below look like worst-case rows of
     * edge/interpolation workspace; treat the constants as load-bearing
     * and do not shrink them without auditing all users. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
                      (s->width + 95) * 2 * 21 * 4, fail);

    /* One shared scratch area, aliased for the different users below. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
                      (s->width + 95) * 4 * 16 * 2 * sizeof(uint8_t), fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;
    if (s->encoding) {
        /* motion-estimation hash map + scores */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 coefficients, double-buffered */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* AC prediction values: one luma plane followed by two chroma
         * planes, each row padded by one extra position. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1;
}
00467
00468 static void free_duplicate_context(MpegEncContext *s)
00469 {
00470 if (s == NULL)
00471 return;
00472
00473 av_freep(&s->edge_emu_buffer);
00474 av_freep(&s->me.scratchpad);
00475 s->me.temp =
00476 s->rd_scratchpad =
00477 s->b_scratchpad =
00478 s->obmc_scratchpad = NULL;
00479
00480 av_freep(&s->dct_error_sum);
00481 av_freep(&s->me.map);
00482 av_freep(&s->me.score_map);
00483 av_freep(&s->blocks);
00484 av_freep(&s->ac_val_base);
00485 s->block = NULL;
00486 }
00487
00488 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
00489 {
00490 #define COPY(a) bak->a = src->a
00491 COPY(edge_emu_buffer);
00492 COPY(me.scratchpad);
00493 COPY(me.temp);
00494 COPY(rd_scratchpad);
00495 COPY(b_scratchpad);
00496 COPY(obmc_scratchpad);
00497 COPY(me.map);
00498 COPY(me.score_map);
00499 COPY(blocks);
00500 COPY(block);
00501 COPY(start_mb_y);
00502 COPY(end_mb_y);
00503 COPY(me.map_generation);
00504 COPY(pb);
00505 COPY(dct_error_sum);
00506 COPY(dct_count[0]);
00507 COPY(dct_count[1]);
00508 COPY(ac_val_base);
00509 COPY(ac_val[0]);
00510 COPY(ac_val[1]);
00511 COPY(ac_val[2]);
00512 #undef COPY
00513 }
00514
/**
 * Overwrite @p dst with the state of @p src while preserving dst's own
 * per-thread buffers/pointers (the fields listed in
 * backup_duplicate_context()), then re-point dst's pblocks[] into its own
 * block array.
 */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i;

    /* save dst's private fields, clone src wholesale, restore them */
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        /* pblocks must reference dst's own block array, not src's */
        dst->pblocks[i] = &dst->block[i];
    }
}
00530
00531 int ff_mpeg_update_thread_context(AVCodecContext *dst,
00532 const AVCodecContext *src)
00533 {
00534 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
00535
00536 if (dst == src)
00537 return 0;
00538
00539
00540
00541 if (!s->context_initialized) {
00542 memcpy(s, s1, sizeof(MpegEncContext));
00543
00544 s->avctx = dst;
00545 s->bitstream_buffer = NULL;
00546 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
00547
00548 if (s1->context_initialized){
00549 s->picture_range_start += MAX_PICTURE_COUNT;
00550 s->picture_range_end += MAX_PICTURE_COUNT;
00551 ff_MPV_common_init(s);
00552 }
00553 }
00554
00555 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
00556 int err;
00557 s->context_reinit = 0;
00558 s->height = s1->height;
00559 s->width = s1->width;
00560 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
00561 return err;
00562 }
00563
00564 s->avctx->coded_height = s1->avctx->coded_height;
00565 s->avctx->coded_width = s1->avctx->coded_width;
00566 s->avctx->width = s1->avctx->width;
00567 s->avctx->height = s1->avctx->height;
00568
00569 s->coded_picture_number = s1->coded_picture_number;
00570 s->picture_number = s1->picture_number;
00571 s->input_picture_number = s1->input_picture_number;
00572
00573 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
00574 memcpy(&s->last_picture, &s1->last_picture,
00575 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
00576
00577 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
00578 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
00579 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
00580
00581
00582 s->next_p_frame_damaged = s1->next_p_frame_damaged;
00583 s->workaround_bugs = s1->workaround_bugs;
00584 s->padding_bug_score = s1->padding_bug_score;
00585
00586
00587 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
00588 (char *) &s1->shape - (char *) &s1->time_increment_bits);
00589
00590
00591 s->max_b_frames = s1->max_b_frames;
00592 s->low_delay = s1->low_delay;
00593 s->dropable = s1->dropable;
00594
00595
00596 s->divx_packed = s1->divx_packed;
00597
00598 if (s1->bitstream_buffer) {
00599 if (s1->bitstream_buffer_size +
00600 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
00601 av_fast_malloc(&s->bitstream_buffer,
00602 &s->allocated_bitstream_buffer_size,
00603 s1->allocated_bitstream_buffer_size);
00604 s->bitstream_buffer_size = s1->bitstream_buffer_size;
00605 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
00606 s1->bitstream_buffer_size);
00607 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
00608 FF_INPUT_BUFFER_PADDING_SIZE);
00609 }
00610
00611
00612 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
00613 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
00614
00615 if (!s1->first_field) {
00616 s->last_pict_type = s1->pict_type;
00617 if (s1->current_picture_ptr)
00618 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
00619
00620 if (s1->pict_type != AV_PICTURE_TYPE_B) {
00621 s->last_non_b_pict_type = s1->pict_type;
00622 }
00623 }
00624
00625 return 0;
00626 }
00627
00634 void ff_MPV_common_defaults(MpegEncContext *s)
00635 {
00636 s->y_dc_scale_table =
00637 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
00638 s->chroma_qscale_table = ff_default_chroma_qscale_table;
00639 s->progressive_frame = 1;
00640 s->progressive_sequence = 1;
00641 s->picture_structure = PICT_FRAME;
00642
00643 s->coded_picture_number = 0;
00644 s->picture_number = 0;
00645 s->input_picture_number = 0;
00646
00647 s->picture_in_gop_number = 0;
00648
00649 s->f_code = 1;
00650 s->b_code = 1;
00651
00652 s->picture_range_start = 0;
00653 s->picture_range_end = MAX_PICTURE_COUNT;
00654
00655 s->slice_context_count = 1;
00656 }
00657
/**
 * Set context defaults for decoding; currently identical to the shared
 * encoder/decoder defaults.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    ff_MPV_common_defaults(s);
}
00667
/**
 * Initialize and allocate the frame-size dependent parts of a context:
 * macroblock geometry, index tables, per-MB side tables and (for the
 * encoder) the motion-vector tables.  Counterpart of free_context_frame().
 * @return 0 on success, AVERROR(ENOMEM) if any allocation failed
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* strides include one extra column of padding */
    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* default edge positions; headers may override these later */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    /* block index -> row stride: 8x8 blocks use b8_stride, chroma uses
     * mb_stride */
    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* linear macroblock index -> position in mb_stride units */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail);
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    /* sentinel entry one past the last macroblock */
    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width;

    if (s->encoding) {
        /* motion-vector tables; the +mb_stride+1 offsets skip the padding
         * row/column so MB (0,0) is directly addressable */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* macroblock type / rate-control side tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail)

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    /* error-resilience work buffers */
    FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
                     mb_array_size * sizeof(uint8_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
                      mb_array_size * sizeof(uint8_t), fail);

    if (s->codec_id == AV_CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced (field) motion: per-direction, per-field MV tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* CBPY-style coded-block flags, one byte per 8x8 luma block */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* coded block pattern / AC prediction direction tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* DC prediction values: luma plane followed by two chroma planes */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is presumably the DC prediction reset value — TODO confirm
         * against the H.263/MPEG-4 DC prediction code */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* mark everything as intra initially */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* skipped-MB flags (+2 bytes of slack) */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);

    return 0;
fail:
    return AVERROR(ENOMEM);
}
00792
00797 av_cold int ff_MPV_common_init(MpegEncContext *s)
00798 {
00799 int i, err;
00800 int nb_slices = (HAVE_THREADS &&
00801 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
00802 s->avctx->thread_count : 1;
00803
00804 if (s->encoding && s->avctx->slices)
00805 nb_slices = s->avctx->slices;
00806
00807 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
00808 s->mb_height = (s->height + 31) / 32 * 2;
00809 else if (s->codec_id != AV_CODEC_ID_H264)
00810 s->mb_height = (s->height + 15) / 16;
00811
00812 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
00813 av_log(s->avctx, AV_LOG_ERROR,
00814 "decoding to PIX_FMT_NONE is not supported.\n");
00815 return -1;
00816 }
00817
00818 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
00819 int max_slices;
00820 if (s->mb_height)
00821 max_slices = FFMIN(MAX_THREADS, s->mb_height);
00822 else
00823 max_slices = MAX_THREADS;
00824 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
00825 " reducing to %d\n", nb_slices, max_slices);
00826 nb_slices = max_slices;
00827 }
00828
00829 if ((s->width || s->height) &&
00830 av_image_check_size(s->width, s->height, 0, s->avctx))
00831 return -1;
00832
00833 ff_dct_common_init(s);
00834
00835 s->flags = s->avctx->flags;
00836 s->flags2 = s->avctx->flags2;
00837
00838
00839 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
00840 &s->chroma_y_shift);
00841
00842
00843 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
00844 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
00845
00846 s->avctx->coded_frame = &s->current_picture.f;
00847
00848 if (s->encoding) {
00849 if (s->msmpeg4_version) {
00850 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
00851 2 * 2 * (MAX_LEVEL + 1) *
00852 (MAX_RUN + 1) * 2 * sizeof(int), fail);
00853 }
00854 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
00855
00856 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
00857 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
00858 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
00859 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
00860 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
00861 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
00862 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
00863 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
00864
00865 if (s->avctx->noise_reduction) {
00866 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
00867 }
00868 }
00869
00870 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
00871 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
00872 s->picture_count * sizeof(Picture), fail);
00873 for (i = 0; i < s->picture_count; i++) {
00874 avcodec_get_frame_defaults(&s->picture[i].f);
00875 }
00876
00877 if ((err = init_context_frame(s)))
00878 goto fail;
00879
00880 s->parse_context.state = -1;
00881
00882 s->context_initialized = 1;
00883 s->thread_context[0] = s;
00884
00885
00886 if (nb_slices > 1) {
00887 for (i = 1; i < nb_slices; i++) {
00888 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
00889 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
00890 }
00891
00892 for (i = 0; i < nb_slices; i++) {
00893 if (init_duplicate_context(s->thread_context[i], s) < 0)
00894 goto fail;
00895 s->thread_context[i]->start_mb_y =
00896 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
00897 s->thread_context[i]->end_mb_y =
00898 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
00899 }
00900 } else {
00901 if (init_duplicate_context(s, s) < 0)
00902 goto fail;
00903 s->start_mb_y = 0;
00904 s->end_mb_y = s->mb_height;
00905 }
00906 s->slice_context_count = nb_slices;
00907
00908
00909 return 0;
00910 fail:
00911 ff_MPV_common_end(s);
00912 return -1;
00913 }
00914
00920 static int free_context_frame(MpegEncContext *s)
00921 {
00922 int i, j, k;
00923
00924 av_freep(&s->mb_type);
00925 av_freep(&s->p_mv_table_base);
00926 av_freep(&s->b_forw_mv_table_base);
00927 av_freep(&s->b_back_mv_table_base);
00928 av_freep(&s->b_bidir_forw_mv_table_base);
00929 av_freep(&s->b_bidir_back_mv_table_base);
00930 av_freep(&s->b_direct_mv_table_base);
00931 s->p_mv_table = NULL;
00932 s->b_forw_mv_table = NULL;
00933 s->b_back_mv_table = NULL;
00934 s->b_bidir_forw_mv_table = NULL;
00935 s->b_bidir_back_mv_table = NULL;
00936 s->b_direct_mv_table = NULL;
00937 for (i = 0; i < 2; i++) {
00938 for (j = 0; j < 2; j++) {
00939 for (k = 0; k < 2; k++) {
00940 av_freep(&s->b_field_mv_table_base[i][j][k]);
00941 s->b_field_mv_table[i][j][k] = NULL;
00942 }
00943 av_freep(&s->b_field_select_table[i][j]);
00944 av_freep(&s->p_field_mv_table_base[i][j]);
00945 s->p_field_mv_table[i][j] = NULL;
00946 }
00947 av_freep(&s->p_field_select_table[i]);
00948 }
00949
00950 av_freep(&s->dc_val_base);
00951 av_freep(&s->coded_block_base);
00952 av_freep(&s->mbintra_table);
00953 av_freep(&s->cbp_table);
00954 av_freep(&s->pred_dir_table);
00955
00956 av_freep(&s->mbskip_table);
00957
00958 av_freep(&s->error_status_table);
00959 av_freep(&s->er_temp_buffer);
00960 av_freep(&s->mb_index2xy);
00961 av_freep(&s->lambda_table);
00962
00963 av_freep(&s->cplx_tab);
00964 av_freep(&s->bits_tab);
00965
00966 s->linesize = s->uvlinesize = 0;
00967
00968 for (i = 0; i < 3; i++)
00969 av_freep(&s->visualization_buffer[i]);
00970
00971 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
00972 avcodec_default_free_buffers(s->avctx);
00973
00974 return 0;
00975 }
00976
00977 int ff_MPV_common_frame_size_change(MpegEncContext *s)
00978 {
00979 int i, err = 0;
00980
00981 if (s->slice_context_count > 1) {
00982 for (i = 0; i < s->slice_context_count; i++) {
00983 free_duplicate_context(s->thread_context[i]);
00984 }
00985 for (i = 1; i < s->slice_context_count; i++) {
00986 av_freep(&s->thread_context[i]);
00987 }
00988 } else
00989 free_duplicate_context(s);
00990
00991 free_context_frame(s);
00992
00993 if (s->picture)
00994 for (i = 0; i < s->picture_count; i++) {
00995 s->picture[i].needs_realloc = 1;
00996 }
00997
00998 s->last_picture_ptr =
00999 s->next_picture_ptr =
01000 s->current_picture_ptr = NULL;
01001
01002
01003 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
01004 s->mb_height = (s->height + 31) / 32 * 2;
01005 else if (s->codec_id != AV_CODEC_ID_H264)
01006 s->mb_height = (s->height + 15) / 16;
01007
01008 if ((s->width || s->height) &&
01009 av_image_check_size(s->width, s->height, 0, s->avctx))
01010 return AVERROR_INVALIDDATA;
01011
01012 if ((err = init_context_frame(s)))
01013 goto fail;
01014
01015 s->thread_context[0] = s;
01016
01017 if (s->width && s->height) {
01018 int nb_slices = s->slice_context_count;
01019 if (nb_slices > 1) {
01020 for (i = 1; i < nb_slices; i++) {
01021 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
01022 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
01023 }
01024
01025 for (i = 0; i < nb_slices; i++) {
01026 if (init_duplicate_context(s->thread_context[i], s) < 0)
01027 goto fail;
01028 s->thread_context[i]->start_mb_y =
01029 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
01030 s->thread_context[i]->end_mb_y =
01031 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
01032 }
01033 } else {
01034 if (init_duplicate_context(s, s) < 0)
01035 goto fail;
01036 s->start_mb_y = 0;
01037 s->end_mb_y = s->mb_height;
01038 }
01039 s->slice_context_count = nb_slices;
01040 }
01041
01042 return 0;
01043 fail:
01044 ff_MPV_common_end(s);
01045 return err;
01046 }
01047
01048
/**
 * Free all context buffers and reset the context to an uninitialized state.
 * Safe to call on a partially initialized context (used as error cleanup).
 */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        /* slot 0 is s itself, so only slots >= 1 are heap-allocated */
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    /* The chroma matrices may alias the luma ones; free them only when they
     * are distinct allocations, and do this BEFORE freeing q_intra_matrix
     * below so the comparison still sees the original pointers. */
    if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix=   NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* Thread copies must not free picture buffers they do not own. */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);

    free_context_frame(s);

    s->context_initialized = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
}
01099
/**
 * Initialize the derived lookup tables (max_level, max_run, index_run) of a
 * run-level table, separately for the not-last (last==0) and last (last==1)
 * halves of the code table.
 *
 * @param static_store if non-NULL, a static buffer laid out as
 *                     [max_level (MAX_RUN+1)][max_run (MAX_LEVEL+1)]
 *                     [index_run (MAX_RUN+1)] per half; otherwise the three
 *                     tables are av_malloc'ed individually.
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* already initialized? (static tables are shared between instances) */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] for each half */
    for (last = 0; last < 2; last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* rl->n marks "no code with this run" in index_run */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)   /* first code with this run */
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* NOTE(review): av_malloc() results are used unchecked here, matching
         * the rest of this file's (legacy) allocation style. */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
01151
01152 void ff_init_vlc_rl(RLTable *rl)
01153 {
01154 int i, q;
01155
01156 for (q = 0; q < 32; q++) {
01157 int qmul = q * 2;
01158 int qadd = (q - 1) | 1;
01159
01160 if (q == 0) {
01161 qmul = 1;
01162 qadd = 0;
01163 }
01164 for (i = 0; i < rl->vlc.table_size; i++) {
01165 int code = rl->vlc.table[i][0];
01166 int len = rl->vlc.table[i][1];
01167 int level, run;
01168
01169 if (len == 0) {
01170 run = 66;
01171 level = MAX_LEVEL;
01172 } else if (len < 0) {
01173 run = 0;
01174 level = code;
01175 } else {
01176 if (code == rl->n) {
01177 run = 66;
01178 level = 0;
01179 } else {
01180 run = rl->table_run[code] + 1;
01181 level = rl->table_level[code] * qmul + qadd;
01182 if (code >= rl->last) run += 192;
01183 }
01184 }
01185 rl->rl_vlc[q][i].len = len;
01186 rl->rl_vlc[q][i].level = level;
01187 rl->rl_vlc[q][i].run = run;
01188 }
01189 }
01190 }
01191
01192 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
01193 {
01194 int i;
01195
01196
01197 for (i = 0; i < s->picture_count; i++) {
01198 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
01199 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
01200 (remove_current || &s->picture[i] != s->current_picture_ptr)
01201 ) {
01202 free_frame_buffer(s, &s->picture[i]);
01203 }
01204 }
01205 }
01206
01207 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
01208 {
01209 if (pic->f.data[0] == NULL)
01210 return 1;
01211 if (pic->needs_realloc)
01212 if (!pic->owner2 || pic->owner2 == s)
01213 return 1;
01214 return 0;
01215 }
01216
01217 static int find_unused_picture(MpegEncContext *s, int shared)
01218 {
01219 int i;
01220
01221 if (shared) {
01222 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01223 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
01224 return i;
01225 }
01226 } else {
01227 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01228 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
01229 return i;
01230 }
01231 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01232 if (pic_is_unused(s, &s->picture[i]))
01233 return i;
01234 }
01235 }
01236
01237 av_log(s->avctx, AV_LOG_FATAL,
01238 "Internal error, picture buffer overflow\n");
01239
01240
01241
01242
01243
01244
01245
01246
01247
01248
01249
01250 abort();
01251 return -1;
01252 }
01253
01254 int ff_find_unused_picture(MpegEncContext *s, int shared)
01255 {
01256 int ret = find_unused_picture(s, shared);
01257
01258 if (ret >= 0 && ret < s->picture_range_end) {
01259 if (s->picture[ret].needs_realloc) {
01260 s->picture[ret].needs_realloc = 0;
01261 free_picture(s, &s->picture[ret]);
01262 avcodec_get_frame_defaults(&s->picture[ret].f);
01263 }
01264 }
01265 return ret;
01266 }
01267
01268 static void update_noise_reduction(MpegEncContext *s)
01269 {
01270 int intra, i;
01271
01272 for (intra = 0; intra < 2; intra++) {
01273 if (s->dct_count[intra] > (1 << 16)) {
01274 for (i = 0; i < 64; i++) {
01275 s->dct_error_sum[intra][i] >>= 1;
01276 }
01277 s->dct_count[intra] >>= 1;
01278 }
01279
01280 for (i = 0; i < 64; i++) {
01281 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
01282 s->dct_count[intra] +
01283 s->dct_error_sum[intra][i] / 2) /
01284 (s->dct_error_sum[intra][i] + 1);
01285 }
01286 }
01287 }
01288
/**
 * Generic per-frame setup, called after the header has been parsed/written
 * and before the frame is coded/decoded: releases old frames, allocates the
 * current picture, maintains last/next picture pointers (allocating dummy
 * references when a stream starts on a non-keyframe), and selects the
 * dequantizer functions.
 *
 * @return 0 on success, negative on error.
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    if (!ff_thread_can_start_frame(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* mark & release old frames */
    if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
            s->last_picture_ptr != s->next_picture_ptr &&
            s->last_picture_ptr->f.data[0]) {
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);
        }

        /* release forgotten ("zombie") pictures that are still flagged as
         * references but are neither last nor next picture (decode only) */
        if (!s->encoding) {
            for (i = 0; i < s->picture_count; i++) {
                if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
                    &s->picture[i] != s->last_picture_ptr &&
                    &s->picture[i] != s->next_picture_ptr &&
                    s->picture[i].f.reference && !s->picture[i].needs_realloc) {
                    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                        av_log(avctx, AV_LOG_ERROR,
                               "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);
                }
            }
        }
    }

    if (!s->encoding) {
        ff_release_unused_pictures(s, 1);

        /* reuse the current picture slot if its buffer was already freed
         * (e.g. after a flush), otherwise grab a fresh slot */
        if (s->current_picture_ptr &&
            s->current_picture_ptr->f.data[0] == NULL) {
            pic = s->current_picture_ptr;
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            pic = &s->picture[i];
        }

        pic->f.reference = 0;
        if (!s->dropable) {
            if (s->codec_id == AV_CODEC_ID_H264)
                pic->f.reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->f.reference = 3;   /* both fields are references */
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if (ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr = pic;
        /* field-coded MPEG-1/2 derives top_field_first from the order the
         * fields arrive in */
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            if (s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first =
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                     !s->progressive_sequence;
        s->current_picture_ptr->field_picture      = s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;

    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->dropable)
            s->next_picture_ptr = s->current_picture_ptr;
    }

    /* If the stream starts on a P/B frame (or a field-based keyframe), we
     * need a dummy, gray last (and possibly next) picture to predict from
     * so decoding can proceed. H.264 handles references itself. */
    if (s->codec_id != AV_CODEC_ID_H264) {
        if ((s->last_picture_ptr == NULL ||
             s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type != AV_PICTURE_TYPE_I ||
             s->picture_structure != PICT_FRAME)) {
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR,
                       "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO,
                       "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            s->last_picture_ptr = &s->picture[i];
            s->last_picture_ptr->f.key_frame = 0;
            if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
                s->last_picture_ptr = NULL;
                return -1;
            }

            /* fill the luma plane with mid-gray for codecs whose error
             * concealment would otherwise show garbage */
            if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
                for(i=0; i<avctx->height; i++)
                    memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
            }

            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
            s->last_picture_ptr->f.reference = 3;
        }
        if ((s->next_picture_ptr == NULL ||
             s->next_picture_ptr->f.data[0] == NULL) &&
            s->pict_type == AV_PICTURE_TYPE_B) {
            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            s->next_picture_ptr = &s->picture[i];
            s->next_picture_ptr->f.key_frame = 0;
            if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
                s->next_picture_ptr = NULL;
                return -1;
            }
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
            s->next_picture_ptr->f.reference = 3;
        }
    }

    if (s->last_picture_ptr)
        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if (s->next_picture_ptr)
        ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    /* with frame threading, claim ownership of the references we use */
    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
        (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3)) {
        if (s->next_picture_ptr)
            s->next_picture_ptr->owner2 = s;
        if (s->last_picture_ptr)
            s->last_picture_ptr->owner2 = s;
    }

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    /* for field pictures, address the selected field by doubling linesize
     * (and offsetting data for the bottom field) */
    if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i]    *= 2;
            s->next_picture.f.linesize[i]    *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer: MPEG-2 uses a different mismatch control, and
     * H.263-style codecs use their own scheme */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
01503
01504
01505
/**
 * Generic per-frame teardown: draws the replicated edges of the current
 * picture (needed for unrestricted motion vectors), updates last-frame
 * statistics, and reports decode completion to other frame threads.
 */
void ff_MPV_frame_end(MpegEncContext *s)
{
    int i;

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
              !s->avctx->hwaccel &&
              !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
              s->unrestricted_mv &&
              s->current_picture.f.reference &&
              !s->intra_only &&
              !(s->flags & CODEC_FLAG_EMU_EDGE) &&
              !s->avctx->lowres
            ) {
        /* pad the reference frame with replicated edge pixels so motion
         * compensation can read outside the picture area */
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    s->last_pict_type                 = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* release non-reference frames */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
               ) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }

#if 0
    memset(&s->last_picture,    0, sizeof(Picture));
    memset(&s->next_picture,    0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = &s->current_picture_ptr->f;

    /* unblock frame threads waiting on this reference (H.264 reports
     * progress itself) */
    if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
    }
}
01577
/**
 * Draw a line from (sx, sy) to (ex, ey) into an 8 bpp plane by adding
 * 'color', anti-aliased along the minor axis with 16.16 fixed-point
 * interpolation. Endpoints are clipped to the w x h area first.
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
                      int w, int h, int stride, int color)
{
    int x, y, fr, f;

    sx = av_clip(sx, 0, w - 1);
    sy = av_clip(sy, 0, h - 1);
    ex = av_clip(ex, 0, w - 1);
    ey = av_clip(ey, 0, h - 1);

    buf[sy * stride + sx] += color;

    if (FFABS(ex - sx) > FFABS(ey - sy)) {
        /* mostly horizontal: iterate over x, interpolate y */
        if (sx > ex) {
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf += sx + sy * stride;
        ex -= sx;
        f = ((ey - sy) << 16) / ex;   /* 16.16 slope; ex > 0 on this branch */
        for(x= 0; x <= ex; x++){
            y  = (x * f) >> 16;
            fr = (x * f) & 0xFFFF;    /* fractional part = blend weight */
            buf[y * stride + x]              += (color * (0x10000 - fr)) >> 16;
            if(fr) buf[(y + 1) * stride + x] += (color * fr            ) >> 16;
        }
    } else {
        /* mostly vertical: iterate over y, interpolate x */
        if (sy > ey) {
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf += sx + sy * stride;
        ey -= sy;
        if (ey)                       /* guard degenerate zero-length line */
            f = ((ex - sx) << 16) / ey;
        else
            f = 0;
        for(y= 0; y <= ey; y++){
            x  = (y*f) >> 16;
            fr = (y*f) & 0xFFFF;
            buf[y * stride + x]              += (color * (0x10000 - fr)) >> 16;
            if(fr) buf[y * stride + x + 1]   += (color * fr            ) >> 16;
        }
    }
}
01630
/**
 * Draw an arrow from (sx, sy) to (ex, ey) into an 8 bpp plane: the shaft
 * plus, for vectors longer than 3 pixels, two short head strokes at 45
 * degrees to the shaft.
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int dx, dy;

    /* clip with a 100-pixel margin so off-frame endpoints keep the angle */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    dx = ex - sx;
    dy = ey - sy;

    if (dx * dx + dy * dy > 3 * 3) {
        /* (rx, ry) is the shaft direction rotated by 45 degrees */
        int rx     = dx + dy;
        int ry     = dy - dx;
        int length = ff_sqrt((rx * rx + ry * ry) << 8);

        /* normalize the head strokes to a fixed length */
        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
01665
/**
 * Print per-macroblock debug tables (skip count, QP, MB type) to the log
 * and/or paint motion vectors, QP and MB-type overlays onto the picture,
 * depending on the FF_DEBUG_* / debug_mv flags set on the codec context.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
{
    if (   s->avctx->hwaccel || !pict || !pict->mb_type
        || (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
        return;

    /* textual per-MB dump */
    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        int x,y;

        av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
               av_get_picture_type_char(pict->pict_type));
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    /* consecutive-skip count, clamped to a single digit */
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    if (count > 9)
                        count = 9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           pict->qscale_table[x + y * s->mb_stride]);
                }
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = pict->mb_type[x + y * s->mb_stride];
                    /* first char: prediction type */
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else {
                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    /* second char: partitioning */
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* third char: interlacing */
                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* visual overlays drawn onto a copy of the picture */
    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        (s->avctx->debug_mv)) {
        const int shift = 1 + s->quarter_sample;
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width          = s->avctx->width;
        const int height         = s->avctx->height;
        const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
        const int mv_stride      = (s->mb_width << mv_sample_log2) +
                                   (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
        s->low_delay = 0; // needed to see the vectors without trashing the buffers

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
                                      &h_chroma_shift, &v_chroma_shift);
        /* copy the planes into a private buffer so the overlay does not
         * modify the reference picture.
         * NOTE(review): av_realloc() result is used unchecked here. */
        for (i = 0; i < 3; i++) {
            size_t size= (i == 0) ? pict->linesize[i] * FFALIGN(height, 16):
                         pict->linesize[i] * FFALIGN(height, 16) >> v_chroma_shift;
            s->visualization_buffer[i]= av_realloc(s->visualization_buffer[i], size);
            memcpy(s->visualization_buffer[i], pict->data[i], size);
            pict->data[i] = s->visualization_buffer[i];
        }
        pict->type   = FF_BUFFER_TYPE_COPY;
        pict->opaque= NULL;
        ptr          = pict->data[0];
        block_height = 16 >> v_chroma_shift;

        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            int mb_x;
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_index = mb_x + mb_y * s->mb_stride;
                /* motion vector arrows */
                if ((s->avctx->debug_mv) && pict->motion_val) {
                    int type;
                    for (type = 0; type < 3; type++) {
                        int direction = 0;
                        /* type selects P forward / B forward / B backward */
                        switch (type) {
                        case 0:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_P))
                                continue;
                            direction = 0;
                            break;
                        case 1:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_B))
                                continue;
                            direction = 0;
                            break;
                        case 2:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_B))
                                continue;
                            direction = 1;
                            break;
                        }
                        if (!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        /* one arrow per partition, from the block center */
                        if (IS_8X8(pict->mb_type[mb_index])) {
                            int i;
                            for (i = 0; i < 4; i++) {
                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
                                int xy = (mb_x * 2 + (i & 1) +
                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
                                int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width,
                                           height, s->linesize, 100);
                            }
                        } else if (IS_16X8(pict->mb_type[mb_index])) {
                            int i;
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 8;
                                int sy = mb_y * 16 + 4 + 8 * i;
                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift);
                                int my = (pict->motion_val[direction][xy][1] >> shift);

                                if (IS_INTERLACED(pict->mb_type[mb_index]))
                                    my *= 2;

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            }
                        } else if (IS_8X16(pict->mb_type[mb_index])) {
                            int i;
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 4 + 8 * i;
                                int sy = mb_y * 16 + 8;
                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
                                int mx = pict->motion_val[direction][xy][0] >> shift;
                                int my = pict->motion_val[direction][xy][1] >> shift;

                                if (IS_INTERLACED(pict->mb_type[mb_index]))
                                    my *= 2;

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            }
                        } else {
                            int sx= mb_x * 16 + 8;
                            int sy= mb_y * 16 + 8;
                            int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
                            int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                            int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                /* QP overlay: fill the MB's chroma with a gray proportional to QP */
                if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
                    uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
                                 0x0101010101010101ULL;
                    int y;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[1]) = c;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[2]) = c;
                    }
                }
                /* MB type overlay: colorize chroma and mark partitions in luma */
                if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
                    pict->motion_val) {
                    int mb_type = pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
/* select a U/V color from an angle on the color wheel */
#define COLOR(theta, r) \
    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
    v = (int)(128 + r * sin(theta * 3.141592 / 180));

                    u = v = 128;
                    if (IS_PCM(mb_type)) {
                        COLOR(120, 48)
                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
                               IS_INTRA16x16(mb_type)) {
                        COLOR(30, 48)
                    } else if (IS_INTRA4x4(mb_type)) {
                        COLOR(90, 48)
                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
                        // COLOR(120, 48)
                    } else if (IS_DIRECT(mb_type)) {
                        COLOR(150, 48)
                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
                        COLOR(170, 48)
                    } else if (IS_GMC(mb_type)) {
                        COLOR(190, 48)
                    } else if (IS_SKIP(mb_type)) {
                        // COLOR(180, 48)
                    } else if (!USES_LIST(mb_type, 1)) {
                        COLOR(240, 48)
                    } else if (!USES_LIST(mb_type, 0)) {
                        COLOR(0, 48)
                    } else {
                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    u *= 0x0101010101010101ULL;
                    v *= 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[2]) = v;
                    }

                    /* segmentation: XOR-invert pixels on partition boundaries */
                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                    }
                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
                        for (y = 0; y < 16; y++)
                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
                                          pict->linesize[0]] ^= 0x80;
                    }
                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
                        int dm = 1 << (mv_sample_log2 - 2);
                        for (i = 0; i < 4; i++) {
                            int sx = mb_x * 16 + 8 * (i & 1);
                            int sy = mb_y * 16 + 8 * (i >> 1);
                            int xy = (mb_x * 2 + (i & 1) +
                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                            /* mark sub-partitions whose vectors differ */
                            int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
                            if (mv[0] != mv[dm] ||
                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
                                for (y = 0; y < 8; y++)
                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
                                              pict->linesize[0]) ^= 0x8080808080808080ULL;
                        }
                    }

                    if (IS_INTERLACED(mb_type) &&
                        s->codec_id == AV_CODEC_ID_H264) {
                        // hmm
                    }
                }
                s->mbskip_table[mb_index] = 0;
            }
        }
    }
}
01958
/**
 * Half-pel motion compensation for a single block at lowres resolution.
 *
 * @param field_select select the second field of the (field-based) source
 * @return 1 if the edge-emulation buffer was needed, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int s_mask   = (2 << lowres) - 1;   /* sub-pel fraction mask */
    int emu = 0;
    int sx, sy;

    /* quarter-pel vectors are handled at half-pel precision here */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* NOTE: ">> lowres + 1" parses as ">> (lowres + 1)"; the +1 consumes
     * the half-pel bit of the motion vector — intentional */
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        /* block reaches outside the picture: read via the padded emu buffer */
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
                                (h + 1) << field_based, src_x,
                                src_y   << field_based,
                                h_edge_pos,
                                v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }

    /* rescale the sub-pel phase into the range pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
02004
02005
/**
 * Motion compensation of a single macroblock (luma + chroma) at lowres
 * resolution. The chroma sub-pel position is derived differently per
 * out_format because H.263, H.261 and MPEG-1/2 round chroma vectors
 * differently.
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
        uvsx, uvsy;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 2);
    const int block_s    = 8>>lowres;               /* block size at this lowres */
    const int s_mask     = (2 << lowres) - 1;       /* sub-pel fraction mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    /* quarter-pel vectors are handled at half-pel precision here */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* ">> lowres + 1" is ">> (lowres + 1)": the +1 drops the half-pel bit */
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        /* H.261: chroma vector is the truncated quarter of the luma vector */
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y =    mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            /* 4:2:0 */
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
            uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
                /* 4:2:2 — chroma sub-sampled horizontally only */
                mx      = motion_x / 2;
                uvsx    = mx & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
            } else {
                /* 4:4:4 — chroma uses the luma position directly */
                uvsx    = motion_x & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        /* block reaches outside the picture: pad via the emu buffer */
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                s->linesize, 17, 17 + field_based,
                                src_x, src_y << field_based, h_edge_pos,
                                v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.data */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y   += s->linesize;
        ptr_cb  += s->uvlinesize;
        ptr_cr  += s->uvlinesize;
    }

    /* rescale the sub-pel phase into the range pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (h >> s->chroma_y_shift) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
02137
/**
 * Motion-compensate the two 8x8 chroma blocks (Cb and Cr) of a 4MV
 * macroblock at reduced ("lowres") resolution.
 *
 * @param dest_cb destination pointer for the Cb plane
 * @param dest_cr destination pointer for the Cr plane
 * @param ref_picture reference plane pointers; [1] is Cb, [2] is Cr
 * @param pix_op table of h264-chroma MC functions (put or avg)
 * @param mx, my  sum of the four luma motion vectors of the macroblock
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 2);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    /* NB: ">>" binds looser than "+", so this is ">> (lowres + 1)" —
     * chroma edges are at half the luma resolution */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    /* split each vector into an integer block position and a
     * sub-pel fraction (sx/sy) used by the chroma MC function */
    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        /* fall back to edge emulation when the block reads outside the
         * padded reference area */
        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                    9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    /* rescale the sub-pel fraction to the 1/8-pel units pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* Cr uses the same offset and sub-pel position as Cb */
    ptr = ref_picture[2] + offset;
    if (emu) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                                src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
02191
/**
 * Motion compensation of a single macroblock at reduced ("lowres")
 * resolution, dispatching on s->mv_type.
 *
 * @param dest_y  luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir     direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op  h264-chroma MC functions (put or avg)
 *
 * The motion vectors are taken from s->mv and the MV type from s->mv_type.
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole 16x16 macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma uses their (specially rounded) sum */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: compensate top and bottom field separately */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: a P-field may reference the opposite field
             * of the current picture instead of the reference picture */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, each covering a 16x8 half of the macroblock */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                /* same-parity reference comes from the current picture */
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: average predictions from both field parities */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                /* after the first prediction, switch to averaging */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                /* after put we make avg of the same block */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                /* opposite parity is always in the same
                 * frame if this is second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
02333
/**
 * Find the lowest macroblock row referenced in the motion vectors of the
 * current macroblock, so frame-threaded decoding can wait for exactly that
 * much of the reference frame to be decoded.
 *
 * @param dir direction (0->forward, 1->backward)
 * @return the lowest referenced MB row, clamped to [0, mb_height-1];
 *         for field pictures or unusual MV types the conservative
 *         answer mb_height-1 is returned.
 */
int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    if (s->picture_structure != PICT_FRAME) goto unhandled;

    switch (s->mv_type) {
        case MV_TYPE_16X16:
            mvs = 1;
            break;
        case MV_TYPE_16X8:
            mvs = 2;
            break;
        case MV_TYPE_8X8:
            mvs = 4;
            break;
        default:
            goto unhandled;
    }

    /* normalize all vertical components to quarter-pel units and
     * track their extremes */
    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1]<<qpel_shift;
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* convert the largest vertical displacement (quarter-pel) into a
     * macroblock-row offset, rounding up */
    off = (FFMAX(-my_min, my_max) + 63) >> 6;

    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
unhandled:
    return s->mb_height-1;
}
02370
02371
/* Dequantize an intra block and write (overwrite) the IDCT result to dest. */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
02378
02379
02380 static inline void add_dct(MpegEncContext *s,
02381 DCTELEM *block, int i, uint8_t *dest, int line_size)
02382 {
02383 if (s->block_last_index[i] >= 0) {
02384 s->dsp.idct_add (dest, line_size, block);
02385 }
02386 }
02387
02388 static inline void add_dequant_dct(MpegEncContext *s,
02389 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02390 {
02391 if (s->block_last_index[i] >= 0) {
02392 s->dct_unquantize_inter(s, block, i, qscale);
02393
02394 s->dsp.idct_add (dest, line_size, block);
02395 }
02396 }
02397
/**
 * Clean the DC/AC/coded-block prediction entries for the current
 * macroblock: resets the four luma 8x8 DC predictors (and AC rows) to
 * their neutral values, plus the chroma DC/AC predictors, and clears
 * the mbintra flag. Called when a non-intra MB follows intra MBs so
 * stale intra prediction state is not reused.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* luma: four 8x8 DC predictors (1024 == neutral value, 128 << 3) */
    s->dc_val[0][xy           ] =
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy     + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy           ] =
        s->coded_block[xy + 1       ] =
        s->coded_block[xy     + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma: one entry per macroblock, indexed by mb stride */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy]= 0;
}
02430
02431
02432
02433
02434
02435
02436
02437
02438
02439
02440
/**
 * Generic function called after a macroblock has been parsed by the
 * decoder or coded by the encoder: performs motion compensation (for
 * inter MBs), runs the IDCT and writes/adds the result into the current
 * picture. Always inlined so that the constant lowres_flag/is_mpeg12
 * arguments fold away in the per-variant callers (see ff_MPV_decode_mb).
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save and print the DCT coefficients for debugging */
       int i,j;
       DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               *dct++ = block[i][s->dsp.idct_permutation[j]];
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* reconstruction is skipped for non-referenced B frames during
     * encoding unless PSNR is requested */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f.linesize[0];
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.f.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* interlaced DCT uses doubled stride and a one-line offset for
         * the second field's blocks */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* B frames without draw_horiz_band go via a scratchpad and
             * are copied back after skip_idct */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* wait until the reference rows we need are decoded */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                        /* bidirectional prediction averages the backward
                         * prediction on top of the forward one */
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue: codecs that keep quantized coefficients
             * in block[] need dequantization here, others (MPEG-1/2,
             * msmpeg4, quantizer-less MPEG-4) already dequantized */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2 — chroma has twice the vertical blocks */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* intra block: dequantize then overwrite (idct_put) */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* MPEG-1/2 intra blocks are already dequantized */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct? uvlinesize : uvlinesize*block_size;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }
            }
        }
skip_idct:
        if(!readable){
            /* copy the scratchpad reconstruction into the real frame */
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
02684
/* Dispatch to one of the four constant-folded variants of
 * MPV_decode_mb_internal (lowres x mpeg12); the constants propagate into
 * the always-inline worker. The mpeg12 specialization is dropped when
 * CONFIG_SMALL to reduce code size. */
void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1) {
        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
        else                 MPV_decode_mb_internal(s, block, 0, 1);
    } else
#endif
    if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
    else                 MPV_decode_mb_internal(s, block, 0, 0);
}
02695
/**
 * Called when a horizontal slice of the picture is finished: extends the
 * picture edges for unrestricted motion vectors and, if the user set
 * avctx->draw_horiz_band, notifies it of the freshly decoded band.
 *
 * @param y start row of the band (field rows if field picture)
 * @param h height of the band
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;
    if(field_pic){
        /* convert field coordinates to frame coordinates */
        h <<= 1;
        y <<= 1;
    }

    if (!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.f.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        /* pad the picture edges so later MBs can reference outside
         * the visible area */
        int sides = 0, edge_h;
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        edge_h= FFMIN(h, s->v_edge_pos - y);

        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
                          s->linesize,           s->h_edge_pos,         edge_h,
                          EDGE_WIDTH,            EDGE_WIDTH,            sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
    }

    h= FFMIN(h, s->avctx->height - y);

    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        /* choose which picture the band comes from: current picture when
         * decoding in display order, previous picture otherwise */
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src = &s->current_picture_ptr->f;
        else if(s->last_picture_ptr)
            src = &s->last_picture_ptr->f;
        else
            return;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * s->linesize;
            offset[1]=
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        /* clear MMX/FPU state before calling back into user code */
        emms_c();

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
    }
}
02764
/**
 * Initialize the per-macroblock block indices (into dc_val/ac_val etc.)
 * and the destination pointers s->dest[] for the current macroblock row.
 * Should be called before decoding/encoding each macroblock row.
 */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    /* macroblock size in pixels is 16 >> lowres; stored as log2 */
    const int mb_size= 4 - s->avctx->lowres;

    /* four luma 8x8 block indices (two rows of two) ... */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* ... and the two chroma indices, stored after the luma plane */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    /* start one macroblock to the left; callers advance per MB */
    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: every other frame row */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
02796
/**
 * Flush all decoder state: releases every internally/user allocated
 * picture, drops the current/last/next picture references and resets
 * the parser and bitstream buffers. Called on seek/flush.
 */
void ff_mpeg_flush(AVCodecContext *avctx){
    int i;
    MpegEncContext *s = avctx->priv_data;

    if(s==NULL || s->picture==NULL)
        return;

    for(i=0; i<s->picture_count; i++){
       if (s->picture[i].f.data[0] &&
           (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
            s->picture[i].f.type == FF_BUFFER_TYPE_USER))
        free_frame_buffer(s, &s->picture[i]);
    }
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

    s->mb_x= s->mb_y= 0;
    s->closed_gop= 0;

    /* reset the stream parser so it re-syncs on the next packet */
    s->parse_context.state= -1;
    s->parse_context.frame_start_found= 0;
    s->parse_context.overread= 0;
    s->parse_context.overread_index= 0;
    s->parse_context.index= 0;
    s->parse_context.last_index= 0;
    s->bitstream_buffer_size=0;
    s->pp_time=0;
}
02824
02825 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02826 DCTELEM *block, int n, int qscale)
02827 {
02828 int i, level, nCoeffs;
02829 const uint16_t *quant_matrix;
02830
02831 nCoeffs= s->block_last_index[n];
02832
02833 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02834
02835 quant_matrix = s->intra_matrix;
02836 for(i=1;i<=nCoeffs;i++) {
02837 int j= s->intra_scantable.permutated[i];
02838 level = block[j];
02839 if (level) {
02840 if (level < 0) {
02841 level = -level;
02842 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02843 level = (level - 1) | 1;
02844 level = -level;
02845 } else {
02846 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02847 level = (level - 1) | 1;
02848 }
02849 block[j] = level;
02850 }
02851 }
02852 }
02853
02854 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02855 DCTELEM *block, int n, int qscale)
02856 {
02857 int i, level, nCoeffs;
02858 const uint16_t *quant_matrix;
02859
02860 nCoeffs= s->block_last_index[n];
02861
02862 quant_matrix = s->inter_matrix;
02863 for(i=0; i<=nCoeffs; i++) {
02864 int j= s->intra_scantable.permutated[i];
02865 level = block[j];
02866 if (level) {
02867 if (level < 0) {
02868 level = -level;
02869 level = (((level << 1) + 1) * qscale *
02870 ((int) (quant_matrix[j]))) >> 4;
02871 level = (level - 1) | 1;
02872 level = -level;
02873 } else {
02874 level = (((level << 1) + 1) * qscale *
02875 ((int) (quant_matrix[j]))) >> 4;
02876 level = (level - 1) | 1;
02877 }
02878 block[j] = level;
02879 }
02880 }
02881 }
02882
02883 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02884 DCTELEM *block, int n, int qscale)
02885 {
02886 int i, level, nCoeffs;
02887 const uint16_t *quant_matrix;
02888
02889 if(s->alternate_scan) nCoeffs= 63;
02890 else nCoeffs= s->block_last_index[n];
02891
02892 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02893 quant_matrix = s->intra_matrix;
02894 for(i=1;i<=nCoeffs;i++) {
02895 int j= s->intra_scantable.permutated[i];
02896 level = block[j];
02897 if (level) {
02898 if (level < 0) {
02899 level = -level;
02900 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02901 level = -level;
02902 } else {
02903 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02904 }
02905 block[j] = level;
02906 }
02907 }
02908 }
02909
02910 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02911 DCTELEM *block, int n, int qscale)
02912 {
02913 int i, level, nCoeffs;
02914 const uint16_t *quant_matrix;
02915 int sum=-1;
02916
02917 if(s->alternate_scan) nCoeffs= 63;
02918 else nCoeffs= s->block_last_index[n];
02919
02920 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02921 sum += block[0];
02922 quant_matrix = s->intra_matrix;
02923 for(i=1;i<=nCoeffs;i++) {
02924 int j= s->intra_scantable.permutated[i];
02925 level = block[j];
02926 if (level) {
02927 if (level < 0) {
02928 level = -level;
02929 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02930 level = -level;
02931 } else {
02932 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02933 }
02934 block[j] = level;
02935 sum+=level;
02936 }
02937 }
02938 block[63]^=sum&1;
02939 }
02940
02941 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02942 DCTELEM *block, int n, int qscale)
02943 {
02944 int i, level, nCoeffs;
02945 const uint16_t *quant_matrix;
02946 int sum=-1;
02947
02948 if(s->alternate_scan) nCoeffs= 63;
02949 else nCoeffs= s->block_last_index[n];
02950
02951 quant_matrix = s->inter_matrix;
02952 for(i=0; i<=nCoeffs; i++) {
02953 int j= s->intra_scantable.permutated[i];
02954 level = block[j];
02955 if (level) {
02956 if (level < 0) {
02957 level = -level;
02958 level = (((level << 1) + 1) * qscale *
02959 ((int) (quant_matrix[j]))) >> 4;
02960 level = -level;
02961 } else {
02962 level = (((level << 1) + 1) * qscale *
02963 ((int) (quant_matrix[j]))) >> 4;
02964 }
02965 block[j] = level;
02966 sum+=level;
02967 }
02968 }
02969 block[63]^=sum&1;
02970 }
02971
02972 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02973 DCTELEM *block, int n, int qscale)
02974 {
02975 int i, level, qmul, qadd;
02976 int nCoeffs;
02977
02978 assert(s->block_last_index[n]>=0);
02979
02980 qmul = qscale << 1;
02981
02982 if (!s->h263_aic) {
02983 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02984 qadd = (qscale - 1) | 1;
02985 }else{
02986 qadd = 0;
02987 }
02988 if(s->ac_pred)
02989 nCoeffs=63;
02990 else
02991 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02992
02993 for(i=1; i<=nCoeffs; i++) {
02994 level = block[i];
02995 if (level) {
02996 if (level < 0) {
02997 level = level * qmul - qadd;
02998 } else {
02999 level = level * qmul + qadd;
03000 }
03001 block[i] = level;
03002 }
03003 }
03004 }
03005
03006 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
03007 DCTELEM *block, int n, int qscale)
03008 {
03009 int i, level, qmul, qadd;
03010 int nCoeffs;
03011
03012 assert(s->block_last_index[n]>=0);
03013
03014 qadd = (qscale - 1) | 1;
03015 qmul = qscale << 1;
03016
03017 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
03018
03019 for(i=0; i<=nCoeffs; i++) {
03020 level = block[i];
03021 if (level) {
03022 if (level < 0) {
03023 level = level * qmul - qadd;
03024 } else {
03025 level = level * qmul + qadd;
03026 }
03027 block[i] = level;
03028 }
03029 }
03030 }
03031
03035 void ff_set_qscale(MpegEncContext * s, int qscale)
03036 {
03037 if (qscale < 1)
03038 qscale = 1;
03039 else if (qscale > 31)
03040 qscale = 31;
03041
03042 s->qscale = qscale;
03043 s->chroma_qscale= s->chroma_qscale_table[qscale];
03044
03045 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
03046 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
03047 }
03048
/* Report decode progress (the last completed MB row) to waiting frame
 * threads; skipped for B frames, partitioned frames and after errors. */
void ff_MPV_report_decode_progress(MpegEncContext *s)
{
    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
        ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
}