00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "libavutil/imgutils.h"
00031 #include "avcodec.h"
00032 #include "dsputil.h"
00033 #include "internal.h"
00034 #include "mathops.h"
00035 #include "mpegvideo.h"
00036 #include "mjpegenc.h"
00037 #include "msmpeg4.h"
00038 #include "xvmc_internal.h"
00039 #include "thread.h"
00040 #include <limits.h>
00041
00042
00043
00044
/* Reference C implementations of the dequantizers; installed as function
 * pointers on the context in ff_dct_common_init() (arch-specific init may
 * override them afterwards). */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
00059
00060
00061
00062
00063
/* Identity mapping: by default the chroma qscale equals the luma qscale. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
00069
/* MPEG-1: the DC coefficient scale is a constant 8, independent of qscale. */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
00081
/* Constant DC scale of 4 for every qscale (one of the MPEG-2 variants,
 * selected via ff_mpeg2_dc_scale_table below). */
static const uint8_t mpeg2_dc_scale_table1[128] = {
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
00093
/* Constant DC scale of 2 for every qscale (MPEG-2 variant). */
static const uint8_t mpeg2_dc_scale_table2[128] = {
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
00105
/* Constant DC scale of 1 for every qscale (MPEG-2 variant). */
static const uint8_t mpeg2_dc_scale_table3[128] = {
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
00117
/* DC scale tables (8/4/2/1) selected by a 2-bit index — presumably the
 * MPEG-2 intra_dc_precision field; verify against the MPEG-2 decoder. */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
00124
/* Pixel formats for plain software YUV 4:2:0 decoding; terminated by
 * AV_PIX_FMT_NONE as required by get_format() style lists. */
const enum AVPixelFormat ff_pixfmt_list_420[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
00129
/* Same as ff_pixfmt_list_420 but with the hardware-accelerated surface
 * formats listed first so they are preferred when available. */
const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
    AV_PIX_FMT_DXVA2_VLD,
    AV_PIX_FMT_VAAPI_VLD,
    AV_PIX_FMT_VDA_VLD,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
00137
/**
 * Scan [p, end) for an MPEG start code (00 00 01 xx), with *state acting
 * as a sliding big-endian history of the last four bytes so a start code
 * split across buffer boundaries is still found on the next call.
 *
 * @param p     current read position
 * @param end   one past the last readable byte
 * @param state in/out: last four bytes seen, as a big-endian 32-bit word
 * @return pointer just past the start code, or end if none was found
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *av_restrict p,
                                          const uint8_t *end,
                                          uint32_t *av_restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* feed up to three bytes into the history first; this completes any
     * start code that began in a previous buffer */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* main scan: each byte test lets us stride past positions that
     * provably cannot complete a 00 00 01 sequence */
    while (p < end) {
        if      (p[-1] > 1      ) p += 3; /* byte can't be the 0x01 of a code */
        else if (p[-2]          ) p += 2;
        else if (p[-3]|(p[-1]-1)) p++;
        else {                            /* matched 00 00 01 */
            p++;
            break;
        }
    }

    /* reload the history from the four bytes just behind the new position */
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
00170
00171
/**
 * Initialize the DCT-related parts of the context: DSP functions, the
 * dequantizer function pointers (with optional arch-specific overrides)
 * and the coefficient scan tables permuted for the selected IDCT.
 *
 * @return 0 (always succeeds)
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    ff_dsputil_init(&s->dsp, s->avctx);

    /* portable C dequantizers; the arch-specific init below may replace
     * them with optimized versions */
    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if ARCH_X86
    ff_MPV_common_init_x86(s);
#elif ARCH_ALPHA
    ff_MPV_common_init_axp(s);
#elif ARCH_ARM
    ff_MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    ff_MPV_common_init_altivec(s);
#elif ARCH_BFIN
    ff_MPV_common_init_bfin(s);
#endif

    /* load & permute scan tables; the permutation must match the IDCT
     * selected by ff_dsputil_init() above */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
00212
00213 void ff_copy_picture(Picture *dst, Picture *src)
00214 {
00215 *dst = *src;
00216 dst->f.type = FF_BUFFER_TYPE_COPY;
00217 }
00218
00222 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00223 {
00224
00225
00226 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
00227 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
00228 s->codec_id != AV_CODEC_ID_MSS2)
00229 ff_thread_release_buffer(s->avctx, &pic->f);
00230 else
00231 avcodec_default_release_buffer(s->avctx, &pic->f);
00232 av_freep(&pic->f.hwaccel_picture_private);
00233 }
00234
/**
 * Allocate a frame buffer for the picture and validate its strides.
 * hwaccel private data is allocated first so it exists before the
 * (possibly hwaccel-backed) get_buffer call, and is freed again on any
 * failure path.
 *
 * @return 0 on success, -1 on failure
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    /* WMV3IMAGE/VC1IMAGE/MSS2 use the default buffer functions rather
     * than the thread-aware ones (must match free_frame_buffer()) */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE &&
        s->codec_id != AV_CODEC_ID_MSS2)
        r = ff_thread_get_buffer(s->avctx, &pic->f);
    else
        r = avcodec_default_get_buffer(s->avctx, &pic->f);

    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
               r, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    /* all pictures of a context must share the same strides */
    if (s->linesize && (s->linesize != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* the code assumes both chroma planes use the same stride */
    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
00284
00289 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
00290 {
00291 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
00292
00293
00294
00295 const int mb_array_size = s->mb_stride * s->mb_height;
00296 const int b8_array_size = s->b8_stride * s->mb_height * 2;
00297 const int b4_array_size = s->b4_stride * s->mb_height * 4;
00298 int i;
00299 int r = -1;
00300
00301 if (shared) {
00302 assert(pic->f.data[0]);
00303 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
00304 pic->f.type = FF_BUFFER_TYPE_SHARED;
00305 } else {
00306 assert(!pic->f.data[0]);
00307
00308 if (alloc_frame_buffer(s, pic) < 0)
00309 return -1;
00310
00311 s->linesize = pic->f.linesize[0];
00312 s->uvlinesize = pic->f.linesize[1];
00313 }
00314
00315 if (pic->f.qscale_table == NULL) {
00316 if (s->encoding) {
00317 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
00318 mb_array_size * sizeof(int16_t), fail)
00319 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
00320 mb_array_size * sizeof(int16_t), fail)
00321 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
00322 mb_array_size * sizeof(int8_t ), fail)
00323 }
00324
00325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
00326 mb_array_size * sizeof(uint8_t) + 2, fail)
00327 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
00328 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
00329 fail)
00330 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
00331 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
00332 fail)
00333 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
00334 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
00335 if (s->out_format == FMT_H264) {
00336 for (i = 0; i < 2; i++) {
00337 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00338 2 * (b4_array_size + 4) * sizeof(int16_t),
00339 fail)
00340 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00341 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00342 4 * mb_array_size * sizeof(uint8_t), fail)
00343 }
00344 pic->f.motion_subsample_log2 = 2;
00345 } else if (s->out_format == FMT_H263 || s->encoding ||
00346 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
00347 for (i = 0; i < 2; i++) {
00348 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00349 2 * (b8_array_size + 4) * sizeof(int16_t),
00350 fail)
00351 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00352 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00353 4 * mb_array_size * sizeof(uint8_t), fail)
00354 }
00355 pic->f.motion_subsample_log2 = 3;
00356 }
00357 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00358 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
00359 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
00360 }
00361 pic->f.qstride = s->mb_stride;
00362 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
00363 1 * sizeof(AVPanScan), fail)
00364 }
00365
00366 pic->owner2 = s;
00367
00368 return 0;
00369 fail:
00370 if (r >= 0)
00371 free_frame_buffer(s, pic);
00372 return -1;
00373 }
00374
00378 static void free_picture(MpegEncContext *s, Picture *pic)
00379 {
00380 int i;
00381
00382 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
00383 free_frame_buffer(s, pic);
00384 }
00385
00386 av_freep(&pic->mb_var);
00387 av_freep(&pic->mc_mb_var);
00388 av_freep(&pic->mb_mean);
00389 av_freep(&pic->f.mbskip_table);
00390 av_freep(&pic->qscale_table_base);
00391 pic->f.qscale_table = NULL;
00392 av_freep(&pic->mb_type_base);
00393 pic->f.mb_type = NULL;
00394 av_freep(&pic->f.dct_coeff);
00395 av_freep(&pic->f.pan_scan);
00396 pic->f.mb_type = NULL;
00397 for (i = 0; i < 2; i++) {
00398 av_freep(&pic->motion_val_base[i]);
00399 av_freep(&pic->f.ref_index[i]);
00400 pic->f.motion_val[i] = NULL;
00401 }
00402
00403 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
00404 for (i = 0; i < 4; i++) {
00405 pic->f.base[i] =
00406 pic->f.data[i] = NULL;
00407 }
00408 pic->f.type = 0;
00409 }
00410 }
00411
/**
 * Allocate the per-slice/per-thread scratch buffers of one context:
 * edge emulation buffer, motion-estimation scratchpads, DCT block array
 * and (for H.263-style codecs) the AC prediction values.
 * The "base" parameter is not used by the current implementation.
 *
 * @return 0 on success, -1 if any allocation fails
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    /* edge emulation buffer; NOTE(review): the exact derivation of
     * (width+95)*2*21*4 is not visible here — confirm before changing */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
                      (s->width + 95) * 2 * 21 * 4, fail);

    /* one allocation backs all scratchpads; they alias each other and
     * obmc_scratchpad points 16 bytes into it */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
                      (s->width + 95) * 4 * 16 * 2 * sizeof(uint8_t), fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;
    if (s->encoding) {
        /* motion-estimation hash map and score map */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 coefficients, double-buffered */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* AC values for prediction: luma plane followed by both chroma
         * planes, each rebased past a one-row/one-column border */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1;
}
00462
00463 static void free_duplicate_context(MpegEncContext *s)
00464 {
00465 if (s == NULL)
00466 return;
00467
00468 av_freep(&s->edge_emu_buffer);
00469 av_freep(&s->me.scratchpad);
00470 s->me.temp =
00471 s->rd_scratchpad =
00472 s->b_scratchpad =
00473 s->obmc_scratchpad = NULL;
00474
00475 av_freep(&s->dct_error_sum);
00476 av_freep(&s->me.map);
00477 av_freep(&s->me.score_map);
00478 av_freep(&s->blocks);
00479 av_freep(&s->ac_val_base);
00480 s->block = NULL;
00481 }
00482
/* Copy into bak exactly those fields of src that are owned per duplicate
 * (slice-thread) context, so ff_update_duplicate_context() can restore
 * them after overwriting the whole struct with memcpy(). */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
00509
/* Make dst a copy of src while keeping dst's own per-context buffers and
 * slice bounds (saved and restored via backup_duplicate_context()). */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i;

    /* save dst's private fields, clone src wholesale, then restore */
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block array, not src's */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
}
00525
/**
 * Synchronize decoder state from src to dst for frame multithreading.
 * On first use dst's context is cloned and fully initialized; subsequent
 * calls copy over picture state, header/timing fields and any pending
 * bitstream data so dst can decode the next frame.
 *
 * @return 0 on success, a negative error code on failure
 */
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i;
    int err;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    if (!s->context_initialized) {
        /* first call: clone everything, then reset the members that must
         * stay private to this context */
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx = dst;
        s->bitstream_buffer = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized){
            /* give this thread its own range of Picture slots */
            s->picture_range_start += MAX_PICTURE_COUNT;
            s->picture_range_end   += MAX_PICTURE_COUNT;
            if((err = ff_MPV_common_init(s)) < 0)
                return err;
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        /* source changed size: rebuild the frame-size dependent state */
        s->context_reinit = 0;
        s->height = s1->height;
        s->width = s1->width;
        if ((err = ff_MPV_common_frame_size_change(s)) < 0)
            return err;
    }

    s->avctx->coded_height = s1->avctx->coded_height;
    s->avctx->coded_width = s1->avctx->coded_width;
    s->avctx->width = s1->avctx->width;
    s->avctx->height = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);
    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    /* bulk-copy the struct region from last_picture up to (excluding)
     * last_picture_ptr */
    memcpy(&s->last_picture, &s1->last_picture,
           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);

    /* extended_data must point at this context's own data arrays */
    for (i = 0; i < s->picture_count; i++)
        s->picture[i].f.extended_data = s->picture[i].f.data;

    /* rebase the picture pointers from s1's array onto s's array */
    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    /* error-concealment / bug-workaround state */
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    /* bulk-copy the MPEG-4 timing fields: the struct region between
     * time_increment_bits and shape */
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    /* B-frame configuration */
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    /* DivX packed-bitstream handling */
    s->divx_packed = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        /* NOTE(review): av_fast_malloc() failure is not checked here; on
         * OOM the memcpy below would write through NULL — worth hardening */
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    /* bulk-copy sequence/picture header state: the struct region between
     * progressive_sequence and rtp_mode */
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
        }
    }

    return 0;
}
00631
00638 void ff_MPV_common_defaults(MpegEncContext *s)
00639 {
00640 s->y_dc_scale_table =
00641 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
00642 s->chroma_qscale_table = ff_default_chroma_qscale_table;
00643 s->progressive_frame = 1;
00644 s->progressive_sequence = 1;
00645 s->picture_structure = PICT_FRAME;
00646
00647 s->coded_picture_number = 0;
00648 s->picture_number = 0;
00649 s->input_picture_number = 0;
00650
00651 s->picture_in_gop_number = 0;
00652
00653 s->f_code = 1;
00654 s->b_code = 1;
00655
00656 s->picture_range_start = 0;
00657 s->picture_range_end = MAX_PICTURE_COUNT;
00658
00659 s->slice_context_count = 1;
00660 }
00661
/**
 * Set context defaults for decoding; currently a thin wrapper around
 * ff_MPV_common_defaults().
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    ff_MPV_common_defaults(s);
}
00671
/**
 * Initialize and allocate everything that depends on the frame
 * dimensions: strides, the MB index mapping, motion-vector tables and
 * the per-macroblock side tables.
 *
 * @return 0 on success, AVERROR(ENOMEM) if an allocation fails (the
 *         caller cleans up, e.g. via ff_MPV_common_end())
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;     /* +1 column for the border MBs */
    s->b8_stride  = s->mb_width * 2 + 1; /* 8x8 block resolution */
    s->b4_stride  = s->mb_width * 4 + 1; /* 4x4 block resolution */
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* edge positions in pixels at full macroblock granularity */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    /* wrap (stride) per block index: 0-3 are luma 8x8, 4-5 chroma */
    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* mapping from linear MB index to stride-based xy position */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail);
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width;

    if (s->encoding) {
        /* one motion-vector table per prediction mode; each is rebased by
         * mb_stride + 1 past a one-MB border */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* encoder-only per-MB type/lambda/complexity tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail)

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    /* error resilience tables */
    FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
                     mb_array_size * sizeof(uint8_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
                      mb_array_size * sizeof(uint8_t), fail);

    if (s->codec_id == AV_CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced (field) motion-vector tables, indexed by direction,
         * field and parity */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* CBP, AC prediction direction and coded-block flags */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* DC values for prediction: luma then both chroma planes, each
         * rebased past a border; reset value is 1024 */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which MBs are intra-coded: init to "all ones" */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* which MBs are skipped */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);

    return 0;
fail:
    return AVERROR(ENOMEM);
}
00796
/**
 * Initialize an MpegEncContext: allocate the Picture array, the
 * frame-size dependent tables and the per-slice thread contexts.
 * The context must have been set up with the defaults beforehand.
 *
 * @return 0 on success, -1 on failure (the context is torn down again
 *         via ff_MPV_common_end())
 */
av_cold int ff_MPV_common_init(MpegEncContext *s)
{
    int i;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* interlaced MPEG-2 rounds mb_height to 32-line macroblock pairs */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != AV_CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* never use more slice contexts than MB rows (or MAX_THREADS) */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    /* chroma subsampling shifts for the chosen pixel format */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);

    /* fourccs are matched case-insensitively: normalize to upper case */
    s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);

    s->avctx->coded_frame = &s->current_picture.f;

    if (s->encoding) {
        /* encoder-only: VLC statistics, quantizer matrices, reorder queues */
        if (s->msmpeg4_version) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                              2 * 2 * (MAX_LEVEL + 1) *
                              (MAX_RUN + 1) * 2 * sizeof(int), fail);
        }
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)

        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
        }
    }

    /* one range of MAX_PICTURE_COUNT slots per frame thread */
    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults(&s->picture[i].f);
    }

    if (init_context_frame(s))
        goto fail;

    s->parse_context.state = -1;

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    if (nb_slices > 1) {
        for (i = 1; i < nb_slices; i++) {
            /* NOTE(review): av_malloc() result is not checked before the
             * memcpy — OOM here would crash; worth hardening */
            s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
        }

        for (i = 0; i < nb_slices; i++) {
            if (init_duplicate_context(s->thread_context[i], s) < 0)
                goto fail;
            /* distribute macroblock rows evenly over the slice contexts */
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
    } else {
        if (init_duplicate_context(s, s) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;

    return 0;
 fail:
    ff_MPV_common_end(s);
    return -1;
}
00917
00923 static int free_context_frame(MpegEncContext *s)
00924 {
00925 int i, j, k;
00926
00927 av_freep(&s->mb_type);
00928 av_freep(&s->p_mv_table_base);
00929 av_freep(&s->b_forw_mv_table_base);
00930 av_freep(&s->b_back_mv_table_base);
00931 av_freep(&s->b_bidir_forw_mv_table_base);
00932 av_freep(&s->b_bidir_back_mv_table_base);
00933 av_freep(&s->b_direct_mv_table_base);
00934 s->p_mv_table = NULL;
00935 s->b_forw_mv_table = NULL;
00936 s->b_back_mv_table = NULL;
00937 s->b_bidir_forw_mv_table = NULL;
00938 s->b_bidir_back_mv_table = NULL;
00939 s->b_direct_mv_table = NULL;
00940 for (i = 0; i < 2; i++) {
00941 for (j = 0; j < 2; j++) {
00942 for (k = 0; k < 2; k++) {
00943 av_freep(&s->b_field_mv_table_base[i][j][k]);
00944 s->b_field_mv_table[i][j][k] = NULL;
00945 }
00946 av_freep(&s->b_field_select_table[i][j]);
00947 av_freep(&s->p_field_mv_table_base[i][j]);
00948 s->p_field_mv_table[i][j] = NULL;
00949 }
00950 av_freep(&s->p_field_select_table[i]);
00951 }
00952
00953 av_freep(&s->dc_val_base);
00954 av_freep(&s->coded_block_base);
00955 av_freep(&s->mbintra_table);
00956 av_freep(&s->cbp_table);
00957 av_freep(&s->pred_dir_table);
00958
00959 av_freep(&s->mbskip_table);
00960
00961 av_freep(&s->error_status_table);
00962 av_freep(&s->er_temp_buffer);
00963 av_freep(&s->mb_index2xy);
00964 av_freep(&s->lambda_table);
00965
00966 av_freep(&s->cplx_tab);
00967 av_freep(&s->bits_tab);
00968
00969 s->linesize = s->uvlinesize = 0;
00970
00971 for (i = 0; i < 3; i++)
00972 av_freep(&s->visualization_buffer[i]);
00973
00974 return 0;
00975 }
00976
00977 int ff_MPV_common_frame_size_change(MpegEncContext *s)
00978 {
00979 int i, err = 0;
00980
00981 if (s->slice_context_count > 1) {
00982 for (i = 0; i < s->slice_context_count; i++) {
00983 free_duplicate_context(s->thread_context[i]);
00984 }
00985 for (i = 1; i < s->slice_context_count; i++) {
00986 av_freep(&s->thread_context[i]);
00987 }
00988 } else
00989 free_duplicate_context(s);
00990
00991 free_context_frame(s);
00992
00993 if (s->picture)
00994 for (i = 0; i < s->picture_count; i++) {
00995 s->picture[i].needs_realloc = 1;
00996 }
00997
00998 s->last_picture_ptr =
00999 s->next_picture_ptr =
01000 s->current_picture_ptr = NULL;
01001
01002
01003 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
01004 s->mb_height = (s->height + 31) / 32 * 2;
01005 else if (s->codec_id != AV_CODEC_ID_H264)
01006 s->mb_height = (s->height + 15) / 16;
01007
01008 if ((s->width || s->height) &&
01009 av_image_check_size(s->width, s->height, 0, s->avctx))
01010 return AVERROR_INVALIDDATA;
01011
01012 if ((err = init_context_frame(s)))
01013 goto fail;
01014
01015 s->thread_context[0] = s;
01016
01017 if (s->width && s->height) {
01018 int nb_slices = s->slice_context_count;
01019 if (nb_slices > 1) {
01020 for (i = 1; i < nb_slices; i++) {
01021 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
01022 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
01023 }
01024
01025 for (i = 0; i < nb_slices; i++) {
01026 if (init_duplicate_context(s->thread_context[i], s) < 0)
01027 goto fail;
01028 s->thread_context[i]->start_mb_y =
01029 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
01030 s->thread_context[i]->end_mb_y =
01031 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
01032 }
01033 } else {
01034 if (init_duplicate_context(s, s) < 0)
01035 goto fail;
01036 s->start_mb_y = 0;
01037 s->end_mb_y = s->mb_height;
01038 }
01039 s->slice_context_count = nb_slices;
01040 }
01041
01042 return 0;
01043 fail:
01044 ff_MPV_common_end(s);
01045 return err;
01046 }
01047
01048
/**
 * Free all state of a MpegEncContext allocated by the common init code.
 * Safe to call on a partially initialized context (every field freed here
 * is either NULL or valid). Counterpart of the common init function.
 */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    /* free per-slice duplicate contexts; slot 0 is s itself and is only
     * free_duplicate_context'd, never av_freep'd */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    /* the chroma matrices may alias the luma ones; only free them when they
     * are separate allocations, then clear the (possibly dangling) pointers */
    if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix= NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* a frame-thread copy does not own the picture buffers, so only the
     * original context frees them */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);

    free_context_frame(s);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);

    /* reset pointers so a stale context cannot be mistaken for a live one */
    s->context_initialized = 0;
    s->last_picture_ptr =
    s->next_picture_ptr =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
}
01102
/**
 * Initialize the derived tables (max_level, max_run, index_run) of an
 * RLTable from its table_run/table_level arrays.
 *
 * @param rl           run-level table to initialize
 * @param static_store optional static backing storage; when given, the three
 *                     derived tables are carved out of it (one row per
 *                     "last" value) instead of being av_malloc'ed, and a
 *                     second call on an already-initialized table returns
 *                     immediately.
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* already initialized from static storage — nothing to do */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[run], max_run[level] and index_run[run],
     * separately for the not-last (0) and last (1) halves of the table */
    for (last = 0; last < 2; last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* rl->n is used as the "unset" sentinel for index_run */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* static_store[last] is partitioned as:
         * [0, MAX_RUN]                         -> max_level
         * [MAX_RUN+1, MAX_RUN+MAX_LEVEL+1]     -> max_run
         * [MAX_RUN+MAX_LEVEL+2, ...]           -> index_run */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
01154
/**
 * Build the qscale-specialized rl_vlc tables of an RLTable from its VLC
 * table, pre-applying the unquantization (level * qmul + qadd) for every
 * qscale value 0..31 so the decoder can skip it per-coefficient.
 */
void ff_init_vlc_rl(RLTable *rl)
{
    int i, q;

    for (q = 0; q < 32; q++) {
        int qmul = q * 2;
        int qadd = (q - 1) | 1;

        /* q == 0 means no quantization: identity multiply, no offset */
        if (q == 0) {
            qmul = 1;
            qadd = 0;
        }
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if (len == 0) { /* illegal code: run 66 / MAX_LEVEL marks it */
                run = 66;
                level = MAX_LEVEL;
            } else if (len < 0) { /* more bits needed; code is a sub-table index */
                run = 0;
                level = code;
            } else {
                if (code == rl->n) { /* escape code */
                    run = 66;
                    level = 0;
                } else {
                    run = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    /* +192 encodes the "last coefficient" flag in run */
                    if (code >= rl->last) run += 192;
                }
            }
            rl->rl_vlc[q][i].len = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run = run;
        }
    }
}
01194
01195 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
01196 {
01197 int i;
01198
01199
01200 for (i = 0; i < s->picture_count; i++) {
01201 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
01202 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
01203 (remove_current || &s->picture[i] != s->current_picture_ptr)
01204 ) {
01205 free_frame_buffer(s, &s->picture[i]);
01206 }
01207 }
01208 }
01209
01210 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
01211 {
01212 if (pic->f.data[0] == NULL)
01213 return 1;
01214 if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
01215 if (!pic->owner2 || pic->owner2 == s)
01216 return 1;
01217 return 0;
01218 }
01219
01220 static int find_unused_picture(MpegEncContext *s, int shared)
01221 {
01222 int i;
01223
01224 if (shared) {
01225 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01226 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
01227 return i;
01228 }
01229 } else {
01230 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01231 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
01232 return i;
01233 }
01234 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01235 if (pic_is_unused(s, &s->picture[i]))
01236 return i;
01237 }
01238 }
01239
01240 av_log(s->avctx, AV_LOG_FATAL,
01241 "Internal error, picture buffer overflow\n");
01242
01243
01244
01245
01246
01247
01248
01249
01250
01251
01252
01253 abort();
01254 return -1;
01255 }
01256
01257 int ff_find_unused_picture(MpegEncContext *s, int shared)
01258 {
01259 int ret = find_unused_picture(s, shared);
01260
01261 if (ret >= 0 && ret < s->picture_range_end) {
01262 if (s->picture[ret].needs_realloc) {
01263 s->picture[ret].needs_realloc = 0;
01264 free_picture(s, &s->picture[ret]);
01265 avcodec_get_frame_defaults(&s->picture[ret].f);
01266 }
01267 }
01268 return ret;
01269 }
01270
01271 static void update_noise_reduction(MpegEncContext *s)
01272 {
01273 int intra, i;
01274
01275 for (intra = 0; intra < 2; intra++) {
01276 if (s->dct_count[intra] > (1 << 16)) {
01277 for (i = 0; i < 64; i++) {
01278 s->dct_error_sum[intra][i] >>= 1;
01279 }
01280 s->dct_count[intra] >>= 1;
01281 }
01282
01283 for (i = 0; i < 64; i++) {
01284 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
01285 s->dct_count[intra] +
01286 s->dct_error_sum[intra][i] / 2) /
01287 (s->dct_error_sum[intra][i] + 1);
01288 }
01289 }
01290 }
01291
/**
 * Set up the context for decoding/encoding the next frame: release old
 * frames, pick or allocate the current picture, allocate dummy reference
 * pictures when the stream starts on a non-keyframe, update the reference
 * picture pointers and select the unquantizer functions.
 *
 * @return 0 on success, a negative value on failure
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    if (!ff_thread_can_start_frame(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* mark & release old frames (H.264 manages its own references) */
    if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
            s->last_picture_ptr != s->next_picture_ptr &&
            s->last_picture_ptr->f.data[0]) {
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);
        }

        /* release pictures that are still flagged as references but are no
         * longer reachable ("zombies") — should not happen in normal operation */
        if (!s->encoding) {
            for (i = 0; i < s->picture_count; i++) {
                if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
                    &s->picture[i] != s->last_picture_ptr &&
                    &s->picture[i] != s->next_picture_ptr &&
                    s->picture[i].f.reference && !s->picture[i].needs_realloc) {
                    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                        av_log(avctx, AV_LOG_ERROR,
                               "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);
                }
            }
        }
    }

    if (!s->encoding) {
        ff_release_unused_pictures(s, 1);

        if (s->current_picture_ptr &&
            s->current_picture_ptr->f.data[0] == NULL) {
            /* we already have an unused picture slot from a previous
             * (failed/field) start — reuse it */
            pic = s->current_picture_ptr;
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            pic = &s->picture[i];
        }

        /* reference = 3 marks both fields as reference; H.264 references
         * per picture structure; droppable and B frames are not references */
        pic->f.reference = 0;
        if (!s->droppable) {
            if (s->codec_id == AV_CODEC_ID_H264)
                pic->f.reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->f.reference = 3;
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if (ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr = pic;
        /* for field pictures, top_field_first is derived from which field
         * comes first in coded order */
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            if (s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first =
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                     !s->progressive_sequence;
        s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;

    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* advance the reference pictures: last <- next, next <- current */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
            s->pict_type, s->droppable);

    if (s->codec_id != AV_CODEC_ID_H264) {
        /* the stream starts without a usable last picture (e.g. seeking into
         * the middle, or a field-coded first keyframe): allocate a dummy
         * gray reference so motion compensation has something to read */
        if ((s->last_picture_ptr == NULL ||
             s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type != AV_PICTURE_TYPE_I ||
             s->picture_structure != PICT_FRAME)) {
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR,
                       "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO,
                       "allocate dummy last picture for field based first keyframe\n");

            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            s->last_picture_ptr = &s->picture[i];
            s->last_picture_ptr->f.key_frame = 0;
            if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
                s->last_picture_ptr = NULL;
                return -1;
            }

            /* fill the dummy luma plane with mid-gray for FLV1/H.263 */
            if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
                for(i=0; i<avctx->height; i++)
                    memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
            }

            /* both fields of the dummy are immediately "done" */
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
            s->last_picture_ptr->f.reference = 3;
        }
        /* same for a missing next picture before a B frame */
        if ((s->next_picture_ptr == NULL ||
             s->next_picture_ptr->f.data[0] == NULL) &&
            s->pict_type == AV_PICTURE_TYPE_B) {

            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            s->next_picture_ptr = &s->picture[i];
            s->next_picture_ptr->f.key_frame = 0;
            if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
                s->next_picture_ptr = NULL;
                return -1;
            }
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
            s->next_picture_ptr->f.reference = 3;
        }
    }

    if (s->last_picture_ptr)
        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if (s->next_picture_ptr)
        ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    /* under frame threading, claim ownership of the references we use */
    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
        if (s->next_picture_ptr)
            s->next_picture_ptr->owner2 = s;
        if (s->last_picture_ptr)
            s->last_picture_ptr->owner2 = s;
    }

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    /* for field pictures, point into the correct field and double the
     * strides so rows of one field are addressed contiguously */
    if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i] *= 2;
            s->next_picture.f.linesize[i] *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* select the unquantizer matching the stream type; MPEG-4 with
     * mpeg_quant and MPEG-2 share the MPEG-2 style unquantizer */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
01505
01506
01507
/**
 * Finish decoding/encoding the current frame: draw the edge padding used by
 * unrestricted motion vectors, update the last-picture bookkeeping, release
 * non-reference pictures (encoder) and report completion to other frame
 * threads.
 */
void ff_MPV_frame_end(MpegEncContext *s)
{
    int i;

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    /* draw edges only when the whole frame is available here (no hwaccel,
     * no draw_horiz_band streaming unless errors occurred), the codec uses
     * unrestricted MVs and the picture is a reference */
    } else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
              !s->avctx->hwaccel &&
              !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
              s->unrestricted_mv &&
              s->current_picture.f.reference &&
              !s->intra_only &&
              !(s->flags & CODEC_FLAG_EMU_EDGE) &&
              !s->avctx->lowres
            ) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        /* replicate the border pixels into the EDGE_WIDTH padding of each
         * plane; chroma extents are scaled by the subsampling shifts */
        s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* the encoder keeps no display queue: release everything that is
         * not a reference right away */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
               ) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }

#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = &s->current_picture_ptr->f;

    /* tell the other frame threads this reference is fully decoded */
    if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
    }
}
01580
/**
 * Draw an anti-aliased line from (sx, sy) to (ex, ey) by adding `color`
 * into an 8-bit plane, distributing it between the two nearest pixels with
 * 16.16 fixed-point fractions.
 *
 * @param buf    base of the plane to draw into
 * @param w      width of the clip area
 * @param h      height of the clip area
 * @param stride stride/linesize of the plane
 * @param color  value added to the touched pixels
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
                      int w, int h, int stride, int color)
{
    int x, y, fr, f;

    sx = av_clip(sx, 0, w - 1);
    sy = av_clip(sy, 0, h - 1);
    ex = av_clip(ex, 0, w - 1);
    ey = av_clip(ey, 0, h - 1);

    /* always mark the start point */
    buf[sy * stride + sx] += color;

    /* step along the major axis; the condition guarantees ex != sx in
     * this branch, so the division below is safe */
    if (FFABS(ex - sx) > FFABS(ey - sy)) {
        if (sx > ex) {
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf += sx + sy * stride;
        ex -= sx;
        f = ((ey - sy) << 16) / ex; /* slope in 16.16 fixed point */
        for(x= 0; x <= ex; x++){
            y = (x * f) >> 16;      /* integer part of the minor coordinate */
            fr = (x * f) & 0xFFFF;  /* fractional part used for blending */
            buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
            if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
        }
    } else {
        if (sy > ey) {
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf += sx + sy * stride;
        ey -= sy;
        if (ey)
            f = ((ex - sx) << 16) / ey;
        else
            f = 0;                  /* degenerate single-point line */
        for(y= 0; y <= ey; y++){
            x = (y*f) >> 16;
            fr = (y*f) & 0xFFFF;
            buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
            if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
        }
    }
}
01633
01641 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
01642 int ey, int w, int h, int stride, int color)
01643 {
01644 int dx,dy;
01645
01646 sx = av_clip(sx, -100, w + 100);
01647 sy = av_clip(sy, -100, h + 100);
01648 ex = av_clip(ex, -100, w + 100);
01649 ey = av_clip(ey, -100, h + 100);
01650
01651 dx = ex - sx;
01652 dy = ey - sy;
01653
01654 if (dx * dx + dy * dy > 3 * 3) {
01655 int rx = dx + dy;
01656 int ry = -dx + dy;
01657 int length = ff_sqrt((rx * rx + ry * ry) << 8);
01658
01659
01660 rx = ROUNDED_DIV(rx * 3 << 4, length);
01661 ry = ROUNDED_DIV(ry * 3 << 4, length);
01662
01663 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01664 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01665 }
01666 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01667 }
01668
/**
 * Print and/or visualize per-macroblock debug information for a decoded
 * frame, depending on the flags in avctx->debug and avctx->debug_mv:
 * textual skip/QP/macroblock-type dumps to the log, and motion vector /
 * QP / MB-type overlays drawn into a copy of the picture.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
{
    /* nothing to inspect for hwaccel surfaces or without MB metadata */
    if (   s->avctx->hwaccel || !pict || !pict->mb_type
        || (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
        return;

    /* -------- textual per-MB dump to the log -------- */
    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        int x,y;

        av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
               av_get_picture_type_char(pict->pict_type));
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    /* one digit per MB: consecutive-skip count, capped at 9 */
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    if (count > 9)
                        count = 9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           pict->qscale_table[x + y * s->mb_stride]);
                }
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = pict->mb_type[x + y * s->mb_stride];

                    /* first char: prediction type of the MB */
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else {
                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    /* second char: partitioning of the MB */
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* third char: '=' for interlaced MBs */
                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* -------- graphical overlays drawn into a private copy -------- */
    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        (s->avctx->debug_mv)) {
        const int shift = 1 + s->quarter_sample;
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height = s->avctx->height;
        const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
        const int mv_stride = (s->mb_width << mv_sample_log2) +
                              (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
        /* NOTE(review): visualization disables low_delay — presumably so
         * all frames are output for inspection; confirm against callers */
        s->low_delay = 0;

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);

        /* copy the planes into visualization buffers so the overlays do not
         * scribble over reference frames.
         * NOTE(review): av_realloc result is not checked here — an OOM would
         * crash in the memcpy below */
        for (i = 0; i < 3; i++) {
            size_t size= (i == 0) ? pict->linesize[i] * FFALIGN(height, 16):
                         pict->linesize[i] * FFALIGN(height, 16) >> v_chroma_shift;
            s->visualization_buffer[i]= av_realloc(s->visualization_buffer[i], size);
            memcpy(s->visualization_buffer[i], pict->data[i], size);
            pict->data[i] = s->visualization_buffer[i];
        }
        pict->type = FF_BUFFER_TYPE_COPY;
        pict->opaque= NULL;
        ptr = pict->data[0];
        block_height = 16 >> v_chroma_shift;

        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            int mb_x;
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_index = mb_x + mb_y * s->mb_stride;
                /* --- motion vector arrows --- */
                if ((s->avctx->debug_mv) && pict->motion_val[0]) {
                    int type;
                    /* type 0: P forward, 1: B forward, 2: B backward */
                    for (type = 0; type < 3; type++) {
                        int direction = 0;
                        switch (type) {
                        case 0:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_P))
                                continue;
                            direction = 0;
                            break;
                        case 1:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_B))
                                continue;
                            direction = 0;
                            break;
                        case 2:
                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
                                (pict->pict_type!= AV_PICTURE_TYPE_B))
                                continue;
                            direction = 1;
                            break;
                        }
                        if (!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        /* one arrow per partition, anchored at its center */
                        if (IS_8X8(pict->mb_type[mb_index])) {
                            int i;
                            for (i = 0; i < 4; i++) {
                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
                                int xy = (mb_x * 2 + (i & 1) +
                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
                                int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width,
                                           height, s->linesize, 100);
                            }
                        } else if (IS_16X8(pict->mb_type[mb_index])) {
                            int i;
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 8;
                                int sy = mb_y * 16 + 4 + 8 * i;
                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift);
                                int my = (pict->motion_val[direction][xy][1] >> shift);

                                if (IS_INTERLACED(pict->mb_type[mb_index]))
                                    my *= 2;

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            }
                        } else if (IS_8X16(pict->mb_type[mb_index])) {
                            int i;
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 4 + 8 * i;
                                int sy = mb_y * 16 + 8;
                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
                                int mx = pict->motion_val[direction][xy][0] >> shift;
                                int my = pict->motion_val[direction][xy][1] >> shift;

                                if (IS_INTERLACED(pict->mb_type[mb_index]))
                                    my *= 2;

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            }
                        } else {
                            int sx= mb_x * 16 + 8;
                            int sy= mb_y * 16 + 8;
                            int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
                            int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                            int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                /* --- QP visualized as chroma gray level --- */
                if ((s->avctx->debug & FF_DEBUG_VIS_QP)) {
                    uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
                                 0x0101010101010101ULL;
                    int y;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[1]) = c;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[2]) = c;
                    }
                }
                /* --- MB type visualized as a chroma hue + partition marks --- */
                if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
                    pict->motion_val[0]) {
                    int mb_type = pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
/* set (u, v) to a point on a circle of radius r around neutral chroma 128,
 * at angle theta degrees */
#define COLOR(theta, r) \
    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
    v = (int)(128 + r * sin(theta * 3.141592 / 180));

                    u = v = 128; /* default: neutral (skip / direct+skip) */
                    if (IS_PCM(mb_type)) {
                        COLOR(120, 48)
                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
                               IS_INTRA16x16(mb_type)) {
                        COLOR(30, 48)
                    } else if (IS_INTRA4x4(mb_type)) {
                        COLOR(90, 48)
                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
                        /* keep neutral */
                    } else if (IS_DIRECT(mb_type)) {
                        COLOR(150, 48)
                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
                        COLOR(170, 48)
                    } else if (IS_GMC(mb_type)) {
                        COLOR(190, 48)
                    } else if (IS_SKIP(mb_type)) {
                        /* keep neutral */
                    } else if (!USES_LIST(mb_type, 1)) {
                        COLOR(240, 48)
                    } else if (!USES_LIST(mb_type, 0)) {
                        COLOR(0, 48)
                    } else {
                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    /* replicate the chosen chroma over the whole MB */
                    u *= 0x0101010101010101ULL;
                    v *= 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[2]) = v;
                    }

                    /* mark partition boundaries by inverting the MSB of the
                     * luma pixels along the split lines */
                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                    }
                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
                        for (y = 0; y < 16; y++)
                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
                                          pict->linesize[0]] ^= 0x80;
                    }
                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
                        int dm = 1 << (mv_sample_log2 - 2);
                        for (i = 0; i < 4; i++) {
                            int sx = mb_x * 16 + 8 * (i & 1);
                            int sy = mb_y * 16 + 8 * (i >> 1);
                            int xy = (mb_x * 2 + (i & 1) +
                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                            /* mark sub-splits where the MVs inside an 8x8
                             * block differ */
                            int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
                            if (mv[0] != mv[dm] ||
                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
                                for (y = 0; y < 8; y++)
                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
                                              pict->linesize[0]) ^= 0x8080808080808080ULL;
                        }
                    }

                    if (IS_INTERLACED(mb_type) &&
                        s->codec_id == AV_CODEC_ID_H264) {
                        /* no visualization for interlaced H.264 MBs */
                    }
                }
                s->mbskip_table[mb_index] = 0;
            }
        }
    }
}
01960
/**
 * Half-pel motion compensation for one block at reduced (lowres)
 * resolution, with edge emulation when the source area crosses the
 * picture border.
 *
 * @return 1 if the edge emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    /* sub-pel mask: low (lowres+1) bits of the MV are the fraction */
    const int s_mask = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    /* quarter-pel MVs are handled at half-pel precision here */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* NOTE: '+' binds tighter than '>>', so this is >> (lowres + 1) —
     * the integer-pel part of the motion vector */
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    /* if the read area crosses the padded edge, go through the
     * edge-emulation buffer */
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
                                (h + 1) << field_based, src_x,
                                src_y << field_based,
                                h_edge_pos,
                                v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }

    /* rescale the sub-pel fraction to the 2-bit precision of pix_op */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
02006
02007
/**
 * Motion-compensate one (half) macroblock at reduced ("lowres") resolution.
 *
 * @param s            codec context
 * @param dest_y/cb/cr destination planes for this MB
 * @param field_based  1 for field prediction inside a frame picture
 * @param bottom_field 1 if writing the bottom field lines
 * @param field_select which field of the reference to read from
 * @param ref_picture  reference planes (Y, Cb, Cr)
 * @param pix_op       h264-chroma-style MC functions indexed by log2 block size
 * @param motion_x/y   motion vector in half-pel (quarter-pel is halved below)
 * @param h            height of the predicted region in lowres luma lines
 * @param mb_y         macroblock row
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
        uvsx, uvsy;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 2);
    const int block_s    = 8>>lowres;                 /* luma block size at this lowres */
    const int s_mask     = (2 << lowres) - 1;         /* sub-pel fraction mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    /* lowres MC only supports half-pel precision; fold qpel down to hpel */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* NOTE: '+' binds tighter than '>>', so this is >> (lowres + 1) */
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        /* H.263: chroma vector derived by halving, keeping rounding bit */
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        /* H.261: no fractional chroma vectors */
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y =    mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            /* 4:2:0 — chroma subsampled in both directions */
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
            uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
                /* 4:2:2 — chroma subsampled horizontally only */
                mx      = motion_x / 2;
                uvsx    = mx & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                /* 4:4:4 — chroma uses the luma vector unchanged */
                uvsx    = motion_x & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* if the read would cross the picture border, go through the edge
     * emulation buffer instead of reading out of bounds */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                linesize >> field_based, 17, 17 + field_based,
                                src_x, src_y << field_based, h_edge_pos,
                                v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    /* bottom field starts one line into the frame buffer */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    /* rescale sub-pel fractions to the 1/8-pel scale pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (h >> s->chroma_y_shift) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        }
    }

}
02139
/**
 * Chroma motion compensation for an 8x8-partitioned (4MV) macroblock at
 * lowres: a single averaged chroma vector (mx,my) predicts both planes.
 *
 * @param mx,my averaged chroma motion vector (sum of the four luma MVs,
 *              rounded by ff_h263_round_chroma below)
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 2);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    /* NOTE: '+' binds tighter than '>>': chroma edges at >> (lowres + 1) */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    /* lowres MC works at half-pel precision only */
    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* H.263-style rounding of the averaged chroma vector */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                    9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    /* rescale sub-pel fraction to the 1/8-pel scale pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* Cr uses the same offset; re-emulate edges if Cb needed it */
    ptr = ref_picture[2] + offset;
    if (emu) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                                src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
02193
/**
 * Dispatch lowres motion compensation for the current macroblock according
 * to s->mv_type (16x16, 8x8, field, 16x8, dual-prime).
 *
 * @param dir    0 = forward prediction, 1 = backward prediction
 * @param pix_op put or avg h264-chroma-style MC functions; may be switched
 *               to the avg variants internally for the second field/vector
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma uses their (rounded) average */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: predict top field, then bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: same-parity reference of the current picture
             * must be used when the opposite field is not yet decoded */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two 16x8 halves, each with its own vector and field select */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  +=  2 * block_s *  s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual prime: average same- and opposite-parity predictions */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                /* after the first vector, average into the destination */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * the second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
02335
02339 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
02340 {
02341 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
02342 int my, off, i, mvs;
02343
02344 if (s->picture_structure != PICT_FRAME || s->mcsel)
02345 goto unhandled;
02346
02347 switch (s->mv_type) {
02348 case MV_TYPE_16X16:
02349 mvs = 1;
02350 break;
02351 case MV_TYPE_16X8:
02352 mvs = 2;
02353 break;
02354 case MV_TYPE_8X8:
02355 mvs = 4;
02356 break;
02357 default:
02358 goto unhandled;
02359 }
02360
02361 for (i = 0; i < mvs; i++) {
02362 my = s->mv[dir][i][1]<<qpel_shift;
02363 my_max = FFMAX(my_max, my);
02364 my_min = FFMIN(my_min, my);
02365 }
02366
02367 off = (FFMAX(-my_min, my_max) + 63) >> 6;
02368
02369 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
02370 unhandled:
02371 return s->mb_height-1;
02372 }
02373
02374
/* Dequantize an intra block and write (overwrite) its IDCT into dest. */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
02381
02382
/* Add the IDCT of an already-dequantized block to dest; a negative
 * block_last_index means the block is empty and is skipped. */
static inline void add_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size)
{
    if (s->block_last_index[i] >= 0) {
        s->dsp.idct_add (dest, line_size, block);
    }
}
02390
/* Dequantize an inter block and add its IDCT to dest; skipped when the
 * block has no coded coefficients (block_last_index < 0). */
static inline void add_dequant_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    if (s->block_last_index[i] >= 0) {
        s->dct_unquantize_inter(s, block, i, qscale);

        s->dsp.idct_add (dest, line_size, block);
    }
}
02400
02404 void ff_clean_intra_table_entries(MpegEncContext *s)
02405 {
02406 int wrap = s->b8_stride;
02407 int xy = s->block_index[0];
02408
02409 s->dc_val[0][xy ] =
02410 s->dc_val[0][xy + 1 ] =
02411 s->dc_val[0][xy + wrap] =
02412 s->dc_val[0][xy + 1 + wrap] = 1024;
02413
02414 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
02415 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
02416 if (s->msmpeg4_version>=3) {
02417 s->coded_block[xy ] =
02418 s->coded_block[xy + 1 ] =
02419 s->coded_block[xy + wrap] =
02420 s->coded_block[xy + 1 + wrap] = 0;
02421 }
02422
02423 wrap = s->mb_stride;
02424 xy = s->mb_x + s->mb_y * wrap;
02425 s->dc_val[1][xy] =
02426 s->dc_val[2][xy] = 1024;
02427
02428 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02429 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02430
02431 s->mbintra_table[xy]= 0;
02432 }
02433
02434
02435
02436
02437
02438
02439
02440
02441
02442
02443
/**
 * Reconstruct one decoded macroblock: run motion compensation (full or
 * lowres), then dequantize/IDCT the residual blocks into the destination
 * planes. Templated on lowres_flag and is_mpeg12 so the compiler can
 * specialize the hot paths.
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    /* hardware (XvMC) path handles the whole MB itself */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
        /* dump DCT coefficients (unpermuted) for debugging */
        int i,j;
        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for(i=0; i<6; i++){
            for(j=0; j<64; j++){
                *dct++ = block[i][s->dsp.idct_permutation[j]];
                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* skip the actual reconstruction while encoding unless it is needed
     * (PSNR computation, non-intra/non-B pictures, or RD mb decision) */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f.linesize[0];
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* maintain the skipped-MB table for the loop filter / error
         * concealment; non-reference pictures mark everything skipped */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.f.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0;
            }
        }

        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* B pictures without a consumer go through a scratchpad and
             * are copied back below under skip_idct */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* wait until the referenced rows are decoded */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                    }
                }
            }

            /* honor AVCodecContext.skip_idct */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add the residual: codecs without a dedicated unquantizer in
             * the bitstream reader dequantize here, others only idct_add */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        /* 4:2:2 / 4:4:4 chroma uses its own interlaced stride */
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){
                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* intra MB: dequantize + idct_put (overwrite, no prediction) */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* MPEG-1/2 blocks are already dequantized by the parser */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct? uvlinesize : uvlinesize*block_size;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){
                            s->dsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }
            }
        }
skip_idct:
        /* copy scratchpad reconstruction back to the real destination */
        if(!readable){
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
02687
/* Public entry point: dispatch to a specialized MPV_decode_mb_internal
 * (lowres x mpeg12 template instances; CONFIG_SMALL builds fold the
 * mpeg12 specialization away). */
void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1) {
        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
        else                 MPV_decode_mb_internal(s, block, 0, 1);
    } else
#endif
        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
        else                 MPV_decode_mb_internal(s, block, 0, 0);
}
02698
/**
 * Finish a horizontal band of the current picture: extend the picture
 * edges for unrestricted MV prediction and invoke the user's
 * draw_horiz_band callback if set.
 *
 * @param y first line of the band (field lines for field pictures)
 * @param h band height in lines
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;
    if(field_pic){
        /* field lines -> frame lines */
        h <<= 1;
        y <<= 1;
    }

    if (!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.f.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int sides = 0, edge_h;
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        edge_h= FFMIN(h, s->v_edge_pos - y);

        /* replicate picture borders so MC can read outside the frame */
        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
                          s->linesize,           s->h_edge_pos,         edge_h,
                          EDGE_WIDTH,            EDGE_WIDTH,            sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
    }

    h= FFMIN(h, s->avctx->height - y);

    /* don't emit half-decoded fields unless the user opted in */
    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        /* B frames and low-delay streams are shown immediately; otherwise
         * the previously decoded picture is the one being displayed */
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src = &s->current_picture_ptr->f;
        else if(s->last_picture_ptr)
            src = &s->last_picture_ptr->f;
        else
            return;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * s->linesize;
            offset[1]=
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        emms_c();

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
    }
}
02768
/**
 * Set up s->block_index[] (DC/AC prediction indices for the six blocks of
 * the current MB) and s->dest[] (destination plane pointers for the MB),
 * based on s->mb_x/s->mb_y and the current lowres shift.
 */
void ff_init_block_index(MpegEncContext *s){
    const int linesize   = s->current_picture.f.linesize[0];
    const int uvlinesize = s->current_picture.f.linesize[1];
    /* log2 of the MB size in pixels at the current lowres */
    const int mb_size= 4 - s->avctx->lowres;

    /* four luma 8x8 indices (two rows), then the two chroma indices */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;

    /* dest points one MB to the left; the row offset is added below */
    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: each MB row covers two frame rows */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
02800
/**
 * Flush decoder state on seek: release all internally/user-allocated
 * picture buffers and reset the parser/bitstream bookkeeping.
 */
void ff_mpeg_flush(AVCodecContext *avctx){
    int i;
    MpegEncContext *s = avctx->priv_data;

    if(s==NULL || s->picture==NULL)
        return;

    for(i=0; i<s->picture_count; i++){
       if (s->picture[i].f.data[0] &&
           (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
            s->picture[i].f.type == FF_BUFFER_TYPE_USER))
        free_frame_buffer(s, &s->picture[i]);
    }
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

    s->mb_x= s->mb_y= 0;
    s->closed_gop= 0;

    /* reset the packet parser so stale data is not reused after the seek */
    s->parse_context.state= -1;
    s->parse_context.frame_start_found= 0;
    s->parse_context.overread= 0;
    s->parse_context.overread_index= 0;
    s->parse_context.index= 0;
    s->parse_context.last_index= 0;
    s->bitstream_buffer_size=0;
    s->pp_time=0;
}
02828
02829 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02830 DCTELEM *block, int n, int qscale)
02831 {
02832 int i, level, nCoeffs;
02833 const uint16_t *quant_matrix;
02834
02835 nCoeffs= s->block_last_index[n];
02836
02837 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02838
02839 quant_matrix = s->intra_matrix;
02840 for(i=1;i<=nCoeffs;i++) {
02841 int j= s->intra_scantable.permutated[i];
02842 level = block[j];
02843 if (level) {
02844 if (level < 0) {
02845 level = -level;
02846 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02847 level = (level - 1) | 1;
02848 level = -level;
02849 } else {
02850 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02851 level = (level - 1) | 1;
02852 }
02853 block[j] = level;
02854 }
02855 }
02856 }
02857
02858 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02859 DCTELEM *block, int n, int qscale)
02860 {
02861 int i, level, nCoeffs;
02862 const uint16_t *quant_matrix;
02863
02864 nCoeffs= s->block_last_index[n];
02865
02866 quant_matrix = s->inter_matrix;
02867 for(i=0; i<=nCoeffs; i++) {
02868 int j= s->intra_scantable.permutated[i];
02869 level = block[j];
02870 if (level) {
02871 if (level < 0) {
02872 level = -level;
02873 level = (((level << 1) + 1) * qscale *
02874 ((int) (quant_matrix[j]))) >> 4;
02875 level = (level - 1) | 1;
02876 level = -level;
02877 } else {
02878 level = (((level << 1) + 1) * qscale *
02879 ((int) (quant_matrix[j]))) >> 4;
02880 level = (level - 1) | 1;
02881 }
02882 block[j] = level;
02883 }
02884 }
02885 }
02886
02887 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02888 DCTELEM *block, int n, int qscale)
02889 {
02890 int i, level, nCoeffs;
02891 const uint16_t *quant_matrix;
02892
02893 if(s->alternate_scan) nCoeffs= 63;
02894 else nCoeffs= s->block_last_index[n];
02895
02896 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02897 quant_matrix = s->intra_matrix;
02898 for(i=1;i<=nCoeffs;i++) {
02899 int j= s->intra_scantable.permutated[i];
02900 level = block[j];
02901 if (level) {
02902 if (level < 0) {
02903 level = -level;
02904 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02905 level = -level;
02906 } else {
02907 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02908 }
02909 block[j] = level;
02910 }
02911 }
02912 }
02913
02914 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02915 DCTELEM *block, int n, int qscale)
02916 {
02917 int i, level, nCoeffs;
02918 const uint16_t *quant_matrix;
02919 int sum=-1;
02920
02921 if(s->alternate_scan) nCoeffs= 63;
02922 else nCoeffs= s->block_last_index[n];
02923
02924 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02925 sum += block[0];
02926 quant_matrix = s->intra_matrix;
02927 for(i=1;i<=nCoeffs;i++) {
02928 int j= s->intra_scantable.permutated[i];
02929 level = block[j];
02930 if (level) {
02931 if (level < 0) {
02932 level = -level;
02933 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02934 level = -level;
02935 } else {
02936 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02937 }
02938 block[j] = level;
02939 sum+=level;
02940 }
02941 }
02942 block[63]^=sum&1;
02943 }
02944
02945 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02946 DCTELEM *block, int n, int qscale)
02947 {
02948 int i, level, nCoeffs;
02949 const uint16_t *quant_matrix;
02950 int sum=-1;
02951
02952 if(s->alternate_scan) nCoeffs= 63;
02953 else nCoeffs= s->block_last_index[n];
02954
02955 quant_matrix = s->inter_matrix;
02956 for(i=0; i<=nCoeffs; i++) {
02957 int j= s->intra_scantable.permutated[i];
02958 level = block[j];
02959 if (level) {
02960 if (level < 0) {
02961 level = -level;
02962 level = (((level << 1) + 1) * qscale *
02963 ((int) (quant_matrix[j]))) >> 4;
02964 level = -level;
02965 } else {
02966 level = (((level << 1) + 1) * qscale *
02967 ((int) (quant_matrix[j]))) >> 4;
02968 }
02969 block[j] = level;
02970 sum+=level;
02971 }
02972 }
02973 block[63]^=sum&1;
02974 }
02975
02976 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02977 DCTELEM *block, int n, int qscale)
02978 {
02979 int i, level, qmul, qadd;
02980 int nCoeffs;
02981
02982 assert(s->block_last_index[n]>=0);
02983
02984 qmul = qscale << 1;
02985
02986 if (!s->h263_aic) {
02987 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
02988 qadd = (qscale - 1) | 1;
02989 }else{
02990 qadd = 0;
02991 }
02992 if(s->ac_pred)
02993 nCoeffs=63;
02994 else
02995 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02996
02997 for(i=1; i<=nCoeffs; i++) {
02998 level = block[i];
02999 if (level) {
03000 if (level < 0) {
03001 level = level * qmul - qadd;
03002 } else {
03003 level = level * qmul + qadd;
03004 }
03005 block[i] = level;
03006 }
03007 }
03008 }
03009
03010 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
03011 DCTELEM *block, int n, int qscale)
03012 {
03013 int i, level, qmul, qadd;
03014 int nCoeffs;
03015
03016 assert(s->block_last_index[n]>=0);
03017
03018 qadd = (qscale - 1) | 1;
03019 qmul = qscale << 1;
03020
03021 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
03022
03023 for(i=0; i<=nCoeffs; i++) {
03024 level = block[i];
03025 if (level) {
03026 if (level < 0) {
03027 level = level * qmul - qadd;
03028 } else {
03029 level = level * qmul + qadd;
03030 }
03031 block[i] = level;
03032 }
03033 }
03034 }
03035
03039 void ff_set_qscale(MpegEncContext * s, int qscale)
03040 {
03041 if (qscale < 1)
03042 qscale = 1;
03043 else if (qscale > 31)
03044 qscale = 31;
03045
03046 s->qscale = qscale;
03047 s->chroma_qscale= s->chroma_qscale_table[qscale];
03048
03049 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
03050 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
03051 }
03052
/* Report decode progress (current MB row) to waiting frame threads;
 * skipped for B frames, partitioned frames and after errors, where the
 * row count would not be meaningful to consumers. */
void ff_MPV_report_decode_progress(MpegEncContext *s)
{
    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
        ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
}