/**
 * @file
 * VP5 and VP6 compatible video decoder (common features)
 */
#include "libavutil/avassert.h"

#include "avcodec.h"
#include "bytestream.h"

#include "vp56.h"
#include "vp56data.h"

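/* Record the frame quantizer, derive the DC and AC dequantization factors
 * from the VP5/6 tables, and fill the qscale_table with the quantizer. */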
void ff_vp56_init_dequant(VP56Context *s, int quantizer)
{
    s->quantizer = quantizer;
    s->dequant_dc = vp56_dc_dequant[quantizer] << 2;
    s->dequant_ac = vp56_ac_dequant[quantizer] << 2;
    memset(s->qscale_table, quantizer, s->mb_width);
}

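/* Scan the 12 candidate macroblock positions around (row, col) for motion
 * vectors referencing ref_frame and keep up to two distinct non-zero
 * predictors in s->vector_candidate[].  The return value (0..2) is the
 * prediction context used when parsing the macroblock type. */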
static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
                                       VP56Frame ref_frame)
{
    int nb_pred = 0;
    VP56mv vect[2] = {{0,0}, {0,0}};
    int pos, offset;
    VP56mv mvp;

    for (pos=0; pos<12; pos++) {
        mvp.x = col + vp56_candidate_predictor_pos[pos][0];
        mvp.y = row + vp56_candidate_predictor_pos[pos][1];
        if (mvp.x < 0 || mvp.x >= s->mb_width ||
            mvp.y < 0 || mvp.y >= s->mb_height)
            continue;
        offset = mvp.x + s->mb_width*mvp.y;

        if (vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
            continue;
        if ((s->macroblocks[offset].mv.x == vect[0].x &&
             s->macroblocks[offset].mv.y == vect[0].y) ||
            (s->macroblocks[offset].mv.x == 0 &&
             s->macroblocks[offset].mv.y == 0))
            continue;

        vect[nb_pred++] = s->macroblocks[offset].mv;
        if (nb_pred > 1) {
            nb_pred = -1;
            break;
        }
        s->vector_candidate_pos = pos;
    }

    s->vector_candidate[0] = vect[0];
    s->vector_candidate[1] = vect[1];

    return nb_pred+1;
}

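/* Update the macroblock type statistics from the bitstream, then rebuild the
 * per-context probability model used by vp56_parse_mb_type(). */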
static void vp56_parse_mb_type_models(VP56Context *s)
{
    VP56RangeCoder *c = &s->c;
    VP56Model *model = s->modelp;
    int i, ctx, type;

    for (ctx=0; ctx<3; ctx++) {
        if (vp56_rac_get_prob(c, 174)) {
            int idx = vp56_rac_gets(c, 4);
            memcpy(model->mb_types_stats[ctx],
                   vp56_pre_def_mb_type_stats[idx][ctx],
                   sizeof(model->mb_types_stats[ctx]));
        }
        if (vp56_rac_get_prob(c, 254)) {
            for (type=0; type<10; type++) {
                for(i=0; i<2; i++) {
                    if (vp56_rac_get_prob(c, 205)) {
                        int delta, sign = vp56_rac_get(c);

                        delta = vp56_rac_get_tree(c, vp56_pmbtm_tree,
                                                  vp56_mb_type_model_model);
                        if (!delta)
                            delta = 4 * vp56_rac_gets(c, 7);
                        model->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
                    }
                }
            }
        }
    }

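    /* compute the MB type probability tables conditioned on the previous MB type */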
    for (ctx=0; ctx<3; ctx++) {
        int p[10];

        for (type=0; type<10; type++)
            p[type] = 100 * model->mb_types_stats[ctx][type][1];

        for (type=0; type<10; type++) {
            int p02, p34, p0234, p17, p56, p89, p5689, p156789;

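            /* index 0: probability that the MB keeps the previous type */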
            model->mb_type[ctx][type][0] = 255 -
                (255 * model->mb_types_stats[ctx][type][0]) /
                (1 + model->mb_types_stats[ctx][type][0] +
                     model->mb_types_stats[ctx][type][1]);

            p[type] = 0;

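            /* the current type gets zero weight (it is handled by index 0
             * above); split the remaining probability mass over the other
             * types following the MB type decoding tree */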
            p02 = p[0] + p[2];
            p34 = p[3] + p[4];
            p0234 = p02 + p34;
            p17 = p[1] + p[7];
            p56 = p[5] + p[6];
            p89 = p[8] + p[9];
            p5689 = p56 + p89;
            p156789 = p17 + p5689;

            model->mb_type[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);
            model->mb_type[ctx][type][2] = 1 + 255 * p02 / (1+p0234);
            model->mb_type[ctx][type][3] = 1 + 255 * p17 / (1+p156789);
            model->mb_type[ctx][type][4] = 1 + 255 * p[0] / (1+p02);
            model->mb_type[ctx][type][5] = 1 + 255 * p[3] / (1+p34);
            model->mb_type[ctx][type][6] = 1 + 255 * p[1] / (1+p17);
            model->mb_type[ctx][type][7] = 1 + 255 * p56 / (1+p5689);
            model->mb_type[ctx][type][8] = 1 + 255 * p[5] / (1+p56);
            model->mb_type[ctx][type][9] = 1 + 255 * p[8] / (1+p89);

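            /* restore the weight of the current type for the next iterations */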
            p[type] = 100 * model->mb_types_stats[ctx][type][1];
        }
    }
}

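/* Decode the macroblock type: either repeat the previous type or read a new
 * one from the range coder using the probability model for this context. */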
static VP56mb vp56_parse_mb_type(VP56Context *s,
                                 VP56mb prev_type, int ctx)
{
    uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
    VP56RangeCoder *c = &s->c;

    if (vp56_rac_get_prob(c, mb_type_model[0]))
        return prev_type;
    else
        return vp56_rac_get_tree(c, vp56_pmbt_tree, mb_type_model);
}

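/* Decode the four luma block vectors of a 4MV macroblock and derive the two
 * chroma vectors from their average. */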
static void vp56_decode_4mv(VP56Context *s, int row, int col)
{
    VP56mv mv = {0,0};
    int type[4];
    int b;

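    /* parse the prediction type of each luma block (only NOVEC, DELTA, V1 or
     * V2 relative to the previous frame are possible here) */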
    for (b=0; b<4; b++) {
        type[b] = vp56_rac_gets(&s->c, 2);
        if (type[b])
            type[b]++;
    }

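    /* fetch or parse the vector of each block and accumulate the sum used for
     * the chroma vectors */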
    for (b=0; b<4; b++) {
        switch (type[b]) {
            case VP56_MB_INTER_NOVEC_PF:
                s->mv[b] = (VP56mv) {0,0};
                break;
            case VP56_MB_INTER_DELTA_PF:
                s->parse_vector_adjustment(s, &s->mv[b]);
                break;
            case VP56_MB_INTER_V1_PF:
                s->mv[b] = s->vector_candidate[0];
                break;
            case VP56_MB_INTER_V2_PF:
                s->mv[b] = s->vector_candidate[1];
                break;
        }
        mv.x += s->mv[b].x;
        mv.y += s->mv[b].y;
    }

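    /* the vector of the last block is kept as the macroblock vector and is
     * used to predict the vectors of neighbouring macroblocks */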
    s->macroblocks[row * s->mb_width + col].mv = s->mv[3];

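    /* chroma vectors are the average of the four luma vectors
     * (VP5 rounds, VP6 truncates) */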
    if (s->avctx->codec->id == AV_CODEC_ID_VP5) {
        s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
        s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
    } else {
        s->mv[4] = s->mv[5] = (VP56mv) {mv.x/4, mv.y/4};
    }
}

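/* Decode the motion information of one inter macroblock: parse its type and
 * set up the six block vectors (four luma, two chroma) accordingly. */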
static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
{
    VP56mv *mv, vect = {0,0};
    int ctx, b;

    ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
    s->mb_type = vp56_parse_mb_type(s, s->mb_type, ctx);
    s->macroblocks[row * s->mb_width + col].type = s->mb_type;

    switch (s->mb_type) {
        case VP56_MB_INTER_V1_PF:
            mv = &s->vector_candidate[0];
            break;

        case VP56_MB_INTER_V2_PF:
            mv = &s->vector_candidate[1];
            break;

        case VP56_MB_INTER_V1_GF:
            vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
            mv = &s->vector_candidate[0];
            break;

        case VP56_MB_INTER_V2_GF:
            vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
            mv = &s->vector_candidate[1];
            break;

        case VP56_MB_INTER_DELTA_PF:
            s->parse_vector_adjustment(s, &vect);
            mv = &vect;
            break;

        case VP56_MB_INTER_DELTA_GF:
            vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
            s->parse_vector_adjustment(s, &vect);
            mv = &vect;
            break;

        case VP56_MB_INTER_4V:
            vp56_decode_4mv(s, row, col);
            return s->mb_type;

        default:
            mv = &vect;
            break;
    }

    s->macroblocks[row*s->mb_width + col].mv = *mv;

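    /* the same vector is used for all six blocks of the macroblock */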
    for (b=0; b<6; b++)
        s->mv[b] = *mv;

    return s->mb_type;
}

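/* Predict the DC coefficient of each block from the left and above neighbours
 * that use the same reference frame, update the prediction state, and
 * dequantize the resulting DC value. */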
static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
{
    int idx = s->scantable.permutated[0];
    int b;

    for (b=0; b<6; b++) {
        VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
        VP56RefDc *lb = &s->left_block[ff_vp56_b6to4[b]];
        int count = 0;
        int dc = 0;
        int i;

        if (ref_frame == lb->ref_frame) {
            dc += lb->dc_coeff;
            count++;
        }
        if (ref_frame == ab->ref_frame) {
            dc += ab->dc_coeff;
            count++;
        }
        if (s->avctx->codec->id == AV_CODEC_ID_VP5)
            for (i=0; i<2; i++)
                if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
                    dc += ab[-1+2*i].dc_coeff;
                    count++;
                }
        if (count == 0)
            dc = s->prev_dc[ff_vp56_b2p[b]][ref_frame];
        else if (count == 2)
            dc /= 2;

        s->block_coeff[b][idx] += dc;
        s->prev_dc[ff_vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
        ab->dc_coeff = s->block_coeff[b][idx];
        ab->ref_frame = ref_frame;
        lb->dc_coeff = s->block_coeff[b][idx];
        lb->ref_frame = ref_frame;
        s->block_coeff[b][idx] *= s->dequant_dc;
    }
}

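/* Apply the quantizer-dependent edge filters to the source block used for
 * motion compensation when the vector crosses a block boundary. */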
static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
                                int stride, int dx, int dy)
{
    int t = vp56_filter_threshold[s->quantizer];
    if (dx) s->vp56dsp.edge_filter_hor(yuv + 10-dx, stride, t);
    if (dy) s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
}

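/* Motion compensate one 8x8 block: fetch the reference area (going through a
 * scratch buffer for edge emulation or deblock prefiltering when needed),
 * then copy it, run the codec-specific subpel filter, or average the two
 * neighbouring full-pel blocks when no filter callback is set. */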
static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
                    int stride, int x, int y)
{
    uint8_t *dst = s->framep[VP56_FRAME_CURRENT]->data[plane] + s->block_offset[b];
    uint8_t *src_block;
    int src_offset;
    int overlap_offset = 0;
    int mask = s->vp56_coord_div[b] - 1;
    int deblock_filtering = s->deblock_filtering;
    int dx;
    int dy;

    if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
        (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
         && !s->framep[VP56_FRAME_CURRENT]->key_frame))
        deblock_filtering = 0;

    dx = s->mv[b].x / s->vp56_coord_div[b];
    dy = s->mv[b].y / s->vp56_coord_div[b];

    if (b >= 4) {
        x /= 2;
        y /= 2;
    }
    x += dx - 2;
    y += dy - 2;

    if (x<0 || x+12>=s->plane_width[plane] ||
        y<0 || y+12>=s->plane_height[plane]) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer,
                                src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                                stride, 12, 12, x, y,
                                s->plane_width[plane],
                                s->plane_height[plane]);
        src_block = s->edge_emu_buffer;
        src_offset = 2 + 2*stride;
    } else if (deblock_filtering) {

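        /* only a 12x12 block is needed, but there is no such dsp function,
         * so copy a 16x12 block into the scratch buffer */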
        s->dsp.put_pixels_tab[0][0](s->edge_emu_buffer,
                                    src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                                    stride, 12);
        src_block = s->edge_emu_buffer;
        src_offset = 2 + 2*stride;
    } else {
        src_block = src;
        src_offset = s->block_offset[b] + dy*stride + dx;
    }

    if (deblock_filtering)
        vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);

    if (s->mv[b].x & mask)
        overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
    if (s->mv[b].y & mask)
        overlap_offset += (s->mv[b].y > 0) ? stride : -stride;

    if (overlap_offset) {
        if (s->filter)
            s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
                      stride, s->mv[b], mask, s->filter_selection, b<4);
        else
            s->dsp.put_no_rnd_pixels_l2[1](dst, src_block+src_offset,
                                           src_block+src_offset+overlap_offset,
                                           stride, 8);
    } else {
        s->dsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
    }
}

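/* Decode one macroblock: parse its motion information and coefficients, then
 * reconstruct its blocks by intra IDCT, plain copy or motion compensation. */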
static void vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
{
    AVFrame *frame_current, *frame_ref;
    VP56mb mb_type;
    VP56Frame ref_frame;
    int b, ab, b_max, plane, off;

    if (s->framep[VP56_FRAME_CURRENT]->key_frame)
        mb_type = VP56_MB_INTRA;
    else
        mb_type = vp56_decode_mv(s, row, col);
    ref_frame = vp56_reference_frame[mb_type];

    s->dsp.clear_blocks(*s->block_coeff);

    s->parse_coeff(s);

    vp56_add_predictors_dc(s, ref_frame);

    frame_current = s->framep[VP56_FRAME_CURRENT];
    frame_ref = s->framep[ref_frame];
    if (mb_type != VP56_MB_INTRA && !frame_ref->data[0])
        return;

    ab = 6*is_alpha;
    b_max = 6 - 2*is_alpha;

    switch (mb_type) {
        case VP56_MB_INTRA:
            for (b=0; b<b_max; b++) {
                plane = ff_vp56_b2p[b+ab];
                s->vp3dsp.idct_put(frame_current->data[plane] + s->block_offset[b],
                                   s->stride[plane], s->block_coeff[b]);
            }
            break;

        case VP56_MB_INTER_NOVEC_PF:
        case VP56_MB_INTER_NOVEC_GF:
            for (b=0; b<b_max; b++) {
                plane = ff_vp56_b2p[b+ab];
                off = s->block_offset[b];
                s->dsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
                                            frame_ref->data[plane] + off,
                                            s->stride[plane], 8);
                s->vp3dsp.idct_add(frame_current->data[plane] + off,
                                   s->stride[plane], s->block_coeff[b]);
            }
            break;

        case VP56_MB_INTER_DELTA_PF:
        case VP56_MB_INTER_V1_PF:
        case VP56_MB_INTER_V2_PF:
        case VP56_MB_INTER_DELTA_GF:
        case VP56_MB_INTER_4V:
        case VP56_MB_INTER_V1_GF:
        case VP56_MB_INTER_V2_GF:
            for (b=0; b<b_max; b++) {
                int x_off = b==1 || b==3 ? 8 : 0;
                int y_off = b==2 || b==3 ? 8 : 0;
                plane = ff_vp56_b2p[b+ab];
                vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
                        16*col+x_off, 16*row+y_off);
                s->vp3dsp.idct_add(frame_current->data[plane] + s->block_offset[b],
                                   s->stride[plane], s->block_coeff[b]);
            }
            break;
    }
}

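/* Recompute the plane dimensions, strides and per-context buffers after the
 * coded size changed; returns -1 if the picture is unreasonably large. */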
static int vp56_size_changed(VP56Context *s)
{
    AVCodecContext *avctx = s->avctx;
    int stride = s->framep[VP56_FRAME_CURRENT]->linesize[0];
    int i;

    s->plane_width[0] = s->plane_width[3] = avctx->coded_width;
    s->plane_width[1] = s->plane_width[2] = avctx->coded_width/2;
    s->plane_height[0] = s->plane_height[3] = avctx->coded_height;
    s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;

    for (i=0; i<4; i++)
        s->stride[i] = s->flip * s->framep[VP56_FRAME_CURRENT]->linesize[i];

    s->mb_width = (avctx->coded_width +15) / 16;
    s->mb_height = (avctx->coded_height+15) / 16;

    if (s->mb_width > 1000 || s->mb_height > 1000) {
        avcodec_set_dimensions(avctx, 0, 0);
        av_log(avctx, AV_LOG_ERROR, "picture too big\n");
        return -1;
    }

    s->qscale_table = av_realloc(s->qscale_table, s->mb_width);
    s->above_blocks = av_realloc(s->above_blocks,
                                 (4*s->mb_width+6) * sizeof(*s->above_blocks));
    s->macroblocks = av_realloc(s->macroblocks,
                                s->mb_width*s->mb_height*sizeof(*s->macroblocks));
    av_free(s->edge_emu_buffer_alloc);
    s->edge_emu_buffer_alloc = av_malloc(16*stride);
    s->edge_emu_buffer = s->edge_emu_buffer_alloc;
    if (s->flip < 0)
        s->edge_emu_buffer += 15 * stride;

    if (s->alpha_context)
        return vp56_size_changed(s->alpha_context);

    return 0;
}

static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *, int, int);

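/* Decode one packet: pick a free frame, parse the (optionally separate alpha)
 * headers, run the macroblock decoding job(s) and return the decoded frame. */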
int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                         AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    VP56Context *s = avctx->priv_data;
    AVFrame *p = 0;
    int remaining_buf_size = avpkt->size;
    int av_uninit(alpha_offset);
    int i, res;

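    /* pick an unused frame buffer as the current frame */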
    for (i = 0; i < 4; ++i) {
        if (!s->frames[i].data[0]) {
            p = &s->frames[i];
            break;
        }
    }
    av_assert0(p != 0);
    s->framep[VP56_FRAME_CURRENT] = p;
    if (s->alpha_context)
        s->alpha_context->framep[VP56_FRAME_CURRENT] = p;

    if (s->has_alpha) {
        if (remaining_buf_size < 3)
            return -1;
        alpha_offset = bytestream_get_be24(&buf);
        remaining_buf_size -= 3;
        if (remaining_buf_size < alpha_offset)
            return -1;
    }

    res = s->parse_header(s, buf, remaining_buf_size);
    if (!res)
        return -1;

    if (res == 2) {
        for (i = 0; i < 4; i++) {
            if (s->frames[i].data[0])
                avctx->release_buffer(avctx, &s->frames[i]);
        }
    }

    p->reference = 3;
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if (res == 2) {
        if (vp56_size_changed(s)) {
            avctx->release_buffer(avctx, p);
            return -1;
        }
    }

    if (s->has_alpha) {
        buf += alpha_offset;
        remaining_buf_size -= alpha_offset;

        res = s->alpha_context->parse_header(s->alpha_context, buf, remaining_buf_size);
        if (res != 1) {
            avctx->release_buffer(avctx, p);
            return -1;
        }
    }

    avctx->execute2(avctx, ff_vp56_decode_mbs, 0, 0, s->has_alpha + 1);

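    /* release the frames that are no longer referenced as previous or golden */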
    for (i = 0; i < 4; ++i) {
        AVFrame *victim = &s->frames[i];
        if (!victim->data[0])
            continue;
        if (victim != s->framep[VP56_FRAME_PREVIOUS] &&
            victim != s->framep[VP56_FRAME_GOLDEN] &&
            (!s->has_alpha || victim != s->alpha_context->framep[VP56_FRAME_GOLDEN]))
            avctx->release_buffer(avctx, victim);
    }

    p->qstride = 0;
    p->qscale_table = s->qscale_table;
    p->qscale_type = FF_QSCALE_TYPE_VP56;
    *(AVFrame*)data = *p;
    *data_size = sizeof(AVFrame);

    return avpkt->size;
}

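/* Worker for execute2(): decode all macroblocks of either the main picture
 * (jobnr 0) or the alpha plane (jobnr 1), then rotate the frame pointers. */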
static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *data,
                              int jobnr, int threadnr)
{
    VP56Context *s0 = avctx->priv_data;
    int is_alpha = (jobnr == 1);
    VP56Context *s = is_alpha ? s0->alpha_context : s0;
    AVFrame *const p = s->framep[VP56_FRAME_CURRENT];
    int mb_row, mb_col, mb_row_flip, mb_offset = 0;
    int block, y, uv, stride_y, stride_uv;

    if (p->key_frame) {
        p->pict_type = AV_PICTURE_TYPE_I;
        s->default_models_init(s);
        for (block=0; block<s->mb_height*s->mb_width; block++)
            s->macroblocks[block].type = VP56_MB_INTRA;
    } else {
        p->pict_type = AV_PICTURE_TYPE_P;
        vp56_parse_mb_type_models(s);
        s->parse_vector_models(s);
        s->mb_type = VP56_MB_INTER_NOVEC_PF;
    }

    if (s->parse_coeff_models(s))
        goto next;

    memset(s->prev_dc, 0, sizeof(s->prev_dc));
    s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
    s->prev_dc[2][VP56_FRAME_CURRENT] = 128;

    for (block=0; block < 4*s->mb_width+6; block++) {
        s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
        s->above_blocks[block].dc_coeff = 0;
        s->above_blocks[block].not_null_dc = 0;
    }
    s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
    s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;

    stride_y = p->linesize[0];
    stride_uv = p->linesize[1];

    if (s->flip < 0)
        mb_offset = 7;

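    /* main macroblock loop */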
    for (mb_row=0; mb_row<s->mb_height; mb_row++) {
        if (s->flip < 0)
            mb_row_flip = s->mb_height - mb_row - 1;
        else
            mb_row_flip = mb_row;

        for (block=0; block<4; block++) {
            s->left_block[block].ref_frame = VP56_FRAME_NONE;
            s->left_block[block].dc_coeff = 0;
            s->left_block[block].not_null_dc = 0;
        }
        memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
        memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));

        s->above_block_idx[0] = 1;
        s->above_block_idx[1] = 2;
        s->above_block_idx[2] = 1;
        s->above_block_idx[3] = 2;
        s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
        s->above_block_idx[5] = 3*s->mb_width + 4 + 1;

        s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
        s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
        s->block_offset[1] = s->block_offset[0] + 8;
        s->block_offset[3] = s->block_offset[2] + 8;
        s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
        s->block_offset[5] = s->block_offset[4];

        for (mb_col=0; mb_col<s->mb_width; mb_col++) {
            vp56_decode_mb(s, mb_row, mb_col, is_alpha);

            for (y=0; y<4; y++) {
                s->above_block_idx[y] += 2;
                s->block_offset[y] += 16;
            }

            for (uv=4; uv<6; uv++) {
                s->above_block_idx[uv] += 1;
                s->block_offset[uv] += 8;
            }
        }
    }

next:
    if (p->key_frame || s->golden_frame) {
        s->framep[VP56_FRAME_GOLDEN] = p;
    }

    FFSWAP(AVFrame *, s->framep[VP56_FRAME_CURRENT],
                      s->framep[VP56_FRAME_PREVIOUS]);
    return 0;
}

av_cold void ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
{
    VP56Context *s = avctx->priv_data;
    ff_vp56_init_context(avctx, s, flip, has_alpha);
}

av_cold void ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
                                  int flip, int has_alpha)
{
    int i;

    s->avctx = avctx;
    avctx->pix_fmt = has_alpha ? PIX_FMT_YUVA420P : PIX_FMT_YUV420P;

    ff_dsputil_init(&s->dsp, avctx);
    ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
    ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id);
    ff_init_scantable_permutation(s->dsp.idct_permutation, s->vp3dsp.idct_perm);
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);

    for (i=0; i<4; i++) {
        s->framep[i] = &s->frames[i];
        avcodec_get_frame_defaults(&s->frames[i]);
    }
    s->framep[VP56_FRAME_UNUSED] = s->framep[VP56_FRAME_GOLDEN];
    s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2];
    s->edge_emu_buffer_alloc = NULL;

    s->above_blocks = NULL;
    s->macroblocks = NULL;
    s->quantizer = -1;
    s->deblock_filtering = 1;
    s->golden_frame = 0;

    s->filter = NULL;

    s->has_alpha = has_alpha;

    s->modelp = &s->model;

    if (flip) {
        s->flip = -1;
        s->frbi = 2;
        s->srbi = 0;
    } else {
        s->flip = 1;
        s->frbi = 0;
        s->srbi = 2;
    }
}

av_cold int ff_vp56_free(AVCodecContext *avctx)
{
    VP56Context *s = avctx->priv_data;
    return ff_vp56_free_context(s);
}

av_cold int ff_vp56_free_context(VP56Context *s)
{
    AVCodecContext *avctx = s->avctx;
    int i;

    av_freep(&s->qscale_table);
    av_freep(&s->above_blocks);
    av_freep(&s->macroblocks);
    av_freep(&s->edge_emu_buffer_alloc);
    for (i = 0; i < 4; ++i) {
        if (s->frames[i].data[0])
            avctx->release_buffer(avctx, &s->frames[i]);
    }
    return 0;
}