/*
 * VC3/DNxHD encoder
 */

#define RC_VARIANCE 1 // use variance or ssd for fast rc

#include "libavutil/opt.h"
#include "avcodec.h"
#include "dsputil.h"
#include "internal.h"
#include "mpegvideo.h"
#include "mpegvideo_common.h"
#include "dnxhdenc.h"

#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
#define DNX10BIT_QMAT_SHIFT 18 // The largest value that will not lead to overflow for 10bit samples.

static const AVOption options[] = {
    { "nitris_compat", "encode with Avid Nitris compatibility", offsetof(DNXHDEncContext, nitris_compat), AV_OPT_TYPE_INT, { .dbl = 0 }, 0, 1, VE },
    { NULL }
};

static const AVClass class = { "dnxhd", av_default_item_name, options, LIBAVUTIL_VERSION_INT };

#define LAMBDA_FRAC_BITS 10

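/* Load four rows of pixels and mirror them vertically to fill an 8x8 block
 * (rows 4..7 repeat rows 3..0).  Used for the bottom macroblock row of
 * interlaced 1080-line content, where part of the block lies outside the
 * coded field; see dnxhd_get_blocks(). */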
static void dnxhd_8bit_get_pixels_8x4_sym(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    for (i = 0; i < 4; i++) {
        block[0] = pixels[0]; block[1] = pixels[1];
        block[2] = pixels[2]; block[3] = pixels[3];
        block[4] = pixels[4]; block[5] = pixels[5];
        block[6] = pixels[6]; block[7] = pixels[7];
        pixels += line_size;
        block += 8;
    }
    memcpy(block,      block -  8, sizeof(*block) * 8);
    memcpy(block +  8, block - 16, sizeof(*block) * 8);
    memcpy(block + 16, block - 24, sizeof(*block) * 8);
    memcpy(block + 24, block - 32, sizeof(*block) * 8);
}

static av_always_inline void dnxhd_10bit_get_pixels_8x4_sym(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;

    block += 32;

    for (i = 0; i < 4; i++) {
        memcpy(block + i * 8,       pixels + i * line_size, 8 * sizeof(*block));
        memcpy(block - (i + 1) * 8, pixels + i * line_size, 8 * sizeof(*block));
    }
}

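/* Forward DCT and quantization for 10-bit content.  The DC coefficient is
 * handled separately below; AC coefficients are quantized with the fixed-point
 * reciprocal tables built in dnxhd_init_qmat(), using DNX10BIT_QMAT_SHIFT
 * fractional bits. */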
static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, DCTELEM *block,
                                    int n, int qscale, int *overflow)
{
    const uint8_t *scantable = ctx->intra_scantable.scantable;
    const int *qmat = n < 4 ? ctx->q_intra_matrix[qscale] : ctx->q_chroma_intra_matrix[qscale];
    int last_non_zero = 0;
    int i;

    ctx->dsp.fdct(block);

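    /* Divide the DC coefficient by 4 with rounding, compensating the scaling
     * applied by the forward DCT. */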
    block[0] = (block[0] + 2) >> 2;

    for (i = 1; i < 64; ++i) {
        int j = scantable[i];
        int sign  = block[j] >> 31;
        int level = (block[j] ^ sign) - sign;
        level = level * qmat[j] >> DNX10BIT_QMAT_SHIFT;
        block[j] = (level ^ sign) - sign;
        if (level)
            last_non_zero = i;
    }

    return last_non_zero;
}

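/* Build run/level VLC tables from the CID table: for each (level, run-flag)
 * pair, find the matching AC code, append a sign bit for non-zero levels and
 * an index_bits escape for levels above 64. */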
static int dnxhd_init_vlc(DNXHDEncContext *ctx)
{
    int i, j, level, run;
    int max_level = 1 << (ctx->cid_table->bit_depth + 2);

    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_codes, max_level * 4 * sizeof(*ctx->vlc_codes), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_bits,  max_level * 4 * sizeof(*ctx->vlc_bits),  fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_codes, 63 * 2, fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_bits,  63, fail);

    ctx->vlc_codes += max_level * 2;
    ctx->vlc_bits  += max_level * 2;
    for (level = -max_level; level < max_level; level++) {
        for (run = 0; run < 2; run++) {
            int index = (level << 1) | run;
            int sign, offset = 0, alevel = level;

            MASK_ABS(sign, alevel);
            if (alevel > 64) {
                offset  = (alevel - 1) >> 6;
                alevel -= offset << 6;
            }
            for (j = 0; j < 257; j++) {
                if (ctx->cid_table->ac_level[j] >> 1 == alevel &&
                    (!offset || ((ctx->cid_table->ac_flags[j] & 1) && offset)) &&
                    (!run    || ((ctx->cid_table->ac_flags[j] & 2) && run))) {
                    assert(!ctx->vlc_codes[index]);
                    if (alevel) {
                        ctx->vlc_codes[index] = (ctx->cid_table->ac_codes[j] << 1) | (sign & 1);
                        ctx->vlc_bits [index] = ctx->cid_table->ac_bits[j] + 1;
                    } else {
                        ctx->vlc_codes[index] = ctx->cid_table->ac_codes[j];
                        ctx->vlc_bits [index] = ctx->cid_table->ac_bits [j];
                    }
                    break;
                }
            }
            assert(!alevel || j < 257);
            if (offset) {
                ctx->vlc_codes[index]  = (ctx->vlc_codes[index] << ctx->cid_table->index_bits) | offset;
                ctx->vlc_bits [index] += ctx->cid_table->index_bits;
            }
        }
    }
    for (i = 0; i < 62; i++) {
        int run = ctx->cid_table->run[i];
        assert(run < 63);
        ctx->run_codes[run] = ctx->cid_table->run_codes[i];
        ctx->run_bits [run] = ctx->cid_table->run_bits[i];
    }
    return 0;
fail:
    return -1;
}

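/* Build quantization matrices for every qscale.  8-bit content goes through
 * ff_convert_matrix() on the CID weight tables (scaled by 4 afterwards);
 * 10-bit content stores a fixed-point reciprocal of qscale * weight, consumed
 * by dnxhd_10bit_dct_quantize(). */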
static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
{
    uint16_t weight_matrix[64] = { 1, };
    int qscale, i;
    const uint8_t *luma_weight_table   = ctx->cid_table->luma_weight;
    const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight;

    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l,   (ctx->m.avctx->qmax + 1) * 64 *     sizeof(int),      fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c,   (ctx->m.avctx->qmax + 1) * 64 *     sizeof(int),      fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l16, (ctx->m.avctx->qmax + 1) * 64 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c16, (ctx->m.avctx->qmax + 1) * 64 * 2 * sizeof(uint16_t), fail);

    if (ctx->cid_table->bit_depth == 8) {
        for (i = 1; i < 64; i++) {
            int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
            weight_matrix[j] = ctx->cid_table->luma_weight[i];
        }
        ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_l, ctx->qmatrix_l16, weight_matrix,
                          ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);
        for (i = 1; i < 64; i++) {
            int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
            weight_matrix[j] = ctx->cid_table->chroma_weight[i];
        }
        ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_c, ctx->qmatrix_c16, weight_matrix,
                          ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);

        for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
            for (i = 0; i < 64; i++) {
                ctx->qmatrix_l  [qscale]   [i] <<= 2; ctx->qmatrix_c  [qscale]   [i] <<= 2;
                ctx->qmatrix_l16[qscale][0][i] <<= 2; ctx->qmatrix_l16[qscale][1][i] <<= 2;
                ctx->qmatrix_c16[qscale][0][i] <<= 2; ctx->qmatrix_c16[qscale][1][i] <<= 2;
            }
        }
    } else {
        for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
            for (i = 1; i < 64; i++) {
                int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
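                /* Fixed-point reciprocal of qscale * weight, with
                 * DNX10BIT_QMAT_SHIFT fractional bits plus one extra bit that
                 * appears to fold the bitstream quantization divisor together
                 * with the forward-DCT scaling; dnxhd_10bit_dct_quantize()
                 * multiplies by this and shifts right by DNX10BIT_QMAT_SHIFT. */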
                ctx->qmatrix_l[qscale][j] = (1 << (DNX10BIT_QMAT_SHIFT + 1)) / (qscale * luma_weight_table[i]);
                ctx->qmatrix_c[qscale][j] = (1 << (DNX10BIT_QMAT_SHIFT + 1)) / (qscale * chroma_weight_table[i]);
            }
        }
    }

    ctx->m.q_chroma_intra_matrix16 = ctx->qmatrix_c16;
    ctx->m.q_chroma_intra_matrix   = ctx->qmatrix_c;
    ctx->m.q_intra_matrix16        = ctx->qmatrix_l16;
    ctx->m.q_intra_matrix          = ctx->qmatrix_l;

    return 0;
fail:
    return -1;
}

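/* Allocate the rate-control tables and derive the frame bit budget: the coding
 * unit size minus the 640-byte header, the 4-byte trailer and any Avid Nitris
 * padding. */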
static int dnxhd_init_rc(DNXHDEncContext *ctx)
{
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160 * ctx->m.avctx->qmax * sizeof(RCEntry), fail);
    if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
        FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_cmp, ctx->m.mb_num * sizeof(RCCMPEntry), fail);

    ctx->frame_bits = (ctx->cid_table->coding_unit_size - 640 - 4 - ctx->min_padding) * 8;
    ctx->qscale = 1;
    ctx->lambda = 2 << LAMBDA_FRAC_BITS;
    return 0;
fail:
    return -1;
}

static int dnxhd_encode_init(AVCodecContext *avctx)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int i, index, bit_depth;

    switch (avctx->pix_fmt) {
    case PIX_FMT_YUV422P:
        bit_depth = 8;
        break;
    case PIX_FMT_YUV422P10:
        bit_depth = 10;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "pixel format is incompatible with DNxHD\n");
        return -1;
    }

    ctx->cid = ff_dnxhd_find_cid(avctx, bit_depth);
    if (!ctx->cid) {
        av_log(avctx, AV_LOG_ERROR, "video parameters incompatible with DNxHD\n");
        return -1;
    }
    av_log(avctx, AV_LOG_DEBUG, "cid %d\n", ctx->cid);

    index = ff_dnxhd_get_cid_table(ctx->cid);
    ctx->cid_table = &ff_dnxhd_cid_table[index];

    ctx->m.avctx    = avctx;
    ctx->m.mb_intra = 1;
    ctx->m.h263_aic = 1;

    avctx->bits_per_raw_sample = ctx->cid_table->bit_depth;

    ff_dsputil_init(&ctx->m.dsp, avctx);
    ff_dct_common_init(&ctx->m);
    if (!ctx->m.dct_quantize)
        ctx->m.dct_quantize = ff_dct_quantize_c;

    if (ctx->cid_table->bit_depth == 10) {
        ctx->m.dct_quantize     = dnxhd_10bit_dct_quantize;
        ctx->get_pixels_8x4_sym = dnxhd_10bit_get_pixels_8x4_sym;
        ctx->block_width_l2     = 4;
    } else {
        ctx->get_pixels_8x4_sym = dnxhd_8bit_get_pixels_8x4_sym;
        ctx->block_width_l2     = 3;
    }

#if HAVE_MMX
    ff_dnxhd_init_mmx(ctx);
#endif

    ctx->m.mb_height = (avctx->height + 15) / 16;
    ctx->m.mb_width  = (avctx->width  + 15) / 16;

    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
        ctx->interlaced   = 1;
        ctx->m.mb_height /= 2;
    }

    ctx->m.mb_num = ctx->m.mb_height * ctx->m.mb_width;

    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        ctx->m.intra_quant_bias = avctx->intra_quant_bias;
    if (dnxhd_init_qmat(ctx, ctx->m.intra_quant_bias, 0) < 0)
        return -1;

    if (ctx->nitris_compat)
        ctx->min_padding = 1600;

    if (dnxhd_init_vlc(ctx) < 0)
        return -1;
    if (dnxhd_init_rc(ctx) < 0)
        return -1;

    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_size, ctx->m.mb_height * sizeof(uint32_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_offs, ctx->m.mb_height * sizeof(uint32_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_bits,    ctx->m.mb_num    * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale,  ctx->m.mb_num    * sizeof(uint8_t),  fail);

    ctx->frame.key_frame = 1;
    ctx->frame.pict_type = AV_PICTURE_TYPE_I;
    ctx->m.avctx->coded_frame = &ctx->frame;

    if (avctx->thread_count > MAX_THREADS) {
        av_log(avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    ctx->thread[0] = ctx;
    for (i = 1; i < avctx->thread_count; i++) {
        ctx->thread[i] = av_malloc(sizeof(DNXHDEncContext));
        if (!ctx->thread[i])
            goto fail;
        memcpy(ctx->thread[i], ctx, sizeof(DNXHDEncContext));
    }

    return 0;
fail:
    return -1;
}

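/* Fill in the fixed 640-byte frame header: signature prefix, frame/field
 * flags, dimensions, bit depth and CID.  ctx->msip points at the slice offset
 * table (apparently the "macroblock scan index"), filled in later by
 * dnxhd_encode_picture(). */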
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    const uint8_t header_prefix[5] = { 0x00, 0x00, 0x02, 0x80, 0x01 };

    memset(buf, 0, 640);

    memcpy(buf, header_prefix, 5);
    buf[5] = ctx->interlaced ? ctx->cur_field + 2 : 0x01;
    buf[6] = 0x80;
    buf[7] = 0xa0;
    AV_WB16(buf + 0x18, avctx->height >> ctx->interlaced);
    AV_WB16(buf + 0x1a, avctx->width);
    AV_WB16(buf + 0x1d, avctx->height >> ctx->interlaced);

    buf[0x21] = ctx->cid_table->bit_depth == 10 ? 0x58 : 0x38;
    buf[0x22] = 0x88 + (ctx->interlaced << 2);
    AV_WB32(buf + 0x28, ctx->cid);
    buf[0x2c] = ctx->interlaced ? 0 : 0x80;

    buf[0x5f] = 0x01;

    buf[0x167] = 0x02;
    AV_WB16(buf + 0x16a, ctx->m.mb_height * 4 + 4);
    buf[0x16d] = ctx->m.mb_height;
    buf[0x16f] = 0x10;

    ctx->msip = buf + 0x170;
    return 0;
}

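/* Code a DC difference: the size category selects a prefix from the CID table
 * and is followed by that many bits of the difference; negative differences
 * are stored decremented, i.e. in one's-complement form. */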
static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
{
    int nbits;
    if (diff < 0) {
        nbits = av_log2_16bit(-2 * diff);
        diff--;
    } else {
        nbits = av_log2_16bit(2 * diff);
    }
    put_bits(&ctx->m.pb, ctx->cid_table->dc_bits[nbits] + nbits,
             (ctx->cid_table->dc_codes[nbits] << nbits) + (diff & ((1 << nbits) - 1)));
}

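/* Code one 8x8 block: DC difference first, then (run, level) pairs using the
 * tables built in dnxhd_init_vlc(), terminated by the end-of-block code
 * vlc_codes[0]. */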
static av_always_inline void dnxhd_encode_block(DNXHDEncContext *ctx, DCTELEM *block, int last_index, int n)
{
    int last_non_zero = 0;
    int slevel, i, j;

    dnxhd_encode_dc(ctx, block[0] - ctx->m.last_dc[n]);
    ctx->m.last_dc[n] = block[0];

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        slevel = block[j];
        if (slevel) {
            int run_level = i - last_non_zero - 1;
            int rlevel = (slevel << 1) | !!run_level;
            put_bits(&ctx->m.pb, ctx->vlc_bits[rlevel], ctx->vlc_codes[rlevel]);
            if (run_level)
                put_bits(&ctx->m.pb, ctx->run_bits[run_level], ctx->run_codes[run_level]);
            last_non_zero = i;
        }
    }
    put_bits(&ctx->m.pb, ctx->vlc_bits[0], ctx->vlc_codes[0]);
}

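/* Reference dequantization, used only to measure the reconstruction error
 * (SSD) for rate-distortion decisions. */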
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, DCTELEM *block, int n, int qscale, int last_index)
{
    const uint8_t *weight_matrix;
    int level;
    int i;

    weight_matrix = (n & 2) ? ctx->cid_table->chroma_weight : ctx->cid_table->luma_weight;

    for (i = 1; i <= last_index; i++) {
        int j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = (1 - 2 * level) * qscale * weight_matrix[i];
                if (ctx->cid_table->bit_depth == 10) {
                    if (weight_matrix[i] != 8)
                        level += 8;
                    level >>= 4;
                } else {
                    if (weight_matrix[i] != 32)
                        level += 32;
                    level >>= 6;
                }
                level = -level;
            } else {
                level = (2 * level + 1) * qscale * weight_matrix[i];
                if (ctx->cid_table->bit_depth == 10) {
                    if (weight_matrix[i] != 8)
                        level += 8;
                    level >>= 4;
                } else {
                    if (weight_matrix[i] != 32)
                        level += 32;
                    level >>= 6;
                }
            }
            block[j] = level;
        }
    }
}

static av_always_inline int dnxhd_ssd_block(DCTELEM *qblock, DCTELEM *block)
{
    int score = 0;
    int i;
    for (i = 0; i < 64; i++)
        score += (block[i] - qblock[i]) * (block[i] - qblock[i]);
    return score;
}

static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, DCTELEM *block, int last_index)
{
    int last_non_zero = 0;
    int bits = 0;
    int i, j, level;
    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            int run_level = i - last_non_zero - 1;
            bits += ctx->vlc_bits[(level << 1) | !!run_level] + ctx->run_bits[run_level];
            last_non_zero = i;
        }
    }
    return bits;
}

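/* Gather the eight 8x8 blocks of one 16x16 macroblock (4 luma + 2 Cb + 2 Cr in
 * 4:2:2).  In the last macroblock row of 1080-line frames the bottom half is
 * mirrored (interlaced) or cleared (progressive), since those lines fall
 * outside the picture. */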
static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
{
    const int bs = ctx->block_width_l2;
    const int bw = 1 << bs;
    const uint8_t *ptr_y = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize)   + (mb_x << (bs + 1));
    const uint8_t *ptr_u = ctx->thread[0]->src[1] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs);
    const uint8_t *ptr_v = ctx->thread[0]->src[2] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs);
    DSPContext *dsp = &ctx->m.dsp;

    dsp->get_pixels(ctx->blocks[0], ptr_y,      ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[1], ptr_y + bw, ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[2], ptr_u,      ctx->m.uvlinesize);
    dsp->get_pixels(ctx->blocks[3], ptr_v,      ctx->m.uvlinesize);

    if (mb_y + 1 == ctx->m.mb_height && ctx->m.avctx->height == 1080) {
        if (ctx->interlaced) {
            ctx->get_pixels_8x4_sym(ctx->blocks[4], ptr_y + ctx->dct_y_offset,      ctx->m.linesize);
            ctx->get_pixels_8x4_sym(ctx->blocks[5], ptr_y + ctx->dct_y_offset + bw, ctx->m.linesize);
            ctx->get_pixels_8x4_sym(ctx->blocks[6], ptr_u + ctx->dct_uv_offset,     ctx->m.uvlinesize);
            ctx->get_pixels_8x4_sym(ctx->blocks[7], ptr_v + ctx->dct_uv_offset,     ctx->m.uvlinesize);
        } else {
            dsp->clear_block(ctx->blocks[4]);
            dsp->clear_block(ctx->blocks[5]);
            dsp->clear_block(ctx->blocks[6]);
            dsp->clear_block(ctx->blocks[7]);
        }
    } else {
        dsp->get_pixels(ctx->blocks[4], ptr_y + ctx->dct_y_offset,      ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[5], ptr_y + ctx->dct_y_offset + bw, ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[6], ptr_u + ctx->dct_uv_offset,     ctx->m.uvlinesize);
        dsp->get_pixels(ctx->blocks[7], ptr_v + ctx->dct_uv_offset,     ctx->m.uvlinesize);
    }
}

static av_always_inline int dnxhd_switch_matrix(DNXHDEncContext *ctx, int i)
{
    static const uint8_t component[8] = { 0, 0, 1, 2, 0, 0, 1, 2 };
    return component[i];
}

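/* Worker, one macroblock row per job: quantize every block at ctx->qscale and
 * record the bit cost, plus the SSD when it is needed for rate-distortion
 * decisions, in ctx->mb_rc. */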
static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    int qscale = ctx->qscale;
    LOCAL_ALIGNED_16(DCTELEM, block, [64]);
    ctx = ctx->thread[threadnr];

    ctx->m.last_dc[0] =
    ctx->m.last_dc[1] =
    ctx->m.last_dc[2] = 1 << (ctx->cid_table->bit_depth + 2);

    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.mb_width + mb_x;
        int ssd     = 0;
        int ac_bits = 0;
        int dc_bits = 0;
        int i;

        dnxhd_get_blocks(ctx, mb_x, mb_y);

        for (i = 0; i < 8; i++) {
            DCTELEM *src_block = ctx->blocks[i];
            int overflow, nbits, diff, last_index;
            int n = dnxhd_switch_matrix(ctx, i);

            memcpy(block, src_block, 64 * sizeof(*block));
            last_index = ctx->m.dct_quantize(&ctx->m, block, 4 & (2 * i), qscale, &overflow);
            ac_bits += dnxhd_calc_ac_bits(ctx, block, last_index);

            diff = block[0] - ctx->m.last_dc[n];
            if (diff < 0) nbits = av_log2_16bit(-2 * diff);
            else          nbits = av_log2_16bit( 2 * diff);

            assert(nbits < ctx->cid_table->bit_depth + 4);
            dc_bits += ctx->cid_table->dc_bits[nbits] + nbits;

            ctx->m.last_dc[n] = block[0];

            if (avctx->mb_decision == FF_MB_DECISION_RD || !RC_VARIANCE) {
                dnxhd_unquantize_c(ctx, block, i, qscale, last_index);
                ctx->m.dsp.idct(block);
                ssd += dnxhd_ssd_block(block, src_block);
            }
        }
        ctx->mb_rc[qscale][mb].ssd  = ssd;
        ctx->mb_rc[qscale][mb].bits = ac_bits + dc_bits + 12 + 8 * ctx->vlc_bits[0];
    }
    return 0;
}

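/* Worker, one macroblock row per job: write the final bitstream for the slice
 * using the per-macroblock qscale chosen by rate control, then pad the slice
 * to a 32-bit boundary. */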
static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    ctx = ctx->thread[threadnr];
    init_put_bits(&ctx->m.pb, (uint8_t *)arg + 640 + ctx->slice_offs[jobnr], ctx->slice_size[jobnr]);

    ctx->m.last_dc[0] =
    ctx->m.last_dc[1] =
    ctx->m.last_dc[2] = 1 << (ctx->cid_table->bit_depth + 2);
    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.mb_width + mb_x;
        int qscale = ctx->mb_qscale[mb];
        int i;

        put_bits(&ctx->m.pb, 12, qscale << 1);

        dnxhd_get_blocks(ctx, mb_x, mb_y);

        for (i = 0; i < 8; i++) {
            DCTELEM *block = ctx->blocks[i];
            int overflow, n = dnxhd_switch_matrix(ctx, i);
            int last_index = ctx->m.dct_quantize(&ctx->m, block, 4 & (2 * i), qscale, &overflow);

            dnxhd_encode_block(ctx, block, last_index, n);
        }
    }
    if (put_bits_count(&ctx->m.pb) & 31)
        put_bits(&ctx->m.pb, 32 - (put_bits_count(&ctx->m.pb) & 31), 0);
    flush_put_bits(&ctx->m.pb);
    return 0;
}

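/* Compute the byte size and offset of each macroblock-row slice from the
 * per-macroblock bit counts, rounding every slice up to a whole number of
 * 32-bit words. */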
static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx)
{
    int mb_y, mb_x;
    int offset = 0;
    for (mb_y = 0; mb_y < ctx->m.mb_height; mb_y++) {
        int thread_size;
        ctx->slice_offs[mb_y] = offset;
        ctx->slice_size[mb_y] = 0;
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            ctx->slice_size[mb_y] += ctx->mb_bits[mb];
        }
        ctx->slice_size[mb_y]   = (ctx->slice_size[mb_y] + 31) & ~31;
        ctx->slice_size[mb_y] >>= 3;
        thread_size = ctx->slice_size[mb_y];
        offset += thread_size;
    }
}

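/* Worker, one macroblock row per job: compute the luma variance of each
 * macroblock, used by the fast rate control to decide which macroblocks get a
 * coarser qscale. */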
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    ctx = ctx->thread[threadnr];
    if (ctx->cid_table->bit_depth == 8) {
        uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize);
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int sum  = ctx->m.dsp.pix_sum(pix, ctx->m.linesize);
            int varc = (ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)sum * sum) >> 8) + 128) >> 8;
            ctx->mb_cmp[mb].value = varc;
            ctx->mb_cmp[mb].mb    = mb;
        }
    } else {
        int const linesize = ctx->m.linesize >> 1;
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x) {
            uint16_t *pix = (uint16_t*)ctx->thread[0]->src[0] + ((mb_y << 4) * linesize) + (mb_x << 4);
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int sum = 0;
            int sqsum = 0;
            int mean, sqmean;
            int i, j;

            for (i = 0; i < 16; ++i) {
                for (j = 0; j < 16; ++j) {
                    int const sample = (unsigned)pix[j] >> 6;
                    sum   += sample;
                    sqsum += sample * sample;
                }
                pix += linesize;
            }
            mean   = sum >> 8;
            sqmean = sqsum >> 8;
            ctx->mb_cmp[mb].value = sqmean - mean * mean;
            ctx->mb_cmp[mb].mb    = mb;
        }
    }
    return 0;
}

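/* Full rate-distortion rate control: evaluate every qscale for every
 * macroblock, then search for a lambda such that picking the cost-minimizing
 * qscale per macroblock keeps the frame within ctx->frame_bits. */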
static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int lambda, up_step, down_step;
    int last_lower = INT_MAX, last_higher = 0;
    int x, y, q;

    for (q = 1; q < avctx->qmax; q++) {
        ctx->qscale = q;
        avctx->execute2(avctx, dnxhd_calc_bits_thread, NULL, NULL, ctx->m.mb_height);
    }
    up_step = down_step = 2 << LAMBDA_FRAC_BITS;
    lambda  = ctx->lambda;

    for (;;) {
        int bits = 0;
        int end  = 0;
        if (lambda == last_higher) {
            lambda++;
            end = 1;
        }
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++) {
                unsigned min = UINT_MAX;
                int qscale = 1;
                int mb     = y * ctx->m.mb_width + x;
                for (q = 1; q < avctx->qmax; q++) {
                    unsigned score = ctx->mb_rc[q][mb].bits * lambda +
                                     ((unsigned)ctx->mb_rc[q][mb].ssd << LAMBDA_FRAC_BITS);
                    if (score < min) {
                        min    = score;
                        qscale = q;
                    }
                }
                bits += ctx->mb_rc[qscale][mb].bits;
                ctx->mb_qscale[mb] = qscale;
                ctx->mb_bits[mb]   = ctx->mb_rc[qscale][mb].bits;
            }
            bits = (bits + 31) & ~31;
            if (bits > ctx->frame_bits)
                break;
        }

        if (end) {
            if (bits > ctx->frame_bits)
                return -1;
            break;
        }
        if (bits < ctx->frame_bits) {
            last_lower = FFMIN(lambda, last_lower);
            if (last_higher != 0)
                lambda = (lambda + last_higher) >> 1;
            else
                lambda -= down_step;
            down_step = FFMIN((int64_t)down_step * 5, INT_MAX);
            up_step   = 1 << LAMBDA_FRAC_BITS;
            lambda    = FFMAX(1, lambda);
            if (lambda == last_lower)
                break;
        } else {
            last_higher = FFMAX(lambda, last_higher);
            if (last_lower != INT_MAX)
                lambda = (lambda + last_lower) >> 1;
            else if ((int64_t)lambda + up_step > INT_MAX)
                return -1;
            else
                lambda += up_step;
            up_step   = FFMIN((int64_t)up_step * 5, INT_MAX);
            down_step = 1 << LAMBDA_FRAC_BITS;
        }
    }

    ctx->lambda = lambda;
    return 0;
}

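/* Search for a single frame-wide qscale whose total bit cost fits within
 * ctx->frame_bits, bisecting between the last qscale known to fit and the last
 * one known not to. */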
static int dnxhd_find_qscale(DNXHDEncContext *ctx)
{
    int bits = 0;
    int up_step = 1;
    int down_step = 1;
    int last_higher = 0;
    int last_lower = INT_MAX;
    int qscale;
    int x, y;

    qscale = ctx->qscale;
    for (;;) {
        bits = 0;
        ctx->qscale = qscale;

        ctx->m.avctx->execute2(ctx->m.avctx, dnxhd_calc_bits_thread, NULL, NULL, ctx->m.mb_height);
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++)
                bits += ctx->mb_rc[qscale][y * ctx->m.mb_width + x].bits;
            bits = (bits + 31) & ~31;
            if (bits > ctx->frame_bits)
                break;
        }

        if (bits < ctx->frame_bits) {
            if (qscale == 1)
                return 1;
            if (last_higher == qscale - 1) {
                qscale = last_higher;
                break;
            }
            last_lower = FFMIN(qscale, last_lower);
            if (last_higher != 0)
                qscale = (qscale + last_higher) >> 1;
            else
                qscale -= down_step++;
            if (qscale < 1)
                qscale = 1;
            up_step = 1;
        } else {
            if (last_lower == qscale + 1)
                break;
            last_higher = FFMAX(qscale, last_higher);
            if (last_lower != INT_MAX)
                qscale = (qscale + last_lower) >> 1;
            else
                qscale += up_step++;
            down_step = 1;
            if (qscale >= ctx->m.avctx->qmax)
                return -1;
        }
    }

    ctx->qscale = qscale;
    return 0;
}

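/* Descending radix sort of RCCMPEntry by value, 8 bits per pass; the last two
 * passes are skipped when every value fits in 16 bits. */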
#define BUCKET_BITS 8
#define RADIX_PASSES 4
#define NBUCKETS (1 << BUCKET_BITS)

static inline int get_bucket(int value, int shift)
{
    value >>= shift;
    value  &= NBUCKETS - 1;
    return NBUCKETS - 1 - value;
}

static void radix_count(const RCCMPEntry *data, int size, int buckets[RADIX_PASSES][NBUCKETS])
{
    int i, j;
    memset(buckets, 0, sizeof(buckets[0][0]) * RADIX_PASSES * NBUCKETS);
    for (i = 0; i < size; i++) {
        int v = data[i].value;
        for (j = 0; j < RADIX_PASSES; j++) {
            buckets[j][get_bucket(v, 0)]++;
            v >>= BUCKET_BITS;
        }
        assert(!v);
    }
    for (j = 0; j < RADIX_PASSES; j++) {
        int offset = size;
        for (i = NBUCKETS - 1; i >= 0; i--)
            buckets[j][i] = offset -= buckets[j][i];
        assert(!buckets[j][0]);
    }
}

static void radix_sort_pass(RCCMPEntry *dst, const RCCMPEntry *data, int size, int buckets[NBUCKETS], int pass)
{
    int shift = pass * BUCKET_BITS;
    int i;
    for (i = 0; i < size; i++) {
        int v   = get_bucket(data[i].value, shift);
        int pos = buckets[v]++;
        dst[pos] = data[i];
    }
}

static void radix_sort(RCCMPEntry *data, int size)
{
    int buckets[RADIX_PASSES][NBUCKETS];
    RCCMPEntry *tmp = av_malloc(sizeof(*tmp) * size);
    radix_count(data, size, buckets);
    radix_sort_pass(tmp, data, size, buckets[0], 0);
    radix_sort_pass(data, tmp, size, buckets[1], 1);
    if (buckets[2][NBUCKETS - 1] || buckets[3][NBUCKETS - 1]) {
        radix_sort_pass(tmp, data, size, buckets[2], 2);
        radix_sort_pass(data, tmp, size, buckets[3], 3);
    }
    av_free(tmp);
}

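/* Fast rate control: pick one qscale for the whole frame, then, while the
 * frame is still too large, move the macroblocks ranked first by the
 * comparison metric (luma variance, or SSD change per saved bit when
 * RC_VARIANCE is 0) to qscale + 1. */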
static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int max_bits = 0;
    int ret, x, y;
    if ((ret = dnxhd_find_qscale(ctx)) < 0)
        return -1;
    for (y = 0; y < ctx->m.mb_height; y++) {
        for (x = 0; x < ctx->m.mb_width; x++) {
            int mb = y * ctx->m.mb_width + x;
            int delta_bits;
            ctx->mb_qscale[mb] = ctx->qscale;
            ctx->mb_bits[mb]   = ctx->mb_rc[ctx->qscale][mb].bits;
            max_bits += ctx->mb_rc[ctx->qscale][mb].bits;
            if (!RC_VARIANCE) {
                delta_bits = ctx->mb_rc[ctx->qscale][mb].bits - ctx->mb_rc[ctx->qscale + 1][mb].bits;
                ctx->mb_cmp[mb].mb = mb;
                ctx->mb_cmp[mb].value = delta_bits ?
                    ((ctx->mb_rc[ctx->qscale][mb].ssd - ctx->mb_rc[ctx->qscale + 1][mb].ssd) * 100) / delta_bits
                    : INT_MIN;
            }
        }
        max_bits += 31;
    }
    if (!ret) {
        if (RC_VARIANCE)
            avctx->execute2(avctx, dnxhd_mb_var_thread, NULL, NULL, ctx->m.mb_height);
        radix_sort(ctx->mb_cmp, ctx->m.mb_num);
        for (x = 0; x < ctx->m.mb_num && max_bits > ctx->frame_bits; x++) {
            int mb = ctx->mb_cmp[x].mb;
            max_bits -= ctx->mb_rc[ctx->qscale][mb].bits - ctx->mb_rc[ctx->qscale + 1][mb].bits;
            ctx->mb_qscale[mb] = ctx->qscale + 1;
            ctx->mb_bits[mb]   = ctx->mb_rc[ctx->qscale + 1][mb].bits;
        }
    }
    return 0;
}

static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
{
    int i;

    for (i = 0; i < 3; i++) {
        ctx->frame.data[i]     = frame->data[i];
        ctx->frame.linesize[i] = frame->linesize[i];
    }

    for (i = 0; i < ctx->m.avctx->thread_count; i++) {
        ctx->thread[i]->m.linesize    = ctx->frame.linesize[0] << ctx->interlaced;
        ctx->thread[i]->m.uvlinesize  = ctx->frame.linesize[1] << ctx->interlaced;
        ctx->thread[i]->dct_y_offset  = ctx->m.linesize   * 8;
        ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize * 8;
    }

    ctx->frame.interlaced_frame = frame->interlaced_frame;
    ctx->cur_field = frame->interlaced_frame && !frame->top_field_first;
}

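/* Encode one frame (two coding units when interlaced): run rate control, write
 * the header and slice offset table, encode all slices in parallel, zero the
 * unused space and append the 0x600DC0DE trailer. */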
static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *frame, int *got_packet)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int first_field = 1;
    int offset, i, ret;
    uint8_t *buf;

    if ((ret = ff_alloc_packet2(avctx, pkt, ctx->cid_table->frame_size)) < 0)
        return ret;
    buf = pkt->data;

    dnxhd_load_picture(ctx, frame);

encode_coding_unit:
    for (i = 0; i < 3; i++) {
        ctx->src[i] = ctx->frame.data[i];
        if (ctx->interlaced && ctx->cur_field)
            ctx->src[i] += ctx->frame.linesize[i];
    }

    dnxhd_write_header(avctx, buf);

    if (avctx->mb_decision == FF_MB_DECISION_RD)
        ret = dnxhd_encode_rdo(avctx, ctx);
    else
        ret = dnxhd_encode_fast(avctx, ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "picture could not fit ratecontrol constraints, increase qmax\n");
        return -1;
    }

    dnxhd_setup_threads_slices(ctx);

    offset = 0;
    for (i = 0; i < ctx->m.mb_height; i++) {
        AV_WB32(ctx->msip + i * 4, offset);
        offset += ctx->slice_size[i];
        assert(!(ctx->slice_size[i] & 3));
    }

    avctx->execute2(avctx, dnxhd_encode_thread, buf, NULL, ctx->m.mb_height);

    assert(640 + offset + 4 <= ctx->cid_table->coding_unit_size);
    memset(buf + 640 + offset, 0, ctx->cid_table->coding_unit_size - 4 - offset - 640);

    AV_WB32(buf + ctx->cid_table->coding_unit_size - 4, 0x600DC0DE);

    if (ctx->interlaced && first_field) {
        first_field     = 0;
        ctx->cur_field ^= 1;
        buf            += ctx->cid_table->coding_unit_size;
        goto encode_coding_unit;
    }

    ctx->frame.quality = ctx->qscale * FF_QP2LAMBDA;

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}

static int dnxhd_encode_end(AVCodecContext *avctx)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int max_level = 1 << (ctx->cid_table->bit_depth + 2);
    int i;

    av_free(ctx->vlc_codes - max_level * 2);
    av_free(ctx->vlc_bits  - max_level * 2);
    av_freep(&ctx->run_codes);
    av_freep(&ctx->run_bits);

    av_freep(&ctx->mb_bits);
    av_freep(&ctx->mb_qscale);
    av_freep(&ctx->mb_rc);
    av_freep(&ctx->mb_cmp);
    av_freep(&ctx->slice_size);
    av_freep(&ctx->slice_offs);

    av_freep(&ctx->qmatrix_c);
    av_freep(&ctx->qmatrix_l);
    av_freep(&ctx->qmatrix_c16);
    av_freep(&ctx->qmatrix_l16);

    for (i = 1; i < avctx->thread_count; i++)
        av_freep(&ctx->thread[i]);

    return 0;
}

static const AVCodecDefault dnxhd_defaults[] = {
    { "qmax", "1024" },
    { NULL },
};

AVCodec ff_dnxhd_encoder = {
    .name           = "dnxhd",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_DNXHD,
    .priv_data_size = sizeof(DNXHDEncContext),
    .init           = dnxhd_encode_init,
    .encode2        = dnxhd_encode_picture,
    .close          = dnxhd_encode_end,
    .capabilities   = CODEC_CAP_SLICE_THREADS,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV422P,
                                                  PIX_FMT_YUV422P10,
                                                  PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
    .priv_class     = &class,
    .defaults       = dnxhd_defaults,
};