27 #define RC_VARIANCE 1 // 1: use variance, 0: use ssd to rank macroblocks in fast rc
38 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
39 #define DNX10BIT_QMAT_SHIFT 18 // The largest value that will not lead to overflow for 10-bit samples.
53 #define LAMBDA_FRAC_BITS 10
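/* 8-bit pixel fetch for the 8x4 symmetric DCT input: four source rows are
 * copied into the block, then the memcpys below mirror them (row 4 = row 3,
 * ... row 7 = row 0) to fill the 8x8 block. */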
58 for (i = 0; i < 4; i++) {
59 block[0] = pixels[0]; block[1] = pixels[1];
60 block[2] = pixels[2]; block[3] = pixels[3];
61 block[4] = pixels[4]; block[5] = pixels[5];
62 block[6] = pixels[6]; block[7] = pixels[7];
66 memcpy(block,      block -  8, sizeof(*block) * 8);
67 memcpy(block +  8, block - 16, sizeof(*block) * 8);
68 memcpy(block + 16, block - 24, sizeof(*block) * 8);
69 memcpy(block + 24, block - 32, sizeof(*block) * 8);
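/* 10-bit variant of the same fetch: identical copy-and-mirror, but reading
 * 16-bit source samples. */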
75 const uint16_t *pixels16 = (const uint16_t *)pixels;
78 for (i = 0; i < 4; i++) {
79 block[0] = pixels16[0]; block[1] = pixels16[1];
80 block[2] = pixels16[2]; block[3] = pixels16[3];
81 block[4] = pixels16[4]; block[5] = pixels16[5];
82 block[6] = pixels16[6]; block[7] = pixels16[7];
83 pixels16 += line_size;
86 memcpy(block,      block -  8, sizeof(*block) * 8);
87 memcpy(block +  8, block - 16, sizeof(*block) * 8);
88 memcpy(block + 16, block - 24, sizeof(*block) * 8);
89 memcpy(block + 24, block - 32, sizeof(*block) * 8);
93 int n, int qscale, int *overflow)
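/* 10-bit quantizer: the DC term is divided by 4 with rounding, and each AC
 * coefficient is quantized on its absolute value (the sign is split off and
 * restored below), presumably against the fixed-point matrices built with
 * DNX10BIT_QMAT_SHIFT; the index of the last non-zero coefficient is returned. */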
97 int last_non_zero = 0;
103 block[0] = (block[0] + 2) >> 2;
105 for (i = 1; i < 64; ++i) {
106 int j = scantable[i];
107 int sign = block[j] >> 31;            // 0 for non-negative, -1 for negative (arithmetic shift)
108 int level = (block[j] ^ sign) - sign; // absolute value
110 block[j] = (level ^ sign) - sign;     // restore the original sign
115 return last_non_zero;
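/* VLC setup: build run/level lookup tables from the CID table so AC
 * coefficients can be coded with a single (level<<1)|run indexed lookup;
 * levels above 64 are first reduced by a 6-bit offset. */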
130 for (level = -max_level; level < max_level; level++) {
131 for (run = 0; run < 2; run++) {
132 int index = (level<<1)|run;
137 offset = (alevel-1)>>6;
140 for (j = 0; j < 257; j++) {
162 for (i = 0; i < 62; i++) {
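/* Quantization matrix setup: expand the CID weight tables into per-qscale
 * luma and chroma matrices, one pass per qscale up to qmax. */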
176 uint16_t weight_matrix[64] = {1,};
187 for (i = 1; i < 64; i++) {
193 for (i = 1; i < 64; i++) {
200 for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
201 for (i = 0; i < 64; i++) {
209 for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
210 for (i = 1; i < 64; i++) {
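/* Encoder init: pick a CID (profile) matching the input dimensions, bit depth
 * and interlacing; incompatible parameter combinations are rejected with the
 * message below. */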
254 int i, index, bit_depth;
270 av_log(avctx, AV_LOG_ERROR, "video parameters incompatible with DNxHD. Valid DNxHD profiles:\n");
356 const uint8_t header_prefix[5] = { 0x00,0x00,0x02,0x80,0x01 };
360 memcpy(buf, header_prefix, 5);
380 ctx->msip = buf + 0x170;
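/* Block entropy coding: every non-zero AC coefficient is written as a
 * run/level VLC code; the separate run code is only needed when coefficients
 * were skipped since the previous non-zero one (run_level != 0). */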
399 int last_non_zero = 0;
405 for (i = 1; i <= last_index; i++) {
409 int run_level = i - last_non_zero - 1;
410 int rlevel = (slevel<<1)|!!run_level;
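/* Inverse quantization used for distortion measurement: each coefficient is
 * rebuilt from its quantized value, the qscale and the weight matrix, with
 * rounding added before the final shift. */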
428 for (i = 1; i <= last_index; i++) {
433 level = (1-2*level) * qscale * weight_matrix[i];
435 if (weight_matrix[i] != 8)
439 if (weight_matrix[i] != 32)
445 level = (2*level+1) * qscale * weight_matrix[i];
447 if (weight_matrix[i] != 8)
451 if (weight_matrix[i] != 32)
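/* Sum of squared differences between the original and reconstructed
 * coefficients; this is the distortion term used by rate control. */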
465 for (i = 0; i < 64; i++)
466 score += (block[i] - qblock[i]) * (block[i] - qblock[i]);
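/* A second pass over the same run/level structure; here the codes are only
 * counted, giving the bit cost of a block without writing anything. */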
472 int last_non_zero = 0;
475 for (i = 1; i <= last_index; i++) {
479 int run_level = i - last_non_zero - 1;
490 const int bw = 1 << bs;
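/* Bit-counting worker (one job per macroblock row): each block is transformed
 * and quantized at the current qscale, and the resulting bit cost (plus
 * distortion in RD mode) is recorded for rate control. */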
523 static const uint8_t component[8]={0,0,1,2,0,0,1,2};
530 int mb_y = jobnr, mb_x;
533 ctx = ctx->thread[threadnr];
539 for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
548 for (i = 0; i < 8; i++) {
549 int16_t *src_block = ctx->blocks[i];
550 int overflow, nbits, diff, last_index;
561 av_assert1(nbits < ctx->cid_table->bit_depth + 4);
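/* Bitstream-writing worker: same per-row structure, but each block is
 * quantized at the qscale finally chosen by rate control and written to its
 * slice. */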
581 int mb_y = jobnr, mb_x;
582 ctx = ctx->thread[threadnr];
588 for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
597 for (i = 0; i < 8; i++) {
600 int last_index = ctx->m.dct_quantize(&ctx->m, block, 4&(2*i), qscale, &overflow);
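/* Slice setup: per-row bit counts are accumulated and converted into byte
 * offsets so every macroblock row becomes an independently addressable slice. */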
616 for (mb_y = 0; mb_y < ctx->m.mb_height; mb_y++) {
620 for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
627 offset += thread_size;
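/* Variance pass (8-bit): sum and sum of squares are accumulated over each
 * 16x16 macroblock, with special handling for a partial last row, and reduced
 * to a per-macroblock variance for rate control. */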
634 int mb_y = jobnr, mb_x, x, y;
635 int partial_last_row = (mb_y == ctx->m.mb_height - 1) &&
638 ctx = ctx->thread[threadnr];
641 for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) {
646 if (!partial_last_row && mb_x * 16 <= avctx->width - 16) {
653 for (y = 0; y < bh; y++) {
654 for (x = 0; x < bw; x++) {
661 varc = (varc - (((unsigned)sum * sum) >> 8) + 128) >> 8;
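/* 10-bit variance path: source samples are stored as 16-bit values, so they
 * are shifted down to 10 bits before being accumulated. */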
667 int const linesize = ctx->m.linesize >> 1;
668 for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x) {
669 uint16_t *pix = (uint16_t*)ctx->thread[0]->src[0] + ((mb_y << 4) * linesize) + (mb_x << 4);
676 for (i = 0; i < 16; ++i) {
677 for (j = 0; j < 16; ++j) {
679 int const sample = (unsigned)pix[j] >> 6;
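/* RD rate control: for a given lambda each macroblock picks the qscale that
 * minimizes bits*lambda + (ssd << LAMBDA_FRAC_BITS); lambda itself is then
 * binary-searched, with widening up/down steps while no bracket is known,
 * until the frame fits its bit budget. */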
697 int lambda, up_step, down_step;
698 int last_lower = INT_MAX, last_higher = 0;
701 for (q = 1; q < avctx->qmax; q++) {
711 if (lambda == last_higher) {
717 unsigned min = UINT_MAX;
720 for (q = 1; q < avctx->qmax; q++) {
732 bits = (bits+31)&~31; // round the row total up to a 32-bit boundary
743 if (bits < ctx->frame_bits) {
744 last_lower = FFMIN(lambda, last_lower);
745 if (last_higher != 0)
746 lambda = (lambda+last_higher)>>1;
749 down_step = FFMIN((int64_t)down_step*5, INT_MAX);
751 lambda = FFMAX(1, lambda);
752 if (lambda == last_lower)
755 last_higher = FFMAX(lambda, last_higher);
756 if (last_lower != INT_MAX)
757 lambda = (lambda+last_lower)>>1;
758 else if ((int64_t)lambda + up_step > INT_MAX)
762 up_step = FFMIN((int64_t)up_step*5, INT_MAX);
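/* Simpler rate control: binary-search a single frame-wide qscale whose padded
 * bit total fits the frame budget, again widening the step while the search
 * is unbracketed. */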
777 int last_lower = INT_MAX;
790 bits = (bits+31)&~31; // round the row total up to a 32-bit boundary
796 if (bits < ctx->frame_bits) {
799 if (last_higher == qscale - 1) {
800 qscale = last_higher;
803 last_lower = FFMIN(qscale, last_lower);
804 if (last_higher != 0)
805 qscale = (qscale+last_higher)>>1;
807 qscale -= down_step++;
812 if (last_lower == qscale + 1)
814 last_higher = FFMAX(qscale, last_higher);
815 if (last_lower != INT_MAX)
816 qscale = (qscale+last_lower)>>1;
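/* Least-significant-digit radix sort (RADIX_PASSES passes of BUCKET_BITS-wide
 * buckets), presumably used by the fast rate-control path to order macroblock
 * entries by their comparison value. */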
829 #define BUCKET_BITS 8
830 #define RADIX_PASSES 4
831 #define NBUCKETS (1 << BUCKET_BITS)
843 memset(buckets, 0, sizeof(buckets[0][0]) * RADIX_PASSES * NBUCKETS);
844 for (i = 0; i < size; i++) {
854 for (i = NBUCKETS - 1; i >= 0; i--)
855 buckets[j][i] = offset -= buckets[j][i];
864 for (i = 0; i < size; i++) {
866 int pos = buckets[v]++;
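/* Frame-level path: the three source plane pointers are set up (again for the
 * second field of interlaced material via the encode_coding_unit label), rate
 * control is run, and encoding fails with the "increase qmax" message when
 * the frame cannot be made to fit. */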
926 for (i = 0; i < 3; i++) {
957 for (i = 0; i < 3; i++) {
971 "picture could not fit ratecontrol constraints, increase qmax\n");
995 goto encode_coding_unit;
1052 .priv_class = &class,