#define DNX10BIT_QMAT_SHIFT 18
#define RC_VARIANCE 1 // use variance or ssd for fast rc
#define LAMBDA_FRAC_BITS 10

#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
51 {
"nitris_compat",
"encode with Avid Nitris compatibility",
53 {
"ibias",
"intra quant bias",
55 { .i64 = 0 }, INT_MIN, INT_MAX,
VE },
60 0, 0,
VE,
"profile" },
62 0, 0,
VE,
"profile" },
64 0, 0,
VE,
"profile" },
66 0, 0,
VE,
"profile" },
68 0, 0,
VE,
"profile" },
70 0, 0,
VE,
"profile" },
/* dnxhd_8bit_get_pixels_8x4_sym(): fill an 8x8 block from four source rows,
   mirroring them into the bottom half (used for the last macroblock row of
   1080-line interlaced input, see dnxhd_get_blocks() below). */
static av_always_inline
void dnxhd_8bit_get_pixels_8x4_sym(int16_t *av_restrict block,
                                   const uint8_t *pixels,
                                   ptrdiff_t line_size)
{
    int i;
    for (i = 0; i < 4; i++) {
        /* ... widen one row of 8 source pixels into block row i ... */
    }
    /* ... mirror rows 3..0 into rows 4..7 ... */
}
static av_always_inline
void dnxhd_10bit_get_pixels_8x4_sym(int16_t *av_restrict block,
                                    const uint8_t *pixels,
                                    ptrdiff_t line_size)
{
    memcpy(block + 0 * 8, pixels + 0 * line_size, 8 * sizeof(*block));
    memcpy(block + 7 * 8, pixels + 0 * line_size, 8 * sizeof(*block));
    memcpy(block + 1 * 8, pixels + 1 * line_size, 8 * sizeof(*block));
    memcpy(block + 6 * 8, pixels + 1 * line_size, 8 * sizeof(*block));
    memcpy(block + 2 * 8, pixels + 2 * line_size, 8 * sizeof(*block));
    memcpy(block + 5 * 8, pixels + 2 * line_size, 8 * sizeof(*block));
    memcpy(block + 3 * 8, pixels + 3 * line_size, 8 * sizeof(*block));
    memcpy(block + 4 * 8, pixels + 3 * line_size, 8 * sizeof(*block));
}
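The copy pattern above places source row r into block rows r and 7 - r. A minimal, self-contained sketch of the same mirroring (the helper name is illustrative; like the function above it treats each pair of source bytes as one packed 16-bit sample):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Mirror 4 source rows into an 8-row block so that row r and row 7 - r
 * hold the same data, matching the eight memcpy() calls above. */
static void mirror_8x4(int16_t *block, const uint8_t *pixels, ptrdiff_t line_size)
{
    for (int r = 0; r < 4; r++) {
        memcpy(block + r * 8,       pixels + r * line_size, 8 * sizeof(*block));
        memcpy(block + (7 - r) * 8, pixels + r * line_size, 8 * sizeof(*block));
    }
}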
/* dnxhd_10bit_dct_quantize_444(): forward DCT plus quantization with a
   dead-zone threshold; scans from the highest frequency down to find the
   last non-zero coefficient. */
static int dnxhd_10bit_dct_quantize_444(MpegEncContext *ctx, int16_t *block,
                                        int n, int qscale, int *overflow)
{
    int i, j, level, last_non_zero, start_i;
    const int *qmat;
    const uint8_t *scantable = ctx->intra_scantable.scantable;
    int bias;
    unsigned int threshold1, threshold2;

    /* ... forward DCT, DC handling, start_i / last_non_zero init ... */

    qmat = n < 4 ? ctx->q_intra_matrix[qscale]
                 : ctx->q_chroma_intra_matrix[qscale];
    bias       = ctx->intra_quant_bias * (1 << (16 - 8));
    threshold1 = (1 << 16) - bias - 1;
    threshold2 = (threshold1 << 1);

    for (i = 63; i >= start_i; i--) {
        /* ... level = block[scantable[i]] * qmat[...]; ... */
        if (((unsigned)(level + threshold1)) > threshold2) {
            /* first coefficient (scanning downwards) that clears the dead zone */
            /* ... last_non_zero = i; break; ... */
        }
        /* ... otherwise zero the coefficient ... */
    }

    for (i = start_i; i <= last_non_zero; i++) {
        /* ... */
        if (((unsigned)(level + threshold1)) > threshold2) {
            /* ... quantize with bias and store the coefficient ... */
        }
    }

    /* permute the non-zero coefficients for the IDCT, if required */
    ff_block_permute(block, /* ... */ scantable, last_non_zero);

    return last_non_zero;
}
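The unsigned comparison used twice above is a branch-free dead-zone test: with threshold2 equal to twice threshold1, it is true exactly when |level| exceeds threshold1. A small standalone demonstration (values are illustrative, not encoder data):

#include <stdio.h>

int main(void)
{
    const int bias = 0;
    const unsigned threshold1 = (1u << 16) - bias - 1;
    const unsigned threshold2 = threshold1 << 1;
    const int samples[5] = { -70000, -1, 0, 1, 70000 };

    for (int i = 0; i < 5; i++) {
        int level = samples[i];
        /* kept iff level > threshold1 or level < -threshold1 */
        int keep = ((unsigned)(level + threshold1)) > threshold2;
        printf("level=%7d -> %s\n", level, keep ? "kept" : "zeroed");
    }
    return 0;
}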
static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, int16_t *block,
                                    int n, int qscale, int *overflow)
{
    const uint8_t *scantable = ctx->intra_scantable.scantable;
    const int *qmat = n < 4 ? ctx->q_intra_matrix[qscale]
                            : ctx->q_chroma_intra_matrix[qscale];
    int last_non_zero = 0;
    int i;

    /* ... forward DCT, DC handling ... */

    for (i = 1; i < 64; ++i) {
        int j = scantable[i];
        /* ... quantize block[j] and track last_non_zero ... */
    }

    /* permute the non-zero coefficients for the IDCT, if required */
    ff_block_permute(block, /* ... */ scantable, last_non_zero);

    return last_non_zero;
}
/* dnxhd_init_vlc(): build the level/run VLC lookup tables from the CID table. */
static av_cold int dnxhd_init_vlc(DNXHDEncContext *ctx)
{
    int max_level = 1 << (ctx->bit_depth + 2);

    /* ... allocate orig_vlc_codes / orig_vlc_bits, then bias the pointers so
       they can be indexed with negative levels ... */
    ctx->vlc_codes = ctx->orig_vlc_codes + max_level * 2;
    ctx->vlc_bits  = ctx->orig_vlc_bits  + max_level * 2;

    /* ... for every (level, run-flag) combination: ... */
        offset = (alevel - 1) >> 6;
        /* ... */
        for (j = 0; j < 257; j++) {
            if (ctx->cid_table->ac_info[2 * j + 0] >> 1 == alevel &&
                (!run || (ctx->cid_table->ac_info[2 * j + 1] & 2) && run)) {
                /* ... the sign goes into the lowest code bit: ... */
                    (ctx->cid_table->ac_codes[j] << 1) | (sign & 1);
                ctx->vlc_bits[index]  = ctx->cid_table->ac_bits[j] + 1;
                /* ... */
                ctx->vlc_codes[index] = ctx->cid_table->ac_codes[j];
                /* ... */
            }
        }
        /* ... levels routed through the index escape cost extra bits: ... */
        ctx->vlc_bits[index] += ctx->cid_table->index_bits;

    /* run codes */
    for (i = 0; i < 62; i++) {
        int run = ctx->cid_table->run[i];
        /* ... */
        ctx->run_codes[run] = ctx->cid_table->run_codes[i];
        ctx->run_bits[run]  = ctx->cid_table->run_bits[i];
    }
    /* ... */
}
/* dnxhd_init_qmat(): build the per-qscale quantization matrices from the CID
   table's luma and chroma weights. */
static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
{
    int qscale, i;
    uint16_t weight_matrix[64] = { 1, };
    const uint8_t *luma_weight_table   = ctx->cid_table->luma_weight;
    const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight;

    /* ... allocate qmatrix_l / qmatrix_c and the 16-bit variants ... */

    if (ctx->bit_depth == 8) {
        for (i = 1; i < 64; i++) {
            int j = ff_zigzag_direct[i];
            weight_matrix[j] = ctx->cid_table->luma_weight[i];
        }
        ff_convert_matrix(/* ... */ weight_matrix, ctx->intra_quant_bias, 1,
                          ctx->m.avctx->qmax, 1);
        for (i = 1; i < 64; i++) {
            int j = ff_zigzag_direct[i];
            weight_matrix[j] = ctx->cid_table->chroma_weight[i];
        }
        ff_convert_matrix(/* ... */ weight_matrix, ctx->intra_quant_bias, 1,
                          ctx->m.avctx->qmax, 1);

        for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
            for (i = 0; i < 64; i++) {
                ctx->qmatrix_l[qscale][i]      <<= 2;
                ctx->qmatrix_c[qscale][i]      <<= 2;
                ctx->qmatrix_l16[qscale][0][i] <<= 2;
                ctx->qmatrix_l16[qscale][1][i] <<= 2;
                ctx->qmatrix_c16[qscale][0][i] <<= 2;
                ctx->qmatrix_c16[qscale][1][i] <<= 2;
            }
        }
    } else { /* 10-bit */
        for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
            for (i = 1; i < 64; i++) {
                /* ... each entry is a fixed-point constant divided by
                   (qscale * luma_weight_table[i]) for luma and by
                   (qscale * chroma_weight_table[i]) for chroma ... */
            }
        }
    }

    ctx->m.q_chroma_intra_matrix16 = ctx->qmatrix_c16;
    ctx->m.q_chroma_intra_matrix   = ctx->qmatrix_c;
    ctx->m.q_intra_matrix16        = ctx->qmatrix_l16;
    ctx->m.q_intra_matrix          = ctx->qmatrix_l;

    return 0;
}
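The 10-bit branch stores fixed-point reciprocals so that the quantizer's inner loop needs only a multiply and a shift instead of a division. A minimal sketch of that idea (constants and names are illustrative, not the encoder's exact tables):

#include <stdint.h>

#define QMAT_SHIFT 18  /* plays the role of DNX10BIT_QMAT_SHIFT */

/* qmat[i] ~= (1 << QMAT_SHIFT) / (qscale * weight[i]); entry 0 (DC) is
 * handled separately by the encoder and left untouched here. */
static void build_qmat(int32_t qmat[64], int qscale, const uint8_t weight[64])
{
    for (int i = 1; i < 64; i++)
        qmat[i] = (1 << QMAT_SHIFT) / (qscale * weight[i]);
}

/* quantize one AC coefficient: level ~= coeff / (qscale * weight[i]) */
static inline int quantize(int coeff, int32_t qm)
{
    return (int)(((int64_t)coeff * qm) >> QMAT_SHIFT);
}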
    /* dnxhd_init_rc(): bit budget available for macroblock data in one coding unit */
    ctx->frame_bits = (ctx->coding_unit_size -
                       ctx->data_offset - 4 - ctx->min_padding) * 8;
375 "pixel format is incompatible with DNxHD profile\n");
381 "pixel format is incompatible with DNxHR HQX profile\n");
389 "pixel format is incompatible with DNxHR LB/SQ/HQ profile\n");
398 "video parameters incompatible with DNxHD. Valid DNxHD profiles:\n");
404 if (
ctx->cid >= 1270 &&
ctx->cid <= 1274)
409 "Input dimensions too small, input must be at least 256x120\n");
432 if (!
ctx->m.dct_quantize)
438 ctx->block_width_l2 = 4;
439 }
else if (
ctx->bit_depth == 10) {
442 ctx->block_width_l2 = 4;
445 ctx->block_width_l2 = 3;
457 ctx->m.mb_height /= 2;
462 "Interlaced encoding is not supported for DNxHR profiles.\n");
466 ctx->m.mb_num =
ctx->m.mb_height *
ctx->m.mb_width;
472 ctx->coding_unit_size =
ctx->frame_size;
474 ctx->frame_size =
ctx->cid_table->frame_size;
475 ctx->coding_unit_size =
ctx->cid_table->coding_unit_size;
478 if (
ctx->m.mb_height > 68)
479 ctx->data_offset = 0x170 + (
ctx->m.mb_height << 2);
481 ctx->data_offset = 0x280;
489 if (
ctx->nitris_compat)
490 ctx->min_padding = 1600;
/* dnxhd_write_header(): write the fixed header in front of each coding unit
   (fragments). */
    memset(buf, 0, ctx->data_offset);
    /* ... */
    if (ctx->cid >= 1270 && ctx->cid <= 1274)      /* DNxHR */
        /* ... */;
    buf[5] = ctx->interlaced ? ctx->cur_field + 2 : 0x01;
    /* ... */
    buf[0x21] = ctx->bit_depth == 10 ? 0x58 : 0x38;
    buf[0x22] = 0x88 + (ctx->interlaced << 2);
    /* dnxhd_encode_dc(): the DC codeword is the size prefix followed by the
       magnitude bits, i.e. put_bits() writes
       (ctx->cid_table->dc_codes[nbits] << nbits) + <low nbits of the
       sign-adjusted difference>. */
static av_always_inline void dnxhd_encode_block(DNXHDEncContext *ctx,
                                                int16_t *block,
                                                int last_index, int n)
{
    int last_non_zero = 0;
    int i, j;

    /* ... encode the DC difference first ... */

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        /* ... slevel = block[j]; for each non-zero coefficient: ... */
            int run_level = i - last_non_zero - 1;
            int rlevel    = slevel * (1 << 1) | !!run_level;
            /* ... put the level code, then the run code if run_level != 0:
               ctx->run_codes[run_level] ... */
    }
    /* ... */
}
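The low bit of rlevel above flags whether a run code follows the level code. A toy, self-contained scan showing how run_level and last_non_zero evolve over a zig-zag-ordered block (data is made up for illustration):

#include <stdio.h>

int main(void)
{
    const int coeffs[16] = { 0, 5, 0, 0, -3, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0 };
    int last_index = 9;              /* index of the last non-zero coefficient */
    int last_non_zero = 0;

    for (int i = 1; i <= last_index; i++) {
        int level = coeffs[i];
        if (level) {
            int run_level = i - last_non_zero - 1;   /* zeros skipped since last coded coeff */
            printf("run=%d level=%d\n", run_level, level);
            last_non_zero = i;
        }
    }
    return 0;     /* prints (0,5) (2,-3) (0,1) (3,2) */
}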
/* dnxhd_unquantize_c(): reconstruct coefficients so the rate control can
   measure the quantization error. */
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx,
                                                int16_t *block, int n,
                                                int qscale, int last_index)
{
    const uint8_t *weight_matrix;
    int level;
    int i;

    if (ctx->is_444)
        weight_matrix = ((n % 6) < 2) ? ctx->cid_table->luma_weight
                                      : ctx->cid_table->chroma_weight;
    else
        weight_matrix = (n & 2) ? ctx->cid_table->chroma_weight
                                : ctx->cid_table->luma_weight;

    for (i = 1; i <= last_index; i++) {
        int j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level < 0) {
            level = (1 - 2 * level) * qscale * weight_matrix[i];
            if (ctx->bit_depth == 10) {
                if (weight_matrix[i] != 8)
                    /* ... extra rounding ... */;
                /* ... */
            } else {
                if (weight_matrix[i] != 32)
                    /* ... extra rounding ... */;
                /* ... */
            }
            /* ... block[j] = -(reconstructed magnitude) ... */
        } else if (level > 0) {
            level = (2 * level + 1) * qscale * weight_matrix[i];
            if (ctx->bit_depth == 10) {
                if (weight_matrix[i] != 8)
                    /* ... extra rounding ... */;
                /* ... */
            } else {
                if (weight_matrix[i] != 32)
                    /* ... extra rounding ... */;
                /* ... */
            }
            /* ... block[j] = reconstructed magnitude ... */
        }
    }
}
/* dnxhd_ssd_block(): sum of squared differences between the original and
   the quantized/reconstructed block. */
static av_always_inline int dnxhd_ssd_block(int16_t *qblock, int16_t *block)
{
    int i;
    for (i = 0; i < 64; i++)
        /* ... accumulate the squared difference between block[i] and qblock[i] ... */;
    /* ... */
}
/* dnxhd_calc_ac_bits(): count the bits dnxhd_encode_block() would emit. */
static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx,
                                               int16_t *block, int last_index)
{
    int last_non_zero = 0;
    int i, j;

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        /* ... for each non-zero coefficient: ... */
            int run_level = i - last_non_zero - 1;
            bits += ctx->vlc_bits[/* level symbol | */ !!run_level] +
                    ctx->run_bits[run_level];
            /* ... */
    }
    /* ... */
}
/* dnxhd_get_blocks(): gather the 8 (12 for 4:4:4) source blocks of one
   macroblock, handling picture edges and the last row of 1080-line frames. */
static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
{
    const int bs = ctx->block_width_l2;
    const int bw = 1 << bs;
    int dct_y_offset  = ctx->dct_y_offset;
    int dct_uv_offset = ctx->dct_uv_offset;
    int linesize      = ctx->m.linesize;
    int uvlinesize    = ctx->m.uvlinesize;
    const uint8_t *ptr_y = ctx->thread[0]->src[0] +
                           ((mb_y << 4) * ctx->m.linesize) + (mb_x << bs + 1);
    const uint8_t *ptr_u = ctx->thread[0]->src[1] +
                           ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs + ctx->is_444);
    const uint8_t *ptr_v = ctx->thread[0]->src[2] +
                           ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs + ctx->is_444);
    /* ... pdsp: pixel-block DSP context (ff_pixblockdsp_init()) ... */

    /* Macroblock sticks out of the picture: replicate the border into an
       edge buffer (emulated_edge_mc) and read from there instead. */
    if (/* ... */ (mb_y << 4) + 16 > ctx->m.avctx->height /* ... */) {
        int y_w  = ctx->m.avctx->width  - (mb_x << 4);
        int y_h  = ctx->m.avctx->height - (mb_y << 4);
        int uv_w = (y_w + 1) / 2;
        /* ... emulated_edge_mc() for luma:   ..., linesize,   ctx->m.linesize,   ...
           ... and for both chroma planes:    ..., uvlinesize, ctx->m.uvlinesize, ... */
        dct_y_offset  = bw * linesize;
        dct_uv_offset = bw * uvlinesize;
        ptr_y = &ctx->edge_buf_y[0];
        ptr_u = &ctx->edge_buf_uv[0][0];
        ptr_v = &ctx->edge_buf_uv[1][0];
    } else if (/* ... */ (mb_y << 4) + 16 > ctx->m.avctx->height /* ... */) {
        int y_w  = ctx->m.avctx->width  - (mb_x << 4);
        int y_h  = ctx->m.avctx->height - (mb_y << 4);
        int uv_w = ctx->is_444 ? y_w : (y_w + 1) / 2;
        /* ... */
        uvlinesize = 16 + 16 * ctx->is_444;
        /* ... emulated_edge_mc() for luma:   ..., linesize,   ctx->m.linesize,   ...
           ... and for both chroma planes:    ..., uvlinesize, ctx->m.uvlinesize, ... */
        dct_y_offset  = bw * linesize / 2;
        dct_uv_offset = bw * uvlinesize / 2;
        ptr_y = &ctx->edge_buf_y[0];
        ptr_u = &ctx->edge_buf_uv[0][0];
        ptr_v = &ctx->edge_buf_uv[1][0];
    }

    if (!ctx->is_444) {
        /* ... pdsp->get_pixels() for blocks 0..3 (top luma halves and chroma) ... */

        if (mb_y + 1 == ctx->m.mb_height && ctx->m.avctx->height == 1080) {
            /* last macroblock row of 1080-line content: only half the rows exist */
            if (ctx->interlaced) {
                ctx->get_pixels_8x4_sym(ctx->blocks[4],
                                        ptr_y + dct_y_offset, linesize);
                ctx->get_pixels_8x4_sym(ctx->blocks[5],
                                        ptr_y + dct_y_offset + bw, linesize);
                ctx->get_pixels_8x4_sym(ctx->blocks[6],
                                        ptr_u + dct_uv_offset, uvlinesize);
                ctx->get_pixels_8x4_sym(ctx->blocks[7],
                                        ptr_v + dct_uv_offset, uvlinesize);
            } else {
                ctx->bdsp.clear_block(ctx->blocks[4]);
                ctx->bdsp.clear_block(ctx->blocks[5]);
                ctx->bdsp.clear_block(ctx->blocks[6]);
                ctx->bdsp.clear_block(ctx->blocks[7]);
            }
        } else {
            /* ... pdsp->get_pixels() for the bottom half:
               ptr_y + dct_y_offset,      linesize
               ptr_y + dct_y_offset + bw, linesize
               ptr_u + dct_uv_offset,     uvlinesize
               ptr_v + dct_uv_offset,     uvlinesize ... */
        }
    } else { /* 4:4:4: twelve blocks per macroblock */
        /* ... blocks 0..5 from the top halves of Y, U, V ... */
        pdsp->get_pixels(ctx->blocks[6],  ptr_y + dct_y_offset,       linesize);
        pdsp->get_pixels(ctx->blocks[7],  ptr_y + dct_y_offset + bw,  linesize);
        /* ... */
        pdsp->get_pixels(ctx->blocks[8],  ptr_u + dct_uv_offset,      uvlinesize);
        pdsp->get_pixels(ctx->blocks[9],  ptr_u + dct_uv_offset + bw, uvlinesize);
        /* ... */
        pdsp->get_pixels(ctx->blocks[10], ptr_v + dct_uv_offset,      uvlinesize);
        pdsp->get_pixels(ctx->blocks[11], ptr_v + dct_uv_offset + bw, uvlinesize);
    }
}
    /* dnxhd_switch_matrix(): block index -> component (0 = Y, 1 = Cb, 2 = Cr) */
    static const uint8_t component[8] = { 0, 0, 1, 2, 0, 0, 1, 2 };
/* dnxhd_calc_bits_thread(): per-slice worker that measures SSD and bit cost
   of every macroblock at the current candidate qscale. */
static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg,
                                  int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    int qscale = ctx->qscale;
    /* ... */
    ctx = ctx->thread[threadnr];

    /* reset the DC predictors for this slice */
    /* ... */ ctx->m.last_dc[2] = 1 << (ctx->bit_depth + 2);

    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.mb_width + mb_x;
        /* ... dnxhd_get_blocks(ctx, mb_x, mb_y); ... */

        for (i = 0; i < 8 + 4 * ctx->is_444; i++) {
            int16_t *src_block = ctx->blocks[i];
            /* ... n = dnxhd_switch_matrix(ctx, i); quantize a copy so the
               source block stays intact: ... */
            memcpy(block, src_block, 64 * sizeof(*block));
            /* ... last_index = ctx->m.dct_quantize(&ctx->m, block,
                   ctx->is_444 ? 4 * (n > 0) : 4 & (2 * i),
                   qscale, &overflow); ... */
            /* ... dnxhd_calc_ac_bits() for the AC cost,
                   dnxhd_unquantize_c() + dnxhd_ssd_block() for the SSD ... */
            dc_bits += ctx->cid_table->dc_bits[nbits] + nbits;
        }

        ctx->mb_rc[(qscale * ctx->m.mb_num) + mb].ssd  = ssd;
        ctx->mb_rc[(qscale * ctx->m.mb_num) + mb].bits = ac_bits + dc_bits + 12 +
            (1 + ctx->is_444) * 8 * ctx->vlc_bits[0];
    }
    /* ... */
}
/* dnxhd_encode_thread(): per-slice worker that actually writes the bitstream
   using the per-macroblock qscales chosen by the rate control. */
static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg,
                               int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    ctx = ctx->thread[threadnr];
    /* ... init_put_bits() on this slice's part of the packet, sized
       ctx->slice_size[jobnr] ... */

    /* reset the DC predictors for this slice */
    /* ... */ ctx->m.last_dc[2] = 1 << (ctx->bit_depth + 2);

    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.mb_width + mb_x;
        int qscale = ctx->mb_qscale[mb];
        /* ... dnxhd_get_blocks(ctx, mb_x, mb_y); ... */

        for (i = 0; i < 8 + 4 * ctx->is_444; i++) {
            /* ... n = dnxhd_switch_matrix(ctx, i); ... */
            int last_index = ctx->m.dct_quantize(&ctx->m, block,
                                 ctx->is_444 ? (((i >> 1) % 3) < 1 ? 0 : 4) : 4 & (2 * i),
                                 qscale, &overflow);
            /* ... dnxhd_encode_block(ctx, block, last_index, n); ... */
        }
    }
    /* ... flush_put_bits() ... */
}
/* dnxhd_setup_threads_slices(): sum the per-MB bit counts of each row into a
   byte-aligned slice size, so every slice thread knows where to write. */
    for (mb_y = 0; mb_y < ctx->m.mb_height; mb_y++) {
        int thread_size;
        /* ... record this slice's offset ... */
        ctx->slice_size[mb_y] = 0;
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            ctx->slice_size[mb_y] += ctx->mb_bits[mb];
        }
        /* round up to a 32-bit boundary, then convert bits to bytes */
        ctx->slice_size[mb_y]   = (ctx->slice_size[mb_y] + 31U) & ~31U;
        ctx->slice_size[mb_y] >>= 3;
        thread_size = ctx->slice_size[mb_y];
        /* ... advance the running offset by thread_size ... */
    }
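The rounding above pads each slice's bit count to a 32-bit boundary before converting it to bytes. A quick standalone check with illustrative values:

#include <stdio.h>

int main(void)
{
    const unsigned bits_examples[4] = { 1, 32, 33, 1000 };
    for (int i = 0; i < 4; i++) {
        unsigned bits  = bits_examples[i];
        unsigned bytes = ((bits + 31u) & ~31u) >> 3;   /* same expression as above */
        printf("%4u bits -> %3u bytes\n", bits, bytes);
    }
    return 0;     /* 1 -> 4, 32 -> 4, 33 -> 8, 1000 -> 128 */
}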
/* dnxhd_mb_var_thread(): per-macroblock activity measure (variance for 8-bit
   input, an equivalent sum-of-squares measure for 10-bit) used by the fast
   rate control to order macroblocks. */
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg,
                               int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x, x, y;
    int partial_last_row = (mb_y == ctx->m.mb_height - 1) &&
                           /* ... picture height is not a multiple of 16 ... */;

    ctx = ctx->thread[threadnr];
    if (ctx->bit_depth == 8) {
        const uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize);
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int sum, varc;

            if (/* full 16x16 macroblock */) {
                sum  = ctx->m.mpvencdsp.pix_sum(pix, ctx->m.linesize);
                varc = ctx->m.mpvencdsp.pix_norm1(pix, ctx->m.linesize);
            } else {
                /* partial macroblock at the border: accumulate by hand */
                for (y = 0; y < bh; y++) {
                    for (x = 0; x < bw; x++) {
                        uint8_t val = pix[x + y * ctx->m.linesize];
                        /* ... sum += val; varc += val * val; ... */
                    }
                }
            }
            varc = (varc - (((unsigned) sum * sum) >> 8) + 128) >> 8;

            ctx->mb_cmp[mb].value = varc;
            /* ... ctx->mb_cmp[mb].mb = mb; ... */
        }
    } else { /* 10-bit */
        const int linesize = ctx->m.linesize >> 1;   /* stride in 16-bit samples */
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x) {
            const uint16_t *pix = (const uint16_t *)ctx->thread[0]->src[0] +
                                  ((mb_y << 4) * linesize) + (mb_x << 4);
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            /* ... */
            for (i = 0; i < bh; ++i) {
                for (j = 0; j < bw; ++j) {
                    const int sample = (unsigned) pix[j] >> 6;
                    /* ... accumulate sum and sum of squares ... */
                }
                /* ... pix += linesize; ... */
            }
            /* ... store the result in ctx->mb_cmp[mb] ... */
        }
    }
    /* ... */
}
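The 8-bit formula above is the usual "mean of squares minus squared mean" variance, kept in integer arithmetic for a 16x16 block (256 samples). A minimal sketch of that arithmetic on its own (helper name is illustrative, not the encoder's DSP routine):

#include <stddef.h>
#include <stdint.h>

/* variance of a 16x16 block, in the same fixed-point form as above:
 * (sum_of_squares - sum^2/256 + rounding) / 256 */
static unsigned mb_variance(const uint8_t *pix, ptrdiff_t linesize)
{
    unsigned sum = 0, sq = 0;
    for (int y = 0; y < 16; y++) {
        for (int x = 0; x < 16; x++) {
            unsigned v = pix[x + y * linesize];
            sum += v;
            sq  += v * v;
        }
    }
    return (sq - ((sum * sum) >> 8) + 128) >> 8;
}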
/* dnxhd_encode_rdo(): pick a per-macroblock qscale that minimizes
   ssd + lambda * bits, then adjust lambda until the whole frame fits the bit
   budget. */
static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int lambda, up_step, down_step;
    int last_lower = INT_MAX, last_higher = 0;
    int x, y;

    /* ... run dnxhd_calc_bits_thread() for every candidate qscale ... */

    lambda = ctx->lambda;
    /* ... initialize up_step / down_step ... */

    for (;;) {
        if (lambda == last_higher) {
            /* ... converged: one final pass to fix the per-MB choices ... */
        }
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++) {
                unsigned min = UINT_MAX;
                int mb = y * ctx->m.mb_width + x;
                /* ... for each candidate qscale q: ... */
                    int i = (q * ctx->m.mb_num) + mb;
                    unsigned score = ctx->mb_rc[i].bits * lambda +
                                     /* ... SSD term, scaled by LAMBDA_FRAC_BITS ... */;
                    /* ... keep the qscale with the smallest score ... */
                ctx->mb_qscale[mb] = qscale;
                ctx->mb_bits[mb]   = ctx->mb_rc[rc].bits;
                /* ... accumulate the frame's total bits ... */
            }
        }

        if (bits < ctx->frame_bits) {
            /* frame fits: remember this lambda and try a smaller one */
            last_lower = FFMIN(lambda, last_lower);
            if (last_higher != 0)
                lambda = (lambda + last_higher) >> 1;
            else
                lambda -= down_step;
            down_step = FFMIN((int64_t)down_step * 5, INT_MAX);
            lambda = FFMAX(1, lambda);
            if (lambda == last_lower)
                break;
        } else {
            /* frame too big: raise lambda */
            last_higher = FFMAX(lambda, last_higher);
            if (last_lower != INT_MAX)
                lambda = (lambda + last_lower) >> 1;
            else if ((int64_t)lambda + up_step > INT_MAX)
                /* ... cannot fit: give up ... */;
            else
                lambda += up_step;
            up_step = FFMIN((int64_t)up_step * 5, INT_MAX);
        }
    }
    ctx->lambda = lambda;
    /* ... */
}
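The lambda adjustment above is a bracket-then-bisect search: probe with geometrically growing steps until the budget is straddled, then bisect between the last fitting and last overflowing lambda. A self-contained sketch of that strategy under stated assumptions (frame_cost is a hypothetical, monotonically non-increasing cost model; the budget is assumed reachable at some lambda; this mirrors the idea, not the encoder's exact control flow):

#include <stdint.h>

static int64_t find_lambda(int64_t target_bits,
                           int64_t (*frame_cost)(int64_t lambda))
{
    int64_t lambda      = 1 << 10;        /* initial guess, LAMBDA_FRAC_BITS = 10 */
    int64_t last_lower  = INT64_MAX;      /* smallest lambda known to fit */
    int64_t last_higher = 0;              /* largest lambda known to overflow */
    int64_t up_step = 1, down_step = 1;

    for (;;) {
        if (frame_cost(lambda) < target_bits) {
            if (lambda < last_lower)
                last_lower = lambda;
            if (last_higher != 0) {
                lambda = (lambda + last_higher) >> 1;   /* bracketed: bisect downwards */
            } else {
                lambda -= down_step;                    /* still probing: step down faster */
                down_step *= 5;
            }
            if (lambda < 1)
                lambda = 1;
        } else {
            if (lambda > last_higher)
                last_higher = lambda;
            if (last_lower != INT64_MAX) {
                lambda = (lambda + last_lower) >> 1;    /* bracketed: bisect upwards */
            } else {
                lambda += up_step;                      /* still probing: step up faster */
                up_step *= 5;
            }
        }
        if (lambda == last_lower || lambda == last_higher)
            return last_lower;                          /* bracket closed: smallest fitting lambda */
    }
}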
/* dnxhd_find_qscale(): same bracket-and-bisect idea, but over a single global
   qscale instead of lambda. */
    int last_higher = 0;
    int last_lower = INT_MAX;
    /* ... */
    qscale = ctx->qscale;
    for (;;) {
        ctx->qscale = qscale;
        /* ... run dnxhd_calc_bits_thread() for this qscale ... */
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++)
                /* ... bits += ctx->mb_rc[...].bits ... */;
            /* ... stop early once bits exceed the budget ... */
        }

        if (bits < ctx->frame_bits) {
            /* ... */
            if (last_higher == qscale - 1) {
                qscale = last_higher;
                break;
            }
            last_lower = FFMIN(qscale, last_lower);
            if (last_higher != 0)
                qscale = (qscale + last_higher) >> 1;
            else
                qscale -= down_step++;
            /* ... clamp qscale to at least 1 ... */
        } else {
            if (last_lower == qscale + 1)
                break;
            last_higher = FFMAX(qscale, last_higher);
            if (last_lower != INT_MAX)
                qscale = (qscale + last_lower) >> 1;
            else
                qscale += up_step++;
            if (qscale >= ctx->m.avctx->qmax)
                /* ... give up: cannot fit within qmax ... */;
        }
    }
    ctx->qscale = qscale;
#define BUCKET_BITS 8
#define RADIX_PASSES 4
#define NBUCKETS (1 << BUCKET_BITS)

/* radix_count(): histogram each BUCKET_BITS-wide digit of every entry's value,
   then convert the counts into start offsets. */
        int v = data[i].value;
        /* ... increment buckets[pass][digit of v] for every pass ... */
        /* ... */
        buckets[j][i] = offset -= buckets[j][i];

/* radix_sort_pass(): stable scatter of the entries by one digit. */
        int pos = buckets[v]++;
        /* ... dst[pos] = data[i]; ... */
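For reference, a complete, self-contained LSD radix sort in the same spirit as radix_count()/radix_sort_pass(): 8 bits per pass, 4 passes over 32-bit keys. It is simplified to sort plain integers ascending rather than the encoder's RCCMPEntry records:

#include <stdint.h>
#include <string.h>

static void radix_sort_u32(uint32_t *data, uint32_t *tmp, int size)
{
    for (int pass = 0; pass < 4; pass++) {
        int shift = pass * 8;
        int buckets[256] = { 0 };

        for (int i = 0; i < size; i++)                    /* histogram this digit */
            buckets[(data[i] >> shift) & 0xFF]++;
        for (int i = 0, offset = 0; i < 256; i++) {       /* prefix sums -> start offsets */
            int count = buckets[i];
            buckets[i] = offset;
            offset += count;
        }
        for (int i = 0; i < size; i++)                    /* stable scatter by digit */
            tmp[buckets[(data[i] >> shift) & 0xFF]++] = data[i];

        memcpy(data, tmp, size * sizeof(*data));
    }
}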
/* dnxhd_encode_fast(): start from the global qscale found by
   dnxhd_find_qscale(), then raise the qscale of the macroblocks with the
   cheapest quality loss per saved bit until the frame fits. */
    for (y = 0; y < ctx->m.mb_height; y++) {
        for (x = 0; x < ctx->m.mb_width; x++) {
            int mb = y * ctx->m.mb_width + x;
            int rc = (ctx->qscale * ctx->m.mb_num) + mb;
            /* ... ctx->mb_qscale[mb] = ctx->qscale; ... */
            ctx->mb_bits[mb] = ctx->mb_rc[rc].bits;
            max_bits += ctx->mb_rc[rc].bits;
            /* compare against the same macroblock coded at qscale + 1 */
            delta_bits = ctx->mb_rc[rc].bits -
                         ctx->mb_rc[rc + ctx->m.mb_num].bits;
            ctx->mb_cmp[mb].value =
                delta_bits ? ((ctx->mb_rc[rc].ssd -
                               ctx->mb_rc[rc + ctx->m.mb_num].ssd) * 100) /
                             delta_bits
                           : /* ... */;
            /* ... */
        }
    }

    /* ... radix_sort() the mb_cmp entries, cheapest quality loss first ... */

    for (x = 0; x < ctx->m.mb_num && max_bits > ctx->frame_bits; x++) {
        int mb = ctx->mb_cmp[x].mb;
        int rc = (ctx->qscale * ctx->m.mb_num) + mb;
        max_bits -= ctx->mb_rc[rc].bits -
                    ctx->mb_rc[rc + ctx->m.mb_num].bits;
        if (ctx->mb_qscale[mb] < 255)
            ctx->mb_qscale[mb]++;
        ctx->mb_bits[mb] = ctx->mb_rc[rc + ctx->m.mb_num].bits;
    }

    if (max_bits > ctx->frame_bits)
        /* ... still does not fit ... */;
/* dnxhd_load_picture(): point every slice-thread context at the source frame
   and at the per-field block offsets. */
    for (i = 0; i < ctx->m.avctx->thread_count; i++) {
        /* ... */
        ctx->thread[i]->dct_y_offset  = ctx->m.linesize   * 8;
        ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize * 8;
        /* ... */
    }
/* dnxhd_encode_picture(): per-frame (or per-field) driver. */
    for (i = 0; i < 3; i++) {
        /* ... set up the source plane pointers ... */
        if (ctx->interlaced && ctx->cur_field)
            /* ... start on the second field's lines ... */;
    }

encode_coding_unit:
    /* ... dnxhd_write_header(), rate control (RDO or fast path) ... */
    av_log(avctx, AV_LOG_ERROR,
           "picture could not fit ratecontrol constraints, increase qmax\n");
    /* ... */

    for (i = 0; i < ctx->m.mb_height; i++) {
        /* ... write the per-slice offset table ... */
    }

    /* ... encode the slices, then pad the coding unit with
       ctx->coding_unit_size - 4 - offset - ctx->data_offset bytes ... */

    /* interlaced: encode the second field as another coding unit */
    if (/* ... */) {
        ctx->cur_field ^= 1;
        buf += ctx->coding_unit_size;
        goto encode_coding_unit;
    }

/* dnxhd_encode_end(): free the per-thread encoder contexts and tables. */
    if (ctx->thread[1]) {
        /* ... free the additional slice-thread contexts ... */
    }