44 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
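/*
 * Illustrative sketch (not part of the listing): ff_hevc_pel_weight maps a
 * prediction-block width in samples to the index of the DSP routine sized for
 * that width, using C99 designated initializers so unused widths stay 0.
 * pel_weight_demo and pick_dsp_index() below are hypothetical stand-ins that
 * only demonstrate the lookup pattern.
 */
#include <stdint.h>
#include <stdio.h>

static const uint8_t pel_weight_demo[65] = {
    [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4,
    [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9
};

static int pick_dsp_index(int block_w)
{
    /* caller guarantees block_w is one of the legal HEVC PU widths */
    return pel_weight_demo[block_w];
}

int main(void)
{
    printf("width 32 -> dsp index %d\n", pick_dsp_index(32));
    return 0;
}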
89 int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
90 ((height >> log2_min_cb_size) + 1);
144 uint8_t luma_weight_l0_flag[16];
145 uint8_t chroma_weight_l0_flag[16];
146 uint8_t luma_weight_l1_flag[16];
147 uint8_t chroma_weight_l1_flag[16];
148 int luma_log2_weight_denom;
151 if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7)
161 if (!luma_weight_l0_flag[i]) {
168 chroma_weight_l0_flag[i] = get_bits1(gb);
171 chroma_weight_l0_flag[i] = 0;
174 if (luma_weight_l0_flag[i]) {
179 if (chroma_weight_l0_flag[i]) {
180 for (j = 0; j < 2; j++) {
197 if (!luma_weight_l1_flag[i]) {
204 chroma_weight_l1_flag[i] = get_bits1(gb);
207 chroma_weight_l1_flag[i] = 0;
210 if (luma_weight_l1_flag[i]) {
215 if (chroma_weight_l1_flag[i]) {
216 for (j = 0; j < 2; j++) {
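/*
 * Illustrative sketch (not from the listing): the pred_weight_table fragments
 * above read one luma and one chroma "weight present" flag per active
 * reference, for list 0 and again for list 1, after validating that the luma
 * weight denominator lies in [0, 7]. The BitReader below is a hypothetical
 * stand-in for GetBitContext, just to make the parsing pattern concrete.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct BitReader {
    const uint8_t *buf;
    size_t bit_pos;
} BitReader;

static unsigned br_get_bit(BitReader *br)
{
    unsigned bit = (br->buf[br->bit_pos >> 3] >> (7 - (br->bit_pos & 7))) & 1;
    br->bit_pos++;
    return bit;
}

/* Read the per-reference weight flags for one list; returns 0 on success,
 * -1 if the (already decoded) denominator is out of range. */
static int read_weight_flags(BitReader *br, int luma_log2_weight_denom,
                             int nb_refs, int chroma_present,
                             uint8_t luma_flag[16], uint8_t chroma_flag[16])
{
    int i;

    if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7)
        return -1;

    for (i = 0; i < nb_refs && i < 16; i++)
        luma_flag[i] = br_get_bit(br);
    for (i = 0; i < nb_refs && i < 16; i++)
        chroma_flag[i] = chroma_present ? br_get_bit(br) : 0;
    return 0;
}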
237 int prev_delta_msb = 0;
238 unsigned int nb_sps = 0, nb_sh;
254 for (i = 0; i < rps->nb_refs; i++) {
271 if (delta_poc_msb_present) {
274 if (i && i != nb_sps)
275 delta += prev_delta_msb;
278 prev_delta_msb = delta;
289 unsigned int num = 0, den = 0;
326 if (num != 0 && den != 0)
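/*
 * Sketch (assumption-labelled): the num/den pair checked above normally comes
 * from the VUI/VPS timing info (num_units_in_tick, time_scale), and a frame
 * rate is only derived when both are non-zero. reduce_fraction via gcd64()
 * below is a plain reduction standing in for libavutil's av_reduce().
 */
#include <stdint.h>

static int64_t gcd64(int64_t a, int64_t b)
{
    while (b) {
        int64_t t = a % b;
        a = b;
        b = t;
    }
    return a;
}

/* Derive framerate = time_scale / num_units_in_tick, reduced to lowest terms.
 * Returns 0 if the timing info is absent or invalid. */
static int derive_framerate(int64_t num_units_in_tick, int64_t time_scale,
                            int *fr_num, int *fr_den)
{
    int64_t g;

    if (num_units_in_tick == 0 || time_scale == 0)
        return 0;
    g = gcd64(time_scale, num_units_in_tick);
    *fr_num = (int)(time_scale / g);
    *fr_den = (int)(num_units_in_tick / g);
    return 1;
}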
333 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + CONFIG_HEVC_D3D11VA_HWACCEL + CONFIG_HEVC_VAAPI_HWACCEL + CONFIG_HEVC_VDPAU_HWACCEL)
351 #if CONFIG_HEVC_DXVA2_HWACCEL
354 #if CONFIG_HEVC_D3D11VA_HWACCEL
357 #if CONFIG_HEVC_VAAPI_HWACCEL
360 #if CONFIG_HEVC_VDPAU_HWACCEL
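/*
 * Sketch of how a decoder can assemble the get_format() candidate list behind
 * the CONFIG_* guards above: hardware pixel formats are appended only when the
 * corresponding hwaccel was compiled in, and the list always ends with the
 * software format and the AV_PIX_FMT_NONE terminator. build_pix_fmt_list() is
 * a hypothetical helper; the AV_PIX_FMT_* names are real libavutil formats.
 */
#include <libavutil/pixfmt.h>

#define HWACCEL_MAX_DEMO 4   /* upper bound on hw formats we may append */

/* Returns the number of entries written, terminator included. */
static int build_pix_fmt_list(enum AVPixelFormat sw_fmt,
                              enum AVPixelFormat fmts[HWACCEL_MAX_DEMO + 2])
{
    int n = 0;
#if CONFIG_HEVC_DXVA2_HWACCEL
    fmts[n++] = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_HEVC_D3D11VA_HWACCEL
    fmts[n++] = AV_PIX_FMT_D3D11VA_VLD;
#endif
#if CONFIG_HEVC_VAAPI_HWACCEL
    fmts[n++] = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_HEVC_VDPAU_HWACCEL
    fmts[n++] = AV_PIX_FMT_VDPAU;
#endif
    fmts[n++] = sw_fmt;
    fmts[n++] = AV_PIX_FMT_NONE;
    return n;
}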
382 for (i = 0; i < 3; i++) {
391 for(c_idx = 0; c_idx < c_count; c_idx++) {
466 int slice_address_length;
476 "Invalid slice segment address: %u.\n",
525 "Ignoring POC change between slices: %d -> %d\n", s->
poc, poc);
541 int numbits, rps_idx;
549 rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
655 "Invalid collocated_ref_idx: %d.\n",
670 "Invalid number of merging MVP candidates: %d.\n",
692 int deblocking_filter_override_flag = 0;
695 deblocking_filter_override_flag = get_bits1(gb);
697 if (deblocking_filter_override_flag) {
740 if (offset_len < 1 || offset_len > 32) {
776 for (i = 0; i < length; i++)
785 "The slice_qp %d is outside the valid range "
819 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
821 #define SET_SAO(elem, value) \
823 if (!sao_merge_up_flag && !sao_merge_left_flag) \
825 else if (sao_merge_left_flag) \
826 sao->elem = CTB(s->sao, rx-1, ry).elem; \
827 else if (sao_merge_up_flag) \
828 sao->elem = CTB(s->sao, rx, ry-1).elem; \
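/*
 * Sketch of the SAO merge logic that the CTB()/SET_SAO() macros above express:
 * when sao_merge_left_flag or sao_merge_up_flag is set, the current CTB simply
 * copies the SAO parameters of its left or upper neighbour instead of decoding
 * new ones. The SAOParamsDemo type and the flat ctb grid are hypothetical.
 */
typedef struct SAOParamsDemo {
    int type_idx[3];
    int band_position[3];
    int offset_val[3][5];
} SAOParamsDemo;

/* sao: row-major grid of per-CTB parameters, ctb_width entries per row */
static void sao_apply_merge(SAOParamsDemo *sao, int ctb_width,
                            int rx, int ry,
                            int merge_left, int merge_up)
{
    SAOParamsDemo *cur = &sao[ry * ctb_width + rx];

    if (merge_left)
        *cur = sao[ry * ctb_width + (rx - 1)];   /* copy from left CTB  */
    else if (merge_up)
        *cur = sao[(ry - 1) * ctb_width + rx];   /* copy from upper CTB */
    /* otherwise the caller decodes fresh SAO parameters into *cur */
}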
836 int sao_merge_left_flag = 0;
837 int sao_merge_up_flag = 0;
847 if (ry > 0 && !sao_merge_left_flag) {
872 for (i = 0; i < 4; i++)
876 for (i = 0; i < 4; i++) {
885 } else if (c_idx != 2) {
891 for (i = 0; i < 4; i++) {
899 sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
911 if (log2_res_scale_abs_plus1 != 0) {
914 (1 - 2 * res_scale_sign_flag);
924 int xBase, int yBase, int cb_xBase, int cb_yBase,
925 int log2_cb_size, int log2_trafo_size,
926 int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
929 const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
933 int trafo_size = 1 << log2_trafo_size;
939 if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
943 int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
945 (cbf_cb[1] || cbf_cr[1]));
957 "The cu_qp_delta %d is outside the valid range "
971 if (cu_chroma_qp_offset_flag) {
972 int cu_chroma_qp_offset_idx = 0;
976 "cu_chroma_qp_offset_idx not yet tested.\n");
1010 int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1011 int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1022 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1026 log2_trafo_size_c, scan_idx_c, 1);
1034 int size = 1 << log2_trafo_size_c;
1038 for (i = 0; i < (size * size); i++) {
1051 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1055 log2_trafo_size_c, scan_idx_c, 2);
1063 int size = 1 << log2_trafo_size_c;
1067 for (i = 0; i < (size * size); i++) {
1074 int trafo_size_h = 1 << (log2_trafo_size + 1);
1075 int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1079 trafo_size_h, trafo_size_v);
1080 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1084 log2_trafo_size, scan_idx_c, 1);
1089 trafo_size_h, trafo_size_v);
1090 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1094 log2_trafo_size, scan_idx_c, 2);
1099 int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1100 int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1106 trafo_size_h, trafo_size_v);
1107 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1108 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1110 } else if (blk_idx == 3) {
1111 int trafo_size_h = 1 << (log2_trafo_size + 1);
1112 int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1114 trafo_size_h, trafo_size_v);
1119 trafo_size_h, trafo_size_v);
1120 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1121 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
1131 int cb_size = 1 << log2_cb_size;
1139 for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1140 for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1141 s->is_pcm[i + j * min_pu_width] = 2;
1145 int xBase, int yBase, int cb_xBase, int cb_yBase,
1146 int log2_cb_size, int log2_trafo_size,
1147 int trafo_depth, int blk_idx,
1148 const int *base_cbf_cb, const int *base_cbf_cr)
1156 cbf_cb[0] = base_cbf_cb[0];
1157 cbf_cb[1] = base_cbf_cb[1];
1158 cbf_cr[0] = base_cbf_cr[0];
1159 cbf_cr[1] = base_cbf_cr[1];
1162 if (trafo_depth == 1) {
1178 if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1180 trafo_depth < lc->cu.max_trafo_depth &&
1195 if (trafo_depth == 0 || cbf_cb[0]) {
1202 if (trafo_depth == 0 || cbf_cr[0]) {
1210 if (split_transform_flag) {
1211 const int trafo_size_split = 1 << (log2_trafo_size - 1);
1212 const int x1 = x0 + trafo_size_split;
1213 const int y1 = y0 + trafo_size_split;
1215 #define SUBDIVIDE(x, y, idx) \
1217 ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1218 log2_trafo_size - 1, trafo_depth + 1, idx, \
1237 cbf_cb[0] || cbf_cr[0] ||
1243 log2_cb_size, log2_trafo_size,
1244 blk_idx, cbf_luma, cbf_cb, cbf_cr);
1250 for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1251 for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1252 int x_tu = (x0 + j) >> log2_min_tu_size;
1253 int y_tu = (y0 + i) >> log2_min_tu_size;
1254 s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
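/*
 * Sketch of the recursion pattern behind hls_transform_tree()/SUBDIVIDE above:
 * when the split flag is set, the transform block is divided into four
 * quadrants of half the size and each quadrant is processed recursively with
 * trafo_depth + 1; otherwise the block itself is coded as one transform unit.
 * process_tu(), read_split_flag and the depth/size limits are hypothetical
 * placeholders for the decoder's real syntax reading and TU handling.
 */
static void process_tu(int x0, int y0, int log2_size)
{
    /* stand-in for decoding one transform unit */
    (void)x0; (void)y0; (void)log2_size;
}

static void transform_tree_demo(int x0, int y0, int log2_trafo_size,
                                int trafo_depth, int max_depth, int log2_min_size,
                                int (*read_split_flag)(void *opaque), void *opaque)
{
    int split = 0;

    /* the split flag is only coded while splitting is still allowed */
    if (log2_trafo_size > log2_min_size && trafo_depth < max_depth)
        split = read_split_flag(opaque);

    if (split) {
        const int half = 1 << (log2_trafo_size - 1);
        /* four quadrants, scanned in z-order as in the SUBDIVIDE macro */
        transform_tree_demo(x0,        y0,        log2_trafo_size - 1, trafo_depth + 1,
                            max_depth, log2_min_size, read_split_flag, opaque);
        transform_tree_demo(x0 + half, y0,        log2_trafo_size - 1, trafo_depth + 1,
                            max_depth, log2_min_size, read_split_flag, opaque);
        transform_tree_demo(x0,        y0 + half, log2_trafo_size - 1, trafo_depth + 1,
                            max_depth, log2_min_size, read_split_flag, opaque);
        transform_tree_demo(x0 + half, y0 + half, log2_trafo_size - 1, trafo_depth + 1,
                            max_depth, log2_min_size, read_split_flag, opaque);
    } else {
        process_tu(x0, y0, log2_trafo_size);
    }
}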
1271 int cb_size = 1 << log2_cb_size;
1326 int block_w, int block_h, int luma_weight, int luma_offset)
1330 ptrdiff_t srcstride = ref->linesize[0];
1339 x_off += mv->x >> 2;
1340 y_off += mv->y >> 2;
1351 edge_emu_stride, srcstride,
1355 pic_width, pic_height);
1357 srcstride = edge_emu_stride;
1362 block_h, mx, my, block_w);
1366 luma_weight, luma_offset, mx, my, block_w);
1386 AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1387 int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
1390 ptrdiff_t src0stride = ref0->linesize[0];
1391 ptrdiff_t src1stride = ref1->linesize[0];
1394 int mx0 = mv0->x & 3;
1395 int my0 = mv0->y & 3;
1396 int mx1 = mv1->x & 3;
1397 int my1 = mv1->y & 3;
1400 int x_off0 = x_off + (mv0->x >> 2);
1401 int y_off0 = y_off + (mv0->y >> 2);
1402 int x_off1 = x_off + (mv1->x >> 2);
1403 int y_off1 = y_off + (mv1->y >> 2);
1417 edge_emu_stride, src0stride,
1421 pic_width, pic_height);
1423 src0stride = edge_emu_stride;
1434 edge_emu_stride, src1stride,
1438 pic_width, pic_height);
1440 src1stride = edge_emu_stride;
1444 block_h, mx0, my0, block_w);
1447 block_h, mx1, my1, block_w);
1477 ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1478 int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1483 const Mv *mv = &current_mv->mv[reflist];
1489 intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1490 intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1491 intptr_t _mx = mx << (1 - hshift);
1492 intptr_t _my = my << (1 - vshift);
1494 x_off += mv->x >> (2 + hshift);
1495 y_off += mv->y >> (2 + vshift);
1506 edge_emu_stride, srcstride,
1510 pic_width, pic_height);
1513 srcstride = edge_emu_stride;
1517 block_h, _mx, _my, block_w);
1521 chroma_weight, chroma_offset, _mx, _my, block_w);
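/*
 * Sketch of the motion-vector arithmetic visible in chroma_mc_uni() above:
 * luma MVs are stored in quarter-sample units, so for a chroma plane that is
 * subsampled by (hshift, vshift) the vector has 2 + shift fractional bits.
 * av_mod_uintp2() keeps those low bits as the fractional phase, the remaining
 * high bits give the integer sample offset, and the phase is rescaled to the
 * 1/8-sample grid used by the chroma interpolation filters. mod_uintp2_demo()
 * and ChromaMv are local stand-ins for this illustration.
 */
#include <stdint.h>

static unsigned mod_uintp2_demo(unsigned a, unsigned p)
{
    return a & ((1U << p) - 1);   /* low p bits, i.e. a mod 2^p */
}

typedef struct ChromaMv {
    int int_x, int_y;        /* whole-sample offset in the chroma plane   */
    intptr_t frac_x, frac_y; /* fractional phase in 1/8 chroma samples    */
} ChromaMv;

static ChromaMv split_chroma_mv(int16_t mv_x, int16_t mv_y,
                                int hshift, int vshift)
{
    ChromaMv r;
    intptr_t mx = mod_uintp2_demo(mv_x, 2 + hshift);
    intptr_t my = mod_uintp2_demo(mv_y, 2 + vshift);

    r.frac_x = mx << (1 - hshift);  /* 4:2:0 already has 3 fractional bits */
    r.frac_y = my << (1 - vshift);
    r.int_x  = mv_x >> (2 + hshift);
    r.int_y  = mv_y >> (2 + vshift);
    return r;
}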
1542 int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1547 ptrdiff_t src1stride = ref0->linesize[cidx+1];
1548 ptrdiff_t src2stride = ref1->linesize[cidx+1];
1553 Mv *mv0 = &current_mv->mv[0];
1554 Mv *mv1 = &current_mv->mv[1];
1558 intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1559 intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1560 intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1561 intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1562 intptr_t _mx0 = mx0 << (1 - hshift);
1563 intptr_t _my0 = my0 << (1 - vshift);
1564 intptr_t _mx1 = mx1 << (1 - hshift);
1565 intptr_t _my1 = my1 << (1 - vshift);
1567 int x_off0 = x_off + (mv0->x >> (2 + hshift));
1568 int y_off0 = y_off + (mv0->y >> (2 + vshift));
1569 int x_off1 = x_off + (mv1->x >> (2 + hshift));
1570 int y_off1 = y_off + (mv1->y >> (2 + vshift));
1572 src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1573 src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1584 edge_emu_stride, src1stride,
1588 pic_width, pic_height);
1591 src1stride = edge_emu_stride;
1603 edge_emu_stride, src2stride,
1607 pic_width, pic_height);
1610 src2stride = edge_emu_stride;
1614 block_h, _mx0, _my0, block_w);
1617 src2, src2stride, lc->tmp,
1618 block_h, mx1, my1, block_w);
1621 src2, src2stride, lc->tmp,
1628 _mx1, _my1, block_w);
1634 int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1641 int nPbH, int log2_cb_size, int part_idx,
1653 if (inter_pred_idc != PRED_L1) {
1661 part_idx, merge_idx, mv, mvp_flag, 0);
1666 if (inter_pred_idc != PRED_L0) {
1679 part_idx, merge_idx, mv, mvp_flag, 1);
1687 int log2_cb_size, int partIdx, int idx)
1689 #define POS(c_idx, x, y) \
1690 &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1691 (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1694 struct MvField current_mv = {{{ 0 }}};
1706 int x_cb = x0 >> log2_min_cb_size;
1707 int y_cb = y0 >> log2_min_cb_size;
1723 partIdx, merge_idx, &current_mv);
1726 partIdx, merge_idx, &current_mv);
1734 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1737 ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1743 ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1756 &current_mv.mv[0], x0, y0, nPbW, nPbH,
1762 0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1765 0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1775 &current_mv.mv[1], x0, y0, nPbW, nPbH,
1781 1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1785 1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1795 &current_mv.mv[0], x0, y0, nPbW, nPbH,
1796 ref1->frame, &current_mv.mv[1], &current_mv);
1800 x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1803 x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1812 int prev_intra_luma_pred_flag)
1830 int intra_pred_mode;
1835 if ((y0 - 1) < y_ctb)
1838 if (cand_left == cand_up) {
1839 if (cand_left < 2) {
1844 candidate[0] = cand_left;
1845 candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
1846 candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
1849 candidate[0] = cand_left;
1850 candidate[1] = cand_up;
1860 if (prev_intra_luma_pred_flag) {
1861 intra_pred_mode = candidate[lc->pu.mpm_idx];
1863 if (candidate[0] > candidate[1])
1865 if (candidate[0] > candidate[2])
1867 if (candidate[1] > candidate[2])
1871 for (i = 0; i < 3; i++)
1872 if (intra_pred_mode >= candidate[i])
1879 for (i = 0; i < size_in_pus; i++) {
1880 memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
1881 intra_pred_mode, size_in_pus);
1883 for (j = 0; j < size_in_pus; j++) {
1888 return intra_pred_mode;
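/*
 * Sketch of the most-probable-mode (MPM) derivation that the fragments above
 * implement (HEVC clause 8.4.2): three candidate modes are built from the left
 * and upper neighbours, then the coded index either selects one of them or a
 * remainder mode is remapped around the sorted candidates. Mode numbers follow
 * the usual convention (0 = planar, 1 = DC, 2..34 = angular); the DEMO_* names
 * and function signatures are local to this illustration.
 */
#define DEMO_INTRA_PLANAR    0
#define DEMO_INTRA_DC        1
#define DEMO_INTRA_ANGULAR26 26

static void build_mpm_candidates(int cand_left, int cand_up, int candidate[3])
{
    if (cand_left == cand_up) {
        if (cand_left < 2) {               /* both neighbours planar/DC */
            candidate[0] = DEMO_INTRA_PLANAR;
            candidate[1] = DEMO_INTRA_DC;
            candidate[2] = DEMO_INTRA_ANGULAR26;
        } else {                           /* same angular mode: add its neighbours */
            candidate[0] = cand_left;
            candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
            candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
        }
    } else {
        candidate[0] = cand_left;
        candidate[1] = cand_up;
        if (candidate[0] != DEMO_INTRA_PLANAR && candidate[1] != DEMO_INTRA_PLANAR)
            candidate[2] = DEMO_INTRA_PLANAR;
        else if (candidate[0] != DEMO_INTRA_DC && candidate[1] != DEMO_INTRA_DC)
            candidate[2] = DEMO_INTRA_DC;
        else
            candidate[2] = DEMO_INTRA_ANGULAR26;
    }
}

/* Map the coded syntax (mpm flag + index, or remainder) back to a luma mode. */
static int decode_intra_mode(int prev_intra_luma_pred_flag, int mpm_idx,
                             int rem_intra_luma_pred_mode,
                             int cand_left, int cand_up)
{
    int candidate[3], mode, i;

    build_mpm_candidates(cand_left, cand_up, candidate);
    if (prev_intra_luma_pred_flag)
        return candidate[mpm_idx];

    /* sort the three candidates, then step the remainder past each one it reaches */
    if (candidate[0] > candidate[1]) { int t = candidate[0]; candidate[0] = candidate[1]; candidate[1] = t; }
    if (candidate[0] > candidate[2]) { int t = candidate[0]; candidate[0] = candidate[2]; candidate[2] = t; }
    if (candidate[1] > candidate[2]) { int t = candidate[1]; candidate[1] = candidate[2]; candidate[2] = t; }

    mode = rem_intra_luma_pred_mode;
    for (i = 0; i < 3; i++)
        if (mode >= candidate[i])
            mode++;
    return mode;
}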
1892 int log2_cb_size, int ct_depth)
1905 0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
1906 21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
1912 static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
1913 uint8_t prev_intra_luma_pred_flag[4];
1915 int pb_size = (1 << log2_cb_size) >> split;
1916 int side = split + 1;
1920 for (i = 0; i < side; i++)
1921 for (j = 0; j < side; j++)
1924 for (i = 0; i < side; i++) {
1925 for (j = 0; j < side; j++) {
1926 if (prev_intra_luma_pred_flag[2 * i + j])
1933 prev_intra_luma_pred_flag[2 * i + j]);
1938 for (i = 0; i < side; i++) {
1939 for (j = 0; j < side; j++) {
1941 if (chroma_mode != 4) {
1954 if (chroma_mode != 4) {
1958 mode_idx = intra_chroma_table[chroma_mode];
1965 if (chroma_mode != 4) {
1981 int pb_size = 1 << log2_cb_size;
1989 if (size_in_pus == 0)
1991 for (j = 0; j < size_in_pus; j++)
1992 memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
1994 for (j = 0; j < size_in_pus; j++)
1995 for (k = 0; k < size_in_pus; k++)
2001 int cb_size = 1 << log2_cb_size;
2004 int length = cb_size >> log2_min_cb_size;
2006 int x_cb = x0 >> log2_min_cb_size;
2007 int y_cb = y0 >> log2_min_cb_size;
2008 int idx = log2_cb_size - 2;
2019 for (x = 0; x < 4; x++)
2031 x = y_cb * min_cb_width + x_cb;
2032 for (y = 0; y < length; y++) {
2033 memset(&s->skip_flag[x], skip_flag, length);
2038 x = y_cb * min_cb_width + x_cb;
2039 for (y = 0; y < length; y++) {
2066 log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2092 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2096 hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2100 hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2104 hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2108 hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2112 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2113 hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2114 hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
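/*
 * Sketch of the partition geometry implied by the hls_prediction_unit() calls
 * above: each part mode carves the coding block (cb_size x cb_size at x0, y0)
 * into 1, 2 or 4 prediction units, with the asymmetric modes using 1/4 and 3/4
 * splits. The PartModeDemo/PuRect names are local to this illustration.
 */
typedef enum {
    DEMO_PART_2Nx2N, DEMO_PART_2NxN, DEMO_PART_Nx2N, DEMO_PART_NxN,
    DEMO_PART_2NxnU, DEMO_PART_2NxnD, DEMO_PART_nLx2N, DEMO_PART_nRx2N
} PartModeDemo;

typedef struct PuRect { int x, y, w, h; } PuRect;

/* Fill pu[] with the prediction units of one coding block; returns the count. */
static int split_coding_block(PartModeDemo mode, int x0, int y0, int cb, PuRect pu[4])
{
    switch (mode) {
    case DEMO_PART_2Nx2N:
        pu[0] = (PuRect){ x0, y0, cb, cb };
        return 1;
    case DEMO_PART_2NxN:
        pu[0] = (PuRect){ x0, y0,          cb, cb / 2 };
        pu[1] = (PuRect){ x0, y0 + cb / 2, cb, cb / 2 };
        return 2;
    case DEMO_PART_Nx2N:
        pu[0] = (PuRect){ x0,          y0, cb / 2, cb };
        pu[1] = (PuRect){ x0 + cb / 2, y0, cb / 2, cb };
        return 2;
    case DEMO_PART_2NxnU:
        pu[0] = (PuRect){ x0, y0,          cb, cb / 4 };
        pu[1] = (PuRect){ x0, y0 + cb / 4, cb, cb * 3 / 4 };
        return 2;
    case DEMO_PART_2NxnD:
        pu[0] = (PuRect){ x0, y0,              cb, cb * 3 / 4 };
        pu[1] = (PuRect){ x0, y0 + cb * 3 / 4, cb, cb / 4 };
        return 2;
    case DEMO_PART_nLx2N:
        pu[0] = (PuRect){ x0,          y0, cb / 4,     cb };
        pu[1] = (PuRect){ x0 + cb / 4, y0, cb * 3 / 4, cb };
        return 2;
    case DEMO_PART_nRx2N:
        pu[0] = (PuRect){ x0,              y0, cb * 3 / 4, cb };
        pu[1] = (PuRect){ x0 + cb * 3 / 4, y0, cb / 4,     cb };
        return 2;
    case DEMO_PART_NxN:
    default:
        pu[0] = (PuRect){ x0,          y0,          cb / 2, cb / 2 };
        pu[1] = (PuRect){ x0 + cb / 2, y0,          cb / 2, cb / 2 };
        pu[2] = (PuRect){ x0,          y0 + cb / 2, cb / 2, cb / 2 };
        pu[3] = (PuRect){ x0 + cb / 2, y0 + cb / 2, cb / 2, cb / 2 };
        return 4;
    }
}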
2120 int rqt_root_cbf = 1;
2127 const static int cbf[2] = { 0 };
2133 log2_cb_size, 0, 0, cbf, cbf);
2146 x = y_cb * min_cb_width + x_cb;
2147 for (y = 0; y < length; y++) {
2152 if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2153 ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2163 int log2_cb_size, int cb_depth)
2166 const int cb_size = 1 << log2_cb_size;
2171 if (x0 + cb_size <= s->ps.sps->width &&
2172 y0 + cb_size <= s->ps.sps->height &&
2191 const int cb_size_split = cb_size >> 1;
2192 const int x1 = x0 + cb_size_split;
2193 const int y1 = y0 + cb_size_split;
2201 if (more_data && x1 < s->ps.sps->width) {
2206 if (more_data && y1 < s->ps.sps->height) {
2211 if (more_data && x1 < s->ps.sps->width &&
2212 y1 < s->ps.sps->height) {
2218 if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2219 ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2223 return ((x1 + cb_size_split) < s->ps.sps->width ||
2231 if ((!((x0 + cb_size) %
2238 return !end_of_slice_flag;
2253 int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2258 if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2284 if (ctb_addr_in_slice <= 0)
2286 if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2318 while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2334 if (more_data < 0) {
2369 int *ctb_row_p = input_ctb_row;
2370 int ctb_row = ctb_row_p[job];
2380 ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2387 while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2388 int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2389 int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2401 hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2404 if (more_data < 0) {
2405 s->tab_slice_address[ctb_addr_rs] = -1;
2417 if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2423 if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2428 ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2431 if(x_ctb >= s->ps.sps->width) {
2448 int64_t startheader, cmpt = 0;
2479 for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2488 for (j = 0, cmpt = 0, startheader = offset
2501 if (length < offset) {
2592 const int mapping[3] = {2, 0, 1};
2593 const int chroma_den = 50000;
2594 const int luma_den = 10000;
2601 for (i = 0; i < 3; i++) {
2602 const int j = mapping[i];
2622 "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2631 "min_luminance=%f, max_luminance=%f\n",
2710 int ctb_addr_ts, ret;
2759 if (s->max_ra == INT_MAX) {
2781 } else if (!s->ref) {
2788 "Non-matching NAL types of the VCL NALUs: %d %d\n",
2798 "Error constructing the reference lists for the current slice.\n");
2822 if (ctb_addr_ts < 0) {
2862 "Error splitting the input into NAL units.\n");
2877 "Error parsing NAL unit #%d.\n", i);
2892 for (i = 0; i < 16; i++)
2893 av_log(log_ctx, level, "%02"PRIx8, md5[i]);
2922 for (i = 0; frame->data[i]; i++) {
2930 for (j = 0; j < h; j++) {
2935 (const uint16_t *) src, w);
2943 if (!memcmp(md5, s->md5[i], 16)) {
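/*
 * Sketch of the per-plane hash check that verify_md5()/print_md5() perform for
 * the SEI picture-hash message: each plane is hashed row by row over its
 * visible width only (linesize padding is excluded) and the digest is compared
 * with the one signalled in the bitstream; for more than 8 bits per sample the
 * listing additionally byteswaps rows on big-endian hosts before hashing.
 * This uses libavutil's public MD5 API; plane_md5_demo() is only an
 * illustration and assumes an 8-bit plane.
 */
#include <libavutil/md5.h>
#include <libavutil/mem.h>
#include <string.h>

/* Hash `height` rows of `width` bytes from a plane with stride `linesize`
 * and compare against the 16-byte reference digest. Returns 1 on match. */
static int plane_md5_demo(const uint8_t *data, int linesize,
                          int width, int height, const uint8_t ref[16])
{
    struct AVMD5 *md5 = av_md5_alloc();
    uint8_t digest[16];
    int j, ok;

    if (!md5)
        return 0;
    av_md5_init(md5);
    for (j = 0; j < height; j++)
        av_md5_update(md5, data + j * linesize, width);
    av_md5_final(md5, digest);
    av_free(md5);

    ok = !memcmp(digest, ref, 16);
    return ok;
}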
2985 "hardware accelerator failed to decode picture\n");
3067 for (i = 0; i < 3; i++) {
3250 int i, j, num_arrays, nal_len_size;
3255 nal_len_size = (bytestream2_get_byte(&gb) & 3) + 1;
3256 num_arrays = bytestream2_get_byte(&gb);
3263 for (i = 0; i < num_arrays; i++) {
3264 int type = bytestream2_get_byte(&gb) & 0x3f;
3265 int cnt = bytestream2_get_be16(&gb);
3267 for (j = 0; j < cnt; j++) {
3269 int nalsize = bytestream2_peek_be16(&gb) + 2;
3272 "Invalid NAL unit size in extradata.\n");
3279 "Decoding nal unit %d %d from hvcC failed\n",
3350 memset(s, 0, sizeof(*s));
3367 #define OFFSET(x) offsetof(HEVCContext, x)
3368 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3371 { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3373 { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3391 .priv_class = &hevc_decoder_class,