/* z_scan_block_avail(HEVCContext *s, int xCurr, int yCurr, int xN, int yN):
 * a neighbour in a CTB above or to the left of the current CTB is treated as
 * already decoded and therefore available in z-scan order. */
#define MIN_TB_ADDR_ZS(x, y) \
    s->ps.pps->min_tb_addr_zs[(y) * (s->ps.sps->tb_mask+2) + (x)]

    if (yN_ctb < yCurr_ctb || xN_ctb < xCurr_ctb)
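/* is_diff_mer(HEVCContext *s, int xN, int yN, int xP, int yP): compares two
 * block positions at the parallel merge level granularity
 * (log2_parallel_merge_level = log2_parallel_merge_level_minus2 + 2). */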
    return xN >> plevel == xP >> plevel &&
           yN >> plevel == yP >> plevel;
#define MATCH_MV(x) (AV_RN32A(&A.x) == AV_RN32A(&B.x))
#define MATCH(x) (A.x == B.x)
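/* compare_mv_ref_idx(struct MvField A, struct MvField B): two merge candidates
 * are considered equal only if their prediction flags match and, for each list
 * in use, both the reference index and the motion vector match. */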
        } else if (a_pf == PF_L0) {
            return MATCH(ref_idx[0]) && MATCH_MV(mv[0]);
        } else if (a_pf == PF_L1) {
            return MATCH(ref_idx[1]) && MATCH_MV(mv[1]);
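/* mv_scale(): scale src by the ratio of the two POC distances tb/td (each
 * clipped to [-128, 127]), the standard HEVC motion vector scaling used for
 * both temporal and spatial MV prediction. */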
static av_always_inline void mv_scale(Mv *dst, Mv *src, int td, int tb)
{
    int tx, scale_factor;

    td = av_clip_int8(td);
    tb = av_clip_int8(tb);
    tx = (0x4000 + abs(td / 2)) / td;
    scale_factor = av_clip_intp2((tb * tx + 32) >> 6, 12);
    dst->x = av_clip_int16((scale_factor * src->x + 127 +
                           (scale_factor * src->x < 0)) >> 8);
    dst->y = av_clip_int16((scale_factor * src->y + 127 +
                           (scale_factor * src->y < 0)) >> 8);
}
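/* check_mvset(): derive mvLXCol from the colocated motion vector mvCol.
 * A long-term/short-term mismatch makes the candidate unusable; the vector is
 * copied as-is for long-term references or equal POC distances, and scaled
 * with mv_scale() otherwise. */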
static int check_mvset(Mv *mvLXCol, Mv *mvCol,
                       int colPic, int poc,
                       RefPicList *refPicList, int X, int refIdxLx,
                       RefPicList *refPicList_col, int listCol, int refidxCol)
{
    int cur_lt = refPicList[X].isLongTerm[refIdxLx];
    int col_lt = refPicList_col[listCol].isLongTerm[refidxCol];
    int col_poc_diff, cur_poc_diff;

    if (cur_lt != col_lt) {
        mvLXCol->x = 0;
        mvLXCol->y = 0;
        return 0;
    }

    col_poc_diff = colPic - refPicList_col[listCol].list[refidxCol];
    cur_poc_diff = poc    - refPicList[X].list[refIdxLx];

    if (cur_lt || col_poc_diff == cur_poc_diff || !col_poc_diff) {
        mvLXCol->x = mvCol->x;
        mvLXCol->y = mvCol->y;
    } else {
        mv_scale(mvLXCol, mvCol, col_poc_diff, cur_poc_diff);
    }

    return 1;
}
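/* CHECK_MVSET(l) forwards the colocated MV of list Ll to check_mvset(). */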
#define CHECK_MVSET(l)                                          \
    check_mvset(mvLXCol, temp_col.mv + l,                       \
                colPic, s->poc,                                 \
                refPicList, X, refIdxLx,                        \
                refPicList_col, L ## l, temp_col.ref_idx[l])
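/* derive_temporal_colocated_mvs(): select which list (L0 or L1) of the
 * colocated block provides the temporal MV; check_diffpicount records whether
 * any reference picture in either list has a POC greater than the current
 * picture's POC. */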
static int derive_temporal_colocated_mvs(HEVCContext *s, MvField temp_col,
                                         int refIdxLx, Mv *mvLXCol, int X,
                                         int colPic, RefPicList *refPicList_col)
{

        int check_diffpicount = 0;
        int i, j;
        for (j = 0; j < 2; j++) {
            for (i = 0; i < refPicList[j].nb_refs; i++) {
                if (refPicList[j].list[i] > s->poc) {
                    check_diffpicount++;
                    break;
                }
            }
        }
        if (!check_diffpicount) {
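/* TAB_MVF()/TAB_MVF_PU() index the per-PU motion-vector field at minimum-PU
 * granularity (log2_min_pu_size); DERIVE_TEMPORAL_COLOCATED_MVS wraps the call
 * above with the reference list of the colocated picture. */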
#define TAB_MVF(x, y) \
    tab_mvf[(y) * min_pu_width + x]

#define TAB_MVF_PU(v) \
    TAB_MVF(((x ## v) >> s->ps.sps->log2_min_pu_size), \
            ((y ## v) >> s->ps.sps->log2_min_pu_size))

#define DERIVE_TEMPORAL_COLOCATED_MVS \
    derive_temporal_colocated_mvs(s, temp_col, \
                                  refIdxLx, mvLXCol, X, colPic, \
                                  ff_hevc_get_ref_list(s, ref, x, y))
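/* temporal_luma_motion_vector(): temporal MV prediction. The bottom-right
 * colocated position is tried first; if it yields nothing, the center position
 * (x0 + (nPbW >> 1), y0 + (nPbH >> 1)) is used instead. */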
static int temporal_luma_motion_vector(HEVCContext *s, int x0, int y0,
                                       int nPbW, int nPbH, int refIdxLx,
                                       Mv *mvLXCol, int X)
{
    int x, y, x_pu, y_pu;
    int availableFlagLXCol = 0;

        memset(mvLXCol, 0, sizeof(*mvLXCol));

        y < s->ps.sps->height &&
        x < s->ps.sps->width) {

        temp_col = TAB_MVF(x_pu, y_pu);

    if (tab_mvf && !availableFlagLXCol) {
        x = x0 + (nPbW >> 1);
        y = y0 + (nPbH >> 1);

        temp_col = TAB_MVF(x_pu, y_pu);

    return availableFlagLXCol;
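/* Spatial candidate helpers: AVAILABLE() rejects intra-coded neighbours,
 * PRED_BLOCK_AVAILABLE() checks z-scan availability of the neighbouring block,
 * and COMPARE_MV_REFIDX() compares two candidates via compare_mv_ref_idx(). */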
#define AVAILABLE(cand, v) \
    (cand && !(TAB_MVF_PU(v).pred_flag == PF_INTRA))

#define PRED_BLOCK_AVAILABLE(v) \
    z_scan_block_avail(s, x0, y0, x ## v, y ## v)

#define COMPARE_MV_REFIDX(a, b) \
    compare_mv_ref_idx(TAB_MVF_PU(a), TAB_MVF_PU(b))
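/* derive_spatial_merge_candidates(): build the merge candidate list from the
 * spatial neighbours A1, B1, B0, A0 and B2, then the temporal candidate,
 * combined bi-predictive candidates and zero-MV candidates, returning early
 * once merge_idx has been reached. */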
static void derive_spatial_merge_candidates(HEVCContext *s, int x0, int y0,
                                            int nPbW, int nPbH,
                                            int log2_cb_size,
                                            int singleMCLFlag, int part_idx,
                                            int merge_idx,
                                            struct MvField mergecandlist[])
{
    const int xA1 = x0 - 1;
    const int yA1 = y0 + nPbH - 1;

    const int xB1 = x0 + nPbW - 1;
    const int yB1 = y0 - 1;

    const int xB0 = x0 + nPbW;
    const int yB0 = y0 - 1;

    const int xA0 = x0 - 1;
    const int yA0 = y0 + nPbH;

    const int xB2 = x0 - 1;
    const int yB2 = y0 - 1;

    int nb_merge_cand = 0;
    int nb_orig_merge_cand = 0;

    if (!singleMCLFlag && part_idx == 1 &&

    if (is_available_a1) {

    if (!singleMCLFlag && part_idx == 1 &&

    if (is_available_b1 &&

        if (merge_idx == nb_merge_cand)

    if (is_available_b0 &&

        if (merge_idx == nb_merge_cand)

    is_available_a0 = AVAILABLE(cand_bottom_left, A0) &&

    if (is_available_a0 &&

        mergecandlist[nb_merge_cand] = TAB_MVF_PU(A0);
        if (merge_idx == nb_merge_cand)

    if (is_available_b2 &&
        nb_merge_cand != 4) {

        if (merge_idx == nb_merge_cand)

        nb_merge_cand < s->sh.max_num_merge_cand) {
        Mv mv_l0_col = { 0 }, mv_l1_col = { 0 };

                                                       0, &mv_l1_col, 1) : 0;

        if (available_l0 || available_l1) {
            mergecandlist[nb_merge_cand].pred_flag = available_l0 + (available_l1 << 1);
            AV_ZERO16(mergecandlist[nb_merge_cand].ref_idx);
            mergecandlist[nb_merge_cand].mv[0] = mv_l0_col;
            mergecandlist[nb_merge_cand].mv[1] = mv_l1_col;

            if (merge_idx == nb_merge_cand)

    nb_orig_merge_cand = nb_merge_cand;

        nb_orig_merge_cand < s->sh.max_num_merge_cand) {

                           comb_idx < nb_orig_merge_cand * (nb_orig_merge_cand - 1); comb_idx++) {

            MvField l0_cand = mergecandlist[l0_cand_idx];
            MvField l1_cand = mergecandlist[l1_cand_idx];

                (refPicList[0].list[l0_cand.ref_idx[0]] !=

                AV_COPY32(&mergecandlist[nb_merge_cand].mv[0], &l0_cand.mv[0]);
                AV_COPY32(&mergecandlist[nb_merge_cand].mv[1], &l1_cand.mv[1]);
                if (merge_idx == nb_merge_cand)

    while (nb_merge_cand < s->sh.max_num_merge_cand) {

        mergecandlist[nb_merge_cand].ref_idx[0] = zero_idx < nb_refs ? zero_idx : 0;
        mergecandlist[nb_merge_cand].ref_idx[1] = zero_idx < nb_refs ? zero_idx : 0;

        if (merge_idx == nb_merge_cand)
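/* ff_hevc_luma_mv_merge_mode(): entry point for merge mode. The selected
 * candidate is forced from bi- to uni-prediction (PF_L0) for 8x4/4x8 blocks
 * (nPbW2 + nPbH2 == 12). */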
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW,
                                int nPbH, int log2_cb_size, int part_idx,
                                int merge_idx, MvField *mv)
{
    int singleMCLFlag = 0;
    int nCS = 1 << log2_cb_size;

    derive_spatial_merge_candidates(s, x0, y0, nPbW, nPbH, log2_cb_size,
                                    singleMCLFlag, part_idx,
                                    merge_idx, mergecand_list);

    if (mergecand_list[merge_idx].pred_flag == PF_BI &&
        (nPbW2 + nPbH2) == 12) {
        mergecand_list[merge_idx].pred_flag = PF_L0;
    }

    *mv = mergecand_list[merge_idx];
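/* dist_scale(): for the spatial MVP path, rescale a neighbour's MV when it
 * points to a different reference picture than the one currently addressed. */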
static av_always_inline void dist_scale(HEVCContext *s, Mv *mv,
                                        int min_pu_width, int x, int y,
                                        int elist, int ref_idx_curr, int ref_idx)
{
    int ref_pic_elist = refPicList[elist].list[TAB_MVF(x, y).ref_idx[elist]];
    int ref_pic_curr  = refPicList[ref_idx_curr].list[ref_idx];

    if (ref_pic_elist != ref_pic_curr) {
        int poc_diff = s->poc - ref_pic_elist;
static int mv_mp_mode_mx(HEVCContext *s, int x, int y, int pred_flag_index,
                         Mv *mv, int ref_idx_curr, int ref_idx)
{
    if (((TAB_MVF(x, y).pred_flag) & (1 << pred_flag_index)) &&
        refPicList[pred_flag_index].list[TAB_MVF(x, y).ref_idx[pred_flag_index]] == refPicList[ref_idx_curr].list[ref_idx]) {
        *mv = TAB_MVF(x, y).mv[pred_flag_index];
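/* mv_mp_mode_mx_lt(): like mv_mp_mode_mx(), but accepts a different reference
 * picture as long as its long-term flag matches, scaling the MV with
 * dist_scale() for short-term references. */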
static int mv_mp_mode_mx_lt(HEVCContext *s, int x, int y, int pred_flag_index,
                            Mv *mv, int ref_idx_curr, int ref_idx)
{
    if ((TAB_MVF(x, y).pred_flag) & (1 << pred_flag_index)) {
        int currIsLongTerm = refPicList[ref_idx_curr].isLongTerm[ref_idx];
        int colIsLongTerm =
            refPicList[pred_flag_index].isLongTerm[(TAB_MVF(x, y).ref_idx[pred_flag_index])];

        if (colIsLongTerm == currIsLongTerm) {
            *mv = TAB_MVF(x, y).mv[pred_flag_index];
            if (!currIsLongTerm)
                dist_scale(s, mv, min_pu_width, x, y,
                           pred_flag_index, ref_idx_curr, ref_idx);
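/* MP_MX()/MP_MX_LT() probe spatial neighbour v (A0, A1, B0, B1 or B2) with
 * mv_mp_mode_mx()/mv_mp_mode_mx_lt() at minimum-PU granularity. */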
#define MP_MX(v, pred, mx) \
    mv_mp_mode_mx(s, \
                  (x ## v) >> s->ps.sps->log2_min_pu_size, \
                  (y ## v) >> s->ps.sps->log2_min_pu_size, \
                  pred, &mx, ref_idx_curr, ref_idx)

#define MP_MX_LT(v, pred, mx) \
    mv_mp_mode_mx_lt(s, \
                     (x ## v) >> s->ps.sps->log2_min_pu_size, \
                     (y ## v) >> s->ps.sps->log2_min_pu_size, \
                     pred, &mx, ref_idx_curr, ref_idx)
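/* ff_hevc_luma_mv_mvp_mode(): AMVP. mxA is derived from the left neighbours
 * (A0, A1) and mxB from the above neighbours (B0, B1, B2); a duplicate mxB is
 * dropped, and a temporal candidate fills the list when fewer than two spatial
 * candidates are available. The final predictor is mvpcand_list[mvp_lx_flag]. */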
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
                              int nPbH, int log2_cb_size, int part_idx,
                              int merge_idx, MvField *mv,
                              int mvp_lx_flag, int LX)
{
    int isScaledFlag_L0 = 0;
    int availableFlagLXA0 = 1;
    int availableFlagLXB0 = 1;
    int numMVPCandLX = 0;

    Mv mvpcand_list[2] = { { 0 } };

    int pred_flag_index_l0;
    int pred_flag_index_l1;

    pred_flag_index_l0 = LX;
    pred_flag_index_l1 = !LX;

    is_available_a0 = AVAILABLE(cand_bottom_left, A0) &&

    if (is_available_a0 || is_available_a1)
        isScaledFlag_L0 = 1;

    if (is_available_a0) {
        if (MP_MX(A0, pred_flag_index_l0, mxA)) {

        if (MP_MX(A0, pred_flag_index_l1, mxA)) {

    if (is_available_a1) {
        if (MP_MX(A1, pred_flag_index_l0, mxA)) {

        if (MP_MX(A1, pred_flag_index_l1, mxA)) {

    if (is_available_a0) {
        if (MP_MX_LT(A0, pred_flag_index_l0, mxA)) {

        if (MP_MX_LT(A0, pred_flag_index_l1, mxA)) {

    if (is_available_a1) {

    availableFlagLXA0 = 0;

    if (is_available_b0) {
        if (MP_MX(B0, pred_flag_index_l0, mxB)) {

        if (MP_MX(B0, pred_flag_index_l1, mxB)) {

    if (is_available_b1) {
        if (MP_MX(B1, pred_flag_index_l0, mxB)) {

        if (MP_MX(B1, pred_flag_index_l1, mxB)) {

    if (is_available_b2) {
        if (MP_MX(B2, pred_flag_index_l0, mxB)) {

        if (MP_MX(B2, pred_flag_index_l1, mxB)) {

    availableFlagLXB0 = 0;

    if (!isScaledFlag_L0) {
        if (availableFlagLXB0) {
            availableFlagLXA0 = 1;

        availableFlagLXB0 = 0;

        if (is_available_b0) {
            availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l1, mxB);
        }

        if (is_available_b1 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l1, mxB);
        }

        if (is_available_b2 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l1, mxB);
        }
    }

    if (availableFlagLXA0)
        mvpcand_list[numMVPCandLX++] = mxA;

    if (availableFlagLXB0 && (!availableFlagLXA0 || mxA.x != mxB.x || mxA.y != mxB.y))
        mvpcand_list[numMVPCandLX++] = mxB;

    if (numMVPCandLX < 2 && s->sh.slice_temporal_mvp_enabled_flag &&
        mvp_lx_flag == numMVPCandLX) {

        mvpcand_list[numMVPCandLX++] = mv_col;

    mv->mv[LX] = mvpcand_list[mvp_lx_flag];