static const uint8_t tctable[54] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,       // QP  0...18
    1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,       // QP 19...37
    5, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16, 18, 20, 22, 24        // QP 38...53
};
static const uint8_t betatable[52] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  6,  7,  8, // QP  0...18
     9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, // QP 19...37
    38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64                      // QP 38...51
};
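As a hedged illustration of how these two tables are consumed: the qp averaging, beta_offset, bs0 and the TC_CALC macro all come from the deblocking code further down in this listing, and MAX_QP is assumed to be 51.

/* sketch only: threshold lookup as used by the luma deblocking loops below */
const int qp   = (get_qPy(s, x - 1, y) + get_qPy(s, x, y) + 1) >> 1; /* QP averaged across the edge  */
const int beta = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];    /* spatial activity threshold   */
const int tc0  = bs0 ? TC_CALC(qp, bs0) : 0;                         /* clipping threshold, 0 = skip */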
/* chroma QP mapping for qp_i in [30, 43] (4:2:0) */
static const int qp_c[] = {
    29, 30, 31, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37
};
int qp, qp_i, offset, idxt;

qp_i = av_clip(qp_y + offset, 0, 57);
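A hedged sketch of how the rest of chroma_tc plausibly proceeds for 4:2:0 content: qp_i values in [30, 43] go through qp_c, values above 43 are reduced by 6, and the result indexes tctable with the intra offset and the slice tc offset applied.

/* sketch only, assuming the 4:2:0 chroma QP mapping of the HEVC spec */
if (qp_i < 30)
    qp = qp_i;
else if (qp_i > 43)
    qp = qp_i - 6;
else
    qp = qp_c[qp_i - 30];

idxt = av_clip(qp + DEFAULT_INTRA_TC_OFFSET + tc_offset, 0, 53);
return tctable[idxt];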
int xQgBase = xBase - (xBase & MinCuQpDeltaSizeMask);
int yQgBase = yBase - (yBase & MinCuQpDeltaSizeMask);

int availableA = (xBase   & ctb_size_mask) &&
                 (xQgBase & ctb_size_mask);
int availableB = (yBase   & ctb_size_mask) &&
                 (yQgBase & ctb_size_mask);
int qPy_pred, qPy_a, qPy_b;

/* QP of the coding block to the left of the quantization group */
qPy_a = s->qp_y_tab[(x_cb - 1) + y_cb * min_cb_width];

/* QP of the coding block above the quantization group */
qPy_b = s->qp_y_tab[x_cb + (y_cb - 1) * min_cb_width];

return (qPy_a + qPy_b + 1) >> 1;
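A quick worked example of this prediction with illustrative values:

/* qPy_a = 26, qPy_b = 29  ->  (26 + 29 + 1) >> 1 == 28, the average rounded up */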
int x = xC >> log2_min_cb_size;
int y = yC >> log2_min_cb_size;
static void copy_CTB(uint8_t *dst, const uint8_t *src, int width, int height,
                     ptrdiff_t stride_dst, ptrdiff_t stride_src)

    if (((intptr_t)dst | (intptr_t)src | stride_dst | stride_src) & 15) {
        /* some pointer or stride is not 16-byte aligned */
        for (i = 0; i < height; i++) {
            for (j = 0; j < width; j += 8)
    } else {
        for (i = 0; i < height; i++) {
            for (j = 0; j < width; j += 16)
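A hedged reconstruction of the two elided loop bodies, assuming the usual libavutil bulk-copy macros (AV_COPY64U for the unaligned path, AV_COPY128 for the aligned one):

/* sketch of the elided inner statements; each row ends with
   dst += stride_dst; src += stride_src; */
AV_COPY64U(dst + j, src + j);   /* unaligned path, 8 bytes per step  */
AV_COPY128(dst + j, src + j);   /* aligned path,  16 bytes per step  */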
*(uint16_t *)dst = *(uint16_t *)src;
static void copy_vert(uint8_t *dst, const uint8_t *src,
                      int pixel_shift, int height,
                      ptrdiff_t stride_dst, ptrdiff_t stride_src)

    if (pixel_shift == 0) {
        /* 8-bit samples: one byte per row */
        for (i = 0; i < height; i++) {
    } else {
        /* high bit depth: two bytes per row */
        for (i = 0; i < height; i++) {
            *(uint16_t *)dst = *(uint16_t *)src;
static void copy_CTB_to_hv(HEVCContext *s, const uint8_t *src,
                           ptrdiff_t stride_src, int x, int y,
                           int width, int height,
                           int c_idx, int x_ctb, int y_ctb)

    /* bottom row of the CTB goes into the horizontal SAO line buffer */
           src + stride_src * (height - 1), width << sh);

    /* right column of the CTB goes into the vertical SAO line buffer */
    copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 1) * h + y) << sh),
              src + ((width - 1) << sh), sh, height, 1 << sh, stride_src);
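For symmetry, a hedged sketch of the two matching copies that plausibly precede the fragments above (top row and left column, stored at slots 2*y_ctb and 2*x_ctb; w and h are assumed to be the plane width and height used to stride the buffers):

/* sketch only: counterparts of the bottom-row / right-column copies shown above */
memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb) * w + x) << sh),
       src, width << sh);                            /* top row     */
copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb) * h + y) << sh),
          src, sh, height, 1 << sh, stride_src);     /* left column */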
static void restore_tqb_pixels(HEVCContext *s, uint8_t *src1, const uint8_t *dst1,
                               ptrdiff_t stride_src, ptrdiff_t stride_dst,
                               int x0, int y0, int width, int height, int c_idx)

    for (y = y_min; y < y_max; y++) {
        for (x = x_min; x < x_max; x++) {
            /* per-PU copy of the unfiltered samples for lossless/PCM units */
            for (n = 0; n < (min_pu_size >> vshift); n++) {
                memcpy(src, dst, len);
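A hedged sketch of the guard under which this restore path runs; it only matters when the loop filters must not touch losslessly coded or PCM samples (field names as they appear in HEVCContext):

/* sketch of the enabling condition */
if (s->ps.pps->transquant_bypass_enable_flag ||
    (s->ps.sps->pcm.loop_filter_disable_flag && s->ps.sps->pcm_enabled_flag)) {
    /* pre-filter samples are copied back over the filtered ones */
}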
#define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])

static const uint8_t sao_tab[8] = { 0, 1, 2, 2, 3, 3, 4, 4 };
uint8_t vert_edge[]  = { 0, 0 };
uint8_t horiz_edge[] = { 0, 0 };
uint8_t diag_edge[]  = { 0, 0, 0, 0 };

uint8_t restore = no_tile_filter || !lfase;

/* CTB at the left / top picture boundary */
edges[0] = x_ctb == 0;
edges[1] = y_ctb == 0;

/* set diag_edge[] for the four corners */
if (!edges[0] && !edges[1]) {
if (!edges[1] && !edges[2]) {
if (!edges[2] && !edges[3]) {
if (!edges[0] && !edges[3]) {

int tab = sao_tab[(FFALIGN(width, 8) >> 3) - 1];
ptrdiff_t stride_dst;

         x, y, width, height, c_idx);
int left_edge   = edges[0];
int top_edge    = edges[1];
int right_edge  = edges[2];
int bottom_edge = edges[3];

int left_pixels, right_pixels;

/* row above the CTB: pick each stretch either from the frame or from the
   saved SAO line buffer, depending on whether the upper neighbour has
   already been SAO filtered */
int left  = 1 - left_edge;
int right = 1 - right_edge;

dst1    = dst - stride_dst - (left << sh);
src1[0] = src - stride_src - (left << sh);

src_idx = (CTB(s->sao, x_ctb-1, y_ctb-1).type_idx[c_idx] ==
           SAO_APPLIED);
src_idx = (CTB(s->sao, x_ctb,   y_ctb-1).type_idx[c_idx] ==
           SAO_APPLIED);
memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
src_idx = (CTB(s->sao, x_ctb+1, y_ctb-1).type_idx[c_idx] ==
           SAO_APPLIED);
copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
/* row below the CTB, same selection logic as for the row above */
int left  = 1 - left_edge;
int right = 1 - right_edge;

dst1    = dst + height * stride_dst - (left << sh);
src1[0] = src + height * stride_src - (left << sh);

src_idx = (CTB(s->sao, x_ctb-1, y_ctb+1).type_idx[c_idx] ==
           SAO_APPLIED);
src_idx = (CTB(s->sao, x_ctb,   y_ctb+1).type_idx[c_idx] ==
           SAO_APPLIED);
memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
src_idx = (CTB(s->sao, x_ctb+1, y_ctb+1).type_idx[c_idx] ==
           SAO_APPLIED);
copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
          sh, height, stride_dst, 1 << sh);
          sh, height, stride_dst, 1 << sh);

         src - (left_pixels << sh),
         (width + left_pixels + right_pixels) << sh,
         height, stride_dst, stride_src);

         stride_src, stride_dst,
         x, y, width, height, c_idx);
x_pu = x >> log2_min_pu_size;
y_pu = y >> log2_min_pu_size;
#define TC_CALC(qp, bs)                                               \
    tctable[av_clip((qp) + DEFAULT_INTRA_TC_OFFSET * ((bs) - 1) +     \
                    (tc_offset >> 1 << 1),                            \
                    0, MAX_QP + DEFAULT_INTRA_TC_OFFSET)]

int x_end, x_end2, y_end;
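A worked example of the macro, assuming the usual constants DEFAULT_INTRA_TC_OFFSET = 2 and MAX_QP = 51:

/* qp = 32, bs = 2 (intra edge), tc_offset = 0:
   index = 32 + 2 * (2 - 1) + 0 = 34  ->  tctable[34] == 3 */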
int ctb_size = 1 << log2_ctb_size;
int ctb      = (x0 >> log2_ctb_size) +
               (y0 >> log2_ctb_size) * s->ps.sps->ctb_width;

int left_tc_offset, left_beta_offset;
int tc_offset, beta_offset;

left_beta_offset = 0;

x_end = x0 + ctb_size;
y_end = y0 + ctb_size;

tc_offset   = cur_tc_offset;
beta_offset = cur_beta_offset;
/* vertical luma edges, on the 8x8 deblocking grid */
for (y = y0; y < y_end; y += 8) {
    for (x = x0 ? x0 : 8; x < x_end; x += 8) {
        const int qp = (get_qPy(s, x - 1, y) + get_qPy(s, x, y) + 1) >> 1;

        tc[0] = bs0 ? TC_CALC(qp, bs0) : 0;
        tc[1] = bs1 ? TC_CALC(qp, bs1) : 0;

        no_p[0] = get_pcm(s, x - 1, y);
        no_p[1] = get_pcm(s, x - 1, y + 4);
        no_q[1] = get_pcm(s, x, y + 4);

                  beta, tc, no_p, no_q);
                  beta, tc, no_p, no_q);
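The two truncated calls above plausibly end invocations of the vertical luma deblocking filter from HEVCDSPContext; a hedged sketch of such a call (LUMA standing for plane index 0 is an assumption):

/* sketch only: src points at the edge inside the luma plane */
s->hevcdsp.hevc_v_loop_filter_luma(src, s->frame->linesize[LUMA],
                                   beta, tc, no_p, no_q);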
/* horizontal luma edges */
for (x = x0 ? x0 - 8 : 0; x < x_end2; x += 8) {
    const int qp = (get_qPy(s, x, y - 1) + get_qPy(s, x, y) + 1) >> 1;

    tc_offset   = x >= x0 ? cur_tc_offset   : left_tc_offset;
    beta_offset = x >= x0 ? cur_beta_offset : left_beta_offset;

    tc[0] = bs0 ? TC_CALC(qp, bs0) : 0;
    tc[1] = bs1 ? TC_CALC(qp, bs1) : 0;

    no_p[0] = get_pcm(s, x, y - 1);
    no_p[1] = get_pcm(s, x + 4, y - 1);
    no_q[1] = get_pcm(s, x + 4, y);

              beta, tc, no_p, no_q);
              beta, tc, no_p, no_q);
/* chroma planes: only boundary-strength-2 (intra) edges are filtered */
for (chroma = 1; chroma <= 2; chroma++) {
    /* vertical chroma edges */
    for (y = y0; y < y_end; y += (8 * v)) {
        for (x = x0 ? x0 : 8 * h; x < x_end; x += (8 * h)) {
            if ((bs0 == 2) || (bs1 == 2)) {
                const int qp0 = (get_qPy(s, x - 1, y) +
                                 get_qPy(s, x, y) + 1) >> 1;
                const int qp1 = (get_qPy(s, x - 1, y + (4 * v)) +
                                 get_qPy(s, x, y + (4 * v)) + 1) >> 1;

                c_tc[0] = (bs0 == 2) ? chroma_tc(s, qp0, chroma, tc_offset) : 0;
                c_tc[1] = (bs1 == 2) ? chroma_tc(s, qp1, chroma, tc_offset) : 0;

                no_p[0] = get_pcm(s, x - 1, y);
                no_p[1] = get_pcm(s, x - 1, y + (4 * v));
                no_q[1] = get_pcm(s, x, y + (4 * v));
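These c_tc / no_p / no_q values plausibly feed the vertical chroma filter from HEVCDSPContext (signature as listed in the reference section at the end); a hedged sketch:

/* sketch only: chroma deblocking takes per-edge tc values but no beta */
s->hevcdsp.hevc_v_loop_filter_chroma(src, s->frame->linesize[chroma],
                                     c_tc, no_p, no_q);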
/* horizontal chroma edges */
tc_offset = x0 ? left_tc_offset : cur_tc_offset;

x_end2 = x_end - 8 * h;
for (x = x0 ? x0 - 8 * h : 0; x < x_end2; x += (8 * h)) {
    if ((bs0 == 2) || (bs1 == 2)) {
        const int qp0 = bs0 == 2 ? (get_qPy(s, x, y - 1) +
                                    get_qPy(s, x, y) + 1) >> 1 : 0;
        const int qp1 = bs1 == 2 ? (get_qPy(s, x + (4 * h), y - 1) +
                                    get_qPy(s, x + (4 * h), y) + 1) >> 1 : 0;

        c_tc[0] = bs0 == 2 ? chroma_tc(s, qp0, chroma, tc_offset)     : 0;
        c_tc[1] = bs1 == 2 ? chroma_tc(s, qp1, chroma, cur_tc_offset) : 0;

        no_p[0] = get_pcm(s, x, y - 1);
        no_p[1] = get_pcm(s, x + (4 * h), y - 1);
        no_q[1] = get_pcm(s, x + (4 * h), y);
/* ref_B is taken from list 0 or list 1 of the neighbour, depending on its prediction flags */
ref_B = neigh_refPicList[0].list[neigh->ref_idx[0]];
ref_B = neigh_refPicList[1].list[neigh->ref_idx[1]];

if (ref_A == ref_B) {
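For context, a hedged summary of the strength values this function produces (standard HEVC deblocking rules, not verbatim from the listing):

/* bs == 2 : at least one adjacent block is intra coded
   bs == 1 : coded luma residual on either side, different reference
             pictures or MV counts, or an MV delta of 4+ quarter-pels
   bs == 0 : otherwise, i.e. the edge is not filtered */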
int is_intra = tab_mvf[(y0 >> log2_min_pu_size) * min_pu_width +
                       (x0 >> log2_min_pu_size)].pred_flag == PF_INTRA;
int boundary_upper, boundary_left;

/* the upper edge is only deblocked if it lies on the 8x8 grid */
boundary_upper = y0 > 0 && !(y0 & 7);
if (boundary_upper &&

if (boundary_upper) {
    int yp_pu = (y0 - 1) >> log2_min_pu_size;
    int yq_pu = y0 >> log2_min_pu_size;
    int yp_tu = (y0 - 1) >> log2_min_tu_size;
    int yq_tu = y0 >> log2_min_tu_size;

    for (i = 0; i < (1 << log2_trafo_size); i += 4) {
        int x_pu = (x0 + i) >> log2_min_pu_size;
        int x_tu = (x0 + i) >> log2_min_tu_size;
        MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
        MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];

        else if (curr_cbf_luma || top_cbf_luma)
/* same test for the left edge of the transform block */
boundary_left = x0 > 0 && !(x0 & 7);

if (boundary_left) {
    int xp_pu = (x0 - 1) >> log2_min_pu_size;
    int xq_pu = x0 >> log2_min_pu_size;
    int xp_tu = (x0 - 1) >> log2_min_tu_size;
    int xq_tu = x0 >> log2_min_tu_size;

    for (i = 0; i < (1 << log2_trafo_size); i += 4) {
        int y_pu = (y0 + i) >> log2_min_pu_size;
        int y_tu = (y0 + i) >> log2_min_tu_size;
        MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
        MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];

        else if (curr_cbf_luma || left_cbf_luma)
if (log2_trafo_size > log2_min_pu_size && !is_intra) {
    /* internal horizontal PU edges inside the transform block */
    for (j = 8; j < (1 << log2_trafo_size); j += 8) {
        int yp_pu = (y0 + j - 1) >> log2_min_pu_size;
        int yq_pu = (y0 + j) >> log2_min_pu_size;

        for (i = 0; i < (1 << log2_trafo_size); i += 4) {
            int x_pu = (x0 + i) >> log2_min_pu_size;
            MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
            MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];

    /* internal vertical PU edges inside the transform block */
    for (j = 0; j < (1 << log2_trafo_size); j += 4) {
        int y_pu = (y0 + j) >> log2_min_pu_size;

        for (i = 8; i < (1 << log2_trafo_size); i += 8) {
            int xp_pu = (x0 + i - 1) >> log2_min_pu_size;
            int xq_pu = (x0 + i) >> log2_min_pu_size;
            MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
            MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];
if (x_end && y_end) {

int x_end = x_ctb >= s->ps.sps->width - ctb_size;
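A hedged reconstruction of how these fragments fit together: a CTB can only be filtered once the CTBs to its right and below have been decoded, so the per-CTB driver filters the already surrounded neighbours and, at the picture edges, the current CTB itself.

/* sketch only, reconstructed call pattern */
void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
{
    int x_end = x_ctb >= s->ps.sps->width  - ctb_size;
    int y_end = y_ctb >= s->ps.sps->height - ctb_size;

    if (y_ctb && x_ctb)
        ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb - ctb_size, ctb_size);
    if (y_ctb && x_end)
        ff_hevc_hls_filter(s, x_ctb, y_ctb - ctb_size, ctb_size);
    if (x_ctb && y_end)
        ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb, ctb_size);
    if (x_end && y_end)
        ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
}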
Symbols referenced by this listing (condensed from the cross-reference list):

Functions and tables in this file:
    static const uint8_t tctable[54]
    static const uint8_t betatable[52]
    static int chroma_tc(HEVCContext *s, int qp_y, int c_idx, int tc_offset)
    static int get_qPy_pred(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
    void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
    static int get_qPy(HEVCContext *s, int xC, int yC)
    static void copy_CTB(uint8_t *dst, const uint8_t *src, int width, int height, ptrdiff_t stride_dst, ptrdiff_t stride_src)
    static void copy_pixel(uint8_t *dst, const uint8_t *src, int pixel_shift)
    static void copy_vert(uint8_t *dst, const uint8_t *src, int pixel_shift, int height, ptrdiff_t stride_dst, ptrdiff_t stride_src)
    static void copy_CTB_to_hv(HEVCContext *s, const uint8_t *src, ptrdiff_t stride_src, int x, int y, int width, int height, int c_idx, int x_ctb, int y_ctb)
    static void restore_tqb_pixels(HEVCContext *s, uint8_t *src1, const uint8_t *dst1, ptrdiff_t stride_src, ptrdiff_t stride_dst, int x0, int y0, int width, int height, int c_idx)
    static void sao_filter_CTB(HEVCContext *s, int x, int y)
    static int get_pcm(HEVCContext *s, int x, int y)
    static int boundary_strength(HEVCContext *s, MvField *curr, MvField *neigh, RefPicList *neigh_refPicList)
    static void deblocking_filter_CTB(HEVCContext *s, int x0, int y0)
    void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0, int log2_trafo_size)
    void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
    void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)

Macros used:
    DEFAULT_INTRA_TC_OFFSET, CTB(tab, x, y), TC_CALC(qp, bs), FFALIGN, FFABS, av_assert2,
    BOUNDARY_LEFT_SLICE, BOUNDARY_LEFT_TILE, BOUNDARY_UPPER_SLICE, BOUNDARY_UPPER_TILE,
    AV_INPUT_BUFFER_PADDING_SIZE, FF_THREAD_FRAME

HEVCDSPContext entry points:
    void (*hevc_v_loop_filter_luma)(uint8_t *pix, ptrdiff_t stride, int beta, int32_t *tc, uint8_t *no_p, uint8_t *no_q)
    void (*hevc_h_loop_filter_luma)(uint8_t *pix, ptrdiff_t stride, int beta, int32_t *tc, uint8_t *no_p, uint8_t *no_q)
    void (*hevc_v_loop_filter_chroma)(uint8_t *pix, ptrdiff_t stride, int32_t *tc, uint8_t *no_p, uint8_t *no_q)
    void (*hevc_h_loop_filter_chroma)(uint8_t *pix, ptrdiff_t stride, int32_t *tc, uint8_t *no_p, uint8_t *no_q)
    void (*sao_band_filter[5])(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src, int16_t *sao_offset_val, int sao_left_class, int width, int height)
    void (*sao_edge_filter[5])(uint8_t *_dst, uint8_t *_src, ptrdiff_t stride_dst, int16_t *sao_offset_val, int sao_eo_class, int width, int height)
    void (*sao_edge_restore[2])(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src, struct SAOParams *sao, int *borders, int _width, int _height, int c_idx, uint8_t *vert_edge, uint8_t *horiz_edge, uint8_t *diag_edge)
    (plus the *_c suffixed variants with identical signatures)

Other referenced fields and helpers:
    HEVCLocalContext *HEVClc; uint8_t *sao_pixel_buffer_h[3], *sao_pixel_buffer_v[3];
    uint8_t *filter_slice_edges; int32_t *tab_slice_address; int *ctb_addr_rs_to_ts (CtbAddrRSToTS);
    SAOParams: uint8_t type_idx[3] (sao_type_idx), uint8_t band_position[3] (sao_band_position),
    int eo_class[3] (sao_eo_class), int16_t offset_val[3][5] (SaoOffsetVal);
    MvField: int16_t x, int16_t y (horizontal and vertical motion vector components);
    SPS/PPS fields: log2_ctb_size, log2_min_cb_size, log2_min_pu_size, log2_min_tb_size,
    diff_cu_qp_delta_depth, tiles_enabled_flag, loop_filter_across_tiles_enabled_flag,
    loop_filter_disable_flag, transquant_bypass_enable_flag;
    enum HEVCNALUnitType nal_unit_type; enum AVDiscard skip_loop_filter;
    AVFrame data[AV_NUM_DATA_POINTERS] and linesize[AV_NUM_DATA_POINTERS];
    RefPicList *ff_hevc_get_ref_list(HEVCContext *s, HEVCFrame *ref, int x0, int y0);
    ff_hevc_nal_is_nonref(), ff_thread_report_progress().