#ifndef AVCODEC_HEVCDEC_H
#define AVCODEC_HEVCDEC_H

#include <stdatomic.h>

#define SHIFT_CTB_WPP 2

#define MAX_TRANSFORM_DEPTH 5

#define MAX_TB_SIZE 32
#define DEFAULT_INTRA_TC_OFFSET 2

#define HEVC_CONTEXTS 199
#define HEVC_STAT_COEFFS 4

#define MRG_MAX_NUM_CANDS 5

#define EPEL_EXTRA_BEFORE 1
#define EPEL_EXTRA_AFTER 2

#define QPEL_EXTRA_BEFORE 3
#define QPEL_EXTRA_AFTER 4

#define EDGE_EMU_BUFFER_STRIDE 80

#define SAMPLE(tab, x, y) ((tab)[(y) * s->sps->width + (x)])
#define SAMPLE_CTB(tab, x, y) ((tab)[(y) * min_cb_width + (x)])

#define IS_IDR(s) ((s)->nal_unit_type == HEVC_NAL_IDR_W_RADL || (s)->nal_unit_type == HEVC_NAL_IDR_N_LP)
#define IS_BLA(s) ((s)->nal_unit_type == HEVC_NAL_BLA_W_RADL || (s)->nal_unit_type == HEVC_NAL_BLA_W_LP || \
                   (s)->nal_unit_type == HEVC_NAL_BLA_N_LP)
#define IS_IRAP(s) ((s)->nal_unit_type >= 16 && (s)->nal_unit_type <= 23)

unsigned int list_entry_lx[2][32];

int16_t luma_weight_l0[16];
int16_t chroma_weight_l0[16][2];
int16_t chroma_weight_l1[16][2];
int16_t luma_weight_l1[16];

int16_t luma_offset_l0[16];
int16_t chroma_offset_l0[16][2];

int16_t luma_offset_l1[16];
int16_t chroma_offset_l1[16][2];
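SAMPLE and SAMPLE_CTB resolve a 2-D sample or CTB coordinate into a flat, row-major table index, using the picture width or the minimum-CB width as the stride. A minimal, self-contained sketch of the same addressing rule; the helper name, table and grid size below are illustrative and not taken from the header:

#include <stdio.h>

/* Row-major lookup, mirroring SAMPLE_CTB(tab, x, y):
 * element (x, y) lives at tab[y * min_cb_width + x]. */
static int lookup_ctb(const int *tab, int min_cb_width, int x, int y)
{
    return tab[y * min_cb_width + x];
}

int main(void)
{
    /* Hypothetical 4x2 grid of per-CTB values. */
    int tab[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
    printf("%d\n", lookup_ctb(tab, 4, 2, 1)); /* prints 6 */
    return 0;
}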
#define HEVC_FRAME_FLAG_OUTPUT    (1 << 0)
#define HEVC_FRAME_FLAG_SHORT_REF (1 << 1)
#define HEVC_FRAME_FLAG_LONG_REF  (1 << 2)
#define HEVC_FRAME_FLAG_BUMPING   (1 << 3)

#define BOUNDARY_LEFT_SLICE  (1 << 0)
#define BOUNDARY_LEFT_TILE   (1 << 1)
#define BOUNDARY_UPPER_SLICE (1 << 2)
#define BOUNDARY_UPPER_TILE  (1 << 3)

void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW,
                                int nPbH, int log2_cb_size, int part_idx,
                                int merge_idx, MvField *mv);
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
                              int nPbH, int log2_cb_size, int part_idx,
                              int merge_idx, MvField *mv,
                              int mvp_lx_flag, int LX);
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0,
                                           int log2_trafo_size);
void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0,
                                 int log2_trafo_size, enum ScanType scan_idx,
                                 int c_idx);
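The BOUNDARY_* values are one-bit masks that are OR-ed into a per-CTB boundary mask and tested individually when deciding whether a neighbour lies across a slice or tile edge. A self-contained sketch of that pattern; the variable and the particular combination chosen below are illustrative:

#include <stdio.h>

#define BOUNDARY_LEFT_SLICE  (1 << 0)
#define BOUNDARY_LEFT_TILE   (1 << 1)
#define BOUNDARY_UPPER_SLICE (1 << 2)
#define BOUNDARY_UPPER_TILE  (1 << 3)

int main(void)
{
    int boundary_flags = 0;

    /* Hypothetical CTB that starts a new tile column and a new slice row. */
    boundary_flags |= BOUNDARY_LEFT_TILE;
    boundary_flags |= BOUNDARY_UPPER_SLICE;

    if (boundary_flags & (BOUNDARY_LEFT_SLICE | BOUNDARY_LEFT_TILE))
        printf("left neighbour is across a slice or tile boundary\n");
    if (boundary_flags & BOUNDARY_UPPER_TILE)
        printf("upper neighbour is across a tile boundary\n");
    return 0;
}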
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
AVFrame
This structure describes decoded (raw) audio or video data.
int ff_hevc_inter_pred_idc_decode(HEVCContext *s, int nPbW, int nPbH)
static void flush(AVCodecContext *avctx)
int ff_hevc_sao_offset_sign_decode(HEVCContext *s)
void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
const uint8_t ff_hevc_qpel_extra_before[4]
int ff_hevc_res_scale_sign_flag(HEVCContext *s, int idx)
int16_t x
horizontal component of motion vector
int ff_hevc_end_of_slice_flag_decode(HEVCContext *s)
void * hwaccel_picture_private
uint8_t intra_split_flag
IntraSplitFlag.
int rem_intra_luma_pred_mode
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
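A frame stays in the DPB for as long as any HEVC_FRAME_FLAG_* bit is set on it, so "mark as unused for reference" can be pictured as clearing the two reference flags and releasing frames whose flag set becomes empty. A toy sketch of that idea under those assumptions; the struct and helper below are illustrative stand-ins, not the decoder's HEVCFrame handling:

#include <stdio.h>

#define FLAG_OUTPUT    (1 << 0)   /* mirrors HEVC_FRAME_FLAG_OUTPUT    */
#define FLAG_SHORT_REF (1 << 1)   /* mirrors HEVC_FRAME_FLAG_SHORT_REF */
#define FLAG_LONG_REF  (1 << 2)   /* mirrors HEVC_FRAME_FLAG_LONG_REF  */

struct toy_frame {
    int in_use;   /* stands in for an allocated picture */
    int flags;
};

/* Drop the reference flags of every frame; a frame whose flags reach 0
 * no longer has any reason to stay in the DPB and can be released. */
static void clear_refs(struct toy_frame *dpb, int n)
{
    for (int i = 0; i < n; i++) {
        dpb[i].flags &= ~(FLAG_SHORT_REF | FLAG_LONG_REF);
        if (dpb[i].in_use && !dpb[i].flags)
            dpb[i].in_use = 0;            /* release the slot */
    }
}

int main(void)
{
    struct toy_frame dpb[] = {
        { 1, FLAG_OUTPUT | FLAG_SHORT_REF },  /* kept: still to be output */
        { 1, FLAG_LONG_REF },                 /* released                 */
    };
    clear_refs(dpb, 2);
    printf("%d %d\n", dpb[0].in_use, dpb[1].in_use); /* prints 1 0 */
    return 0;
}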
int ff_hevc_cbf_luma_decode(HEVCContext *s, int trafo_depth)
int ff_hevc_cu_chroma_qp_offset_idx(HEVCContext *s)
int ff_hevc_mpm_idx_decode(HEVCContext *s)
uint16_t seq_decode
Sequence counters for decoded and output frames, so that old frames are output first after a POC reset.
HEVCNALUnitType
Table 7-1 – NAL unit type codes and NAL unit type classes in T-REC-H.265-201802. ...
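IS_IRAP() can be a plain range check because Table 7-1 places all intra random access point types (BLA, IDR, CRA and the two reserved IRAP codes) at values 16 through 23. A standalone illustration of that classification, with the numeric values written out rather than taken from the enum:

#include <stdio.h>

/* Mirrors IS_IRAP(): in Table 7-1, BLA (16..18), IDR (19..20), CRA (21)
 * and the reserved IRAP types (22..23) all fall in the range [16, 23]. */
static int nal_is_irap(int nal_unit_type)
{
    return nal_unit_type >= 16 && nal_unit_type <= 23;
}

int main(void)
{
    printf("%d %d %d\n",
           nal_is_irap(19),  /* IDR_W_RADL -> 1 */
           nal_is_irap(21),  /* CRA_NUT    -> 1 */
           nal_is_irap(1));  /* TRAIL_R    -> 0 */
    return 0;
}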
void ff_hevc_bump_frame(HEVCContext *s)
AVBufferPool * rpl_tab_pool
candidate references for the current frame
int is_nalff
this flag is != 0 if bitstream is encapsulated as a format defined in 14496-15
int ff_hevc_log2_res_scale_abs(HEVCContext *s, int idx)
int ff_hevc_output_frame(HEVCContext *s, AVFrame *frame, int flush)
Find next frame in output order and put a reference to it in frame.
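Finding the next frame in output order means picking, among the frames flagged HEVC_FRAME_FLAG_OUTPUT, the one with the smallest POC. A self-contained sketch of that selection rule over a toy DPB; the struct and function below are illustrative, not the decoder's implementation:

#include <stdio.h>

#define FLAG_OUTPUT (1 << 0)   /* mirrors HEVC_FRAME_FLAG_OUTPUT */

struct toy_frame {
    int poc;
    int flags;
};

/* Return the index of the pending-output frame with the lowest POC,
 * or -1 if nothing is waiting to be output. */
static int next_output(const struct toy_frame *dpb, int n)
{
    int best = -1;
    for (int i = 0; i < n; i++) {
        if (!(dpb[i].flags & FLAG_OUTPUT))
            continue;
        if (best < 0 || dpb[i].poc < dpb[best].poc)
            best = i;
    }
    return best;
}

int main(void)
{
    struct toy_frame dpb[] = {
        { 4, FLAG_OUTPUT }, { 2, FLAG_OUTPUT }, { 3, 0 },
    };
    printf("output POC %d\n", dpb[next_output(dpb, 3)].poc); /* POC 2 */
    return 0;
}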
uint8_t ctb_up_right_flag
Multithreading support functions.
void ff_hevc_save_states(HEVCContext *s, int ctb_addr_ts)
int ff_hevc_part_mode_decode(HEVCContext *s, int log2_cb_size)
int ff_hevc_sao_band_position_decode(HEVCContext *s)
uint8_t poc_msb_present[32]
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
get_bits.h
bitstream reader API header.
uint8_t cu_transquant_bypass_flag
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCContext *s)
int temporal_id
temporal_id_plus1 - 1
int ff_hevc_cu_qp_delta_sign_flag(HEVCContext *s)
int slice_idx
number of the slice being currently decoded
uint8_t slice_initialized
1 if the independent slice segment header was successfully parsed
RefPicList * ff_hevc_get_ref_list(HEVCContext *s, HEVCFrame *frame, int x0, int y0)
AVBufferRef * rpl_tab_buf
static av_always_inline int ff_hevc_nal_is_nonref(enum HEVCNALUnitType type)
uint8_t * sao_pixel_buffer_h[3]
void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size)
uint8_t max_trafo_depth
MaxTrafoDepth.
HEVCLocalContext ** HEVClcList
uint16_t sequence
A sequence counter, so that old frames are output first after a POC reset.
int ff_hevc_split_transform_flag_decode(HEVCContext *s, int log2_trafo_size)
AVBufferRef * tab_mvf_buf
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
uint8_t * sao_pixel_buffer_v[3]
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
int ff_hevc_mvp_lx_flag_decode(HEVCContext *s)
int ff_hevc_sao_offset_abs_decode(HEVCContext *s)
const uint8_t ff_hevc_qpel_extra[4]
int ff_hevc_split_coding_unit_flag_decode(HEVCContext *s, int ct_depth, int x0, int y0)
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv, int mvp_lx_flag, int LX)
int ff_hevc_cu_chroma_qp_offset_flag(HEVCContext *s)
struct HEVCFrame * collocated_ref
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0, int log2_trafo_size)
int ff_hevc_sao_eo_class_decode(HEVCContext *s)
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCContext *s)
int ff_hevc_merge_flag_decode(HEVCContext *s)
static const int8_t mv[256][2]
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
avcodec.h
Libavcodec external API header.
int ff_hevc_cbf_cb_cr_decode(HEVCContext *s, int trafo_depth)
AVCodecContext
main external API structure.
AVBufferRef * hwaccel_priv_buf
int16_t y
vertical component of motion vector
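Luma motion vector components are stored in quarter-sample units, so the integer displacement and the quarter-pel phase are recovered with a shift and a mask (chroma derives its finer fractional phase from the same stored vector). A small self-contained sketch of that split; the helper is illustrative and assumes the arithmetic right shift that common compilers use for negative values:

#include <stdint.h>
#include <stdio.h>

/* Split a quarter-pel luma MV component into integer and fractional parts. */
static void split_qpel(int16_t mv_comp, int *int_part, int *frac_part)
{
    *int_part  = mv_comp >> 2;   /* whole samples (arithmetic shift)  */
    *frac_part = mv_comp & 3;    /* 0..3: quarter-sample phase        */
}

int main(void)
{
    int ip, fp;
    split_qpel(-7, &ip, &fp);    /* -7/4 = -2 whole samples, phase 1 */
    printf("%d %d\n", ip, fp);
    return 0;
}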
#define EDGE_EMU_BUFFER_STRIDE
int ff_hevc_pred_mode_decode(HEVCContext *s)
uint8_t * checksum_buf
used on BE to byteswap the lines for checksumming
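For pictures with more than 8 bits per sample, the decoded-picture-hash check needs the sample bytes in a fixed order, so on big-endian hosts each line is byteswapped into a scratch buffer such as checksum_buf before hashing. A hedged, self-contained sketch of that byteswap step; the loop below is only a stand-in for the decoder's byte-swap helpers:

#include <stdint.h>
#include <stdio.h>

/* Byte-swap one line of 16-bit samples into a scratch buffer so the hash
 * input has the same byte order on big- and little-endian hosts. */
static void swap_line16(uint8_t *dst, const uint8_t *src, int width)
{
    for (int x = 0; x < width; x++) {
        dst[2 * x]     = src[2 * x + 1];
        dst[2 * x + 1] = src[2 * x];
    }
}

int main(void)
{
    uint8_t line[4] = { 0x12, 0x34, 0xAB, 0xCD };
    uint8_t swapped[4];
    swap_line16(swapped, line, 2);
    printf("%02X %02X %02X %02X\n",
           swapped[0], swapped[1], swapped[2], swapped[3]); /* 34 12 CD AB */
    return 0;
}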
AVClass
Describe the class of an AVClass context structure.
int ff_hevc_sao_type_idx_decode(HEVCContext *s)
struct HEVCContext ** sList
void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0, int nPbW, int nPbH)
int enable_parallel_tiles
int last_eos
last packet contains an EOS/EOB NAL
buffer.h
refcounted data buffer API
int ff_hevc_no_residual_syntax_flag_decode(HEVCContext *s)
int ff_hevc_cabac_init(HEVCContext *s, int ctb_addr_ts, int thread)
int ff_hevc_pcm_flag_decode(HEVCContext *s)
uint8_t flags
A combination of HEVC_FRAME_FLAG_*.
HEVCLocalContext * HEVClc
#define flags(name, subs,...)
int ff_hevc_cu_qp_delta_abs(HEVCContext *s)
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
uint8_t stat_coeff[HEVC_STAT_COEFFS]
const uint8_t ff_hevc_qpel_extra_after[4]
int eos
current packet contains an EOS/EOB NAL
AVBufferRef
A reference to a data buffer.
int ff_hevc_sao_merge_flag_decode(HEVCContext *s)
int ff_hevc_cu_transquant_bypass_flag_decode(HEVCContext *s)
internal.h
common internal api header.
static int ref[MAX_W * MAX_W]
int32_t * tab_slice_address
uint8_t * filter_slice_edges
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
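Together with is_nalff, this describes the ISO/IEC 14496-15 ("hvcC") packaging: instead of Annex B start codes, every NAL unit is preceded by a big-endian length field of nal_length_size bytes. A self-contained sketch of walking such a buffer under that assumption; the function name and the dummy payload are illustrative and error handling is trimmed:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Walk a 14496-15 style buffer: each NAL unit is preceded by a
 * big-endian length field of nal_length_size bytes (1, 2 or 4). */
static void walk_nalff(const uint8_t *buf, size_t size, int nal_length_size)
{
    size_t pos = 0;
    while (pos + nal_length_size <= size) {
        uint32_t nal_size = 0;
        for (int i = 0; i < nal_length_size; i++)
            nal_size = (nal_size << 8) | buf[pos++];
        if (nal_size > size - pos)
            break;                          /* truncated input */
        printf("NAL unit of %u bytes\n", nal_size);
        pos += nal_size;
    }
}

int main(void)
{
    /* Two dummy NAL units with 2-byte length prefixes. */
    const uint8_t buf[] = { 0x00, 0x02, 0x40, 0x01, 0x00, 0x01, 0x44 };
    walk_nalff(buf, sizeof(buf), 2);
    return 0;
}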
AVBufferPool * tab_mvf_pool
videodsp.h
Core video DSP helper functions.
int ff_hevc_merge_idx_decode(HEVCContext *s)
void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0, int log2_trafo_size, enum ScanType scan_idx, int c_idx)
void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
Decoded Picture Buffer (DPB).
int ff_hevc_skip_flag_decode(HEVCContext *s, int x0, int y0, int x_cb, int y_cb)
md5.h
Public header for MD5 hash function implementation.
uint8_t context_initialized
int ff_hevc_ref_idx_lx_decode(HEVCContext *s, int num_ref_idx_lx)
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
int ff_hevc_intra_chroma_pred_mode_decode(HEVCContext *s)
cabac.h
Context Adaptive Binary Arithmetic Coder.