#define TL_ADD(t, s) do {                                       \
        av_assert0(l->nb_tabs < TAB_MAX);                       \
        l->tabs[l->nb_tabs].tab  = (void**)&fc->tab.t;          \
        l->tabs[l->nb_tabs].size = sizeof(*fc->tab.t) * (s);    \
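/*
 * TL_ADD() registers one table of the frame context, a pointer slot in
 * fc->tab plus its size in bytes, into the TabList `l`, so that the generic
 * TabList helpers in this file (tl_create(), tl_free(), tl_init()) can
 * allocate, zero and free every table uniformly.  The tail of the macro
 * (bumping l->nb_tabs and closing the do/while) is outside this excerpt.
 */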
    const int ctu_count = pps ? pps->ctb_count : 0;
    const int changed   = fc->tab.sz.ctu_count != ctu_count;

    TL_ADD(deblock, ctu_count);
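/*
 * This fragment appears to come from ctu_tl_init(): tables with one entry
 * per CTU, such as the deblock table, are sized from pps->ctb_count, and
 * `changed` records whether that count differs from the value cached in
 * fc->tab.sz, presumably feeding tl_init()'s realloc flag so tables are only
 * recreated when the picture geometry actually changed.
 */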
    const int ctu_size  = sps ? (1 << sps->ctb_log2_size_y << sps->ctb_log2_size_y) : 0;
    const int ctu_count = pps ? pps->ctb_count : 0;
    const int changed   = fc->tab.sz.ctu_count != ctu_count ||
                          fc->tab.sz.ctu_size  != ctu_size;

    TL_ADD(slice_idx, ctu_count);
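/*
 * Likely ctu_nz_tl_init(): here ctu_size is the number of samples in one
 * CTU, (1 << ctb_log2_size_y) << ctb_log2_size_y, i.e. width * height.
 * For example, ctb_log2_size_y = 7 gives a 128x128 CTU of 16384 samples.
 */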
    const int pic_size_in_min_cb = pps ? pps->min_cb_width * pps->min_cb_height : 0;
    const int changed            = fc->tab.sz.pic_size_in_min_cb != pic_size_in_min_cb;

    TL_ADD(imtf, pic_size_in_min_cb);
    TL_ADD(imm,  pic_size_in_min_cb);
    TL_ADD(ipm,  pic_size_in_min_cb);

    TL_ADD(cb_pos_x[i],  pic_size_in_min_cb);
    TL_ADD(cb_pos_y[i],  pic_size_in_min_cb);
    TL_ADD(cb_width[i],  pic_size_in_min_cb);
    TL_ADD(cb_height[i], pic_size_in_min_cb);
    TL_ADD(cqt_depth[i], pic_size_in_min_cb);
    TL_ADD(cpm[i],       pic_size_in_min_cb);
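/*
 * min_cb_tl_init(): one entry per minimum coding block of the picture.  The
 * unindexed tables (imtf, imm, ipm) appear to hold intra-related per-CB
 * state, with ipm being the intra prediction mode map; the indexed ones
 * record coding-block position, size, quadtree depth and prediction mode
 * (cpm) per channel-tree index i, whose enclosing loop is not shown here.
 */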
    const int pic_size_in_min_pu = pps ? pps->min_pu_width * pps->min_pu_height : 0;
    const int changed            = fc->tab.sz.pic_size_in_min_pu != pic_size_in_min_pu;

    TL_ADD(msf, pic_size_in_min_pu);
    TL_ADD(iaf, pic_size_in_min_pu);
    TL_ADD(mmi, pic_size_in_min_pu);
    TL_ADD(mvf, pic_size_in_min_pu);
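/*
 * min_pu_tl_init(): inter-prediction state on the minimum prediction-unit
 * grid.  mvf is the per-PU MvField table (compare the tab_dmvr_mvf pool used
 * by pic_arrays_init() further down); msf, iaf and mmi appear to carry
 * subblock-merge, affine and motion-model information respectively.
 */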
    const int pic_size_in_min_tu = pps ? pps->min_tu_width * pps->min_tu_height : 0;
    const int changed            = fc->tab.sz.pic_size_in_min_tu != pic_size_in_min_tu;

    TL_ADD(tu_joint_cbcr_residual_flag, pic_size_in_min_tu);

    TL_ADD(tb_pos_x0[i], pic_size_in_min_tu);
    TL_ADD(tb_pos_y0[i], pic_size_in_min_tu);
    TL_ADD(tb_width[i],  pic_size_in_min_tu);
    TL_ADD(tb_height[i], pic_size_in_min_tu);
    TL_ADD(pcmf[i],      pic_size_in_min_tu);

    TL_ADD(tu_coded_flag[i], pic_size_in_min_tu);
    TL_ADD(qp[i],            pic_size_in_min_tu);
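/*
 * min_tu_tl_init(): per-minimum-transform-unit tables.  The joint Cb-Cr
 * residual flag is one array for the whole picture, while transform-block
 * geometry, coded flags and QP are kept per index i; the loops over i that
 * wrap the indexed TL_ADD() calls are not part of this excerpt.
 */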
    const int bs_width  = pps ? (pps->width  >> 2) + 1 : 0;
    const int bs_height = pps ? (pps->height >> 2) + 1 : 0;
    const int bs_count  = bs_width * bs_height;
    const int changed   = fc->tab.sz.bs_width  != bs_width ||
                          fc->tab.sz.bs_height != bs_height;

    TL_ADD(horizontal_bs[i], bs_count);
    TL_ADD(vertical_bs[i],   bs_count);

    TL_ADD(horizontal_q, bs_count);
    TL_ADD(horizontal_p, bs_count);
    TL_ADD(vertical_p,   bs_count);
    TL_ADD(vertical_q,   bs_count);
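/*
 * bs_tl_init(): deblocking boundary-strength tables plus the related per-edge
 * P/Q-side tables, laid out on a 4-sample grid, hence (width >> 2) + 1
 * entries per row and (height >> 2) + 1 rows.  For a 1920x1080 picture that
 * is 481 * 271 = 130351 entries per direction.
 */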
    const int ctu_width  = pps ? pps->ctb_width  : 0;
    const int ctu_height = pps ? pps->ctb_height : 0;
    const int chroma_idc = sps ? sps->r->sps_chroma_format_idc : 0;
    const int ps         = sps ? sps->pixel_shift : 0;
    const int changed    = fc->tab.sz.chroma_format_idc != chroma_idc ||
                           fc->tab.sz.ctu_width  != ctu_width ||
                           fc->tab.sz.ctu_height != ctu_height;

    for (int c_idx = 0; c_idx < c_end; c_idx++) {
        TL_ADD(sao_pixel_buffer_h[c_idx], (w * 2 * ctu_height) << ps);
        TL_ADD(sao_pixel_buffer_v[c_idx], (h * 2 * ctu_width)  << ps);

    for (int c_idx = 0; c_idx < c_end; c_idx++) {
        for (int i = 0; i < 2; i++) {
            TL_ADD(alf_pixel_buffer_h[c_idx][i], (w * border_pixels * ctu_height) << ps);
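/*
 * pixel_buffer_nz_tl_init(): line buffers used by the in-loop filters.  SAO
 * keeps two rows (columns) of samples per CTU row (column) and component,
 * ALF keeps border_pixels rows per CTU row, and every size is shifted left
 * by pixel_shift so that bit depths above 8 get two bytes per sample.  The
 * component width/height w and h and the component count c_end are derived
 * from the chroma format outside the lines shown.
 */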
    const int ctu_height = pps ? pps->ctb_height : 0;
    const int ctu_size   = sps ? sps->ctb_size_y : 0;
    const int ps         = sps ? sps->pixel_shift : 0;
    const int chroma_idc = sps ? sps->r->sps_chroma_format_idc : 0;
    const int has_ibc    = sps ? sps->r->sps_ibc_enabled_flag : 0;
    const int changed    = fc->tab.sz.chroma_format_idc != chroma_idc ||
                           fc->tab.sz.ctu_height != ctu_height ||
                           fc->tab.sz.ctu_size   != ctu_size ||
                           fc->tab.sz.pixel_shift != ps;

    const int hs = sps ? sps->hshift[i] : 0;
    const int vs = sps ? sps->vshift[i] : 0;
    TL_ADD(ibc_vir_buf[i], fc->tab.sz.ibc_buffer_width * ctu_size * ctu_height << ps >> hs >> vs);
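/*
 * ibc_tl_init(): the virtual reference buffer for intra block copy (IBC),
 * only needed when sps_ibc_enabled_flag is set (has_ibc).  Each plane i is
 * sized as ibc_buffer_width * ctu_size * ctu_height samples, scaled by
 * pixel_shift and reduced by the chroma subsampling shifts hs and vs.
 */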
    for (int i = 0; i < fc->tab.sz.ctu_count; i++)

    memset(&fc->tab.sz, 0, sizeof(fc->tab.sz));
    const int ctu_count          = pps->ctb_count;
    const int pic_size_in_min_pu = pps->min_pu_width * pps->min_pu_height;

    memset(fc->tab.slice_idx, -1, sizeof(*fc->tab.slice_idx) * ctu_count);

    if (fc->tab.sz.ctu_count != ctu_count) {
        if (!fc->rpl_tab_pool)

    if (fc->tab.sz.pic_size_in_min_pu != pic_size_in_min_pu) {
        if (!fc->tab_dmvr_mvf_pool)

    fc->tab.sz.ctu_count          = pps->ctb_count;
    fc->tab.sz.ctu_size           = 1 << sps->ctb_log2_size_y << sps->ctb_log2_size_y;
    fc->tab.sz.pic_size_in_min_cb = pps->min_cb_width * pps->min_cb_height;
    fc->tab.sz.pic_size_in_min_pu = pic_size_in_min_pu;
    fc->tab.sz.pic_size_in_min_tu = pps->min_tu_width * pps->min_tu_height;
    fc->tab.sz.width              = pps->width;
    fc->tab.sz.height             = pps->height;
    fc->tab.sz.ctu_width          = pps->ctb_width;
    fc->tab.sz.ctu_height         = pps->ctb_height;
    fc->tab.sz.chroma_format_idc  = sps->r->sps_chroma_format_idc;
    fc->tab.sz.pixel_shift        = sps->pixel_shift;
    fc->tab.sz.bs_width           = (fc->ps.pps->width  >> 2) + 1;
    fc->tab.sz.bs_height          = (fc->ps.pps->height >> 2) + 1;
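/*
 * pic_arrays_init(): after the TabLists have been (re)created, slice_idx is
 * reset to -1 (no slice owns any CTU yet), the RefStruct pools behind
 * rpl_tab and tab_dmvr_mvf are rebuilt when the CTU count or the min-PU
 * count changed, and the new geometry is cached in fc->tab.sz so the next
 * frame with identical parameters can skip the reallocation above.
 */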
    return diff > 0 && (idx < 0 || diff < min_diff);

    return diff < 0 && (idx < 0 || diff > max_diff);

    const int poc = fc->ref->poc;

    if (find(idx, diff, old_diff)) {
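/*
 * These fragments appear to belong to the symmetric-MVD (SMVD) reference
 * search: min_positive() keeps the reference with the smallest positive POC
 * difference, max_negtive() the one whose negative difference is closest to
 * zero, and smvd_find() walks a reference list with one of those predicates
 * to fill ref_idx_sym[] for the current slice.
 */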
    for (int i = 0; i < fc->nb_slices_allocated; i++) {

    fc->nb_slices_allocated = 0;

    const int size = (fc->nb_slices_allocated + 1) * 3 / 2;

    if (fc->nb_slices < fc->nb_slices_allocated)

    for (int i = fc->nb_slices_allocated; i < size; i++) {
        if (!fc->slices[i]) {
            fc->nb_slices_allocated = i;

        fc->slices[i]->slice_idx = i;

    fc->nb_slices_allocated = size;
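/*
 * slices_free() and slices_realloc(): the per-frame slice array grows
 * geometrically, size = (nb_slices_allocated + 1) * 3 / 2, giving the
 * sequence 1, 3, 6, 10, 16, ...  New entries are allocated one by one; if an
 * allocation fails, nb_slices_allocated is left at the number of entries
 * that actually exist, otherwise it is bumped to the new size.
 */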
    if (index < rsh->num_entry_points) {

    int64_t start = (gb->index >> 3);

    if (sc->nb_eps != nb_eps) {

    for (int j = ep->ctu_start; j < ep->ctu_end; j++) {
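/*
 * ep_init_cabac_decoder() and slice_init_entry_points(): each entry point of
 * a slice gets its own CABAC decoder, starting at the byte offset
 * gb->index >> 3 (GetBitContext counts in bits), and the CTUs in
 * [ep->ctu_start, ep->ctu_end) are attached to that entry point so they can
 * later be parsed from it.
 */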
    const int size = s->nb_fcs;

    if (!fc->output_frame)

    if (!fc->DPB[j].frame)

    if (s->nb_frames && s->nb_fcs > 1) {

    s->seq_decode = (s->seq_decode + 1) & 0xff;

    s->poc_tid0 = ph->poc;

    if (is_first_slice) {
    } else if (fc->ref) {
                   "Error constructing the reference lists for the current slice.\n");
    c->pix_fmt      = sps->pix_fmt;
    c->coded_width  = pps->width;
    c->coded_height = pps->height;
    c->width        = pps->width  - ((pps->r->pps_conf_win_left_offset + pps->r->pps_conf_win_right_offset) << sps->hshift[CHROMA]);
    c->height       = pps->height - ((pps->r->pps_conf_win_top_offset + pps->r->pps_conf_win_bottom_offset) << sps->vshift[CHROMA]);
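/*
 * export_frame_params(): the coded dimensions come straight from the PPS,
 * while the visible width/height subtract the conformance window.  The
 * window offsets are expressed in chroma units, hence the shift by
 * sps->hshift[CHROMA] / sps->vshift[CHROMA]; e.g. 4:2:0 1080p coded as
 * 1920x1088 uses pps_conf_win_bottom_offset = 4 chroma rows, removing
 * 8 luma rows.
 */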
    const int is_first_slice = !fc->nb_slices;

    sc = fc->slices[fc->nb_slices];

    s->vcl_unit_type = nal->type;
    if (is_first_slice) {

    switch (unit->type) {
    int eos_at_start = 1;

    s->last_eos = s->eos;

    for (int i = 0; i < frame->nb_units; i++) {

               "Error parsing NAL unit #%d.\n", i);

    if (s->nb_delayed >= s->nb_fcs) {

    while (s->nb_delayed) {

    fc->decode_order = s->nb_frames;

    while (s->nb_delayed)

    for (int i = 0; i < s->nb_fcs; i++)

#define VVC_MAX_DELAYED_FRAMES 16

    for (int i = 0; i < s->nb_fcs; i++) {
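/*
 * The closing fragments cover decode_nal_units(), submit_frame() and the
 * flush path: each coded frame is handed to one of s->nb_fcs frame contexts
 * and decoded asynchronously, with decode_order recording its submission
 * order.  Once nb_delayed reaches nb_fcs (bounded by VVC_MAX_DELAYED_FRAMES)
 * the decoder appears to wait for the oldest delayed frame before accepting
 * a new one, and at flush time the remaining delayed frames are drained the
 * same way.
 */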