29 #define START_CODE 0x000001 ///< start_code_prefix_one_3bytes
30 #define IS_IDR(nut) (nut == VVC_IDR_W_RADL || nut == VVC_IDR_N_LP)
31 #define IS_H266_SLICE(nut) (nut <= VVC_RASL_NUT || (nut >= VVC_IDR_W_RADL && nut <= VVC_GDR_NUT))
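These macros classify the 5-bit nal_unit_type. A minimal sketch of how they would be used, with a hypothetical helper name (classify_nut) and the VVC_* constants assumed to come from libavcodec/vvc.h:

/* Hypothetical helper, for illustration only. */
static int classify_nut(int nut)
{
    if (IS_IDR(nut))
        return 2;   /* IDR picture: starts a new coded video sequence */
    if (IS_H266_SLICE(nut))
        return 1;   /* coded slice NAL (TRAIL..RASL, IDR..GDR)        */
    return 0;       /* non-VCL unit: parameter sets, SEI, ...         */
}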
73 switch (sps->sps_bitdepth_minus8) {
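The switch above selects a pixel-format table by bit depth and indexes it with sps_chroma_format_idc. A hedged sketch of one plausible mapping; the *_example tables below are illustrative, not copies of the file's pix_fmts_8bit/pix_fmts_10bit:

/* Illustrative only; requires libavutil/pixfmt.h.
 * Indexed by sps_chroma_format_idc (0=mono, 1=4:2:0, 2=4:2:2, 3=4:4:4). */
static enum AVPixelFormat pix_fmts_8bit_example[] = {
    AV_PIX_FMT_GRAY8,  AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV444P
};
static enum AVPixelFormat pix_fmts_10bit_example[] = {
    AV_PIX_FMT_GRAY10, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10
};

static enum AVPixelFormat format_for(const H266RawSPS *sps)
{
    switch (sps->sps_bitdepth_minus8) {
    case 0: return pix_fmts_8bit_example[sps->sps_chroma_format_idc];  /* 8-bit  */
    case 2: return pix_fmts_10bit_example[sps->sps_chroma_format_idc]; /* 10-bit */
    }
    return AV_PIX_FMT_NONE; /* other bit depths not mapped in this sketch */
}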
93 for (i = 0; i < buf_size; i++) {
101 code_len = ((pc->state64 >> 3 * 8) & 0xFFFFFFFF) == 0x01 ? 4 : 3;
103 nut = (pc->state64 >> (8 + 3)) & 0x1F;
112 return i - (code_len + 2);
115 int sh_picture_header_in_slice_header_flag = buf[i] >> 7;
117 if (nut == VVC_PH_NUT || sh_picture_header_in_slice_header_flag) {
122 return i - (code_len + 2);
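A sketch of the scan these fragments belong to: the parser keeps the last eight input bytes in a 64-bit state word (newest byte in the low bits), so once three bytes beyond a 00 00 01 start code have been shifted in, the NAL header and the first payload bit can be read straight out of the state. The offsets below mirror the expressions above; buf/buf_size are assumed as in find_frame_end().

uint64_t state = 0;
for (int i = 0; i < buf_size; i++) {
    state = (state << 8) | buf[i];
    if (((state >> 3 * 8) & 0xFFFFFF) != 0x000001)
        continue;                                    /* no start code ending 3 bytes back */
    int code_len = ((state >> 3 * 8) & 0xFFFFFFFF) == 0x01 ? 4 : 3; /* 00 00 00 01 vs 00 00 01 */
    int nut      = (state >> (8 + 3)) & 0x1F;        /* nal_unit_type: top 5 bits of 2nd NAL header byte */
    int ph_flag  = buf[i] >> 7;                      /* first RBSP bit: sh_picture_header_in_slice_header_flag */
    /* ... decide whether a new frame starts here; on the next frame's first
       slice, return i - (code_len + 2), the index of that start code's first byte. */
}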
152 static const uint8_t h266_sub_width_c[] = {
155 static const uint8_t h266_sub_height_c[] = {
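The array bodies are elided in this listing. Per the H.266 chroma format table (the same SubWidthC/SubHeightC values as in HEVC), they would hold the subsampling factors below, shown here as a sketch rather than as a copy of the file:

/* Subsampling factors indexed by sps_chroma_format_idc:
 *   0 = monochrome, 1 = 4:2:0, 2 = 4:2:2, 3 = 4:4:4 */
static const uint8_t sub_width_c_example[]  = { 1, 2, 2, 1 };
static const uint8_t sub_height_c_example[] = { 1, 2, 1, 1 };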
171 s->coded_width = pps->pps_pic_width_in_luma_samples;
172 s->coded_height = pps->pps_pic_height_in_luma_samples;
173 s->width = pps->pps_pic_width_in_luma_samples -
174 (pps->pps_conf_win_left_offset + pps->pps_conf_win_right_offset) *
175 h266_sub_width_c[sps->sps_chroma_format_idc];
176 s->height = pps->pps_pic_height_in_luma_samples -
177 (pps->pps_conf_win_top_offset + pps->pps_conf_win_bottom_offset) *
178 h266_sub_height_c[sps->sps_chroma_format_idc];
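A worked example of the cropping arithmetic above, with hypothetical numbers for a 1080p 4:2:0 stream (coded as 1920x1088 with 8 luma lines cropped at the bottom, i.e. a bottom offset of 4 chroma rows):

int coded_w = 1920, coded_h = 1088;                 /* pps_pic_*_in_luma_samples            */
int win_l = 0, win_r = 0, win_t = 0, win_b = 4;     /* conformance window, in chroma units  */
int sub_w = 2, sub_h = 2;                           /* 4:2:0 -> SubWidthC = SubHeightC = 2  */
int width  = coded_w - (win_l + win_r) * sub_w;     /* 1920                                 */
int height = coded_h - (win_t + win_b) * sub_h;     /* 1088 - 8 = 1080                      */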
180 avctx->profile = sps->profile_tier_level.general_profile_idc;
181 avctx->level = sps->profile_tier_level.general_level_idc;
189 if (sps->sps_ptl_dpb_hrd_params_present_flag &&
190 sps->sps_timing_hrd_params_present_flag) {
191 uint32_t num = sps->sps_general_timing_hrd_parameters.num_units_in_tick;
192 uint32_t den = sps->sps_general_timing_hrd_parameters.time_scale;
194 if (num != 0 && den != 0)
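The body of this branch is not shown above. As a sketch of what the timing parameters yield: time_scale ticks per second and num_units_in_tick ticks per frame give a nominal frame rate of time_scale / num_units_in_tick, which av_reduce() can store as a reduced fraction (the AVRational destination here is assumed; e.g. 60000/1001 gives 59.94 fps):

AVRational framerate = { 0, 1 };
if (num != 0 && den != 0)                           /* num = num_units_in_tick, den = time_scale */
    av_reduce(&framerate.den, &framerate.num, num, den, 1 << 30);
/* framerate.num / framerate.den == time_scale / num_units_in_tick */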
208 int poc_msb, max_poc_lsb, poc_lsb;
210 max_poc_lsb = 1 << (sps->sps_log2_max_pic_order_cnt_lsb_minus4 + 4);
211 poc_lsb = ph->ph_pic_order_cnt_lsb;
213 if (ph->ph_poc_msb_cycle_present_flag)
214 poc_msb = ph->ph_poc_msb_cycle_val * max_poc_lsb;
219 int prev_poc_lsb = prev_poc & (max_poc_lsb - 1);
220 int prev_poc_msb = prev_poc - prev_poc_lsb;
221 if (ph->ph_poc_msb_cycle_present_flag) {
222 poc_msb = ph->ph_poc_msb_cycle_val * max_poc_lsb;
224 if ((poc_lsb < prev_poc_lsb) && ((prev_poc_lsb - poc_lsb) >= max_poc_lsb / 2))
226 poc_msb = prev_poc_msb + (unsigned)max_poc_lsb;
227 else if ((poc_lsb > prev_poc_lsb) && ((poc_lsb - prev_poc_lsb) > max_poc_lsb / 2))
229 poc_msb = prev_poc_msb - (unsigned)max_poc_lsb;
231 poc_msb = prev_poc_msb;
235 *poc = poc_msb + poc_lsb;
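For reference, a self-contained sketch of the full derivation these fragments implement (the standard lsb/msb wrap-around handling, analogous to HEVC clause 8.3.1); the names and the prev_poc parameter are illustrative:

/* Sketch of picture order count derivation from ph_pic_order_cnt_lsb.
 * prev_poc is the POC of the previous reference picture in decoding order. */
static int derive_poc(int prev_poc, int poc_lsb, int log2_max_poc_lsb,
                      int has_msb_cycle, int msb_cycle_val)
{
    const int max_poc_lsb  = 1 << log2_max_poc_lsb;
    const int prev_poc_lsb = prev_poc & (max_poc_lsb - 1);
    const int prev_poc_msb = prev_poc - prev_poc_lsb;
    int poc_msb;

    if (has_msb_cycle) {
        poc_msb = msb_cycle_val * max_poc_lsb;      /* msb cycle signalled explicitly */
    } else if (poc_lsb < prev_poc_lsb &&
               prev_poc_lsb - poc_lsb >= max_poc_lsb / 2) {
        poc_msb = prev_poc_msb + max_poc_lsb;       /* lsb wrapped forwards  */
    } else if (poc_lsb > prev_poc_lsb &&
               poc_lsb - prev_poc_lsb > max_poc_lsb / 2) {
        poc_msb = prev_poc_msb - max_poc_lsb;       /* lsb wrapped backwards */
    } else {
        poc_msb = prev_poc_msb;                     /* same msb cycle        */
    }
    return poc_msb + poc_lsb;
}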
283 info->ph = &ph->ph_picture_header;
286 if (info->slice->header.sh_picture_header_in_slice_header_flag)
287 info->ph = &info->slice->header.sh_picture_header;
290 "can't find picture header in picture unit.\n");
302 info->pps = h266->pps[info->ph->ph_pic_parameter_set_id];
305 info->ph->ph_pic_parameter_set_id);
309 info->sps = h266->sps[info->pps->pps_seq_parameter_set_id];
312 info->pps->pps_seq_parameter_set_id);
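The lookup chain here goes picture header -> PPS id -> PPS -> SPS id -> SPS, and either parameter set can be missing when a stream is joined mid-sequence. A condensed sketch of the same chain, with the error logging elided:

const H266RawPPS *pps = h266->pps[ph->ph_pic_parameter_set_id];
if (!pps)
    return AVERROR_INVALIDDATA;                     /* PPS not received yet */
const H266RawSPS *sps = h266->sps[pps->pps_seq_parameter_set_id];
if (!sps)
    return AVERROR_INVALIDDATA;                     /* SPS not received yet */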
361 if ((ret = ff_cbs_read(ctx->cbc, pu, NULL, buf, buf_size)) < 0) {
378 ff_cbs_fragment_reset(pu);
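A sketch of the CBS pattern behind these two lines: ff_cbs_read() splits the buffer into decomposed NAL units inside a CodedBitstreamFragment, the units are inspected, and ff_cbs_fragment_reset() releases their buffers for reuse (ff_cbs_fragment_free() does the final teardown). The loop body below is illustrative:

CodedBitstreamFragment frag = { 0 };
int ret = ff_cbs_read(ctx->cbc, &frag, NULL, buf, buf_size);
if (ret < 0) {
    av_log(avctx, AV_LOG_ERROR, "Failed to parse picture unit.\n");
    return ret;
}
for (int i = 0; i < frag.nb_units; i++) {
    const CodedBitstreamUnit *unit = &frag.units[i];
    /* unit->type is the NAL unit type, unit->content the parsed header struct */
}
ff_cbs_fragment_reset(&frag);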
392 const uint8_t **buf, int *buf_size)
397 ctx->cbc->log_ctx = avctx;
402 if (ctx->last_au.size) {
403 *buf = ctx->last_au.data;
404 *buf_size = ctx->last_au.size;
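combine_au() accumulates picture units until a whole access unit is available and then hands back the buffered data, as in the branch above. A plausible sketch of the append step, assuming an AVPacket as the accumulation buffer (mirroring what an append_au() helper with that signature would do, not a verbatim copy):

/* Sketch: append buf to pkt, growing it and zeroing the new padding. */
static int append_au_sketch(AVPacket *pkt, const uint8_t *buf, int buf_size)
{
    int offset = pkt->size;
    int ret    = av_grow_packet(pkt, buf_size);
    if (ret < 0)
        return ret;
    memcpy(pkt->data + offset, buf, buf_size);      /* needs <string.h> */
    return 0;
}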
414 const uint8_t **poutbuf, int *poutbuf_size,
415 const uint8_t *buf, int buf_size)
422 int is_dummy_buf = !buf_size;
423 int flush = !buf_size;
424 const uint8_t *dummy_buf = buf;
430 ctx->parsed_extradata = 1;
432 ret = ff_cbs_read_extradata_from_codec(ctx->cbc, pu, avctx);
436 ff_cbs_fragment_reset(pu);
447 is_dummy_buf &= (dummy_buf == buf);
460 *poutbuf_size = buf_size;
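The zero-sized input handled above is the standard parser flush convention. A simplified sketch of the usual framing flow around it, using ff_combine_frame() when the caller has not set PARSER_FLAG_COMPLETE_FRAMES (control flow illustrative, not a verbatim excerpt):

if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
    next = buf_size;                                /* caller already delivers whole AUs */
} else {
    next = find_frame_end(s, buf, buf_size);
    if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
        *poutbuf      = NULL;                       /* access unit not complete yet */
        *poutbuf_size = 0;
        return buf_size;
    }
}
*poutbuf      = buf;
*poutbuf_size = buf_size;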
503 ff_cbs_fragment_free(&ctx->picture_unit);
505 ff_cbs_close(&ctx->cbc);
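For symmetry with this teardown, a hedged sketch of what the matching init side of a CBS-based parser typically looks like; the exact body of vvc_parser_init is not shown in this listing, and ff_cbs_init() plus the decompose_unit_types fields are assumed from the CBS API:

static av_cold int vvc_parser_init_sketch(AVCodecParserContext *s)
{
    VVCParserContext *ctx = s->priv_data;
    int ret = ff_cbs_init(&ctx->cbc, AV_CODEC_ID_VVC, NULL);
    if (ret < 0)
        return ret;
    /* only decompose the unit types the parser actually inspects */
    ctx->cbc->decompose_unit_types    = decompose_unit_types;
    ctx->cbc->nb_decompose_unit_types = FF_ARRAY_ELEMS(decompose_unit_types);
    return 0;
}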