dst->PicIdx             = cf ? cf->idx : -1;
dst->FrameIdx           = frame_idx;
dst->not_existing       = 0;
dst->used_for_reference = src->reference & 3;
dst->FieldOrderCnt[0]   = src->field_poc[0];
dst->FieldOrderCnt[1]   = src->field_poc[1];
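These assignments fill one CUVIDH264DPBENTRY from an H264Picture. Below is a minimal sketch of the dpb_add() helper they belong to (its signature appears in the symbol list further down); the private_ref/FrameDecodeData lookup and the NVDECFrame `idx` member are assumptions based on the other symbols listed, not verbatim source.

/* Sketch: fill one NVDEC DPB entry from a decoded H.264 reference picture.
 * Assumes an NVDECFrame with an `idx` member reachable through the frame's
 * FrameDecodeData (AVFrame.private_ref -> hwaccel_priv). */
static void dpb_add(const H264Context *h, CUVIDH264DPBENTRY *dst,
                    const H264Picture *src, int frame_idx)
{
    const FrameDecodeData *fdd = (const FrameDecodeData *)src->f->private_ref->data;
    const NVDECFrame      *cf  = fdd->hwaccel_priv;

    dst->PicIdx             = cf ? cf->idx : -1;   /* surface index, -1 if unavailable   */
    dst->FrameIdx           = frame_idx;           /* frame_num or long-term frame index */
    dst->is_long_term       = src->long_ref;       /* 1 = long-term, 0 = short-term      */
    dst->not_existing       = 0;
    dst->used_for_reference = src->reference & 3;  /* top/bottom field reference flags   */
    dst->FieldOrderCnt[0]   = src->field_poc[0];   /* top field POC                      */
    dst->FieldOrderCnt[1]   = src->field_poc[1];   /* bottom field POC                   */
}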
CUVIDH264PICPARAMS *ppc = &pp->CodecSpecific.h264;

*pp = (CUVIDPICPARAMS) {
    .CurrPicIdx = cf->idx,

    .CodecSpecific.h264 = {
        .entropy_coding_mode_flag     = pps->cabac,
        .num_ref_idx_l0_active_minus1 = pps->ref_count[0] - 1,
        .num_ref_idx_l1_active_minus1 = pps->ref_count[1] - 1,
        .pic_init_qp_minus26          = pps->init_qp - 26,
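These initializers belong to the start-frame hook, which maps the active SPS/PPS into NVDEC's picture parameters. A minimal sketch of where pp, ppc, cf and pps come from is given below; the NVDECContext/NVDECFrame member names (pic_params, idx) are assumptions based on the shared NVDEC helpers, and error handling is trimmed.

/* Sketch: setup at the top of the H.264 start-frame hook. */
static int nvdec_h264_start_frame(AVCodecContext *avctx,
                                  const uint8_t *buffer, uint32_t size)
{
    const H264Context  *h   = avctx->priv_data;
    const PPS          *pps = h->ps.pps;
    NVDECContext       *ctx = avctx->internal->hwaccel_priv_data;
    CUVIDPICPARAMS     *pp  = &ctx->pic_params;
    CUVIDH264PICPARAMS *ppc = &pp->CodecSpecific.h264;
    FrameDecodeData    *fdd;
    NVDECFrame         *cf;
    int ret;

    /* Binds an NVDEC output surface to the frame currently being decoded. */
    ret = ff_nvdec_start_frame(avctx, h->cur_pic_ptr->f);
    if (ret < 0)
        return ret;

    /* The per-frame hwaccel data created above carries the surface index. */
    fdd = (FrameDecodeData *)h->cur_pic_ptr->f->private_ref->data;
    cf  = fdd->hwaccel_priv;

    /* ... *pp is then filled from the SPS/PPS as in the excerpt above ... */
    return 0;
}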
memcpy(ppc->WeightScale4x4,    pps->scaling_matrix4,    sizeof(ppc->WeightScale4x4));
memcpy(ppc->WeightScale8x8[0], pps->scaling_matrix8[0], sizeof(ppc->WeightScale8x8[0]));
memcpy(ppc->WeightScale8x8[1], pps->scaling_matrix8[3], sizeof(ppc->WeightScale8x8[0]));
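The index choice follows from a size mismatch: FFmpeg's PPS keeps six 8x8 scaling lists, while the NVDEC picture parameters only have room for two, so just the luma lists (intra at index 0, inter at index 3 in FFmpeg's ordering) are forwarded. The shapes involved, as assumed from the respective headers:

/* FFmpeg PPS side: all six 4x4 and 8x8 lists are stored (intra then inter). */
uint8_t scaling_matrix4[6][16];
uint8_t scaling_matrix8[6][64];   /* [0] intra luma, [3] inter luma, others chroma (4:4:4 only) */

/* NVDEC side (CUVIDH264PICPARAMS): six 4x4 lists, but only two 8x8 lists.   */
unsigned char WeightScale4x4[6][16];
unsigned char WeightScale8x8[2][64];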
for (i = 0; i < 16; i++) {
    ppc->dpb[i].PicIdx = -1;
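Before the reference lists are translated, every DPB slot is marked unused; dpb_add() then fills one slot per valid reference. A sketch of that pass, assuming the usual limit of 16 DPB entries and using pic_id as the long-term frame index (the validity checks are assumptions):

/* Sketch: clear all NVDEC DPB slots, then add the active references. */
int i, dpb_size = 0;

for (i = 0; i < FF_ARRAY_ELEMS(ppc->dpb); i++)
    ppc->dpb[i].PicIdx = -1;                                  /* slot unused */

for (i = 0; i < h->short_ref_count && dpb_size < 16; i++) {
    const H264Picture *p = h->short_ref[i];
    if (p && p->f && p->f->buf[0])
        dpb_add(h, &ppc->dpb[dpb_size++], p, p->frame_num);   /* frame_num as FrameIdx       */
}
for (i = 0; i < FF_ARRAY_ELEMS(h->long_ref) && dpb_size < 16; i++) {
    const H264Picture *p = h->long_ref[i];
    if (p && p->f && p->f->buf[0])
        dpb_add(h, &ppc->dpb[dpb_size++], p, p->pic_id);      /* long-term index as FrameIdx */
}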
pp->intra_pic_flag = 0;
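This assignment sits in the per-slice hook: intra_pic_flag starts out set in the start-frame step and is cleared as soon as a non-intra slice is seen. A minimal sketch of the hook, assuming the NVDECContext bitstream/slice bookkeeping fields visible in the symbol list below; start-code handling, which NVDEC's Annex-B input typically requires, is omitted for brevity:

/* Sketch: per-slice hook. Appends the slice to the accumulated bitstream,
 * records its offset, and clears intra_pic_flag for non-intra slices. */
static int nvdec_h264_decode_slice(AVCodecContext *avctx,
                                   const uint8_t *buffer, uint32_t size)
{
    NVDECContext           *ctx = avctx->internal->hwaccel_priv_data;
    CUVIDPICPARAMS         *pp  = &ctx->pic_params;
    const H264Context      *h   = avctx->priv_data;
    const H264SliceContext *sl  = &h->slice_ctx[0];
    void *tmp;

    /* Grow the accumulation buffers only when needed (no-op otherwise). */
    tmp = av_fast_realloc(ctx->bitstream, &ctx->bitstream_allocated,
                          ctx->bitstream_len + size);
    if (!tmp)
        return AVERROR(ENOMEM);
    ctx->bitstream = tmp;

    tmp = av_fast_realloc(ctx->slice_offsets, &ctx->slice_offsets_allocated,
                          (ctx->nb_slices + 1) * sizeof(*ctx->slice_offsets));
    if (!tmp)
        return AVERROR(ENOMEM);
    ctx->slice_offsets = tmp;

    memcpy(ctx->bitstream + ctx->bitstream_len, buffer, size);
    ctx->slice_offsets[ctx->nb_slices++] = ctx->bitstream_len;
    ctx->bitstream_len += size;

    if (sl->slice_type != AV_PICTURE_TYPE_I && sl->slice_type != AV_PICTURE_TYPE_SI)
        pp->intra_pic_flag = 0;     /* the picture is no longer all-intra */

    return 0;
}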
.name = "h264_nvdec",
int long_ref
1->long term reference 0->short term reference
int chroma_qp_index_offset[2]
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
int ff_nvdec_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx, int dpb_size, int supports_444)
static int nvdec_h264_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)  (see the sketch after this list)
#define FF_ARRAY_ELEMS(a)
H264Picture * long_ref[32]
static void dpb_add(const H264Context *h, CUVIDH264DPBENTRY *dst, const H264Picture *src, int frame_idx)
uint8_t scaling_matrix4[6][16]
int deblocking_filter_parameters_present
deblocking_filter_parameters_present_flag
int bit_depth_chroma
bit_depth_chroma_minus8 + 8
CUVIDPICPARAMS pic_params
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
int cabac
entropy_coding_mode_flag
#define PICT_BOTTOM_FIELD
int transform_bypass
qpprime_y_zero_transform_bypass_flag
int redundant_pic_cnt_present
redundant_pic_cnt_present_flag
int mb_aff
mb_adaptive_frame_field_flag
int poc_type
pic_order_cnt_type
int constrained_intra_pred
constrained_intra_pred_flag
int ff_nvdec_end_frame(AVCodecContext *avctx)
int ff_nvdec_start_frame(AVCodecContext *avctx, AVFrame *frame)
int weighted_pred
weighted_pred_flag
int frame_num
frame_num (raw frame_num from slice header)
int residual_color_transform_flag
residual_colour_transform_flag
int delta_pic_order_always_zero_flag
uint8_t scaling_matrix8[6][64]
int ff_nvdec_decode_init(AVCodecContext *avctx)
static int nvdec_h264_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
int ref_frame_count
num_ref_frames
const char * name
Name of the hardware accelerated codec.
int init_qp
pic_init_qp_minus26 + 26
H.264 / AVC / MPEG-4 part10 codec.
H264SliceContext * slice_ctx
int direct_8x8_inference_flag
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
HW acceleration through CUDA.
int pic_order_present
pic_order_present_flag
Libavcodec external API header.
unsigned int bitstream_allocated
H264Picture * short_ref[32]
int field_poc[2]
top/bottom POC
main external API structure.
int ff_nvdec_decode_uninit(AVCodecContext *avctx)
uint8_t * data
The data buffer.
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
unsigned int slice_offsets_allocated
H264Picture * cur_pic_ptr
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
int transform_8x8_mode
transform_8x8_mode_flag
const AVHWAccel ff_h264_nvdec_hwaccel
A reference to a data buffer.
static int nvdec_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
common internal api header.
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
void * hwaccel_priv_data
hwaccel-specific private data
int bit_depth_luma
bit_depth_luma_minus8 + 8
struct AVCodecInternal * internal
Private context used for internal data.
void * hwaccel_priv
Per-frame private data for hwaccels.
int short_ref_count
number of actual short term references
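As referenced above for ff_nvdec_frame_params and nvdec_h264_frame_params, the frame-parameters hook only has to size the CUDA frame pool from the active SPS and defer to the shared helper. A minimal sketch, assuming a pool of num_ref_frames plus one surface for the picture being decoded (the exact margin differs between versions) and no 4:4:4 output advertised:

/* Sketch: derive the DPB size for the CUDA frames context from the SPS. */
static int nvdec_h264_frame_params(AVCodecContext *avctx,
                                   AVBufferRef *hw_frames_ctx)
{
    const H264Context *h   = avctx->priv_data;
    const SPS         *sps = h->ps.sps;

    /* ref_frame_count is the SPS num_ref_frames; +1 for the current picture.
     * supports_444 = 0: 4:4:4 output is not advertised in this sketch. */
    return ff_nvdec_frame_params(avctx, hw_frames_ctx,
                                 sps->ref_frame_count + 1, 0);
}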