/* Excerpts from the VDPAU H.264 hwaccel source (listing line numbers stripped) */
#include <vdpau/vdpau.h>

/* vdpau_h264_clear_rf(): reset one VdpReferenceFrameH264 entry */
    rf->surface             = VDP_INVALID_HANDLE;
    rf->is_long_term        = VDP_FALSE;
    rf->top_is_reference    = VDP_FALSE;
    rf->bottom_is_reference = VDP_FALSE;
    rf->field_order_cnt[0]  = 0;
    rf->field_order_cnt[1]  = 0;

/* vdpau_h264_set_rf(): fill an entry from an H264Picture */
    if (pic_structure == 0)
        pic_structure = pic->reference;

    rf->surface = surface;
/* vdpau_h264_set_reference_frames(): build the referenceFrames array
 * (surrounding declarations are elided in this excerpt) */
    VdpReferenceFrameH264 *rf = &info->referenceFrames[0];
#define H264_RF_COUNT FF_ARRAY_ELEMS(info->referenceFrames)

    for (list = 0; list < 2; ++list) {
        for (i = 0; i < ls; ++i) {
            VdpReferenceFrameH264 *rf2;
            VdpVideoSurface surface_ref;

            /* Merge with an existing entry (e.g. the other field of the
             * same frame) if one already describes this picture. */
            rf2 = &info->referenceFrames[0];
            while (rf2 != rf) {
                if ((rf2->surface      == surface_ref)   &&
                    (rf2->is_long_term == pic->long_ref) &&
                    (rf2->frame_idx    == pic_frame_idx))
                    break;
                ++rf2;
            }
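The loop above merges both fields of one picture into a single referenceFrames slot by first checking whether an entry with the same surface, long-term flag and frame_idx already exists. A minimal standalone sketch of that lookup, assuming only <vdpau/vdpau.h>; find_reference_frame is a hypothetical name used here for illustration, not an FFmpeg symbol:

#include <stddef.h>
#include <vdpau/vdpau.h>

/* Illustration of the de-duplication check above: return the entry that
 * already describes (surface, is_long_term, frame_idx), or NULL. */
static VdpReferenceFrameH264 *
find_reference_frame(VdpReferenceFrameH264 *frames, int count,
                     VdpVideoSurface surface, VdpBool is_long_term,
                     uint32_t frame_idx)
{
    for (int i = 0; i < count; i++) {
        if (frames[i].surface      == surface      &&
            frames[i].is_long_term == is_long_term &&
            frames[i].frame_idx    == frame_idx)
            return &frames[i];
    }
    return NULL;
}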
/* vdpau_h264_start_frame(): fill VdpPictureInfoH264 from the active SPS/PPS */
#ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
    VdpPictureInfoH264Predictive *info2 = &pic_ctx->info.h264_predictive;
#endif

    info->slice_count                  = 0;
    info->mb_adaptive_frame_field_flag = sps->mb_aff && !info->field_pic_flag;
    info->pic_init_qp_minus26          = pps->init_qp - 26;
    info->num_ref_idx_l0_active_minus1 = pps->ref_count[0] - 1;
    info->num_ref_idx_l1_active_minus1 = pps->ref_count[1] - 1;
    info->pic_order_cnt_type           = sps->poc_type;
#ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
    info2->qpprime_y_zero_transform_bypass_flag = sps->transform_bypass;
#endif
    info->entropy_coding_mode_flag     = pps->cabac;

    memcpy(info->scaling_lists_4x4, pps->scaling_matrix4,
           sizeof(info->scaling_lists_4x4));
    memcpy(info->scaling_lists_8x8[0], pps->scaling_matrix8[0],
           sizeof(info->scaling_lists_8x8[0]));
    memcpy(info->scaling_lists_8x8[1], pps->scaling_matrix8[3],
           sizeof(info->scaling_lists_8x8[1]));
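The excerpt above resets info->slice_count before any slices arrive. For context, the per-slice path (vdpau_h264_decode_slice(), ff_vdpau_add_buffer() and start_code_prefix[] in the index below) forwards each slice to VDPAU with an Annex-B start code prepended and increments that counter. The following is a sketch of that shape, reconstructed here as an assumption rather than quoted from the source, and it relies on the same internal headers as the file itself:

/* Assumed shape of the slice handler: prepend the 00 00 01 start code,
 * append the slice payload, and count the slice in VdpPictureInfoH264. */
static const uint8_t start_code_prefix[3] = { 0x00, 0x00, 0x01 };

static int vdpau_h264_decode_slice(AVCodecContext *avctx,
                                   const uint8_t *buffer, uint32_t size)
{
    H264Context *h = avctx->priv_data;
    struct vdpau_picture_context *pic_ctx = h->cur_pic_ptr->hwaccel_picture_private;
    int err;

    err = ff_vdpau_add_buffer(pic_ctx, start_code_prefix, 3);
    if (err < 0)
        return err;
    err = ff_vdpau_add_buffer(pic_ctx, buffer, size);
    if (err < 0)
        return err;

    pic_ctx->info.h264.slice_count++;
    return 0;
}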
/* vdpau_h264_init(): per-profile assignments from the switch that maps
 * the stream's FF_PROFILE_H264_* value to a VDPAU decoder profile */
    profile = VDP_DECODER_PROFILE_H264_BASELINE;
#ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
    profile = VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE;
#endif
    profile = VDP_DECODER_PROFILE_H264_MAIN;
    profile = VDP_DECODER_PROFILE_H264_HIGH;
#ifdef VDP_DECODER_PROFILE_H264_EXTENDED
    profile = VDP_DECODER_PROFILE_H264_EXTENDED;
#endif
    profile = VDP_DECODER_PROFILE_H264_HIGH;   /* High 10 maps to High */
#ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
    profile = VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE;
#endif
    level = VDP_DECODER_LEVEL_H264_1b;          /* special case for level 1b */
266 .
name =
"h264_vdpau",
Referenced symbols, with their declarations and brief descriptions:

#define FF_PROFILE_H264_MAIN
int long_ref
1->long term reference 0->short term reference
#define FF_PROFILE_H264_CAVLC_444
int chroma_qp_index_offset[2]
int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
#define FF_PROFILE_H264_INTRA
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Public libavcodec VDPAU header.
static void vdpau_h264_clear_rf(VdpReferenceFrameH264 *rf)
H264Picture * long_ref[32]
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE
uint8_t scaling_matrix4[6][16]
int deblocking_filter_parameters_present
deblocking_filter_parameters_present_flag
#define FF_PROFILE_H264_BASELINE
int ff_vdpau_common_uninit(AVCodecContext *avctx)
const AVHWAccel ff_h264_vdpau_hwaccel
int cabac
entropy_coding_mode_flag
#define PICT_BOTTOM_FIELD
int transform_bypass
qpprime_y_zero_transform_bypass_flag
static int vdpau_h264_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
int redundant_pic_cnt_present
redundant_pic_cnt_present_flag
#define FF_PROFILE_H264_EXTENDED
static void vdpau_h264_set_rf(VdpReferenceFrameH264 *rf, H264Picture *pic, int pic_structure)
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
static int32_t h264_foc(int foc)
H.264 parameter set handling.
int mb_aff
mb_adaptive_frame_field_flag
int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, int level)
int poc_type
pic_order_cnt_type
int constrained_intra_pred
constrained_intra_pred_flag
void * hwaccel_picture_private
hardware accelerator private data
#define FF_PROFILE_H264_HIGH_422
#define FF_PROFILE_H264_HIGH
int weighted_pred
weighted_pred_flag
int frame_num
frame_num (raw frame_num from slice header)
int residual_color_transform_flag
residual_colour_transform_flag
int delta_pic_order_always_zero_flag
uint8_t scaling_matrix8[6][64]
#define HWACCEL_CAP_ASYNC_SAFE
int ref_frame_count
num_ref_frames
const char * name
Name of the hardware accelerated codec.
static int vdpau_h264_end_frame(AVCodecContext *avctx)
static int vdpau_h264_init(AVCodecContext *avctx)
static void vdpau_h264_set_reference_frames(AVCodecContext *avctx)
int init_qp
pic_init_qp_minus26 + 26
H.264 / AVC / MPEG-4 part10 codec.
H264SliceContext * slice_ctx
int direct_8x8_inference_flag
int ff_vdpau_common_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
int pic_order_present
pic_order_present_flag
Libavcodec external API header.
static const uint8_t start_code_prefix[3]
H264Picture * short_ref[32]
int field_poc[2]
top/bottom POC
main external API structure.
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
H264Picture * cur_pic_ptr
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
int transform_8x8_mode
transform_8x8_mode_flag
int pic_id
pic_num (short -> no wrap version of pic_num, pic_num & max_pic_num; long -> long_pic_num) ...
common internal api header.
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx, const uint8_t *buf, uint32_t size)
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame, struct vdpau_picture_context *pic_ctx)
static int vdpau_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
union VDPAUPictureInfo info
VDPAU picture information.
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
#define FF_PROFILE_H264_HIGH_10
static uintptr_t ff_vdpau_get_surface_id(AVFrame *pic)
Extract VdpVideoSurface from an AVFrame (see the sketch after this list).
int short_ref_count
number of actual short term references
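As the AV_PIX_FMT_VDPAU and ff_vdpau_get_surface_id() entries above note, a decoded frame in this format carries its VdpVideoSurface handle in data[3]. A minimal sketch of reading it back on the application side (surface_from_frame is an illustrative name, not an FFmpeg symbol):

#include <stdint.h>
#include <libavutil/frame.h>
#include <vdpau/vdpau.h>

/* For AV_PIX_FMT_VDPAU frames, AVFrame.data[3] holds the VdpVideoSurface,
 * which is what ff_vdpau_get_surface_id() extracts internally. */
static VdpVideoSurface surface_from_frame(const AVFrame *frame)
{
    return (VdpVideoSurface)(uintptr_t)frame->data[3];
}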