#include "config_components.h"
    /* vdpau_error(): map a VdpStatus to an AVERROR code; the individual
     * return statements between the case labels are elided in this excerpt */
    case VDP_STATUS_NO_IMPLEMENTATION:
    case VDP_STATUS_DISPLAY_PREEMPTED:
    case VDP_STATUS_INVALID_HANDLE:
    case VDP_STATUS_INVALID_POINTER:
    case VDP_STATUS_RESOURCES:
    case VDP_STATUS_HANDLE_DEVICE_MISMATCH:
    case VDP_STATUS_ERROR:
#define MAKE_ACCESSORS(str, name, type, field) \
    type av_##name##_get_##field(const str *s) { return s->field; } \
    void av_##name##_set_##field(str *s, type v) { s->field = v; }
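For reference, a hedged sketch of how this token-pasting macro expands. The invocation shown below is the accessor pair generated for the AVVDPAUContext render2 callback (see AVVDPAU_Render2 further down); the field chosen is illustrative of the pattern, not a full list of generated accessors.

/* Sketch: MAKE_ACCESSORS(AVVDPAUContext, vdpau_hwaccel, AVVDPAU_Render2, render2)
 * pastes the arguments together into one getter/setter pair: */
AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *s)
{
    return s->render2;
}

void av_vdpau_hwaccel_set_render2(AVVDPAUContext *s, AVVDPAU_Render2 v)
{
    s->render2 = v;
}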
    uint32_t w = avctx->coded_width;
    uint32_t h = avctx->coded_height;

    /* pick the VDPAU chroma type from the software pixel format; the case
     * labels and the surface width/height rounding are elided here */
    switch (avctx->sw_pix_fmt) {

        t = VDP_CHROMA_TYPE_420;

        t = VDP_CHROMA_TYPE_422;

        t = VDP_CHROMA_TYPE_444;
    /* Excerpt from ff_vdpau_common_init() and the helpers that follow it
     * (ff_vdpau_common_uninit(), ff_vdpau_common_reinit()); the statements
     * between the lines shown here are elided. */
    VdpVideoSurfaceQueryCapabilities *surface_query_caps;
    VdpDecoderQueryCapabilities *decoder_query_caps;

    VdpGetInformationString *info;
    const char *info_string;

    uint32_t max_level, max_mb, max_width, max_height;

    vdctx->width  = UINT32_MAX;
    vdctx->height = UINT32_MAX;

    vdctx->device = VDP_INVALID_HANDLE;

        type != VDP_CHROMA_TYPE_420)

                                     VDP_FUNC_ID_GET_INFORMATION_STRING,

    if (status != VDP_STATUS_OK)

    if (status != VDP_STATUS_OK)

    int driver_version = 0;
    sscanf(info_string, "NVIDIA VDPAU Driver Shared Library %d", &driver_version);
    if (driver_version < 410) {

                                     VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES,

    if (status != VDP_STATUS_OK)

    surface_query_caps = func;

                                &max_width, &max_height);
    if (status != VDP_STATUS_OK)

    if (supported != VDP_TRUE ||

                                     VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES,

    if (status != VDP_STATUS_OK)

    decoder_query_caps = func;

                                &max_mb, &max_width, &max_height);
#ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
    if ((status != VDP_STATUS_OK || supported != VDP_TRUE) &&
        profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE) {
        profile = VDP_DECODER_PROFILE_H264_MAIN;

                                    &max_width, &max_height);

    if (status != VDP_STATUS_OK)

    if (supported != VDP_TRUE || max_level < level ||

    if (status != VDP_STATUS_OK)

    if (status != VDP_STATUS_OK)

    if (status == VDP_STATUS_OK) {

    if (vdctx->device == VDP_INVALID_HANDLE)

    if (vdctx->width == UINT32_MAX && vdctx->height == UINT32_MAX)

                                     VDP_FUNC_ID_DECODER_DESTROY, &func);
    if (status != VDP_STATUS_OK)

    if (vdctx->device == VDP_INVALID_HANDLE)
#if CONFIG_MPEG1_VDPAU_HWACCEL || \
    CONFIG_MPEG2_VDPAU_HWACCEL || CONFIG_MPEG4_VDPAU_HWACCEL || \
    CONFIG_VC1_VDPAU_HWACCEL || CONFIG_WMV3_VDPAU_HWACCEL

    /* Further excerpts: ff_vdpau_mpeg_end_frame(), ff_vdpau_add_buffer()
     * and av_vdpau_bind_context(); surrounding lines are elided. */
    Picture *pic = s->current_picture_ptr;

                        const uint8_t *buf, uint32_t size)

    buffers->struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
    buffers->bitstream       = buf;
    buffers->bitstream_bytes = size;

                          VdpGetProcAddress *get_proc, unsigned flags)

    memset(hwctx, 0, sizeof(*hwctx));
int(* func)(AVBPrint *dst, const char *in, const char *arg)
void * hwctx
The format-specific data, allocated and freed by libavutil along with this context.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
void * hwaccel_context
Hardware accelerator context.
int ff_vdpau_common_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
static void destroy(struct ResampleContext **c)
uint8_t * data
The data buffer.
static int ff_vdpau_common_reinit(AVCodecContext *avctx)
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
This structure describes decoded (raw) audio or video data.
VdpGetProcAddress * get_proc_address
int bitstream_buffers_used
Useful bitstream buffers in the bitstream buffers table.
#define AV_PIX_FMT_YUV420P10
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
VdpGetProcAddress * get_proc_address
This struct is allocated as AVHWDeviceContext.hwctx.
#define AV_LOG_VERBOSE
Detailed information.
int width
The allocated dimensions of the frames in this pool.
VdpDecoderRender * render
VDPAU decoder render callback.
#define AV_HWACCEL_FLAG_IGNORE_LEVEL
Hardware acceleration should be used for decoding even if the codec level used is unknown or higher than the maximum supported level reported by the hardware driver.
This structure is used to share data between the libavcodec library and the client video application.
int refs
number of reference frames
static double val(void *priv, double ch)
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx, const uint8_t *buf, uint32_t size)
#define AV_PIX_FMT_YUV444P10
int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, int level)
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
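A minimal usage sketch (the buffer name and requested size are illustrative). The old pointer must be kept until the call succeeds, because av_fast_realloc() returns NULL on failure without freeing it; this is roughly the pattern used to grow the bitstream_buffers table.

uint8_t *buf      = NULL;   /* current buffer, may start out NULL  */
unsigned buf_size = 0;      /* allocated size, updated by the call */

void *tmp = av_fast_realloc(buf, &buf_size, needed_size);
if (!tmp) {                 /* allocation failed: buf is still valid */
    av_freep(&buf);
    return AVERROR(ENOMEM);
}
buf = tmp;                  /* now holds at least needed_size bytes */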
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device, VdpGetProcAddress *get_proc, unsigned flags)
Associate a VDPAU device with a codec context for hardware acceleration.
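A hedged sketch of the intended client-side call, assuming dev and get_proc_address were obtained from vdp_device_create_x11() beforehand; it is typically made from the AVCodecContext.get_format() callback once AV_PIX_FMT_VDPAU has been offered.

/* dev / get_proc_address come from vdp_device_create_x11() */
int ret = av_vdpau_bind_context(avctx, dev, get_proc_address,
                                AV_HWACCEL_FLAG_IGNORE_LEVEL);
if (ret < 0)
    return ret;   /* binding failed, fall back to software decoding */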
#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH
Hardware acceleration can output YUV pixel formats with a different chroma sampling than 4:2:0 and/or other than 8 bits per component.
int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static uintptr_t ff_vdpau_get_surface_id(AVFrame *pic)
Extract VdpVideoSurface from an AVFrame.
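Because AV_PIX_FMT_VDPAU frames store the surface handle in data[3] (see the AV_PIX_FMT_VDPAU entry below), the extraction is essentially a cast; a sketch:

/* pic is an AVFrame decoded into AV_PIX_FMT_VDPAU */
VdpVideoSurface surface = (VdpVideoSurface)(uintptr_t)pic->data[3];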
void * hwaccel_picture_private
Hardware accelerator private data.
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
static struct ResampleContext * create(struct ResampleContext *c, int out_rate, int in_rate, int filter_size, int phase_shift, int linear, double cutoff, enum AVSampleFormat format, enum SwrFilterType filter_type, double kaiser_beta, double precision, int cheby, int exact_rational)
struct AVCodecInternal * internal
Private context used for internal data.
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame, struct vdpau_picture_context *pic_ctx)
#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH
Hardware acceleration should still be attempted for decoding when the codec profile does not match the profile reported by the hardware decoder.
#define MAKE_ACCESSORS(str, name, type, field)
int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type, uint32_t *width, uint32_t *height)
Gets the parameters to create an adequate VDPAU video surface for the codec context using VDPAU hardware decoding.
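A sketch of the intended use, assuming surf_create is the VdpVideoSurfaceCreate function pointer already resolved through get_proc_address(); every name except the libavcodec call is illustrative.

VdpChromaType   chroma;
uint32_t        width, height;
VdpVideoSurface surface;

if (av_vdpau_get_surface_parameters(avctx, &chroma, &width, &height) < 0)
    return AVERROR(ENOSYS);
/* create a surface large enough for the codec context */
if (surf_create(device, chroma, width, height, &surface) != VDP_STATUS_OK)
    return AVERROR_EXTERNAL;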
VdpDevice device
VDPAU device handle.
int ff_vdpau_common_uninit(AVCodecContext *avctx)
VdpDecoder decoder
VDPAU decoder handle.
void * hwaccel_priv_data
hwaccel-specific private data
int av_reallocp(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory through a pointer to a pointer.
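A brief sketch (new_size is illustrative); unlike plain av_realloc(), on failure av_reallocp() frees the old block and writes NULL through the pointer, so no temporary copy of the pointer is needed.

uint8_t *buf = NULL;
int ret = av_reallocp(&buf, new_size);
if (ret < 0)
    return ret;   /* buf has already been freed and set to NULL */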
#define AV_PIX_FMT_YUV444P12
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
int hwaccel_flags
Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated decoding (if active).
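For example, a caller that wants the VDPAU hwaccel attempted even when the stream's level or profile is not reported as supported might set, before opening the decoder (a sketch):

avctx->hwaccel_flags |= AV_HWACCEL_FLAG_IGNORE_LEVEL |
                        AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH;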
VdpBitstreamBuffer * bitstream_buffers
Table of bitstream buffers.
AVVDPAUContext * av_vdpau_alloc_context(void)
Allocate an AVVDPAUContext.
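A minimal sketch of the legacy setup path, in which the allocated context is installed as AVCodecContext.hwaccel_context (av_vdpau_bind_context(), documented above, is the simpler, newer entry point):

AVVDPAUContext *vdpau_ctx = av_vdpau_alloc_context();
if (!vdpau_ctx)
    return AVERROR(ENOMEM);
/* libavcodec reads this context while decoding with VDPAU */
avctx->hwaccel_context = vdpau_ctx;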
int(* AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, const VdpPictureInfo *, uint32_t, const VdpBitstreamBuffer *)
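A sketch of a custom render2 callback matching this typedef, installed through the av_vdpau_hwaccel_set_render2() accessor; the callback body and the do_decoder_render() helper are purely illustrative.

static int my_render2(struct AVCodecContext *avctx, struct AVFrame *frame,
                      const VdpPictureInfo *info, uint32_t buffer_count,
                      const VdpBitstreamBuffer *buffers)
{
    /* forward to an application-side wrapper around VdpDecoderRender */
    return do_decoder_render(avctx, frame, info, buffer_count, buffers);
}

/* later, after allocating the AVVDPAUContext: */
av_vdpau_hwaccel_set_render2(vdpau_ctx, my_render2);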
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
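A short sketch of the usual pattern (the struct type is only an example); since the block is zero-filled, all fields start out as 0/NULL.

struct vdpau_picture_context *pic_ctx = av_mallocz(sizeof(*pic_ctx));
if (!pic_ctx)
    return AVERROR(ENOMEM);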
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
This struct describes a set or pool of "hardware" frames (i.e. those with data not located in normal system memory).
AVHWDeviceContext * device_ctx
The parent AVHWDeviceContext.
VdpGetProcAddress * get_proc_address
VDPAU device driver.
#define AV_PIX_FMT_YUV420P12
int bitstream_buffers_allocated
Allocated size of the bitstream_buffers table.
main external API structure.
union VDPAUPictureInfo info
VDPAU picture information.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
static int vdpau_error(VdpStatus status)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
A reference to a data buffer.
AVVDPAUContext * av_alloc_vdpaucontext(void)
allocation function for AVVDPAUContext
#define flags(name, subs,...)
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.