Go to the documentation of this file.
57 #if CONFIG_VIDEOTOOLBOX
129 if (ctx->internal->hw_type->device_uninit)
130     ctx->internal->hw_type->device_uninit(ctx);
169 if (!ctx->internal->priv)
188 ctx->internal->hw_type = hw_type;
206 if (ctx->internal->hw_type->device_init) {
207     ret = ctx->internal->hw_type->device_init(ctx);
214 if (ctx->internal->hw_type->device_uninit)
215     ctx->internal->hw_type->device_uninit(ctx);
229 if (ctx->internal->pool_internal)
232 if (ctx->internal->hw_type->frames_uninit)
233     ctx->internal->hw_type->frames_uninit(ctx);
263 if (hw_type->frames_priv_size) {
265     if (!ctx->internal->priv)
269 if (hw_type->frames_hwctx_size) {
286 ctx->device_ref = device_ref;
287 ctx->device_ctx = device_ctx;
291 ctx->internal->hw_type = hw_type;
316 for (i = 0; i < ctx->initial_pool_size; i++) {
327 for (i = 0; i < ctx->initial_pool_size; i++)
340 if (ctx->internal->source_frames) {
352 "The hardware pixel format '%s' is not supported by the device type '%s'\n",
363 if (ctx->internal->hw_type->frames_init) {
364     ret = ctx->internal->hw_type->frames_init(ctx);
369 if (ctx->internal->pool_internal && !ctx->pool)
370     ctx->pool = ctx->internal->pool_internal;
373 if (ctx->initial_pool_size > 0) {
381 if (ctx->internal->hw_type->frames_uninit)
382     ctx->internal->hw_type->frames_uninit(ctx);
392 if (!ctx->internal->hw_type->transfer_get_formats)
395 return ctx->internal->hw_type->transfer_get_formats(ctx, dir, formats);
411 frame_tmp->format = dst->format;
420 frame_tmp->format = formats[0];
423 frame_tmp->width  = ctx->width;
424 frame_tmp->height = ctx->height;
434 frame_tmp->width  = src->width;
435 frame_tmp->height = src->height;
466 "A device with a derived frame context cannot be used as "
467 "the source of a HW -> HW transfer.");
471 if (dst_ctx->internal->source_frames) {
473 "A device with a derived frame context cannot be used as "
474 "the destination of a HW -> HW transfer.");
480 ret = dst_ctx->internal->hw_type->transfer_data_to(dst_ctx, dst, src);
484 if (src->hw_frames_ctx) {
487     ret = ctx->internal->hw_type->transfer_data_from(ctx, dst, src);
493     ret = ctx->internal->hw_type->transfer_data_to(ctx, dst, src);
508 if (ctx->internal->source_frames) {
515 if (!frame->hw_frames_ctx)
530 ctx->internal->source_allocation_map_flags);
533 "frame context: %d.\n", ret);
545 if (!ctx->internal->hw_type->frames_get_buffer)
552 if (!frame->hw_frames_ctx)
571 if (hw_type->device_hwconfig_size == 0)
574 return av_mallocz(hw_type->device_hwconfig_size);
578 const void *hwconfig)
584 if (!hw_type->frames_get_constraints)
587 constraints = av_mallocz(sizeof(*constraints));
591 constraints->min_width = constraints->min_height = 0;
592 constraints->max_width = constraints->max_height = INT_MAX;
594 if (hw_type->frames_get_constraints(ctx, hwconfig, constraints) >= 0) {
605 av_freep(&(*constraints)->valid_hw_formats);
606 av_freep(&(*constraints)->valid_sw_formats);
639 *pdevice_ref = device_ref;
706 *dst_ref_ptr = dst_ref;
769 hwmap->unmap = unmap;
802 if ((src_frames == dst_frames &&
807 (uint8_t*)dst_frames)) {
814 "found when attempting unmap.\n");
823 if (src->hw_frames_ctx) {
865 dst->format = orig_dst_fmt;
881 if (src->internal->source_frames) {
892 if (!*derived_frame_ctx) {
926 if (src->internal->hw_type->frames_derive_from)
927     ret = src->internal->hw_type->frames_derive_from(dst, src, flags);
936 *derived_frame_ctx = dst_ref;
static void hwframe_ctx_free(void *opaque, uint8_t *data)
AVPixelFormat
Pixel format.
AVBufferRef * source_device
For a derived device, a reference to the original device context it was derived from.
@ AV_HWFRAME_TRANSFER_DIRECTION_FROM
Transfer the data from the queried hw frame.
AVFrame * source
A reference to the original source of the mapping.
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
static int transfer_data_alloc(AVFrame *dst, const AVFrame *src, int flags)
void * av_hwdevice_hwconfig_alloc(AVBufferRef *ref)
Allocate a HW-specific configuration structure for a given HW device.
uint8_t * data
The data buffer.
static void ff_hwframe_unmap(void *opaque, uint8_t *data)
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
static void hwdevice_ctx_free(void *opaque, uint8_t *data)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
AVFrame
This structure describes decoded (raw) audio or video data.
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
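A minimal sketch of the usual allocation pattern, assuming an already-initialised device reference; the VAAPI/NV12/1920x1080 values and the pool size are chosen purely for illustration:

#include <libavutil/hwcontext.h>

/* Sketch: allocate and finalise a frames context on an existing device.
 * device_ref is assumed to be a valid AVHWDeviceContext reference. */
static AVBufferRef *alloc_frames_ctx(AVBufferRef *device_ref)
{
    AVBufferRef *frames_ref = av_hwframe_ctx_alloc(device_ref);
    AVHWFramesContext *frames;
    if (!frames_ref)
        return NULL;

    frames = (AVHWFramesContext*)frames_ref->data;
    frames->format            = AV_PIX_FMT_VAAPI; /* hw surface format */
    frames->sw_format         = AV_PIX_FMT_NV12;  /* underlying data layout */
    frames->width             = 1920;
    frames->height            = 1080;
    frames->initial_pool_size = 8;                /* preallocated surfaces */

    if (av_hwframe_ctx_init(frames_ref) < 0)
        av_buffer_unref(&frames_ref);
    return frames_ref;
}
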
const HWContextType ff_hwcontext_type_qsv
int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags)
Map a hardware frame.
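A sketch of read-mapping a hardware frame into system memory; using the pool's sw_format as the requested layout is an assumption about typical usage, and not every backend supports direct mapping:

#include <libavutil/hwcontext.h>

/* Sketch: read-map a hardware frame without a full copy, assuming hw_frame
 * carries a valid hw_frames_ctx.  Returns 0 on success. */
static int read_map_example(AVFrame *hw_frame)
{
    AVHWFramesContext *frames = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
    AVFrame *mapped = av_frame_alloc();
    int ret;

    if (!mapped)
        return AVERROR(ENOMEM);

    mapped->format = frames->sw_format;  /* request this software layout */
    ret = av_hwframe_map(mapped, hw_frame, AV_HWFRAME_MAP_READ);
    if (ret < 0) {
        av_frame_free(&mapped);
        return ret;  /* fall back to av_hwframe_transfer_data() if needed */
    }
    /* ... read mapped->data[] / mapped->linesize[] ... */
    av_frame_free(&mapped);              /* releasing the frame unmaps it */
    return 0;
}
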
const HWContextType ff_hwcontext_type_drm
@ AV_HWDEVICE_TYPE_MEDIACODEC
int(* map_to)(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src, int flags)
const HWContextType ff_hwcontext_type_vdpau
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
Look up an AVHWDeviceType by name.
const HWContextType ff_hwcontext_type_vaapi
int(* map_from)(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src, int flags)
enum AVHWDeviceType av_hwdevice_iterate_types(enum AVHWDeviceType prev)
Iterate over supported device types.
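A small sketch combining av_hwdevice_iterate_types(), av_hwdevice_get_type_name() and av_hwdevice_find_type_by_name(); the printed output format is arbitrary:

#include <stdio.h>
#include <libavutil/hwcontext.h>

/* Sketch: list every device type compiled into this libavutil build,
 * then resolve a user-supplied name back to the enum value. */
static enum AVHWDeviceType pick_type(const char *name)
{
    enum AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;

    while ((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE)
        printf("available hw device type: %s\n", av_hwdevice_get_type_name(type));

    return av_hwdevice_find_type_by_name(name); /* AV_HWDEVICE_TYPE_NONE if unknown */
}
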
AVHWDeviceInternal * internal
Private data used internally by libavutil.
@ AV_HWFRAME_MAP_DIRECT
The mapping must be direct.
AVHWFramesInternal * internal
Private data used internally by libavutil.
AVBufferRef * hw_frames_ctx
A reference to the hardware frames context in which this mapping was made.
int ff_hwframe_map_create(AVBufferRef *hwframe_ref, AVFrame *dst, const AVFrame *src, void(*unmap)(AVHWFramesContext *ctx, HWMapDescriptor *hwmap), void *priv)
@ AV_HWDEVICE_TYPE_VIDEOTOOLBOX
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
void * priv
Hardware-specific private data associated with the mapping.
int width
The allocated dimensions of the frames in this pool.
AVHWFramesConstraints * av_hwdevice_get_hwframe_constraints(AVBufferRef *ref, const void *hwconfig)
Get the constraints on HW frames given a device and the HW-specific configuration to be used with that device.
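A sketch of querying allocation constraints; passing NULL for hwconfig is valid, and av_hwdevice_hwconfig_alloc() could be used instead to supply a backend-specific configuration:

#include <stdio.h>
#include <libavutil/hwcontext.h>
#include <libavutil/pixdesc.h>

/* Sketch: print the sw formats and size limits a device can allocate,
 * using no backend-specific hwconfig (NULL). */
static void print_constraints(AVBufferRef *device_ref)
{
    AVHWFramesConstraints *cst = av_hwdevice_get_hwframe_constraints(device_ref, NULL);
    if (!cst)
        return;

    if (cst->valid_sw_formats)
        for (int i = 0; cst->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++)
            printf("sw format: %s\n", av_get_pix_fmt_name(cst->valid_sw_formats[i]));

    printf("size: %dx%d .. %dx%d\n",
           cst->min_width, cst->min_height, cst->max_width, cst->max_height);

    av_hwframe_constraints_free(&cst);
}
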
int av_hwdevice_ctx_init(AVBufferRef *ref)
Finalize the device context before use.
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
AVBufferRef * source_frames
For a derived context, a reference to the original frames context it was derived from.
@ AV_HWDEVICE_TYPE_VULKAN
AVHWFramesConstraints
This struct describes the constraints on hardware frames attached to a given device with a hardware-specific configuration.
const HWContextType ff_hwcontext_type_d3d11va
int(* device_derive)(AVHWDeviceContext *dst_ctx, AVHWDeviceContext *src_ctx, AVDictionary *opts, int flags)
@ AV_HWDEVICE_TYPE_D3D11VA
int av_hwdevice_ctx_create_derived_opts(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, AVDictionary *options, int flags)
Create a new device of the specified type from an existing device.
const HWContextType ff_hwcontext_type_mediacodec
int source_allocation_map_flags
Flags to apply to the mapping from the source to the derived frame context when trying to allocate in the derived context.
AVHWDeviceContext
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e. state that is not tied to a concrete processing configuration.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static int hwframe_pool_prealloc(AVBufferRef *ref)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
const HWContextType ff_hwcontext_type_dxva2
#define FF_ARRAY_ELEMS(a)
static void set(uint8_t *a[], int ch, int index, int ch_count, enum AVSampleFormat f, double v)
static const char *const hw_type_names[]
AVBufferRef * av_hwdevice_ctx_alloc(enum AVHWDeviceType type)
Allocate an AVHWDeviceContext for a given hardware type.
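A sketch of the manual allocation path (as opposed to av_hwdevice_ctx_create()); it assumes a device type whose hwctx needs no extra fields filled in before av_hwdevice_ctx_init(), which is not true of every backend:

#include <libavutil/hwcontext.h>

/* Sketch: allocate a bare device context and finalise it.  Backends that
 * need parameters would require filling ctx->hwctx (the type-specific
 * struct from the matching hwcontext_*.h header) before the init call. */
static AVBufferRef *alloc_device(enum AVHWDeviceType type)
{
    AVBufferRef *ref = av_hwdevice_ctx_alloc(type);
    if (!ref)
        return NULL;

    /* AVHWDeviceContext *ctx = (AVHWDeviceContext*)ref->data;
     * ... optionally configure ctx->hwctx here ... */

    if (av_hwdevice_ctx_init(ref) < 0)
        av_buffer_unref(&ref);
    return ref;
}
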
static const HWContextType *const hw_table[]
void av_hwframe_constraints_free(AVHWFramesConstraints **constraints)
Free an AVHWFramesConstraints structure.
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static enum AVPixelFormat pix_fmt
int(* device_create)(AVHWDeviceContext *ctx, const char *device, AVDictionary *opts, int flags)
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
const HWContextType ff_hwcontext_type_videotoolbox
#define LIBAVUTIL_VERSION_INT
AVClass
Describe the class of an AVClass context structure.
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
const char * av_default_item_name(void *ptr)
Return the context name.
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
int av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx, enum AVPixelFormat format, AVBufferRef *derived_device_ctx, AVBufferRef *source_frame_ctx, int flags)
Create and initialise an AVHWFramesContext as a mapping of another existing AVHWFramesContext on a different device.
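A hedged sketch of deriving a frames context onto another device, e.g. to expose existing frames through a second API; the mapping flags and the format choice are illustrative, and whether a given combination works depends on the backends involved:

#include <libavutil/hwcontext.h>

/* Sketch: derive a frames context on dst_device_ref that maps the frames of
 * src_frames_ref.  dst_format is the hw pixel format used on the derived
 * device (e.g. AV_PIX_FMT_VULKAN). */
static int derive_frames(AVBufferRef **out,
                         enum AVPixelFormat dst_format,
                         AVBufferRef *dst_device_ref,
                         AVBufferRef *src_frames_ref)
{
    return av_hwframe_ctx_create_derived(out, dst_format, dst_device_ref,
                                         src_frames_ref,
                                         AV_HWFRAME_MAP_READ | AV_HWFRAME_MAP_WRITE);
}
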
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
const OptionDef options[]
int(* frames_derive_to)(AVHWFramesContext *dst_ctx, AVHWFramesContext *src_ctx, int flags)
@ AV_HWDEVICE_TYPE_OPENCL
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
const HWContextType ff_hwcontext_type_cuda
const HWContextType * hw_type
int format
format of the frame, -1 if unknown or unset; values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio frames.
static const AVClass hwframe_ctx_class
static const AVClass hwdevice_ctx_class
const HWContextType ff_hwcontext_type_vulkan
@ AV_HWFRAME_MAP_READ
The mapping must be readable.
int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, int flags)
Create a new device of the specified type from an existing device.
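A sketch of deriving one device from another, e.g. a VAAPI device sharing state with an existing DRM device; the concrete type pair is only an example of what some builds support:

#include <libavutil/hwcontext.h>

/* Sketch: derive a VAAPI device from an existing DRM device reference.
 * The flags argument is currently unused and passed as 0. */
static AVBufferRef *derive_vaapi(AVBufferRef *drm_device_ref)
{
    AVBufferRef *vaapi_ref = NULL;
    int ret = av_hwdevice_ctx_create_derived(&vaapi_ref, AV_HWDEVICE_TYPE_VAAPI,
                                             drm_device_ref, 0);
    return ret < 0 ? NULL : vaapi_ref;
}
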
size_t device_hwctx_size
size of the public hardware-specific context, i.e. AVHWDeviceContext.hwctx.
#define i(width, name, range_min, range_max)
void(* unmap)(AVHWFramesContext *ctx, struct HWMapDescriptor *hwmap)
Unmap function.
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
const HWContextType * hw_type
@ AV_HWFRAME_MAP_WRITE
The mapping must be writeable.
int ff_hwframe_map_replace(AVFrame *dst, const AVFrame *src)
Replace the current hwmap of dst with the one from src, used for indirect mappings like VAAPI->(DRM)->OpenCL/Vulkan.
void * av_calloc(size_t nmemb, size_t size)
AVHWFrameTransferDirection
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e. those with data not located in normal system memory).
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
AVHWDeviceContext * device_ctx
The parent AVHWDeviceContext.
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
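A minimal sketch of the most common entry point; the NULL device string (pick a default device) is an assumption, since the meaning of the string depends on the backend:

#include <libavutil/hwcontext.h>

/* Sketch: open a default device of the given type.  A non-NULL device
 * string (e.g. "/dev/dri/renderD128" for VAAPI) selects a specific device. */
static AVBufferRef *open_device(enum AVHWDeviceType type)
{
    AVBufferRef *device_ref = NULL;
    int ret = av_hwdevice_ctx_create(&device_ref, type, NULL, NULL, 0);
    return ret < 0 ? NULL : device_ref;
}
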
const AVClass * av_class
A class for logging and AVOptions.
int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
Copy data to or from a hw surface.
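A sketch of downloading a hardware frame into a freshly allocated software frame; leaving dst->format unset so that the first format reported by av_hwframe_transfer_get_formats() is used matches the transfer_data_alloc() path listed above, and setting it explicitly is equally valid:

#include <libavutil/hwcontext.h>

/* Sketch: copy a hw surface into system memory. */
static AVFrame *download_frame(const AVFrame *hw_frame)
{
    AVFrame *sw_frame = av_frame_alloc();
    if (!sw_frame)
        return NULL;

    if (av_hwframe_transfer_data(sw_frame, hw_frame, 0) < 0)
        av_frame_free(&sw_frame);
    return sw_frame;
}
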
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame.
@ AV_HWFRAME_MAP_OVERWRITE
The mapped frame will be overwritten completely in subsequent operations, so the current frame data need not be loaded.
static int ref[MAX_W *MAX_W]
size_t device_priv_size
size of the private data, i.e. AVHWDeviceInternal.priv.
int av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ref, enum AVHWFrameTransferDirection dir, enum AVPixelFormat **formats, int flags)
Get a list of possible source or target formats usable in av_hwframe_transfer_data().
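A sketch of listing the formats usable for downloads from a frames context; the returned list is terminated by AV_PIX_FMT_NONE and owned by the caller:

#include <stdio.h>
#include <libavutil/hwcontext.h>
#include <libavutil/mem.h>
#include <libavutil/pixdesc.h>

/* Sketch: print every pixel format that frames from hwframe_ref can be
 * transferred (downloaded) into. */
static void print_transfer_formats(AVBufferRef *hwframe_ref)
{
    enum AVPixelFormat *formats = NULL;

    if (av_hwframe_transfer_get_formats(hwframe_ref,
                                        AV_HWFRAME_TRANSFER_DIRECTION_FROM,
                                        &formats, 0) < 0)
        return;

    for (int i = 0; formats[i] != AV_PIX_FMT_NONE; i++)
        printf("transfer format: %s\n", av_get_pix_fmt_name(formats[i]));

    av_freep(&formats);
}
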
AVBufferRef
A reference to a data buffer.
#define flags(name, subs,...)
int(* transfer_data_from)(AVHWFramesContext *ctx, AVFrame *dst, const AVFrame *src)
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
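A sketch of pulling a surface out of an initialised frames context; the flags parameter is currently unused:

#include <libavutil/hwcontext.h>

/* Sketch: allocate one hardware frame from the pool of an initialised
 * AVHWFramesContext reference.  The returned frame carries a reference to
 * hwframe_ref in frame->hw_frames_ctx. */
static AVFrame *get_hw_frame(AVBufferRef *hwframe_ref)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;

    if (av_hwframe_get_buffer(hwframe_ref, frame, 0) < 0)
        av_frame_free(&frame);
    return frame;
}
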
const HWContextType ff_hwcontext_type_opencl
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.