Go to the documentation of this file.
#ifndef AVCODEC_HW_BASE_ENCODE_H
#define AVCODEC_HW_BASE_ENCODE_H

#define MAX_DPB_SIZE 16
#define MAX_PICTURE_REFERENCES 2
#define MAX_REORDER_DELAY 16
#define MAX_ASYNC_DEPTH 64
#define MAX_REFERENCE_LIST_NUM 2
/**
 * Map a picture-type index to a human-readable name for logging.
 *
 * @param type picture type index: 0 = IDR, 1 = I, 2 = P, 3 = B.
 *             Values outside [0, 3] read past the end of the table
 *             (undefined behavior) — callers must pass a valid index.
 * @return pointer to a static string naming the picture type
 */
static const char *ff_hw_base_encode_get_pictype_name(const int type)
{
    const char * const picture_type_name[] = { "IDR", "I", "P", "B" };
    return picture_type_name[type];
}
                                  uint32_t ref_l0, uint32_t ref_l1,
                                  int flags,
                                  int prediction_pre_only);
#define HW_BASE_ENCODE_COMMON_OPTIONS \
    { "idr_interval", \
      "Distance (in I-frames) between key frames", \
      OFFSET(common.base.idr_interval), AV_OPT_TYPE_INT, \
      { .i64 = 0 }, 0, INT_MAX, FLAGS }, \
    { "b_depth", \
      "Maximum B-frame reference depth", \
      OFFSET(common.base.desired_b_depth), AV_OPT_TYPE_INT, \
      { .i64 = 1 }, 1, INT_MAX, FLAGS }, \
    { "async_depth", "Maximum processing parallelism. " \
      "Increase this to improve single channel performance.", \
      OFFSET(common.base.async_depth), AV_OPT_TYPE_INT, \
      { .i64 = 2 }, 1, MAX_ASYNC_DEPTH, FLAGS }
AVPixelFormat
Pixel format.
AVBufferRef * recon_frames_ref
struct FFHWBaseEncodePicture * next
This structure describes decoded (raw) audio or video data.
FFHWBaseEncodePicture * next_prev[MAX_PICTURE_REFERENCES]
int64_t ts_ring[MAX_REORDER_DELAY *3+MAX_ASYNC_DEPTH]
int(* output)(AVCodecContext *avctx, const FFHWBaseEncodePicture *base_pic, AVPacket *pkt)
AVBufferRef * input_frames_ref
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be (vf type).
AVHWDeviceContext * device
struct FFHWBaseEncodePicture * prev
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
@ FF_HW_FLAG_CONSTANT_QUALITY_ONLY
AVPacket * tail_pkt
Tail data of a pic, now only used for av1 repeat frame header.
int ff_hw_base_get_recon_format(FFHWBaseEncodeContext *ctx, const void *hwconfig, enum AVPixelFormat *fmt)
int(* issue)(AVCodecContext *avctx, const FFHWBaseEncodePicture *base_pic)
int ff_hw_base_encode_close(FFHWBaseEncodeContext *ctx)
FFHWBaseEncodePicture * pic_end
@ FF_HW_FLAG_SLICE_CONTROL
int ff_hw_base_encode_set_output_property(FFHWBaseEncodeContext *ctx, AVCodecContext *avctx, FFHWBaseEncodePicture *pic, AVPacket *pkt, int flag_no_delay)
FFHWBaseEncodePicture * pic_start
struct FFHWBaseEncodePicture * dpb[MAX_DPB_SIZE]
Describe the class of an AVClass context structure.
@ FF_HW_FLAG_B_PICTURE_REFERENCES
int nb_refs[MAX_REFERENCE_LIST_NUM]
int ff_hw_base_encode_receive_packet(FFHWBaseEncodeContext *ctx, AVCodecContext *avctx, AVPacket *pkt)
const struct FFHWEncodePictureOperation * op
#define MAX_REFERENCE_LIST_NUM
struct FFHWBaseEncodePicture * refs[MAX_REFERENCE_LIST_NUM][MAX_PICTURE_REFERENCES]
This struct describes a set or pool of "hardware" frames (i.e.
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: For filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return 0, or at least make progress towards producing a frame.
int ff_hw_base_encode_free(FFHWBaseEncodePicture *pic)
static const char * ff_hw_base_encode_get_pictype_name(const int type)
main external API structure.
int(* free)(AVCodecContext *avctx, FFHWBaseEncodePicture *base_pic)
AVHWFramesContext * input_frames
A reference to a data buffer.
int ff_hw_base_init_gop_structure(FFHWBaseEncodeContext *ctx, AVCodecContext *avctx, uint32_t ref_l0, uint32_t ref_l1, int flags, int prediction_pre_only)
This structure stores compressed data.
AVHWFramesContext * recon_frames
#define flags(name, subs,...)
@ FF_HW_FLAG_NON_IDR_KEY_PICTURES
#define MAX_REORDER_DELAY
#define MAX_PICTURE_REFERENCES
int ff_hw_base_encode_init(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx)