29 #define xavs2_opt_set2(name, format, ...) do{ \ 30 char opt_str[16] = {0}; \ 32 av_strlcatf(opt_str, sizeof(opt_str), format, __VA_ARGS__); \ 33 err = cae->api->opt_set2(cae->param, name, opt_str); \ 35 av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s\n", name, opt_str);\ 56 const xavs2_api_t *
api;
68 cae->
api = xavs2_api_get(bit_depth);
133 for (plane = 0; plane < 3; plane++) {
134 p_plane = (uint16_t *)pic->img.img_planes[plane];
135 p_buffer = frame->
data[plane];
136 for (hIdx = 0; hIdx < pic->img.i_lines[plane]; hIdx++) {
137 memset(p_plane, 0, pic->img.i_stride[plane]);
138 for (wIdx = 0; wIdx < pic->img.i_width[plane]; wIdx++) {
139 p_plane[wIdx] = p_buffer[wIdx] << shift_in;
141 p_plane += pic->img.i_stride[plane];
155 for (plane = 0; plane < 3; plane++) {
156 p_plane = pic->img.img_planes[plane];
157 p_buffer = frame->
data[plane];
158 stride = pic->img.i_width[plane] * pic->img.in_sample_size;
159 for (hIdx = 0; hIdx < pic->img.i_lines[plane]; hIdx++) {
160 memcpy(p_plane, p_buffer, stride);
161 p_plane += pic->img.i_stride[plane];
176 if (cae->
api->encoder_get_buffer(cae->
encoder, &pic) < 0) {
183 if (pic.img.in_sample_size == pic.img.enc_sample_size) {
186 const int shift_in = atoi(cae->
api->opt_get(cae->
param,
"SampleShift"));
191 if (pic.img.in_sample_size == pic.img.enc_sample_size) {
202 pic.i_pts = frame->
pts;
203 pic.i_type = XAVS2_TYPE_AUTO;
216 if ((cae->
packet.len) && (cae->
packet.state != XAVS2_STATE_FLUSH_END)) {
226 if (cae->
packet.type == XAVS2_TYPE_IDR ||
227 cae->
packet.type == XAVS2_TYPE_I ||
228 cae->
packet.type == XAVS2_TYPE_KEYFRAME) {
259 #define OFFSET(x) offsetof(XAVS2EContext, x) 260 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM 269 {
"log_level" ,
"log level: -1: none, 0: error, 1: warning, 2: info, 3: debug",
OFFSET(
log_level) ,
AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 3, VE },
302 .wrapper_name =
"libxavs2",
This structure describes decoded (raw) audio or video data.
int64_t bit_rate
the average bitrate
#define LIBAVUTIL_VERSION_INT
static av_cold int init(AVCodecContext *avctx)
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
const char * av_default_item_name(void *ptr)
Return the context name.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define AV_CODEC_CAP_AUTO_THREADS
Codec supports avctx->thread_count == 0 (auto).
static int xavs2_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
static void xavs2_copy_frame(xavs2_picture_t *pic, const AVFrame *frame)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
AVCodec ff_libxavs2_encoder
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const AVOption options[]
int qmax
maximum quantizer
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int flags
AV_CODEC_FLAG_*.
const char * name
Name of the codec implementation.
void ff_mpeg12_find_best_frame_rate(AVRational frame_rate, int *code, int *ext_n, int *ext_d, int nonstandard)
int flags
A combination of AV_PKT_FLAG values.
static av_cold int xavs2_close(AVCodecContext *avctx)
int width
picture width / height.
static void bit_depth(AudioStatsContext *s, uint64_t mask, uint64_t imask, AVRational *depth)
Any buffered frames must be flushed immediately if a new input produces new output. When a frame arrives, the filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; otherwise, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. It must return, or at least make progress towards producing a frame.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
main external API structure.
int qmin
minimum quantizer
#define AV_PIX_FMT_YUV420P10
Describe the class of an AVClass context structure.
static const AVCodecDefault xavs2_defaults[]
static enum AVPixelFormat pix_fmts[]
Test the status of the outputs and forward it to the corresponding inputs; otherwise return FFERROR_NOT_READY. If the filter stores one or a few frames internally for some reason, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code:
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static av_cold int xavs2_init(AVCodecContext *avctx)
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static const AVClass libxavs2
#define xavs2_opt_set2(name, format,...)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
#define AV_CODEC_FLAG_CLOSED_GOP
static void xavs2_copy_frame_with_shift(xavs2_picture_t *pic, const AVFrame *frame, const int shift_in)
Filtering: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the filter lists the supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
AVDictionary * xavs2_opts
#define AVERROR_EXTERNAL
Generic error in an external library.
AVPixelFormat
Pixel format.
This structure stores compressed data.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...