/* libkvazaar_init() */
const kvz_api *const api = ctx->api = kvz_api_get(8);
kvz_config *cfg = NULL;
kvz_encoder *enc = NULL;

       "Video dimensions are not a multiple of 8 (%dx%d).\n",

ctx->config = cfg = api->config_alloc();

       "Could not allocate kvazaar config structure.\n");

if (!api->config_init(cfg)) {
       "Could not initialize kvazaar config structure.\n");

cfg->width  = avctx->width;
cfg->height = avctx->height;

#if FF_API_TICKS_PER_FRAME

cfg->target_bitrate = avctx->bit_rate;
cfg->rc_algorithm   = KVZ_LAMBDA;

if (ctx->kvz_params) {
    if (!api->config_parse(cfg, entry->key, entry->value)) {

ctx->encoder = enc = api->encoder_open(cfg);
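Only the config_parse() call from the kvz_params handling is visible above. A minimal sketch of the surrounding loop, assuming the ctx->kvz_params string option, the av_dict_* helpers referenced later on this page, and an invalid-option warning whose exact wording is an assumption:

    /* Sketch: split "key=value,key=value" and hand each pair to kvazaar. */
    if (ctx->kvz_params) {
        AVDictionary *dict = NULL;
        if (!av_dict_parse_string(&dict, ctx->kvz_params, "=", ",", 0)) {
            const AVDictionaryEntry *entry = NULL;
            while ((entry = av_dict_iterate(dict, entry))) {
                if (!api->config_parse(cfg, entry->key, entry->value)) {
                    av_log(avctx, AV_LOG_WARNING,
                           "Invalid option: %s=%s.\n", entry->key, entry->value);
                }
            }
        }
        av_dict_free(&dict);
    }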
/* libkvazaar_init(): stream header generation */
kvz_data_chunk *data_out = NULL;
kvz_data_chunk *chunk = NULL;

if (!api->encoder_headers(enc, &data_out, &len_out))

ctx->api->chunk_free(data_out);

for (chunk = data_out; chunk != NULL; chunk = chunk->next) {
    memcpy(p, chunk->data, chunk->len);

ctx->api->chunk_free(data_out);
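The chunk copy above belongs to header generation: when AV_CODEC_FLAG_GLOBAL_HEADER is set, the parameter sets returned by encoder_headers() are concatenated into avctx->extradata. A hedged sketch of that step; the padded av_mallocz() allocation uses symbols listed on this page, while the exact error returns are assumptions:

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        kvz_data_chunk *data_out = NULL, *chunk = NULL;
        uint32_t len_out = 0;
        uint8_t *p;

        if (!api->encoder_headers(enc, &data_out, &len_out))
            return AVERROR_EXTERNAL;   /* header generation failed */

        avctx->extradata = p = av_mallocz(len_out + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!p) {
            ctx->api->chunk_free(data_out);
            return AVERROR(ENOMEM);
        }
        avctx->extradata_size = len_out;

        /* The headers arrive as a linked list of chunks; flatten them. */
        for (chunk = data_out; chunk != NULL; chunk = chunk->next) {
            memcpy(p, chunk->data, chunk->len);
            p += chunk->len;
        }
        ctx->api->chunk_free(data_out);
    }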
/* libkvazaar_close() */
ctx->api->encoder_close(ctx->encoder);
ctx->api->config_destroy(ctx->config);
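For context, a sketch of a complete libkvazaar_close(); the LibkvazaarContext field names are inferred from the ctx-> accesses above, and the NULL check reflects that FF_CODEC_CAP_INIT_CLEANUP allows close to run after a failed init:

    static av_cold int libkvazaar_close(AVCodecContext *avctx)
    {
        LibkvazaarContext *ctx = avctx->priv_data;

        /* init may have failed before the kvazaar API table was obtained */
        if (ctx->api) {
            ctx->api->encoder_close(ctx->encoder);
            ctx->api->config_destroy(ctx->config);
        }
        return 0;
    }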
/* libkvazaar_encode() */
kvz_picture *input_pic = NULL;
kvz_picture *recon_pic = NULL;
kvz_frame_info frame_info;
kvz_data_chunk *data_out = NULL;
uint32_t len_out = 0;

if (frame->width  != ctx->config->width ||
    frame->height != ctx->config->height) {
       "Changing video dimensions during encoding is not supported. "
       "(changed from %dx%d to %dx%d)\n",
       ctx->config->width, ctx->config->height,

       "Changing pixel format during encoding is not supported. "
       "(changed from %s to %s)\n",

input_pic = ctx->api->picture_alloc(frame->width, frame->height);
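Only the log message of the pixel-format check survives above; a sketch of the full guard, with AVERROR_INVALIDDATA assumed as the error return:

    if (frame->format != avctx->pix_fmt) {
        av_log(avctx, AV_LOG_ERROR,
               "Changing pixel format during encoding is not supported. "
               "(changed from %s to %s)\n",
               av_get_pix_fmt_name(avctx->pix_fmt),
               av_get_pix_fmt_name(frame->format));
        return AVERROR_INVALIDDATA;
    }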
int dst_linesizes[4] = {

input_pic->pts = frame->pts;

retval = ctx->api->encoder_encode(ctx->encoder,
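The dst_linesizes initializer and the encoder_encode() call are both truncated. A sketch of how the AVFrame planes can be copied into the kvz_picture and handed to kvazaar, assuming an 8-bit YUV 4:2:0 picture whose planes picture_alloc() stores contiguously (luma stride = width, chroma stride = width / 2) and kvazaar's seven-argument encoder_encode() callback returning zero on failure; the log message is an assumption:

    uint8_t *dst_data[4] = {
        input_pic->data[0], input_pic->data[1], input_pic->data[2], NULL
    };
    int dst_linesizes[4] = {
        frame->width, frame->width / 2, frame->width / 2, 0
    };

    av_image_copy2(dst_data, dst_linesizes,
                   frame->data, frame->linesize,
                   frame->format, frame->width, frame->height);
    input_pic->pts = frame->pts;

    if (!ctx->api->encoder_encode(ctx->encoder, input_pic,
                                  &data_out, &len_out,
                                  &recon_pic, NULL,   /* no source-picture output */
                                  &frame_info)) {
        av_log(avctx, AV_LOG_ERROR, "Failed to encode frame.\n");
        return AVERROR_EXTERNAL;
    }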
kvz_data_chunk *chunk = NULL;
uint64_t written = 0;

for (chunk = data_out; chunk != NULL; chunk = chunk->next) {
    memcpy(avpkt->data + written, chunk->data, chunk->len);
    written += chunk->len;

avpkt->pts = recon_pic->pts;
avpkt->dts = recon_pic->dts;

if (frame_info.nal_unit_type >= KVZ_NAL_BLA_W_LP &&
    frame_info.nal_unit_type <= KVZ_NAL_RSV_IRAP_VCL23) {

switch (frame_info.slice_type) {

ctx->api->picture_free(input_pic);
ctx->api->picture_free(recon_pic);
ctx->api->chunk_free(data_out);
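A sketch of the packet write-out implied by the fragments above: the chunk list is copied into a buffer from ff_get_encode_buffer(), timestamps come from the reconstructed picture, IRAP NAL units are flagged as keyframes, and the slice type is mapped to an AV_PICTURE_TYPE_* value for ff_side_data_set_encoder_stats(). The KVZ_SLICE_* names and frame_info.qp come from the kvazaar API, and the error returns are assumptions:

    kvz_data_chunk *chunk;
    uint64_t written = 0;
    int pict_type, retval;

    retval = ff_get_encode_buffer(avctx, avpkt, len_out, 0);
    if (retval < 0)
        return retval;

    for (chunk = data_out; chunk != NULL; chunk = chunk->next) {
        av_assert0(written + chunk->len <= len_out);
        memcpy(avpkt->data + written, chunk->data, chunk->len);
        written += chunk->len;
    }

    avpkt->pts = recon_pic->pts;
    avpkt->dts = recon_pic->dts;

    /* IRAP NAL unit types (the BLA/IDR/CRA range) are random access points. */
    if (frame_info.nal_unit_type >= KVZ_NAL_BLA_W_LP &&
        frame_info.nal_unit_type <= KVZ_NAL_RSV_IRAP_VCL23)
        avpkt->flags |= AV_PKT_FLAG_KEY;

    switch (frame_info.slice_type) {
    case KVZ_SLICE_I: pict_type = AV_PICTURE_TYPE_I; break;
    case KVZ_SLICE_P: pict_type = AV_PICTURE_TYPE_P; break;
    case KVZ_SLICE_B: pict_type = AV_PICTURE_TYPE_B; break;
    default:          return AVERROR_BUG;
    }
    ff_side_data_set_encoder_stats(avpkt, frame_info.qp * FF_QP2LAMBDA,
                                   NULL, 0, pict_type);
    *got_packet_ptr = 1;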
#define OFFSET(x) offsetof(LibkvazaarContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

{ "kvazaar-params",
  "Set kvazaar parameters as a comma-separated list of key=value pairs.",
/* ff_libkvazaar_encoder */
.p.name         = "libkvazaar",

.p.priv_class   = &class,

.p.wrapper_name = "libkvazaar",
#define FF_ENABLE_DEPRECATION_WARNINGS
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
enum AVColorSpace colorspace
YUV colorspace type.
static const AVOption options[]
AVFrame
This structure describes decoded (raw) audio or video data.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
@ AVCOL_RANGE_JPEG
Full range content.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static const FFCodecDefault defaults[]
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
AVCodec p
The public AVCodec.
static av_cold int libkvazaar_init(AVCodecContext *avctx)
int flags
AV_CODEC_FLAG_*.
const FFCodec ff_libkvazaar_encoder
#define FF_CODEC_ENCODE_CB(func)
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_CODEC_CAP_OTHER_THREADS
Codec supports multithreading through a method other than slice- or frame-level multithreading.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static enum AVPixelFormat pix_fmts[]
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define CODEC_LONG_NAME(str)
#define LIBAVUTIL_VERSION_INT
AVClass
Describe the class of an AVClass context structure.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
int64_t bit_rate
the average bitrate
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
@ AVCHROMA_LOC_UNSPECIFIED
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
#define AVERROR_EXTERNAL
Generic error in an external library.
int flags
A combination of AV_PKT_FLAG values.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
static av_cold int libkvazaar_close(AVCodecContext *avctx)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
const char * name
Name of the codec implementation.
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding.
AVCodecContext
main external API structure.
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
static void av_image_copy2(uint8_t *const dst_data[4], const int dst_linesizes[4], uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Wrapper around av_image_copy() to workaround the limitation that the conversion from uint8_t * const * to const uint8_t * const * is not performed automatically in C.
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PICTURE_TYPE_P
Predicted.
#define FF_CODEC_CAP_AUTO_THREADS
Codec handles avctx->thread_count == 0 (auto) internally.
This structure stores compressed data.
int width
picture width / height.
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_malloc() family of functions.
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
static int libkvazaar_encode(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
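To tie the option-table and encoder-declaration fragments together with the symbols listed above, here is a hedged sketch of the registration side of the file. Only the field names shown in the fragments and in this symbol list are taken from the page itself; the option default and bounds, the long name, the codec id and the exact capability/caps_internal sets are assumptions and vary between FFmpeg versions:

    static const AVOption options[] = {
        { "kvazaar-params",
          "Set kvazaar parameters as a comma-separated list of key=value pairs.",
          OFFSET(kvz_params), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VE },
        { NULL },
    };

    static const AVClass class = {
        .class_name = "libkvazaar",
        .item_name  = av_default_item_name,
        .option     = options,
        .version    = LIBAVUTIL_VERSION_INT,
    };

    const FFCodec ff_libkvazaar_encoder = {
        .p.name         = "libkvazaar",
        CODEC_LONG_NAME("libkvazaar H.265 / HEVC"),
        .p.type         = AVMEDIA_TYPE_VIDEO,
        .p.id           = AV_CODEC_ID_HEVC,
        .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
                          AV_CODEC_CAP_OTHER_THREADS,
        .p.priv_class   = &class,
        .p.wrapper_name = "libkvazaar",
        .priv_data_size = sizeof(LibkvazaarContext),
        .init           = libkvazaar_init,
        FF_CODEC_ENCODE_CB(libkvazaar_encode),
        .close          = libkvazaar_close,
        .defaults       = defaults,
        .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_AUTO_THREADS,
    };

Finally, a self-contained sketch of driving this encoder through the public libavcodec API, passing the private kvazaar-params option via an options dictionary. The helper name open_kvazaar, the 25 fps time base and the preset=fast parameter are illustrative assumptions:

    #include <libavcodec/avcodec.h>
    #include <libavutil/dict.h>

    /* Hypothetical helper: open a libkvazaar encoder for 8-bit YUV 4:2:0 input. */
    static AVCodecContext *open_kvazaar(int width, int height)
    {
        const AVCodec *codec = avcodec_find_encoder_by_name("libkvazaar");
        AVCodecContext *enc;
        AVDictionary *opts = NULL;

        if (!codec)
            return NULL;

        enc = avcodec_alloc_context3(codec);
        if (!enc)
            return NULL;

        enc->width     = width;    /* must be a multiple of 8, per the init check */
        enc->height    = height;
        enc->pix_fmt   = AV_PIX_FMT_YUV420P;
        enc->time_base = (AVRational){ 1, 25 };

        /* Private "kvazaar-params" option declared in this file. */
        av_dict_set(&opts, "kvazaar-params", "preset=fast", 0);

        if (avcodec_open2(enc, codec, &opts) < 0) {
            avcodec_free_context(&enc);
            enc = NULL;
        }
        av_dict_free(&opts);
        return enc;
    }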