#include <EbSvtAv1ErrorCodes.h>
#include <EbSvtAv1Enc.h>
#include <EbSvtAv1Metadata.h>
    { EB_ErrorNone,                   0,                "success"                     },
    { EB_ErrorInsufficientResources,  AVERROR(ENOMEM),  "insufficient resources"      },
    { EB_ErrorUndefined,              AVERROR(EINVAL),  "undefined error"             },
    { EB_ErrorInvalidComponent,       AVERROR(EINVAL),  "invalid component"           },
    { EB_ErrorBadParameter,           AVERROR(EINVAL),  "bad parameter"               },
    { EB_ErrorDestroyThreadFailed,    AVERROR_EXTERNAL, "failed to destroy thread"    },
    { EB_ErrorSemaphoreUnresponsive,  AVERROR_EXTERNAL, "semaphore unresponsive"      },
    { EB_ErrorDestroySemaphoreFailed, AVERROR_EXTERNAL, "failed to destroy semaphore" },
    { EB_NoErrorEmptyQueue,           AVERROR(EAGAIN),  "empty queue"                 },
    *desc = "unknown error";

static int svt_print_error(void *log_ctx, EbErrorType err,
                           const char *error_string)
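A minimal sketch of the table lookup implied by svt_errors[] and the *desc fallback above; the field names eb_err, av_err and desc are assumptions, since the struct declaration itself is not shown:

static int svt_map_error(EbErrorType eb_err, const char **desc)
{
    /* Walk svt_errors[] and return the mapped libavcodec error code;
     * fall back to AVERROR_UNKNOWN when SVT reports an unmapped code. */
    av_assert0(desc);
    for (int i = 0; i < FF_ARRAY_ELEMS(svt_errors); i++) {
        if (svt_errors[i].eb_err == eb_err) {
            *desc = svt_errors[i].desc;
            return svt_errors[i].av_err;
        }
    }
    *desc = "unknown error";
    return AVERROR_UNKNOWN;
}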
static int alloc_buffer(EbSvtAv1EncConfiguration *config, SvtContext *svt_enc)
{
    const size_t luma_size = config->source_width * config->source_height *
                             (config->encoder_bit_depth > 8 ? 2 : 1);
    EbSvtIOFormat *in_data;

    svt_enc->raw_size = luma_size * 3 / 2;

    svt_enc->in_buf->p_buffer = av_mallocz(sizeof(*in_data));
    if (!svt_enc->in_buf->p_buffer)
        return AVERROR(ENOMEM);
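    /* Note: luma_size is one luma plane, with two bytes per sample above 8 bits.
     * For 4:2:0 input the two chroma planes together add half of that again,
     * which is where the luma_size * 3 / 2 estimate for one raw frame comes from. */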
static void handle_mdcv(struct EbSvtAv1MasteringDisplayInfo *dst,
                        const AVMasteringDisplayMetadata *mdcv)
{
    const struct EbSvtAv1ChromaPoints *const points[] = {

    for (int i = 0; i < 3; i++) {
        const struct EbSvtAv1ChromaPoints *dst = points[i];
static void handle_side_data(AVCodecContext *avctx,
                             EbSvtAv1EncConfiguration *param)
    param->enc_mode = svt_enc->enc_mode;

    param->target_bit_rate = avctx->bit_rate;
    if (avctx->rc_max_rate != avctx->bit_rate)
        param->rate_control_mode = 1;
    else
        param->rate_control_mode = 2;

    param->max_qp_allowed = avctx->qmax;
    param->min_qp_allowed = avctx->qmin;

    param->maximum_buffer_size_ms =
        avctx->rc_buffer_size * 1000LL / avctx->bit_rate;

    if (svt_enc->crf > 0) {
        param->qp                = svt_enc->crf;
        param->rate_control_mode = 0;
    } else if (svt_enc->qp > 0) {
        param->qp                = svt_enc->qp;
        param->rate_control_mode = 0;
        param->enable_adaptive_quantization = 0;
    }

    param->transfer_characteristics = avctx->color_trc;
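    /* Rate-control selection implied by the assignments above:
     *   crf set      -> rate_control_mode 0 (CRF), qp taken from crf
     *   qp set       -> rate_control_mode 0, adaptive quantization disabled
     *   bit_rate set -> VBR (mode 1), or CBR (mode 2) when rc_max_rate == bit_rate */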
#if SVT_AV1_CHECK_VERSION(1, 0, 0)
    switch (avctx->chroma_sample_location) {
    case AVCHROMA_LOC_LEFT:
        param->chroma_sample_position = EB_CSP_VERTICAL;
        break;
    case AVCHROMA_LOC_TOPLEFT:
        param->chroma_sample_position = EB_CSP_COLOCATED;
        break;
    default:
        av_log(avctx, AV_LOG_WARNING,
               "Specified chroma sample location %s is unsupported "
               "on the AV1 bit stream level. Usage of a container that "
               "allows passing this information - such as Matroska - "
    param->profile = avctx->profile;

    param->level = avctx->level;

    param->intra_period_length = avctx->gop_size - 1;

#if SVT_AV1_CHECK_VERSION(1, 1, 0)
    param->force_key_frames = 1;
#if FF_API_TICKS_PER_FRAME

#if SVT_AV1_CHECK_VERSION(0, 9, 1)
    EbErrorType ret = svt_av1_enc_parse_parameter(param, en->key, en->value);
    if (ret != EB_ErrorNone) {
        av_log(avctx, level, "Error parsing option %s: %s.\n",
               en->key, en->value);
#else
    av_log(avctx, level,
           "svt-params needs libavcodec to be compiled with SVT-AV1 "
           "headers >= 0.9.1.\n");
    param->source_width  = avctx->width;
    param->source_height = avctx->height;

    param->encoder_bit_depth = desc->comp[0].depth;

    if (desc->log2_chroma_w == 1 && desc->log2_chroma_h == 1)
        param->encoder_color_format = EB_YUV420;
    else if (desc->log2_chroma_w == 1 && desc->log2_chroma_h == 0)
        param->encoder_color_format = EB_YUV422;
    else if (!desc->log2_chroma_w && !desc->log2_chroma_h)
        param->encoder_color_format = EB_YUV444;
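    /* desc is the AVPixFmtDescriptor of the input pixel format (presumably from
     * av_pix_fmt_desc_get(avctx->pix_fmt)); log2 chroma shifts of (1,1), (1,0)
     * and (0,0) correspond to 4:2:0, 4:2:2 and 4:4:4 sampling respectively. */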
    if ((param->encoder_color_format == EB_YUV422 || param->encoder_bit_depth > 10)
        && param->profile != AV_PROFILE_AV1_PROFESSIONAL) {
        param->profile = AV_PROFILE_AV1_PROFESSIONAL;
    } else if (param->encoder_color_format == EB_YUV444 &&
               param->profile != AV_PROFILE_AV1_HIGH) {
        param->profile = AV_PROFILE_AV1_HIGH;
    }

    avctx->bit_rate = param->rate_control_mode > 0 ?
                      param->target_bit_rate : 0;
static int read_in_data(EbSvtAv1EncConfiguration *param, const AVFrame *frame,
                        EbBufferHeaderType *header_ptr)
{
    EbSvtIOFormat *in_data = (EbSvtIOFormat *)header_ptr->p_buffer;
    ptrdiff_t linesizes[4];
    int bytes_shift = param->encoder_bit_depth > 8 ? 1 : 0;

    for (int i = 0; i < 4; i++)
        linesizes[i] = frame->linesize[i];

    for (int i = 0; i < 4; i++) {

    in_data->luma = frame->data[0];
    in_data->cb   = frame->data[1];
    in_data->cr   = frame->data[2];

    svt_metadata_array_free(&header_ptr->metadata);
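The second loop above most likely sums per-plane sizes computed from the linesizes; a sketch of that pattern, assuming locals named sizes and frame_size:

    size_t sizes[4];
    int    frame_size = 0;
    int    ret = av_image_fill_plane_sizes(sizes, frame->format,
                                           frame->height, linesizes);
    if (ret < 0)
        return ret;

    for (int i = 0; i < 4; i++) {
        if (sizes[i] > INT_MAX - frame_size)
            return AVERROR_INVALIDDATA;
        frame_size += sizes[i];
    }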
static av_cold int eb_enc_init(AVCodecContext *avctx)
{
    if (svt_ret != EB_ErrorNone) {
        return svt_print_error(avctx, svt_ret, "Error initializing encoder handle");

    if (svt_ret != EB_ErrorNone) {
        return svt_print_error(avctx, svt_ret, "Error setting encoder parameters");

    svt_ret = svt_av1_enc_init(svt_enc->svt_handle);
    if (svt_ret != EB_ErrorNone) {

    EbBufferHeaderType *headerPtr = NULL;

    svt_ret = svt_av1_enc_stream_header(svt_enc->svt_handle, &headerPtr);
    if (svt_ret != EB_ErrorNone) {
        return svt_print_error(avctx, svt_ret, "Error building stream header");

           "Cannot allocate AV1 header of size %d.\n", avctx->extradata_size);

    svt_ret = svt_av1_enc_stream_header_release(headerPtr);
    if (svt_ret != EB_ErrorNone) {
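The "Cannot allocate AV1 header" message suggests that the sequence header returned by svt_av1_enc_stream_header() is copied into avctx->extradata when AV_CODEC_FLAG_GLOBAL_HEADER is set; a sketch of that step under that assumption:

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        avctx->extradata = av_mallocz(headerPtr->n_filled_len +
                                      AV_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata)
            return AVERROR(ENOMEM);

        memcpy(avctx->extradata, headerPtr->p_buffer, headerPtr->n_filled_len);
        avctx->extradata_size = headerPtr->n_filled_len;
    }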
static int eb_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    SvtContext *svt_enc = avctx->priv_data;
    EbBufferHeaderType *headerPtr = svt_enc->in_buf;

    EbBufferHeaderType headerPtrLast;

    memset(&headerPtrLast, 0, sizeof(headerPtrLast));
    headerPtrLast.pic_type = EB_AV1_INVALID_PICTURE;
    headerPtrLast.flags    = EB_BUFFERFLAG_EOS;

    svt_av1_enc_send_picture(svt_enc->svt_handle, &headerPtrLast);

    headerPtr->flags         = 0;
    headerPtr->p_app_private = NULL;
    headerPtr->pts           = frame->pts;

    switch (frame->pict_type) {
    case AV_PICTURE_TYPE_I:
        headerPtr->pic_type = EB_AV1_KEY_PICTURE;
        break;
    default:
        headerPtr->pic_type = EB_AV1_INVALID_PICTURE;
        break;
    }

    if (avctx->gop_size == 1)
        headerPtr->pic_type = EB_AV1_KEY_PICTURE;

    ret = svt_add_metadata(headerPtr, EB_AV1_METADATA_TYPE_ITUT_T35, t35, size);

           "without AV_FRAME_DATA_DOVI_METADATA\n");

    svt_ret = svt_av1_enc_send_picture(svt_enc->svt_handle, headerPtr);
    if (svt_ret != EB_ErrorNone)
        return svt_print_error(avctx, svt_ret, "Error sending a frame to encoder");
static AVBufferRef *get_output_ref(AVCodecContext *avctx, SvtContext *svt_enc,
                                   int filled_len)
{
    const int max_frames = 8;

    if (filled_len > svt_enc->raw_size * max_frames) {
static int eb_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    EbBufferHeaderType *headerPtr;
    int ret = 0, pict_type;

    if (svt_ret == EB_NoErrorEmptyQueue)
        return AVERROR(EAGAIN);
    else if (svt_ret != EB_ErrorNone)
        return svt_print_error(avctx, svt_ret,
                               "Error getting an output packet from encoder");

#if SVT_AV1_CHECK_VERSION(2, 0, 0)
    if (headerPtr->flags & EB_BUFFERFLAG_EOS) {
        svt_av1_enc_release_out_buffer(&headerPtr);

    svt_av1_enc_release_out_buffer(&headerPtr);

    memcpy(pkt->data, headerPtr->p_buffer, headerPtr->n_filled_len);
    pkt->size = headerPtr->n_filled_len;
    pkt->pts  = headerPtr->pts;
    pkt->dts  = headerPtr->dts;

    switch (headerPtr->pic_type) {
    case EB_AV1_KEY_PICTURE:
    case EB_AV1_INTRA_ONLY_PICTURE:
    case EB_AV1_INVALID_PICTURE:

    if (headerPtr->pic_type == EB_AV1_NON_REF_PICTURE)

#if !(SVT_AV1_CHECK_VERSION(2, 0, 0))
    if (headerPtr->flags & EB_BUFFERFLAG_EOS)

    svt_av1_enc_release_out_buffer(&headerPtr);
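The switch over headerPtr->pic_type presumably maps SVT picture types onto packet key/disposable flags and a libavcodec pict_type; a sketch under that assumption:

    switch (headerPtr->pic_type) {
    case EB_AV1_KEY_PICTURE:
        pkt->flags |= AV_PKT_FLAG_KEY;
        /* fall through: a key picture is also an intra picture */
    case EB_AV1_INTRA_ONLY_PICTURE:
        pict_type = AV_PICTURE_TYPE_I;
        break;
    default:
        pict_type = AV_PICTURE_TYPE_P;
        break;
    }

    if (headerPtr->pic_type == EB_AV1_NON_REF_PICTURE)
        pkt->flags |= AV_PKT_FLAG_DISPOSABLE;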
static av_cold int eb_enc_close(AVCodecContext *avctx)
{
    SvtContext *svt_enc = avctx->priv_data;

    svt_av1_enc_deinit_handle(svt_enc->svt_handle);

    svt_metadata_array_free(&svt_enc->in_buf->metadata);
#define OFFSET(x) offsetof(SvtContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

static const AVOption options[] = {
    { "preset", "Encoding preset",

#define LEVEL(name, value) name, NULL, 0, AV_OPT_TYPE_CONST, \
    { .i64 = value }, 0, 0, VE, .unit = "avctx.level"
    { LEVEL("2.0", 20) },
    { LEVEL("2.1", 21) },
    { LEVEL("2.2", 22) },
    { LEVEL("2.3", 23) },
    { LEVEL("3.0", 30) },
    { LEVEL("3.1", 31) },
    { LEVEL("3.2", 32) },
    { LEVEL("3.3", 33) },
    { LEVEL("4.0", 40) },
    { LEVEL("4.1", 41) },
    { LEVEL("4.2", 42) },
    { LEVEL("4.3", 43) },
    { LEVEL("5.0", 50) },
    { LEVEL("5.1", 51) },
    { LEVEL("5.2", 52) },
    { LEVEL("5.3", 53) },
    { LEVEL("6.0", 60) },
    { LEVEL("6.1", 61) },
    { LEVEL("6.2", 62) },
    { LEVEL("6.3", 63) },
    { LEVEL("7.0", 70) },
    { LEVEL("7.1", 71) },
    { LEVEL("7.2", 72) },
    { LEVEL("7.3", 73) },
    { "crf", "Constant Rate Factor value", OFFSET(crf),

    { "qp", "Initial Quantizer level value", OFFSET(qp),

    { "svtav1-params",
      "Set the SVT-AV1 configuration using a :-separated list of key=value parameters",
      OFFSET(svtav1_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
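For reference, the options above are ordinary AVOptions on the encoder's private context, so an application can set them through the public API roughly as follows (a sketch; the option values are arbitrary examples):

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static AVCodecContext *open_svtav1(int width, int height)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("libsvtav1");
    AVCodecContext *enc  = codec ? avcodec_alloc_context3(codec) : NULL;

    if (!enc)
        return NULL;

    enc->width     = width;
    enc->height    = height;
    enc->time_base = (AVRational){ 1, 30 };
    enc->pix_fmt   = AV_PIX_FMT_YUV420P;

    /* Private options declared in the table above. */
    av_opt_set_int(enc->priv_data, "preset", 8, 0);
    av_opt_set_int(enc->priv_data, "crf", 32, 0);
    av_opt_set    (enc->priv_data, "svtav1-params", "tune=0:film-grain=8", 0);

    if (avcodec_open2(enc, codec, NULL) < 0) {
        avcodec_free_context(&enc);
        return NULL;
    }
    return enc;
}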
static const FFCodecDefault eb_enc_defaults[] = {
    { "flags", "+cgop" },
const FFCodec ff_libsvtav1_encoder = {
    .p.name         = "libsvtav1",

    .p.priv_class   = &class,

    .p.wrapper_name = "libsvtav1",