/* from range_map() */
        return RA_PIXEL_RANGE_FULL;     /* full-range (YUVJ) pixel formats */
        return RA_PIXEL_RANGE_FULL;     /* AVCOL_RANGE_JPEG */
    return RA_PIXEL_RANGE_LIMITED;

/* from pix_fmt_map() */
        return RA_CHROMA_SAMPLING_CS420;
        return RA_CHROMA_SAMPLING_CS422;
        return RA_CHROMA_SAMPLING_CS444;

/* from chroma_loc_map() */
    switch (chroma_loc) {
        return RA_CHROMA_SAMPLE_POSITION_VERTICAL;     /* AVCHROMA_LOC_LEFT */
        return RA_CHROMA_SAMPLE_POSITION_COLOCATED;    /* AVCHROMA_LOC_TOPLEFT */
        return RA_CHROMA_SAMPLE_POSITION_UNKNOWN;
/* from get_stats() */
    RaData *buf = rav1e_twopass_out(ctx->ctx);
    /* ... grow ctx->pass_data to at least ctx->pass_pos + buf->len bytes ... */
    rav1e_data_unref(buf);   /* error path */
    memcpy(ctx->pass_data + ctx->pass_pos, buf->data, buf->len);
    ctx->pass_pos += buf->len;
    /* ... end-of-stream path ... */
    memcpy(ctx->pass_data, buf->data, buf->len);
    rav1e_data_unref(buf);   /* error path */
    rav1e_data_unref(buf);

/* from set_stats() */
    while (ret > 0 && ctx->pass_size - ctx->pass_pos > 0) {
        ret = rav1e_twopass_in(ctx->ctx, ctx->pass_data + ctx->pass_pos, ctx->pass_size);
/* from librav1e_encode_close() */
    rav1e_context_unref(ctx->ctx);
    rav1e_frame_unref(ctx->rframe);
/* from librav1e_encode_init() */
    RaConfig *cfg = NULL;

    cfg = rav1e_config_default();

    /* time base configuration */
    rav1e_config_set_time_base(cfg, (RaRational) { /* ... */
    rav1e_config_set_time_base(cfg, (RaRational) { /* ... */
#if FF_API_TICKS_PER_FRAME
    /* ... */

    /* two-pass: size and decode the base64 stats from avctx->stats_in */
    ctx->pass_size = (strlen(avctx->stats_in) * 3) / 4;
    if (!ctx->pass_data) {
        /* ... */
    if (ctx->pass_size < 0) {
        /* ... */

    /* forward user-supplied rav1e options from the "rav1e-params" dictionary */
    if (rav1e_config_parse(cfg, en->key, en->value) < 0)
        /* ... */

    rret = rav1e_config_parse_int(cfg, "width", avctx->width);
    rret = rav1e_config_parse_int(cfg, "height", avctx->height);

    rav1e_config_set_sample_aspect_ratio(cfg, (RaRational) { /* ... */

    rret = rav1e_config_parse_int(cfg, "threads", avctx->thread_count);

    if (ctx->speed >= 0) {
        rret = rav1e_config_parse_int(cfg, "speed", ctx->speed);

    if (ctx->tiles > 0) {
        rret = rav1e_config_parse_int(cfg, "tiles", ctx->tiles);

    if (ctx->tile_rows > 0) {
        rret = rav1e_config_parse_int(cfg, "tile_rows", ctx->tile_rows);

    if (ctx->tile_cols > 0) {
        rret = rav1e_config_parse_int(cfg, "tile_cols", ctx->tile_cols);

    rret = rav1e_config_parse_int(cfg, "key_frame_interval", avctx->gop_size);
    rret = rav1e_config_parse_int(cfg, "min_key_frame_interval", avctx->keyint_min);

    /* rate control: bitrate mode vs. constant quantizer */
    int max_quantizer = avctx->qmax >= 0 ? avctx->qmax : 255;
    rret = rav1e_config_parse_int(cfg, "quantizer", max_quantizer);
    if (avctx->qmin >= 0) {
        rret = rav1e_config_parse_int(cfg, "min_quantizer", avctx->qmin);
    rret = rav1e_config_parse_int(cfg, "bitrate", avctx->bit_rate);
    } else if (ctx->quantizer >= 0) {
        rret = rav1e_config_parse_int(cfg, "quantizer", ctx->quantizer);

    rret = rav1e_config_set_pixel_format(cfg, desc->comp[0].depth, /* ... */
    rret = rav1e_config_set_color_description(cfg, (RaMatrixCoefficients) avctx->colorspace,
                                              /* ... */
                                              (RaTransferCharacteristics) avctx->color_trc);

    ctx->ctx = rav1e_context_new(cfg);

    /* with AV_CODEC_FLAG_GLOBAL_HEADER: export the sequence header as extradata */
    RaData *seq_hdr = rav1e_container_sequence_header(ctx->ctx);
    rav1e_data_unref(seq_hdr);   /* error path */
    memcpy(avctx->extradata, seq_hdr->data, seq_hdr->len);
    rav1e_data_unref(seq_hdr);

    rav1e_config_unref(cfg);
/* from librav1e_receive_packet() */
    RaFrame *rframe = ctx->rframe;
    RaPacket *rpkt = NULL;

    rframe = rav1e_frame_new(ctx->ctx);

    /* copy the input frame planes into the rav1e frame */
    for (int i = 0; i < desc->nb_components; i++) {
        int bytes = desc->comp[0].depth == 8 ? 1 : 2;
        /* ... */

    ret = rav1e_send_frame(ctx->ctx, rframe);
    if (ret == RA_ENCODER_STATUS_ENOUGH_DATA) {
        ctx->rframe = rframe;   /* keep the frame around and retry later */
    rav1e_frame_unref(rframe);
    case RA_ENCODER_STATUS_SUCCESS:
    case RA_ENCODER_STATUS_ENOUGH_DATA:
    case RA_ENCODER_STATUS_FAILURE:
    /* ... */

    ret = rav1e_receive_packet(ctx->ctx, &rpkt);
    case RA_ENCODER_STATUS_SUCCESS:
    case RA_ENCODER_STATUS_LIMIT_REACHED:
    case RA_ENCODER_STATUS_ENCODED:
    case RA_ENCODER_STATUS_NEED_MORE_DATA:
    case RA_ENCODER_STATUS_FAILURE:
    default:
        av_log(avctx, AV_LOG_ERROR,
               "Unknown return code %d from rav1e_receive_packet: %s\n",
               ret, rav1e_status_to_str(ret));

    rav1e_packet_unref(rpkt);   /* error path */

    /* copy the encoded payload into the output AVPacket */
    memcpy(pkt->data, rpkt->data, rpkt->len);
    if (rpkt->frame_type == RA_FRAME_TYPE_KEY)
        /* ... */

    rav1e_packet_unref(rpkt);   /* error path */

    /* extract the reconstructed frame when requested */
    for (int i = 0; i < desc->nb_components; i++) {
        rav1e_frame_extract_plane(rpkt->rec, i, frame->data[i], /* ... */

    rav1e_packet_unref(rpkt);
#define OFFSET(x) offsetof(librav1eContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* from options[] */
    { "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters",
      OFFSET(rav1e_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },

/* from librav1e_defaults[] */
    { "keyint_min", "0" },
/* from ff_librav1e_encoder */
    .p.name         = "librav1e",
    .p.priv_class   = &class,
    .p.wrapper_name = "librav1e",
Referenced symbols:

#define FF_ENABLE_DEPRECATION_WARNINGS
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
#define AV_EF_EXPLODE
abort decoding on minor error detection
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
int keyint_min
minimum GOP size
enum AVColorSpace colorspace
YUV colorspace type.
int64_t duration
Duration of the frame, in the same units as pts.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AVERROR_EOF
End of file.
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
#define AV_CODEC_CAP_ENCODER_RECON_FRAME
The encoder is able to output reconstructed frame data, i.e. raw frames that would be produced by decoding the encoded bitstream.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
void * opaque
Frame owner's private data.
AVFrame
This structure describes decoded (raw) audio or video data.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
@ AVCOL_RANGE_JPEG
Full range content.
static int set_stats(AVCodecContext *avctx)
#define AV_PIX_FMT_YUV420P10
#define FF_CODEC_CAP_NOT_INIT_THREADSAFE
The codec is not known to be init-threadsafe (i.e. it might be unsafe to initialize this codec and another codec concurrently).
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
int qmax
maximum quantizer
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
const FFCodec ff_librav1e_encoder
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
AVBufferRef * opaque_ref
Frame owner's private data.
#define AV_CODEC_FLAG_COPY_OPAQUE
AVCodec p
The public AVCodec.
AVBufferRef * opaque_ref
AVBufferRef for free use by the API user.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
int flags
AV_CODEC_FLAG_*.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define AV_PIX_FMT_YUV444P10
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range.
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
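A minimal sketch of the grow-and-append pattern av_fast_realloc() enables, as used
for accumulating two-pass stats; the helper name and parameters are illustrative,
not the wrapper's own code:

    #include <stdint.h>
    #include <string.h>
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    /* Append "block" to a growing buffer, reallocating only when needed. */
    static int append_block(uint8_t **buf, unsigned int *alloc_size, size_t *pos,
                            const uint8_t *block, size_t block_len)
    {
        uint8_t *tmp = av_fast_realloc(*buf, alloc_size, *pos + block_len);
        if (!tmp)
            return AVERROR(ENOMEM);
        *buf = tmp;
        memcpy(*buf + *pos, block, block_len);
        *pos += block_len;
        return 0;
    }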
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed here.
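In practice libavcodec routes this buffer through the generic two-pass options, so
(assuming a build with librav1e enabled) a typical two-pass invocation looks like:

    ffmpeg -i input.mkv -c:v librav1e -b:v 2M -pass 1 -f null -
    ffmpeg -i input.mkv -c:v librav1e -b:v 2M -pass 2 output.mkv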
static av_cold int librav1e_encode_init(AVCodecContext *avctx)
static enum AVPixelFormat pix_fmt
#define AV_CODEC_CAP_OTHER_THREADS
Codec supports multithreading through a method other than slice- or frame-level multithreading.
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding output packets.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
void * opaque
for some private data of the user
#define CODEC_LONG_NAME(str)
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range.
#define LIBAVUTIL_VERSION_INT
AVClass
Describe the class of an AVClass context structure.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
AVDictionary * rav1e_opts
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
struct AVCodecInternal * internal
Private context used for internal data.
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range.
#define FF_CODEC_RECEIVE_PACKET_CB(func)
int64_t bit_rate
the average bitrate
const char * av_default_item_name(void *ptr)
Return the context name.
#define AV_PIX_FMT_YUV422P10
int av_base64_decode(uint8_t *out, const char *in_str, int out_size)
Decode a base64-encoded string.
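A hedged sketch of decoding the stats string with the same 3/4 upper-bound sizing
used in the init listing above; the helper name is illustrative:

    #include <stdint.h>
    #include <string.h>
    #include "libavutil/base64.h"
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    /* Decode a base64 stats string into a freshly allocated buffer. */
    static int decode_stats(const char *stats_in, uint8_t **out, int *out_len)
    {
        int max_size = (strlen(stats_in) * 3) / 4;  /* upper bound on decoded size */
        uint8_t *buf = av_malloc(max_size);
        if (!buf)
            return AVERROR(ENOMEM);
        *out_len = av_base64_decode(buf, stats_in, max_size);
        if (*out_len < 0) {                         /* invalid base64 input */
            av_free(buf);
            return AVERROR_INVALIDDATA;
        }
        *out = buf;
        return 0;
    }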
static int librav1e_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
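The wrapper forwards this value to rav1e as an RaRational; a minimal sketch,
assuming the usual rav1e.h RaRational layout with num/den members:

    #include <rav1e.h>
    #include "libavutil/rational.h"

    /* Hand an AVRational time base to a rav1e configuration. */
    static void set_rav1e_time_base(RaConfig *cfg, AVRational tb)
    {
        rav1e_config_set_time_base(cfg, (RaRational) { tb.num, tb.den });
    }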
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
char * stats_out
pass1 encoding statistics output buffer
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
#define AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV444P12
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio.
static RaPixelRange range_map(enum AVPixelFormat pix_fmt, enum AVColorRange range)
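Given the return values visible in the listing, the mapping is presumably along
these lines; a hedged reconstruction, not the file's exact code:

    #include <rav1e.h>
    #include "libavutil/pixfmt.h"

    static RaPixelRange range_map_sketch(enum AVPixelFormat pix_fmt, enum AVColorRange range)
    {
        switch (pix_fmt) {
        case AV_PIX_FMT_YUVJ420P:
        case AV_PIX_FMT_YUVJ422P:
        case AV_PIX_FMT_YUVJ444P:
            return RA_PIXEL_RANGE_FULL;   /* deprecated full-range formats */
        default:
            break;
        }
        return range == AVCOL_RANGE_JPEG ? RA_PIXEL_RANGE_FULL : RA_PIXEL_RANGE_LIMITED;
    }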
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
static RaChromaSampling pix_fmt_map(enum AVPixelFormat pix_fmt)
#define AVERROR_EXTERNAL
Generic error in an external library.
int flags
A combination of AV_PKT_FLAG values.
AVBufferRef * frame_opaque_ref
AVChromaLocation
Location of chroma samples.
#define AV_BASE64_SIZE(x)
Calculate the output size needed to base64-encode x bytes to a null-terminated string.
#define AV_CODEC_FLAG_RECON_FRAME
Request the encoder to output reconstructed frames, i.e. frames that would be produced by decoding the encoded bitstream.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
const char * name
Name of the codec implementation.
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
static const AVOption options[]
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
AVFrame * recon_frame
When the AV_CODEC_FLAG_RECON_FRAME flag is used.
#define AV_PIX_FMT_YUV420P12
#define AV_INPUT_BUFFER_PADDING_SIZE
static void frame_data_free(void *data)
AVCodecContext
main external API structure.
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
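A condensed sketch of how an encoded rav1e packet is typically copied out through
this helper; error handling is trimmed and the function name is illustrative:

    #include <string.h>
    #include <rav1e.h>
    #include "avcodec.h"
    #include "encode.h"

    static int copy_rav1e_packet(AVCodecContext *avctx, AVPacket *pkt, const RaPacket *rpkt)
    {
        int ret = ff_get_encode_buffer(avctx, pkt, rpkt->len, 0);
        if (ret < 0)
            return ret;
        memcpy(pkt->data, rpkt->data, rpkt->len);
        if (rpkt->frame_type == RA_FRAME_TYPE_KEY)
            pkt->flags |= AV_PKT_FLAG_KEY;
        return 0;
    }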
int qmin
minimum quantizer
static const FFCodecDefault librav1e_defaults[]
attribute_deprecated int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
char * av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size)
Encode data to base64 and null-terminate.
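The matching encode direction, sized with AV_BASE64_SIZE(); a sketch with
illustrative names:

    #include <stdint.h>
    #include "libavutil/base64.h"
    #include "libavutil/mem.h"

    /* Encode accumulated pass data as a NUL-terminated base64 string. */
    static char *encode_stats(const uint8_t *data, int len)
    {
        int b64_size = AV_BASE64_SIZE(len);
        char *out = av_malloc(b64_size);
        if (!out)
            return NULL;
        return av_base64_encode(out, b64_size, data, len);
    }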
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
static RaChromaSamplePosition chroma_loc_map(enum AVChromaLocation chroma_loc)
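Similarly, the chroma-location mapping implied by the listing's return values;
again a hedged reconstruction rather than the file's exact code:

    #include <rav1e.h>
    #include "libavutil/pixfmt.h"

    static RaChromaSamplePosition chroma_loc_map_sketch(enum AVChromaLocation chroma_loc)
    {
        switch (chroma_loc) {
        case AVCHROMA_LOC_LEFT:
            return RA_CHROMA_SAMPLE_POSITION_VERTICAL;
        case AVCHROMA_LOC_TOPLEFT:
            return RA_CHROMA_SAMPLE_POSITION_COLOCATED;
        default:
            return RA_CHROMA_SAMPLE_POSITION_UNKNOWN;
        }
    }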
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
int draining
checks API usage: after codec draining, flush is required to resume operation
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
Called by encoders to get the next frame for encoding.
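A small sketch of how a receive_packet-style encoder pulls input through this
call, separating "no frame queued yet" from end of stream; simplified and with
illustrative names:

    #include "avcodec.h"
    #include "encode.h"
    #include "libavutil/frame.h"

    static int fetch_input(AVCodecContext *avctx, AVFrame *frame, int *eos)
    {
        int ret = ff_encode_get_frame(avctx, frame);
        if (ret == AVERROR_EOF) {
            *eos = 1;           /* start flushing the external encoder */
            return 0;
        }
        *eos = 0;
        return ret;             /* 0 on success, AVERROR(EAGAIN) if no input yet */
    }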
AVBufferRef
A reference to a data buffer.
#define FF_CODEC_CAP_AUTO_THREADS
Codec handles avctx->thread_count == 0 (auto) internally.
enum AVPixelFormat librav1e_pix_fmts[]
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
AVPacket
This structure stores compressed data.
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
AVColorRange
Visual content value range.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
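A sketch of walking a dictionary of user options (such as the rav1e-params dict
above) and forwarding each pair to rav1e; the warning text is illustrative:

    #include <rav1e.h>
    #include "libavutil/dict.h"
    #include "libavutil/log.h"

    static void forward_opts(void *log_ctx, RaConfig *cfg, const AVDictionary *opts)
    {
        const AVDictionaryEntry *en = NULL;
        while ((en = av_dict_iterate(opts, en))) {
            if (rav1e_config_parse(cfg, en->key, en->value) < 0)
                av_log(log_ctx, AV_LOG_WARNING,
                       "Invalid value for %s: %s.\n", en->key, en->value);
        }
    }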
static av_cold int librav1e_encode_close(AVCodecContext *avctx)
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
static int get_stats(AVCodecContext *avctx, int eos)