/* range_map(): excerpted return values for full vs. limited pixel range */
    return RA_PIXEL_RANGE_FULL;
    return RA_PIXEL_RANGE_FULL;
    return RA_PIXEL_RANGE_LIMITED;

/* pix_fmt_map(): excerpted return values for the chroma subsampling mode */
    return RA_CHROMA_SAMPLING_CS420;
    return RA_CHROMA_SAMPLING_CS422;
    return RA_CHROMA_SAMPLING_CS444;

/* chroma_loc_map(): excerpted return values for the chroma sample position */
    return RA_CHROMA_SAMPLE_POSITION_VERTICAL;
    return RA_CHROMA_SAMPLE_POSITION_COLOCATED;
    return RA_CHROMA_SAMPLE_POSITION_UNKNOWN;
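The excerpt above keeps only the return statements of the three mapping helpers. As a point of reference, here is a minimal sketch of pix_fmt_map(), assuming it simply switches over the planar YUV formats the wrapper accepts; the exact case list and the unreachable-default handling are assumptions, not the verbatim implementation.

static RaChromaSampling pix_fmt_map(enum AVPixelFormat pix_fmt)
{
    switch (pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV420P12:
        return RA_CHROMA_SAMPLING_CS420;   /* 4:2:0 */
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV422P12:
        return RA_CHROMA_SAMPLING_CS422;   /* 4:2:2 */
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV444P12:
        return RA_CHROMA_SAMPLING_CS444;   /* 4:4:4 */
    default:
        av_assert0(0);                     /* unreachable: the encoder restricts its pix_fmts */
        return RA_CHROMA_SAMPLING_CS420;
    }
}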
/* get_stats(): excerpted two-pass statistics handling (the three unref calls
   come from different branches of the function, not from consecutive lines) */
    RaData *buf = rav1e_twopass_out(ctx->ctx);
    rav1e_data_unref(buf);
    memcpy(ctx->pass_data, buf->data, buf->len);
    rav1e_data_unref(buf);
    rav1e_data_unref(buf);
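A hedged sketch of how these pieces could fit together: the RaData buffer returned by rav1e_twopass_out() is either stashed in the private context or, at end of stream, handed to the caller base64-encoded through avctx->stats_out. The pass_size field name and the exact control flow are assumptions.

static int get_stats(AVCodecContext *avctx, int eos)
{
    librav1eContext *ctx = avctx->priv_data;
    RaData *buf = rav1e_twopass_out(ctx->ctx);
    if (!buf)
        return 0;

    if (!eos) {
        /* keep the latest block of first-pass data around (pass_size is an assumed field) */
        uint8_t *tmp = av_fast_realloc(ctx->pass_data, &ctx->pass_size, buf->len);
        if (!tmp) {
            rav1e_data_unref(buf);
            return AVERROR(ENOMEM);
        }
        ctx->pass_data = tmp;
        memcpy(ctx->pass_data, buf->data, buf->len);
    } else {
        /* at end of stream, expose the stats as a base64 string in stats_out */
        int b64_size = AV_BASE64_SIZE(buf->len);
        avctx->stats_out = av_malloc(b64_size);
        if (!avctx->stats_out) {
            rav1e_data_unref(buf);
            return AVERROR(ENOMEM);
        }
        av_base64_encode(avctx->stats_out, b64_size, buf->data, buf->len);
    }

    rav1e_data_unref(buf);
    return 0;
}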
/* librav1e_encode_close(): excerpted teardown of the rav1e handles */
    rav1e_context_unref(ctx->ctx);
    rav1e_frame_unref(ctx->rframe);
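A minimal sketch of the close callback, assuming it only has to drop the two rav1e handles kept in the private context; the real function likely also frees the two-pass buffer and the extradata bitstream filter, which this excerpt does not show.

static av_cold int librav1e_encode_close(AVCodecContext *avctx)
{
    librav1eContext *ctx = avctx->priv_data;

    if (ctx->ctx) {
        rav1e_context_unref(ctx->ctx);
        ctx->ctx = NULL;
    }
    if (ctx->rframe) {
        rav1e_frame_unref(ctx->rframe);
        ctx->rframe = NULL;
    }

    return 0;
}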
/* librav1e_encode_init(): excerpted configuration setup */
    RaConfig *cfg = NULL;

    cfg = rav1e_config_default();

    /* two call sites set the time base; the numerator/denominator arguments
       were elided by the extraction */
    rav1e_config_set_time_base(cfg, (RaRational) { /* ... */ });
    rav1e_config_set_time_base(cfg, (RaRational) { /* ... */ });

    /* tail of an error message logged when the extract_extradata bitstream
       filter cannot be found for global-header support */
           "not found. This is a bug, please report it.\n");
    /* forward user options from the rav1e_opts dictionary, then map the
       standard AVCodecContext fields onto rav1e config keys
       (per-call error handling is omitted in this excerpt; see the sketch below) */
    int parse_ret = rav1e_config_parse(cfg, en->key, en->value);

    rret = rav1e_config_parse_int(cfg, "width", avctx->width);
    rret = rav1e_config_parse_int(cfg, "height", avctx->height);
    rret = rav1e_config_parse_int(cfg, "threads", avctx->thread_count);

    if (ctx->speed >= 0) {
        rret = rav1e_config_parse_int(cfg, "speed", ctx->speed);
    }

    if (ctx->tiles > 0) {
        rret = rav1e_config_parse_int(cfg, "tiles", ctx->tiles);
    }
    rret = rav1e_config_parse_int(cfg, "tile_rows", ctx->tile_rows);
    rret = rav1e_config_parse_int(cfg, "tile_cols", ctx->tile_cols);

    rret = rav1e_config_parse_int(cfg, "key_frame_interval", avctx->gop_size);
    rret = rav1e_config_parse_int(cfg, "min_key_frame_interval", avctx->keyint_min);
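The checks between calls were dropped by the extraction; a plausible pattern for each of them, assuming a local ret variable and an end: cleanup label (both assumptions), looks like this:

    rret = rav1e_config_parse_int(cfg, "height", avctx->height);
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not set height.\n");  /* message text assumed */
        ret = AVERROR_INVALIDDATA;
        goto end;
    }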
    /* rate control: in bitrate mode, qmax/qmin become rav1e's quantizer bounds;
       otherwise the fixed quantizer from the private option is used */
    int max_quantizer = avctx->qmax >= 0 ? avctx->qmax : 255;

    rret = rav1e_config_parse_int(cfg, "quantizer", max_quantizer);

    if (avctx->qmin >= 0) {
        rret = rav1e_config_parse_int(cfg, "min_quantizer", avctx->qmin);
    }

    rret = rav1e_config_parse_int(cfg, "bitrate", avctx->bit_rate);

    rret = rav1e_config_parse_int(cfg, "quantizer", ctx->quantizer);
    /* pixel format and color description; the arguments elided by the extraction
       are reconstructed here from the mapping helpers and the color fields
       referenced elsewhere in the file */
    rret = rav1e_config_set_pixel_format(cfg, desc->comp[0].depth,
                                         pix_fmt_map(avctx->pix_fmt),
                                         chroma_loc_map(avctx->chroma_sample_location),
                                         range_map(avctx->pix_fmt, avctx->color_range));

    rret = rav1e_config_set_color_description(cfg, (RaMatrixCoefficients) avctx->colorspace,
                                              (RaColorPrimaries) avctx->color_primaries,
                                              (RaTransferCharacteristics) avctx->color_trc);

    ctx->ctx = rav1e_context_new(cfg);

    rav1e_config_unref(cfg);
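The context creation and cleanup at the end of librav1e_encode_init() plausibly look like the following sketch; the end: label and the exact log message are assumptions.

    ctx->ctx = rav1e_context_new(cfg);
    if (!ctx->ctx) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create rav1e encode context.\n");
        ret = AVERROR_EXTERNAL;
        goto end;
    }

    ret = 0;

end:
    rav1e_config_unref(cfg);   /* the config is no longer needed once the context exists */
    return ret;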
/* librav1e_receive_packet(): excerpted send/receive flow */
    RaPacket *rpkt = NULL;

    /* wrap the incoming AVFrame in a rav1e frame */
    rframe = rav1e_frame_new(ctx->ctx);

    /* inside the per-plane loop */
    int bytes = desc->comp[0].depth == 8 ? 1 : 2;
    rav1e_frame_fill_plane(rframe, i, frame->data[i],
                           /* plane size, stride and bytes-per-sample elided */);
    /* pts is a heap-allocated int64_t carrying frame->pts; av_free is its destructor */
    rav1e_frame_set_opaque(rframe, pts, av_free);

    /* hand the frame to the encoder */
    ret = rav1e_send_frame(ctx->ctx, rframe);
    if (ret == RA_ENCODER_STATUS_ENOUGH_DATA) {
        /* body elided; RA_ENCODER_STATUS_ENOUGH_DATA means the encoder queue is full */
    }
    rav1e_frame_unref(rframe);

    /* switch on rav1e_send_frame()'s status */
    case RA_ENCODER_STATUS_SUCCESS:
    case RA_ENCODER_STATUS_ENOUGH_DATA:
    case RA_ENCODER_STATUS_FAILURE:
    /* default: */
        av_log(avctx, AV_LOG_ERROR,
               "Unknown return code %d from rav1e_send_frame: %s\n", ret, rav1e_status_to_str(ret));

    /* pull encoded data back out and switch on rav1e_receive_packet()'s status */
    ret = rav1e_receive_packet(ctx->ctx, &rpkt);
    case RA_ENCODER_STATUS_SUCCESS:
    case RA_ENCODER_STATUS_LIMIT_REACHED:
    case RA_ENCODER_STATUS_ENCODED:
    case RA_ENCODER_STATUS_NEED_MORE_DATA:
    case RA_ENCODER_STATUS_FAILURE:
        av_log(avctx, AV_LOG_ERROR, "Could not encode frame: %s\n", rav1e_status_to_str(ret));
    /* default: */
        av_log(avctx, AV_LOG_ERROR,
               "Unknown return code %d from rav1e_receive_packet: %s\n", ret, rav1e_status_to_str(ret));

    /* copy the rav1e packet into the output AVPacket */
    rav1e_packet_unref(rpkt);   /* early-exit error path (e.g. failed packet allocation) */
    memcpy(pkt->data, rpkt->data, rpkt->len);
    if (rpkt->frame_type == RA_FRAME_TYPE_KEY)
        pkt->flags |= AV_PKT_FLAG_KEY;   /* branch body reconstructed; elided in the excerpt */
    pkt->pts = pkt->dts = *((int64_t *) rpkt->opaque);
    rav1e_packet_unref(rpkt);
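Putting the last few excerpt lines together, the packet hand-off plausibly looks like this sketch; the allocation size, the keyframe flagging and the freeing of the opaque pts pointer are assumptions consistent with the calls shown above.

    ret = av_new_packet(pkt, rpkt->len);
    if (ret < 0) {
        rav1e_packet_unref(rpkt);
        return ret;
    }

    memcpy(pkt->data, rpkt->data, rpkt->len);

    if (rpkt->frame_type == RA_FRAME_TYPE_KEY)
        pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->pts = pkt->dts = *((int64_t *) rpkt->opaque);
    av_free(rpkt->opaque);   /* assumed: the opaque pts copy is owned by the caller at this point */

    rav1e_packet_unref(rpkt);
    return 0;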
#define OFFSET(x) offsetof(librav1eContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

/* from librav1e_defaults[] */
    { "keyint_min", "0" },

/* from AVCodec ff_librav1e_encoder */
    .priv_class     = &class,
    .wrapper_name   = "librav1e",