Go to the documentation of this file.
22 #include <dav1d/dav1d.h>
74 h, DAV1D_PICTURE_ALIGNMENT);
103 p->data[0] =
data[0];
104 p->data[1] =
data[1];
105 p->data[2] =
data[2];
106 p->stride[0] = linesize[0];
107 p->stride[1] = linesize[1];
108 p->allocator_data = buf;
124 int threads = (
c->thread_count ?
c->thread_count :
av_cpu_count()) * 3 / 2;
129 dav1d_default_settings(&
s);
132 s.allocator.cookie = dav1d;
135 s.frame_size_limit =
c->max_pixels;
145 :
FFMIN(floor(sqrt(threads)), DAV1D_MAX_TILE_THREADS);
148 :
FFMIN(ceil(threads /
s.n_tile_threads), DAV1D_MAX_FRAME_THREADS);
150 s.n_frame_threads,
s.n_tile_threads);
152 res = dav1d_open(&dav1d->
c, &
s);
163 dav1d_data_unref(&dav1d->
data);
164 dav1d_flush(dav1d->
c);
182 Dav1dPicture pic = { 0 }, *p = &pic;
208 if (!reordered_opaque) {
209 dav1d_data_unref(
data);
213 memcpy(reordered_opaque, &
c->reordered_opaque,
sizeof(
c->reordered_opaque));
214 res = dav1d_data_wrap_user_data(
data, reordered_opaque,
218 dav1d_data_unref(
data);
225 res = dav1d_send_data(dav1d->
c,
data);
230 dav1d_data_unref(
data);
235 res = dav1d_get_picture(dav1d->
c, p);
239 else if (res ==
AVERROR(EAGAIN) &&
c->internal->draining)
249 if (!
frame->buf[0]) {
250 dav1d_picture_unref(p);
254 frame->data[0] = p->data[0];
255 frame->data[1] = p->data[1];
256 frame->data[2] = p->data[2];
257 frame->linesize[0] = p->stride[0];
258 frame->linesize[1] = p->stride[1];
259 frame->linesize[2] = p->stride[1];
261 c->profile = p->seq_hdr->profile;
262 c->level = ((p->seq_hdr->operating_points[0].major_level - 2) << 2)
263 | p->seq_hdr->operating_points[0].minor_level;
264 frame->width = p->p.w;
265 frame->height = p->p.h;
266 if (
c->width != p->p.w ||
c->height != p->p.h) {
273 &
frame->sample_aspect_ratio.den,
274 frame->height * (int64_t)p->frame_hdr->render_width,
275 frame->width * (int64_t)p->frame_hdr->render_height,
278 switch (p->seq_hdr->chr) {
279 case DAV1D_CHR_VERTICAL:
282 case DAV1D_CHR_COLOCATED:
291 if (p->p.layout == DAV1D_PIXEL_LAYOUT_I444 &&
292 p->seq_hdr->mtrx == DAV1D_MC_IDENTITY &&
293 p->seq_hdr->pri == DAV1D_COLOR_PRI_BT709 &&
294 p->seq_hdr->trc == DAV1D_TRC_SRGB)
297 frame->format =
c->pix_fmt =
pix_fmt[p->p.layout][p->seq_hdr->hbd];
299 if (p->m.user_data.data)
300 memcpy(&
frame->reordered_opaque, p->m.user_data.data,
sizeof(
frame->reordered_opaque));
304 if (p->seq_hdr->num_units_in_tick && p->seq_hdr->time_scale) {
306 p->seq_hdr->num_units_in_tick, p->seq_hdr->time_scale, INT_MAX);
307 if (p->seq_hdr->equal_picture_interval)
308 c->ticks_per_frame = p->seq_hdr->num_ticks_per_picture;
312 frame->pts =
frame->best_effort_timestamp = p->m.timestamp;
315 frame->pkt_pts = p->m.timestamp;
318 frame->pkt_dts = p->m.timestamp;
319 frame->pkt_pos = p->m.offset;
320 frame->pkt_size = p->m.size;
321 frame->pkt_duration = p->m.duration;
322 frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
324 switch (p->frame_hdr->frame_type) {
325 case DAV1D_FRAME_TYPE_KEY:
326 case DAV1D_FRAME_TYPE_INTRA:
329 case DAV1D_FRAME_TYPE_INTER:
332 case DAV1D_FRAME_TYPE_SWITCH:
340 if (p->mastering_display) {
347 for (
int i = 0;
i < 3;
i++) {
360 if (p->content_light) {
366 light->
MaxCLL = p->content_light->max_content_light_level;
367 light->
MaxFALL = p->content_light->max_frame_average_light_level;
372 dav1d_picture_unref(p);
383 dav1d_data_unref(&dav1d->
data);
384 dav1d_close(&dav1d->
c);
389 #define OFFSET(x) offsetof(Libdav1dContext, x)
390 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
392 {
"tilethreads",
"Tile threads",
OFFSET(tile_threads),
AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS,
VD },
393 {
"framethreads",
"Frame threads",
OFFSET(frame_threads),
AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS,
VD },
395 {
"oppoint",
"Select an operating point of the scalable bitstream",
OFFSET(operating_point),
AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31,
VD },
420 .wrapper_name =
"libdav1d",
void av_vlog(void *avcl, int level, const char *fmt, va_list vl)
Send the specified message to the log if the level is less than or equal to the current av_log_level.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
#define FF_ENABLE_DEPRECATION_WARNINGS
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init function without locking any global mutexes.
AVPixelFormat
Pixel format.
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
static int libdav1d_picture_allocator(Dav1dPicture *p, void *cookie)
static av_cold int init(AVCodecContext *avctx)
Note: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, a filter declares the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are not just lists — they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
AVColorTransferCharacteristic
Color Transfer Characteristic.
static const AVClass libdav1d_class
#define AVERROR_EOF
End of file.
uint8_t * data
The data buffer.
unsigned MaxCLL
Max content light level (cd/m^2).
This structure describes decoded (raw) audio or video data.
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
#define AV_PIX_FMT_YUV420P10
static void libdav1d_user_data_free(const uint8_t *data, void *opaque)
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
AVColorPrimaries
Chromaticity coordinates of the source primaries.
Content light level needed to transmit HDR over HDMI (CTA-861.3).
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
#define AV_PIX_FMT_GBRP10
static av_cold int libdav1d_init(AVCodecContext *c)
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
#define AV_PIX_FMT_YUV444P10
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Note: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, a filter declares the list of supported formats. For video that means pixel format; for audio that means channel layout, sample format (the sample packing is implied by the sample format) and sample rate. The lists are not just lists — they are references to shared objects.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define AV_PIX_FMT_GRAY10
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
static void flush(AVCodecContext *avctx)
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
static CopyRet receive_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Undefined Behavior: in the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not fit in the range of the signed type.
#define AV_CODEC_CAP_AUTO_THREADS
Codec supports avctx->thread_count == 0 (auto).
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, int width, int height, int align)
Setup the data pointers and linesizes based on the specified image parameters and the provided array.
static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
static enum AVPixelFormat pix_fmt_rgb[3]
static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
#define AV_PIX_FMT_YUV422P12
static AVRational av_make_q(int num, int den)
Create an AVRational.
#define AV_NOPTS_VALUE
Undefined timestamp value.
#define AV_PIX_FMT_YUV444P12
AVCodec ff_libdav1d_decoder
#define AV_LOG_INFO
Standard information.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
#define AV_PIX_FMT_GBRP12
AVColorSpace
YUV colorspace type.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
const char * name
Name of the codec implementation.
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
These buffered frames must be flushed immediately if a new input produces new output. If the input frame is not enough to produce output, the filter must not call request_frame to get more: it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; otherwise the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. It should return once it has produced, or at least made progress towards producing, a frame.
#define AV_PIX_FMT_YUV420P12
main external API structure.
static enum AVPixelFormat pix_fmt[][3]
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
static const AVOption libdav1d_options[]
@ AV_PICTURE_TYPE_P
Predicted.
static void libdav1d_flush(AVCodecContext *c)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
A reference to a data buffer.
static void libdav1d_data_free(const uint8_t *data, void *opaque)
unsigned MaxFALL
Max average light level per frame (cd/m^2).
This structure stores compressed data.
int64_t pos
byte position in stream, -1 if unknown
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_PIX_FMT_GRAY12
static av_cold int libdav1d_close(AVCodecContext *c)