/*
 * Copyright (c) 2018 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (c) 2018 James Almer <jamrial gmail com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <dav1d/dav1d.h>

#include "libavutil/avassert.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "decode.h"
#include "internal.h"

typedef struct Libdav1dContext {
    AVClass *class;
    Dav1dContext *c;
    AVBufferPool *pool;
    int pool_size;

    Dav1dData data;
    int tile_threads;
    int frame_threads;
    int apply_grain;
} Libdav1dContext;

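/* Map dav1d's pixel layout and bit depth onto FFmpeg pixel formats.  The
 * second index is Dav1dSequenceHeader.hbd ("high bit depth"): 0, 1 or 2 for
 * 8-, 10- and 12-bit content respectively.  pix_fmt_rgb covers the
 * identity-matrix (RGB) case detected in libdav1d_receive_frame() below. */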
static const enum AVPixelFormat pix_fmt[][3] = {
    [DAV1D_PIXEL_LAYOUT_I400] = { AV_PIX_FMT_GRAY8,   AV_PIX_FMT_GRAY10,    AV_PIX_FMT_GRAY12 },
    [DAV1D_PIXEL_LAYOUT_I420] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12 },
    [DAV1D_PIXEL_LAYOUT_I422] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12 },
    [DAV1D_PIXEL_LAYOUT_I444] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12 },
};

static const enum AVPixelFormat pix_fmt_rgb[3] = {
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
};

static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
{
    AVCodecContext *c = opaque;

    av_vlog(c, AV_LOG_ERROR, fmt, vl);
}

static int libdav1d_picture_allocator(Dav1dPicture *p, void *cookie)
{
    Libdav1dContext *dav1d = cookie;
    enum AVPixelFormat format = pix_fmt[p->p.layout][p->seq_hdr->hbd];
    int ret, linesize[4], h = FFALIGN(p->p.h, 128);
    uint8_t *aligned_ptr, *data[4];
    AVBufferRef *buf;

    ret = av_image_fill_arrays(data, linesize, NULL, format, FFALIGN(p->p.w, 128),
                               h, DAV1D_PICTURE_ALIGNMENT);
    if (ret < 0)
        return ret;

    if (ret != dav1d->pool_size) {
        av_buffer_pool_uninit(&dav1d->pool);
        // Use twice the amount of required padding bytes for aligned_ptr below.
        dav1d->pool = av_buffer_pool_init(ret + DAV1D_PICTURE_ALIGNMENT * 2, NULL);
        if (!dav1d->pool) {
            dav1d->pool_size = 0;
            return AVERROR(ENOMEM);
        }
        dav1d->pool_size = ret;
    }
    buf = av_buffer_pool_get(dav1d->pool);
    if (!buf)
        return AVERROR(ENOMEM);

    // libdav1d requires DAV1D_PICTURE_ALIGNMENT aligned buffers, which av_malloc()
    // doesn't guarantee for example when AVX is disabled at configure time.
    // Use the extra DAV1D_PICTURE_ALIGNMENT padding bytes in the buffer to align it
    // if required.
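    // Note: aligning the start pointer up can consume at most
    // DAV1D_PICTURE_ALIGNMENT - 1 bytes, so with 2 * DAV1D_PICTURE_ALIGNMENT
    // extra bytes allocated above, at least the required image size (plus
    // DAV1D_PICTURE_ALIGNMENT + 1 spare bytes) remains available after
    // aligned_ptr.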
    aligned_ptr = (uint8_t *)FFALIGN((uintptr_t)buf->data, DAV1D_PICTURE_ALIGNMENT);
    ret = av_image_fill_pointers(data, format, h, aligned_ptr, linesize);
    if (ret < 0) {
        av_buffer_unref(&buf);
        return ret;
    }

    p->data[0] = data[0];
    p->data[1] = data[1];
    p->data[2] = data[2];
    p->stride[0] = linesize[0];
    p->stride[1] = linesize[1];
    p->allocator_data = buf;

    return 0;
}

static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
{
    AVBufferRef *buf = p->allocator_data;

    av_buffer_unref(&buf);
}

static av_cold int libdav1d_init(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;
    Dav1dSettings s;
    int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2;
    int res;

    av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version());

    dav1d_default_settings(&s);
    s.logger.cookie = c;
    s.logger.callback = libdav1d_log_callback;
    s.allocator.cookie = dav1d;
    s.allocator.alloc_picture_callback = libdav1d_picture_allocator;
    s.allocator.release_picture_callback = libdav1d_picture_release;
    s.apply_grain = dav1d->apply_grain;

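    /* Split the thread budget between tile and frame threads unless the user
     * forced explicit values: roughly sqrt(threads) tile threads, the rest
     * frame threads.  E.g. with 8 detected cores, threads = 12, giving
     * 3 tile threads and 4 frame threads (both capped by dav1d's maximums). */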
    s.n_tile_threads = dav1d->tile_threads
                     ? dav1d->tile_threads
                     : FFMIN(floor(sqrt(threads)), DAV1D_MAX_TILE_THREADS);
    s.n_frame_threads = dav1d->frame_threads
                      ? dav1d->frame_threads
                      : FFMIN(ceil(threads / s.n_tile_threads), DAV1D_MAX_FRAME_THREADS);
    av_log(c, AV_LOG_DEBUG, "Using %d frame threads, %d tile threads\n",
           s.n_frame_threads, s.n_tile_threads);

    res = dav1d_open(&dav1d->c, &s);
    if (res < 0)
        return AVERROR(ENOMEM);

    return 0;
}

static void libdav1d_flush(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;

    dav1d_data_unref(&dav1d->data);
    dav1d_flush(dav1d->c);
}

static void libdav1d_data_free(const uint8_t *data, void *opaque) {
    AVBufferRef *buf = opaque;

    av_buffer_unref(&buf);
}

static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
{
    Libdav1dContext *dav1d = c->priv_data;
    Dav1dData *data = &dav1d->data;
    Dav1dPicture pic = { 0 }, *p = &pic;
    int res;

    if (!data->sz) {
        AVPacket pkt = { 0 };

        res = ff_decode_get_packet(c, &pkt);
        if (res < 0 && res != AVERROR_EOF)
            return res;

        if (pkt.size) {
            res = dav1d_data_wrap(data, pkt.data, pkt.size, libdav1d_data_free, pkt.buf);
            if (res < 0) {
                av_packet_unref(&pkt);
                return res;
            }

            data->m.timestamp = pkt.pts;
            data->m.offset = pkt.pos;
            data->m.duration = pkt.duration;

            pkt.buf = NULL;
            av_packet_unref(&pkt);
        }
    }

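    /* dav1d uses a decoupled input/output model much like FFmpeg's own
     * send/receive API: dav1d_send_data() may return EAGAIN when the decoder
     * cannot accept more input yet, and dav1d_get_picture() returns EAGAIN
     * when it needs more data before it can output a picture. */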
    res = dav1d_send_data(dav1d->c, data);
    if (res < 0) {
        if (res == AVERROR(EINVAL))
            res = AVERROR_INVALIDDATA;
        if (res != AVERROR(EAGAIN))
            return res;
    }

    res = dav1d_get_picture(dav1d->c, p);
    if (res < 0) {
        if (res == AVERROR(EINVAL))
            res = AVERROR_INVALIDDATA;
        else if (res == AVERROR(EAGAIN) && c->internal->draining)
            res = AVERROR_EOF;

        return res;
    }

    av_assert0(p->data[0] && p->allocator_data);

    // This requires the custom allocator above
    frame->buf[0] = av_buffer_ref(p->allocator_data);
    if (!frame->buf[0]) {
        dav1d_picture_unref(p);
        return AVERROR(ENOMEM);
    }

    frame->data[0] = p->data[0];
    frame->data[1] = p->data[1];
    frame->data[2] = p->data[2];
    frame->linesize[0] = p->stride[0];
    frame->linesize[1] = p->stride[1];
    frame->linesize[2] = p->stride[1];

    c->profile = p->seq_hdr->profile;
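    /* AV1 signals the level as seq_level_idx = (major_level - 2) * 4 + minor_level,
     * e.g. level 4.1 -> major 4, minor 1 -> index 9; reconstruct that index here. */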
    c->level = ((p->seq_hdr->operating_points[0].major_level - 2) << 2)
               | p->seq_hdr->operating_points[0].minor_level;
    frame->width = p->p.w;
    frame->height = p->p.h;
    if (c->width != p->p.w || c->height != p->p.h) {
        res = ff_set_dimensions(c, p->p.w, p->p.h);
        if (res < 0)
            goto fail;
    }

    switch (p->seq_hdr->chr) {
    case DAV1D_CHR_VERTICAL:
        frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case DAV1D_CHR_COLOCATED:
        frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
        break;
    }
    frame->colorspace = c->colorspace = (enum AVColorSpace) p->seq_hdr->mtrx;
    frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p->seq_hdr->pri;
    frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p->seq_hdr->trc;
    frame->color_range = c->color_range = p->seq_hdr->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;

    if (p->p.layout == DAV1D_PIXEL_LAYOUT_I444 &&
        p->seq_hdr->mtrx == DAV1D_MC_IDENTITY &&
        p->seq_hdr->pri  == DAV1D_COLOR_PRI_BT709 &&
        p->seq_hdr->trc  == DAV1D_TRC_SRGB)
        frame->format = c->pix_fmt = pix_fmt_rgb[p->seq_hdr->hbd];
    else
        frame->format = c->pix_fmt = pix_fmt[p->p.layout][p->seq_hdr->hbd];

    // match timestamps and packet size
    frame->pts = frame->best_effort_timestamp = p->m.timestamp;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts = p->m.timestamp;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pkt_dts = p->m.timestamp;
    frame->pkt_pos = p->m.offset;
    frame->pkt_size = p->m.size;
    frame->pkt_duration = p->m.duration;
    frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;

    switch (p->frame_hdr->frame_type) {
    case DAV1D_FRAME_TYPE_KEY:
    case DAV1D_FRAME_TYPE_INTRA:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case DAV1D_FRAME_TYPE_INTER:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case DAV1D_FRAME_TYPE_SWITCH:
        frame->pict_type = AV_PICTURE_TYPE_SP;
        break;
    default:
        res = AVERROR_INVALIDDATA;
        goto fail;
    }

    if (p->mastering_display) {
        AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
        if (!mastering) {
            res = AVERROR(ENOMEM);
            goto fail;
        }

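        /* The AV1 metadata OBU stores the primaries and white point as 0.16
         * fixed point, max_luminance as 24.8 and min_luminance as 18.14 fixed
         * point, hence the 1 << 16, 1 << 8 and 1 << 14 denominators below. */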
        for (int i = 0; i < 3; i++) {
            mastering->display_primaries[i][0] = av_make_q(p->mastering_display->primaries[i][0], 1 << 16);
            mastering->display_primaries[i][1] = av_make_q(p->mastering_display->primaries[i][1], 1 << 16);
        }
        mastering->white_point[0] = av_make_q(p->mastering_display->white_point[0], 1 << 16);
        mastering->white_point[1] = av_make_q(p->mastering_display->white_point[1], 1 << 16);

        mastering->max_luminance = av_make_q(p->mastering_display->max_luminance, 1 << 8);
        mastering->min_luminance = av_make_q(p->mastering_display->min_luminance, 1 << 14);

        mastering->has_primaries = 1;
        mastering->has_luminance = 1;
    }
    if (p->content_light) {
        AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
        if (!light) {
            res = AVERROR(ENOMEM);
            goto fail;
        }
        light->MaxCLL = p->content_light->max_content_light_level;
        light->MaxFALL = p->content_light->max_frame_average_light_level;
    }

    res = 0;
fail:
    dav1d_picture_unref(p);
    if (res < 0)
        av_frame_unref(frame);
    return res;
}

static av_cold int libdav1d_close(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;

    av_buffer_pool_uninit(&dav1d->pool);
    dav1d_data_unref(&dav1d->data);
    dav1d_close(&dav1d->c);

    return 0;
}

#define OFFSET(x) offsetof(Libdav1dContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption libdav1d_options[] = {
    { "tilethreads",  "Tile threads",     OFFSET(tile_threads),  AV_OPT_TYPE_INT,  { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS,  VD },
    { "framethreads", "Frame threads",    OFFSET(frame_threads), AV_OPT_TYPE_INT,  { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD },
    { "filmgrain",    "Apply Film Grain", OFFSET(apply_grain),   AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VD },
    { NULL }
};

static const AVClass libdav1d_class = {
    .class_name = "libdav1d decoder",
    .item_name  = av_default_item_name,
    .option     = libdav1d_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_libdav1d_decoder = {
    .name           = "libdav1d",
    .long_name      = NULL_IF_CONFIG_SMALL("dav1d AV1 decoder by VideoLAN"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .priv_data_size = sizeof(Libdav1dContext),
    .init           = libdav1d_init,
    .close          = libdav1d_close,
    .receive_frame  = libdav1d_receive_frame,
    .flush          = libdav1d_flush,
    .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_SETS_PKT_DTS,
    .priv_class     = &libdav1d_class,
    .wrapper_name   = "libdav1d",
};