/* FFmpeg — libavcodec/libdav1d.c: AV1 decoder wrapper around VideoLAN's libdav1d */
1 /*
2  * Copyright (c) 2018 Ronald S. Bultje <rsbultje gmail com>
3  * Copyright (c) 2018 James Almer <jamrial gmail com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
#include <dav1d/dav1d.h>

#include "libavutil/avassert.h"
#include "libavutil/film_grain_params.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

#include "atsc_a53.h"
#include "avcodec.h"
#include "bytestream.h"
#include "decode.h"
#include "internal.h"
35 
36 typedef struct Libdav1dContext {
37  AVClass *class;
38  Dav1dContext *c;
40  int pool_size;
41 
42  Dav1dData data;
49 
50 static const enum AVPixelFormat pix_fmt[][3] = {
51  [DAV1D_PIXEL_LAYOUT_I400] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12 },
52  [DAV1D_PIXEL_LAYOUT_I420] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12 },
53  [DAV1D_PIXEL_LAYOUT_I422] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12 },
54  [DAV1D_PIXEL_LAYOUT_I444] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12 },
55 };
56 
57 static const enum AVPixelFormat pix_fmt_rgb[3] = {
59 };
60 
61 static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
62 {
63  AVCodecContext *c = opaque;
64 
65  av_vlog(c, AV_LOG_ERROR, fmt, vl);
66 }
67 
68 static int libdav1d_picture_allocator(Dav1dPicture *p, void *cookie)
69 {
70  Libdav1dContext *dav1d = cookie;
71  enum AVPixelFormat format = pix_fmt[p->p.layout][p->seq_hdr->hbd];
72  int ret, linesize[4], h = FFALIGN(p->p.h, 128), w = FFALIGN(p->p.w, 128);
73  uint8_t *aligned_ptr, *data[4];
74  AVBufferRef *buf;
75 
76  ret = av_image_get_buffer_size(format, w, h, DAV1D_PICTURE_ALIGNMENT);
77  if (ret < 0)
78  return ret;
79 
80  if (ret != dav1d->pool_size) {
81  av_buffer_pool_uninit(&dav1d->pool);
82  // Use twice the amount of required padding bytes for aligned_ptr below.
83  dav1d->pool = av_buffer_pool_init(ret + DAV1D_PICTURE_ALIGNMENT * 2, NULL);
84  if (!dav1d->pool) {
85  dav1d->pool_size = 0;
86  return AVERROR(ENOMEM);
87  }
88  dav1d->pool_size = ret;
89  }
90  buf = av_buffer_pool_get(dav1d->pool);
91  if (!buf)
92  return AVERROR(ENOMEM);
93 
94  // libdav1d requires DAV1D_PICTURE_ALIGNMENT aligned buffers, which av_malloc()
95  // doesn't guarantee for example when AVX is disabled at configure time.
96  // Use the extra DAV1D_PICTURE_ALIGNMENT padding bytes in the buffer to align it
97  // if required.
98  aligned_ptr = (uint8_t *)FFALIGN((uintptr_t)buf->data, DAV1D_PICTURE_ALIGNMENT);
99  ret = av_image_fill_arrays(data, linesize, aligned_ptr, format, w, h,
100  DAV1D_PICTURE_ALIGNMENT);
101  if (ret < 0) {
102  av_buffer_unref(&buf);
103  return ret;
104  }
105 
106  p->data[0] = data[0];
107  p->data[1] = data[1];
108  p->data[2] = data[2];
109  p->stride[0] = linesize[0];
110  p->stride[1] = linesize[1];
111  p->allocator_data = buf;
112 
113  return 0;
114 }
115 
116 static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
117 {
118  AVBufferRef *buf = p->allocator_data;
119 
120  av_buffer_unref(&buf);
121 }
122 
124 {
125  Libdav1dContext *dav1d = c->priv_data;
126  Dav1dSettings s;
127  int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2;
128  int res;
129 
130  av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version());
131 
132  dav1d_default_settings(&s);
133  s.logger.cookie = c;
134  s.logger.callback = libdav1d_log_callback;
135  s.allocator.cookie = dav1d;
136  s.allocator.alloc_picture_callback = libdav1d_picture_allocator;
137  s.allocator.release_picture_callback = libdav1d_picture_release;
138  s.frame_size_limit = c->max_pixels;
139  if (dav1d->apply_grain >= 0)
140  s.apply_grain = dav1d->apply_grain;
142  s.apply_grain = 0;
143 
144  s.all_layers = dav1d->all_layers;
145  if (dav1d->operating_point >= 0)
146  s.operating_point = dav1d->operating_point;
147 
148  s.n_tile_threads = dav1d->tile_threads
149  ? dav1d->tile_threads
150  : FFMIN(floor(sqrt(threads)), DAV1D_MAX_TILE_THREADS);
151  s.n_frame_threads = dav1d->frame_threads
152  ? dav1d->frame_threads
153  : FFMIN(ceil(threads / s.n_tile_threads), DAV1D_MAX_FRAME_THREADS);
154  av_log(c, AV_LOG_DEBUG, "Using %d frame threads, %d tile threads\n",
155  s.n_frame_threads, s.n_tile_threads);
156 
157  res = dav1d_open(&dav1d->c, &s);
158  if (res < 0)
159  return AVERROR(ENOMEM);
160 
161  return 0;
162 }
163 
165 {
166  Libdav1dContext *dav1d = c->priv_data;
167 
168  dav1d_data_unref(&dav1d->data);
169  dav1d_flush(dav1d->c);
170 }
171 
172 static void libdav1d_data_free(const uint8_t *data, void *opaque) {
173  AVBufferRef *buf = opaque;
174 
175  av_buffer_unref(&buf);
176 }
177 
/**
 * Free callback for dav1d_data_wrap_user_data(): the user data is an
 * av_malloc'd copy of reordered_opaque, passed as both data and opaque.
 */
static void libdav1d_user_data_free(const uint8_t *data, void *opaque) {
    av_assert0(data == opaque);
    av_free(opaque);
}
182 
184 {
185  Libdav1dContext *dav1d = c->priv_data;
186  Dav1dData *data = &dav1d->data;
187  Dav1dPicture pic = { 0 }, *p = &pic;
188  int res;
189 
190  if (!data->sz) {
191  AVPacket pkt = { 0 };
192 
193  res = ff_decode_get_packet(c, &pkt);
194  if (res < 0 && res != AVERROR_EOF)
195  return res;
196 
197  if (pkt.size) {
198  res = dav1d_data_wrap(data, pkt.data, pkt.size, libdav1d_data_free, pkt.buf);
199  if (res < 0) {
200  av_packet_unref(&pkt);
201  return res;
202  }
203 
204  data->m.timestamp = pkt.pts;
205  data->m.offset = pkt.pos;
206  data->m.duration = pkt.duration;
207 
208  pkt.buf = NULL;
209  av_packet_unref(&pkt);
210 
211  if (c->reordered_opaque != AV_NOPTS_VALUE) {
212  uint8_t *reordered_opaque = av_malloc(sizeof(c->reordered_opaque));
213  if (!reordered_opaque) {
214  dav1d_data_unref(data);
215  return AVERROR(ENOMEM);
216  }
217 
218  memcpy(reordered_opaque, &c->reordered_opaque, sizeof(c->reordered_opaque));
219  res = dav1d_data_wrap_user_data(data, reordered_opaque,
220  libdav1d_user_data_free, reordered_opaque);
221  if (res < 0) {
222  av_free(reordered_opaque);
223  dav1d_data_unref(data);
224  return res;
225  }
226  }
227  }
228  }
229 
230  res = dav1d_send_data(dav1d->c, data);
231  if (res < 0) {
232  if (res == AVERROR(EINVAL))
233  res = AVERROR_INVALIDDATA;
234  if (res != AVERROR(EAGAIN))
235  return res;
236  }
237 
238  res = dav1d_get_picture(dav1d->c, p);
239  if (res < 0) {
240  if (res == AVERROR(EINVAL))
241  res = AVERROR_INVALIDDATA;
242  else if (res == AVERROR(EAGAIN) && c->internal->draining)
243  res = AVERROR_EOF;
244 
245  return res;
246  }
247 
248  av_assert0(p->data[0] && p->allocator_data);
249 
250  // This requires the custom allocator above
251  frame->buf[0] = av_buffer_ref(p->allocator_data);
252  if (!frame->buf[0]) {
253  dav1d_picture_unref(p);
254  return AVERROR(ENOMEM);
255  }
256 
257  frame->data[0] = p->data[0];
258  frame->data[1] = p->data[1];
259  frame->data[2] = p->data[2];
260  frame->linesize[0] = p->stride[0];
261  frame->linesize[1] = p->stride[1];
262  frame->linesize[2] = p->stride[1];
263 
264  c->profile = p->seq_hdr->profile;
265  c->level = ((p->seq_hdr->operating_points[0].major_level - 2) << 2)
266  | p->seq_hdr->operating_points[0].minor_level;
267  frame->width = p->p.w;
268  frame->height = p->p.h;
269  if (c->width != p->p.w || c->height != p->p.h) {
270  res = ff_set_dimensions(c, p->p.w, p->p.h);
271  if (res < 0)
272  goto fail;
273  }
274 
276  &frame->sample_aspect_ratio.den,
277  frame->height * (int64_t)p->frame_hdr->render_width,
278  frame->width * (int64_t)p->frame_hdr->render_height,
279  INT_MAX);
280  ff_set_sar(c, frame->sample_aspect_ratio);
281 
282  switch (p->seq_hdr->chr) {
283  case DAV1D_CHR_VERTICAL:
285  break;
286  case DAV1D_CHR_COLOCATED:
288  break;
289  }
290  frame->colorspace = c->colorspace = (enum AVColorSpace) p->seq_hdr->mtrx;
291  frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p->seq_hdr->pri;
292  frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p->seq_hdr->trc;
293  frame->color_range = c->color_range = p->seq_hdr->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
294 
295  if (p->p.layout == DAV1D_PIXEL_LAYOUT_I444 &&
296  p->seq_hdr->mtrx == DAV1D_MC_IDENTITY &&
297  p->seq_hdr->pri == DAV1D_COLOR_PRI_BT709 &&
298  p->seq_hdr->trc == DAV1D_TRC_SRGB)
299  frame->format = c->pix_fmt = pix_fmt_rgb[p->seq_hdr->hbd];
300  else
301  frame->format = c->pix_fmt = pix_fmt[p->p.layout][p->seq_hdr->hbd];
302 
303  if (p->m.user_data.data)
304  memcpy(&frame->reordered_opaque, p->m.user_data.data, sizeof(frame->reordered_opaque));
305  else
307 
308  if (p->seq_hdr->num_units_in_tick && p->seq_hdr->time_scale) {
310  p->seq_hdr->num_units_in_tick, p->seq_hdr->time_scale, INT_MAX);
311  if (p->seq_hdr->equal_picture_interval)
312  c->ticks_per_frame = p->seq_hdr->num_ticks_per_picture;
313  }
314 
315  // match timestamps and packet size
316  frame->pts = frame->best_effort_timestamp = p->m.timestamp;
317 #if FF_API_PKT_PTS
319  frame->pkt_pts = p->m.timestamp;
321 #endif
322  frame->pkt_dts = p->m.timestamp;
323  frame->pkt_pos = p->m.offset;
324  frame->pkt_size = p->m.size;
325  frame->pkt_duration = p->m.duration;
326  frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
327 
328  switch (p->frame_hdr->frame_type) {
329  case DAV1D_FRAME_TYPE_KEY:
330  case DAV1D_FRAME_TYPE_INTRA:
331  frame->pict_type = AV_PICTURE_TYPE_I;
332  break;
333  case DAV1D_FRAME_TYPE_INTER:
334  frame->pict_type = AV_PICTURE_TYPE_P;
335  break;
336  case DAV1D_FRAME_TYPE_SWITCH:
337  frame->pict_type = AV_PICTURE_TYPE_SP;
338  break;
339  default:
340  res = AVERROR_INVALIDDATA;
341  goto fail;
342  }
343 
344  if (p->mastering_display) {
346  if (!mastering) {
347  res = AVERROR(ENOMEM);
348  goto fail;
349  }
350 
351  for (int i = 0; i < 3; i++) {
352  mastering->display_primaries[i][0] = av_make_q(p->mastering_display->primaries[i][0], 1 << 16);
353  mastering->display_primaries[i][1] = av_make_q(p->mastering_display->primaries[i][1], 1 << 16);
354  }
355  mastering->white_point[0] = av_make_q(p->mastering_display->white_point[0], 1 << 16);
356  mastering->white_point[1] = av_make_q(p->mastering_display->white_point[1], 1 << 16);
357 
358  mastering->max_luminance = av_make_q(p->mastering_display->max_luminance, 1 << 8);
359  mastering->min_luminance = av_make_q(p->mastering_display->min_luminance, 1 << 14);
360 
361  mastering->has_primaries = 1;
362  mastering->has_luminance = 1;
363  }
364  if (p->content_light) {
366  if (!light) {
367  res = AVERROR(ENOMEM);
368  goto fail;
369  }
370  light->MaxCLL = p->content_light->max_content_light_level;
371  light->MaxFALL = p->content_light->max_frame_average_light_level;
372  }
373  if (p->itut_t35) {
374  GetByteContext gb;
375  unsigned int user_identifier;
376 
377  bytestream2_init(&gb, p->itut_t35->payload, p->itut_t35->payload_size);
378  bytestream2_skip(&gb, 1); // terminal provider code
379  bytestream2_skip(&gb, 1); // terminal provider oriented code
380  user_identifier = bytestream2_get_be32(&gb);
381  switch (user_identifier) {
382  case MKBETAG('G', 'A', '9', '4'): { // closed captions
383  AVBufferRef *buf = NULL;
384 
385  res = ff_parse_a53_cc(&buf, gb.buffer, bytestream2_get_bytes_left(&gb));
386  if (res < 0)
387  goto fail;
388  if (!res)
389  break;
390 
392  av_buffer_unref(&buf);
393 
395  break;
396  }
397  default: // ignore unsupported identifiers
398  break;
399  }
400  }
401  if (p->frame_hdr->film_grain.present && (!dav1d->apply_grain ||
404  if (!fgp) {
405  res = AVERROR(ENOMEM);
406  goto fail;
407  }
408 
410  fgp->seed = p->frame_hdr->film_grain.data.seed;
411  fgp->codec.aom.num_y_points = p->frame_hdr->film_grain.data.num_y_points;
412  fgp->codec.aom.chroma_scaling_from_luma = p->frame_hdr->film_grain.data.chroma_scaling_from_luma;
413  fgp->codec.aom.scaling_shift = p->frame_hdr->film_grain.data.scaling_shift;
414  fgp->codec.aom.ar_coeff_lag = p->frame_hdr->film_grain.data.ar_coeff_lag;
415  fgp->codec.aom.ar_coeff_shift = p->frame_hdr->film_grain.data.ar_coeff_shift;
416  fgp->codec.aom.grain_scale_shift = p->frame_hdr->film_grain.data.grain_scale_shift;
417  fgp->codec.aom.overlap_flag = p->frame_hdr->film_grain.data.overlap_flag;
418  fgp->codec.aom.limit_output_range = p->frame_hdr->film_grain.data.clip_to_restricted_range;
419 
420  memcpy(&fgp->codec.aom.y_points, &p->frame_hdr->film_grain.data.y_points,
421  sizeof(fgp->codec.aom.y_points));
422  memcpy(&fgp->codec.aom.num_uv_points, &p->frame_hdr->film_grain.data.num_uv_points,
423  sizeof(fgp->codec.aom.num_uv_points));
424  memcpy(&fgp->codec.aom.uv_points, &p->frame_hdr->film_grain.data.uv_points,
425  sizeof(fgp->codec.aom.uv_points));
426  memcpy(&fgp->codec.aom.ar_coeffs_y, &p->frame_hdr->film_grain.data.ar_coeffs_y,
427  sizeof(fgp->codec.aom.ar_coeffs_y));
428  memcpy(&fgp->codec.aom.ar_coeffs_uv[0], &p->frame_hdr->film_grain.data.ar_coeffs_uv[0],
429  sizeof(fgp->codec.aom.ar_coeffs_uv[0]));
430  memcpy(&fgp->codec.aom.ar_coeffs_uv[1], &p->frame_hdr->film_grain.data.ar_coeffs_uv[1],
431  sizeof(fgp->codec.aom.ar_coeffs_uv[1]));
432  memcpy(&fgp->codec.aom.uv_mult, &p->frame_hdr->film_grain.data.uv_mult,
433  sizeof(fgp->codec.aom.uv_mult));
434  memcpy(&fgp->codec.aom.uv_mult_luma, &p->frame_hdr->film_grain.data.uv_luma_mult,
435  sizeof(fgp->codec.aom.uv_mult_luma));
436  memcpy(&fgp->codec.aom.uv_offset, &p->frame_hdr->film_grain.data.uv_offset,
437  sizeof(fgp->codec.aom.uv_offset));
438  }
439 
440  res = 0;
441 fail:
442  dav1d_picture_unref(p);
443  if (res < 0)
444  av_frame_unref(frame);
445  return res;
446 }
447 
449 {
450  Libdav1dContext *dav1d = c->priv_data;
451 
452  av_buffer_pool_uninit(&dav1d->pool);
453  dav1d_data_unref(&dav1d->data);
454  dav1d_close(&dav1d->c);
455 
456  return 0;
457 }
458 
459 #define OFFSET(x) offsetof(Libdav1dContext, x)
460 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
461 static const AVOption libdav1d_options[] = {
462  { "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS, VD },
463  { "framethreads", "Frame threads", OFFSET(frame_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD },
464  { "filmgrain", "Apply Film Grain", OFFSET(apply_grain), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD | AV_OPT_FLAG_DEPRECATED },
465  { "oppoint", "Select an operating point of the scalable bitstream", OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31, VD },
466  { "alllayers", "Output all spatial layers", OFFSET(all_layers), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
467  { NULL }
468 };
469 
470 static const AVClass libdav1d_class = {
471  .class_name = "libdav1d decoder",
472  .item_name = av_default_item_name,
473  .option = libdav1d_options,
474  .version = LIBAVUTIL_VERSION_INT,
475 };
476 
478  .name = "libdav1d",
479  .long_name = NULL_IF_CONFIG_SMALL("dav1d AV1 decoder by VideoLAN"),
480  .type = AVMEDIA_TYPE_VIDEO,
481  .id = AV_CODEC_ID_AV1,
482  .priv_data_size = sizeof(Libdav1dContext),
483  .init = libdav1d_init,
484  .close = libdav1d_close,
489  .priv_class = &libdav1d_class,
490  .wrapper_name = "libdav1d",
491 };
#define NULL
Definition: coverity.c:32
AVRational framerate
Definition: avcodec.h:2074
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:607
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
unsigned MaxCLL
Max content light level (cd/m^2).
int ar_coeff_lag
Specifies the auto-regression lag.
AVOption.
Definition: opt.h:248
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
int av_cpu_count(void)
Definition: cpu.c:275
#define AV_OPT_FLAG_DEPRECATED
set if option is deprecated, users should refer to AVOption.help text for more information ...
Definition: opt.h:295
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:585
int num_uv_points[2]
If chroma_scaling_from_luma is set to 0, signals the chroma scaling function parameters.
uint64_t seed
Seed to use for the synthesis process, if the codec allows for it.
This structure describes how to handle film grain synthesis in video for specific codecs...
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:383
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:505
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1166
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int num
Numerator.
Definition: rational.h:59
int size
Definition: packet.h:364
int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, int width, int height, int align)
Setup the data pointers and linesizes based on the specified image parameters and the provided array...
Definition: imgutils.c:446
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:415
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:403
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
static __device__ float ceil(float a)
Definition: cuda_runtime.h:176
static AVPacket pkt
static CopyRet receive_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
Definition: crystalhd.c:560
#define AV_CODEC_CAP_AUTO_THREADS
Codec supports avctx->thread_count == 0 (auto).
Definition: codec.h:118
int profile
profile
Definition: avcodec.h:1864
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
AVCodec.
Definition: codec.h:190
AVColorTransferCharacteristic
Color Transfer Characteristic.
Definition: pixfmt.h:483
Dav1dData data
Definition: libdav1d.c:42
int grain_scale_shift
Signals the down shift applied to the generated gaussian numbers during synthesis.
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:380
static __device__ float floor(float a)
Definition: cuda_runtime.h:173
static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
Definition: libdav1d.c:116
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:381
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int limit_output_range
Signals to clip to limited color levels after film grain application.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame...
Definition: avcodec.h:2359
uint8_t
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
int scaling_shift
Specifies the shift applied to the chroma components.
AVOptions.
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:512
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:381
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:248
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:407
int ff_parse_a53_cc(AVBufferRef **pbuf, const uint8_t *data, int size)
Parse a data array for ATSC A53 Part 4 Closed Captions and store them in an AVBufferRef.
Definition: atsc_a53.c:68
uint8_t * data
Definition: packet.h:363
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:57
const uint8_t * buffer
Definition: bytestream.h:34
static void libdav1d_flush(AVCodecContext *c)
Definition: libdav1d.c:164
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:121
#define AVERROR_EOF
End of file.
Definition: error.h:55
int frame_threads
Definition: libdav1d.c:44
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:404
AVColorPrimaries
Chromaticity coordinates of the source primaries.
Definition: pixfmt.h:458
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1173
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
int num_y_points
Number of points, and the scale and value for each point of the piecewise linear scaling function for...
uint8_t y_points[14][2]
The buffer pool.
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
int ar_coeff_shift
Specifies the range of the auto-regressive coefficients.
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters...
Definition: imgutils.c:466
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
int width
Definition: frame.h:372
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:558
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
void av_vlog(void *avcl, int level, const char *fmt, va_list vl)
Send the specified message to the log if the level is less than or equal to the current av_log_level...
Definition: log.c:424
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:346
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:569
int chroma_scaling_from_luma
Signals whether to derive the chroma scaling function from the luma.
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
Definition: codec.h:197
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:402
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:2261
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
#define fail()
Definition: checkasm.h:123
Dav1dContext * c
Definition: libdav1d.c:38
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
static av_cold int libdav1d_init(AVCodecContext *c)
Definition: libdav1d.c:123
AVFilmGrainParams * av_film_grain_params_create_side_data(AVFrame *frame)
Allocate a complete AVFilmGrainParams and add it to the frame.
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:397
#define FFMIN(a, b)
Definition: common.h:96
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:694
int width
picture width / height.
Definition: avcodec.h:704
uint8_t w
Definition: llviddspenc.c:38
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:609
static void libdav1d_user_data_free(const uint8_t *data, void *opaque)
Definition: libdav1d.c:178
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1145
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
int level
level
Definition: avcodec.h:1987
int uv_mult[2]
Specifies the luma/chroma multipliers for the index to the component scaling function.
int operating_point
Definition: libdav1d.c:46
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:1688
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:663
int overlap_flag
Signals whether to overlap film grain blocks.
static const AVOption libdav1d_options[]
Definition: libdav1d.c:461
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:174
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:1790
Full range content.
Definition: pixfmt.h:586
if(ret)
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:387
int8_t ar_coeffs_uv[2][25]
Chroma auto-regression coefficients.
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:417
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
Libavcodec external API header.
int tile_threads
Definition: libdav1d.c:43
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:593
static void libdav1d_data_free(const uint8_t *data, void *opaque)
Definition: libdav1d.c:172
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
The union is valid when interpreted as AVFilmGrainAOMParams (codec.aom)
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
main external API structure.
Definition: avcodec.h:531
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:606
uint8_t * data
The data buffer.
Definition: buffer.h:89
static enum AVPixelFormat pix_fmt[][3]
Definition: libdav1d.c:50
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:402
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:399
AVContentLightMetadata * av_content_light_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVContentLightMetadata and add it to the frame.
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:481
Switching Predicted.
Definition: avutil.h:279
Describe the class of an AVClass context structure.
Definition: log.h:67
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:2198
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:303
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1159
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1152
Mastering display metadata capable of representing the color volume of the display used to master the...
union AVFilmGrainParams::@296 codec
Additional fields may be added both here and in any structure included.
static const AVClass libdav1d_class
Definition: libdav1d.c:470
#define OFFSET(x)
Definition: libdav1d.c:459
enum AVChromaLocation chroma_location
Definition: frame.h:571
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:578
#define VD
Definition: libdav1d.c:460
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
enum AVFilmGrainParamsType type
Specifies the codec for which this structure is valid.
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:55
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:406
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:415
Narrow or limited range content.
Definition: pixfmt.h:569
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:423
static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
Definition: libdav1d.c:61
A reference to a data buffer.
Definition: buffer.h:81
AVFilmGrainAOMParams aom
static int libdav1d_picture_allocator(Dav1dPicture *p, void *cookie)
Definition: libdav1d.c:68
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
common internal api header.
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:266
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
static av_cold int libdav1d_close(AVCodecContext *c)
Definition: libdav1d.c:448
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:2196
int den
Denominator.
Definition: rational.h:60
#define MKBETAG(a, b, c, d)
Definition: common.h:406
void * priv_data
Definition: avcodec.h:558
int uv_offset[2]
Offset used for component scaling function.
#define av_free(p)
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:566
int8_t ar_coeffs_y[24]
Luma auto-regression coefficients.
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:392
enum AVColorPrimaries color_primaries
Definition: frame.h:560
int height
Definition: frame.h:372
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:562
AVBufferPool * pool
Definition: libdav1d.c:39
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:364
uint8_t uv_points[2][10][2]
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVCodec ff_libdav1d_decoder
Definition: libdav1d.c:477
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:629
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
unsigned MaxFALL
Max average light level per frame (cd/m^2).
This structure stores compressed data.
Definition: packet.h:340
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:356
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
Predicted.
Definition: avutil.h:275
static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
Definition: libdav1d.c:183