FFmpeg
libdav1d.c
/*
 * Copyright (c) 2018 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (c) 2018 James Almer <jamrial gmail com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <dav1d/dav1d.h>

#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/film_grain_params.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

#include "atsc_a53.h"
#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
#include "internal.h"

#define FF_DAV1D_VERSION_AT_LEAST(x,y) \
    (DAV1D_API_VERSION_MAJOR > (x) || DAV1D_API_VERSION_MAJOR == (x) && DAV1D_API_VERSION_MINOR >= (y))

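/* Decoder private context: the dav1d handle, the buffer pool backing the
 * custom picture allocator below, pending input data, and the user options
 * declared at the end of this file. */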
typedef struct Libdav1dContext {
    AVClass *class;
    Dav1dContext *c;
    AVBufferPool *pool;
    int pool_size;

    Dav1dData data;
    int tile_threads;
    int frame_threads;
    int max_frame_delay;
    int apply_grain;
    int operating_point;
    int all_layers;
} Libdav1dContext;

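/* Map dav1d's pixel layout and bit depth (seq_hdr->hbd: 0 = 8, 1 = 10,
 * 2 = 12 bits) to the corresponding AVPixelFormat. The RGB table is used
 * for the identity-matrix sRGB special case in libdav1d_init_params(). */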
static const enum AVPixelFormat pix_fmt[][3] = {
    [DAV1D_PIXEL_LAYOUT_I400] = { AV_PIX_FMT_GRAY8,   AV_PIX_FMT_GRAY10,    AV_PIX_FMT_GRAY12 },
    [DAV1D_PIXEL_LAYOUT_I420] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12 },
    [DAV1D_PIXEL_LAYOUT_I422] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12 },
    [DAV1D_PIXEL_LAYOUT_I444] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12 },
};

static const enum AVPixelFormat pix_fmt_rgb[3] = {
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
};

static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
{
    AVCodecContext *c = opaque;

    av_vlog(c, AV_LOG_ERROR, fmt, vl);
}

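/* Custom dav1d picture allocator. Pictures are carved out of an AVBufferPool
 * (reset whenever the required size changes) so that the decoded picture can
 * later be wrapped into an AVFrame without a copy: the AVBufferRef is stored
 * in p->allocator_data and re-referenced as frame->buf[0]. */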
static int libdav1d_picture_allocator(Dav1dPicture *p, void *cookie)
{
    Libdav1dContext *dav1d = cookie;
    enum AVPixelFormat format = pix_fmt[p->p.layout][p->seq_hdr->hbd];
    int ret, linesize[4], h = FFALIGN(p->p.h, 128), w = FFALIGN(p->p.w, 128);
    uint8_t *aligned_ptr, *data[4];
    AVBufferRef *buf;

    ret = av_image_get_buffer_size(format, w, h, DAV1D_PICTURE_ALIGNMENT);
    if (ret < 0)
        return ret;

    if (ret != dav1d->pool_size) {
        av_buffer_pool_uninit(&dav1d->pool);
        // Use twice the amount of required padding bytes for aligned_ptr below.
        dav1d->pool = av_buffer_pool_init(ret + DAV1D_PICTURE_ALIGNMENT * 2, NULL);
        if (!dav1d->pool) {
            dav1d->pool_size = 0;
            return AVERROR(ENOMEM);
        }
        dav1d->pool_size = ret;
    }
    buf = av_buffer_pool_get(dav1d->pool);
    if (!buf)
        return AVERROR(ENOMEM);

    // libdav1d requires DAV1D_PICTURE_ALIGNMENT aligned buffers, which av_malloc()
    // doesn't guarantee for example when AVX is disabled at configure time.
    // Use the extra DAV1D_PICTURE_ALIGNMENT padding bytes in the buffer to align it
    // if required.
    aligned_ptr = (uint8_t *)FFALIGN((uintptr_t)buf->data, DAV1D_PICTURE_ALIGNMENT);
    ret = av_image_fill_arrays(data, linesize, aligned_ptr, format, w, h,
                               DAV1D_PICTURE_ALIGNMENT);
    if (ret < 0) {
        av_buffer_unref(&buf);
        return ret;
    }

    p->data[0] = data[0];
    p->data[1] = data[1];
    p->data[2] = data[2];
    p->stride[0] = linesize[0];
    p->stride[1] = linesize[1];
    p->allocator_data = buf;

    return 0;
}

static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
{
    AVBufferRef *buf = p->allocator_data;

    av_buffer_unref(&buf);
}

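/* Propagate sequence-header properties (profile, level, color description,
 * chroma siting, pixel format and frame rate) from dav1d to the
 * AVCodecContext. Called when parsing extradata and again whenever a new
 * sequence header shows up in the bitstream. */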
static void libdav1d_init_params(AVCodecContext *c, const Dav1dSequenceHeader *seq)
{
    c->profile = seq->profile;
    c->level = ((seq->operating_points[0].major_level - 2) << 2)
               | seq->operating_points[0].minor_level;

    switch (seq->chr) {
    case DAV1D_CHR_VERTICAL:
        c->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case DAV1D_CHR_COLOCATED:
        c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
        break;
    }
    c->colorspace = (enum AVColorSpace) seq->mtrx;
    c->color_primaries = (enum AVColorPrimaries) seq->pri;
    c->color_trc = (enum AVColorTransferCharacteristic) seq->trc;
    c->color_range = seq->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;

    if (seq->layout == DAV1D_PIXEL_LAYOUT_I444 &&
        seq->mtrx == DAV1D_MC_IDENTITY &&
        seq->pri  == DAV1D_COLOR_PRI_BT709 &&
        seq->trc  == DAV1D_TRC_SRGB)
        c->pix_fmt = pix_fmt_rgb[seq->hbd];
    else
        c->pix_fmt = pix_fmt[seq->layout][seq->hbd];

    if (seq->num_units_in_tick && seq->time_scale) {
        av_reduce(&c->framerate.den, &c->framerate.num,
                  seq->num_units_in_tick, seq->time_scale, INT_MAX);
        if (seq->equal_picture_interval)
            c->ticks_per_frame = seq->num_ticks_per_picture;
    }

    if (seq->film_grain_present)
        c->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
    else
        c->properties &= ~FF_CODEC_PROPERTY_FILM_GRAIN;
}

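/* Parse codec extradata at init time. The extradata may be either a raw
 * sequence-header OBU or an AV1CodecConfigurationRecord (marker bit set in
 * the first byte, version 1, 4-byte fixed header followed by configOBUs). */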
static av_cold int libdav1d_parse_extradata(AVCodecContext *c)
{
    Dav1dSequenceHeader seq;
    size_t offset = 0;
    int res;

    if (!c->extradata || c->extradata_size <= 0)
        return 0;

    if (c->extradata[0] & 0x80) {
        int version = c->extradata[0] & 0x7F;

        if (version != 1 || c->extradata_size < 4) {
            int explode = !!(c->err_recognition & AV_EF_EXPLODE);
            av_log(c, explode ? AV_LOG_ERROR : AV_LOG_WARNING,
                   "Error decoding extradata\n");
            return explode ? AVERROR_INVALIDDATA : 0;
        }

        // Do nothing if there are no configOBUs to parse
        if (c->extradata_size == 4)
            return 0;

        offset = 4;
    }

    res = dav1d_parse_sequence_header(&seq, c->extradata + offset,
                                      c->extradata_size - offset);
    if (res < 0)
        return 0; // Assume no seqhdr OBUs are present

    libdav1d_init_params(c, &seq);
    res = ff_set_dimensions(c, seq.max_width, seq.max_height);
    if (res < 0)
        return res;

    return 0;
}

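/* Open the dav1d decoder. Threading defaults differ between dav1d major
 * versions: 6.0 and newer take a single thread count, older releases split
 * it into separate frame and tile thread pools. */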
static av_cold int libdav1d_init(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;
    Dav1dSettings s;
#if FF_DAV1D_VERSION_AT_LEAST(6,0)
    int threads = c->thread_count;
#else
    int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2;
#endif
    int res;

    av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version());

    dav1d_default_settings(&s);
    s.logger.cookie = c;
    s.logger.callback = libdav1d_log_callback;
    s.allocator.cookie = dav1d;
    s.allocator.alloc_picture_callback = libdav1d_picture_allocator;
    s.allocator.release_picture_callback = libdav1d_picture_release;
    s.frame_size_limit = c->max_pixels;
    if (dav1d->apply_grain >= 0)
        s.apply_grain = dav1d->apply_grain;
    else
        s.apply_grain = !(c->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);

    s.all_layers = dav1d->all_layers;
    if (dav1d->operating_point >= 0)
        s.operating_point = dav1d->operating_point;
#if FF_DAV1D_VERSION_AT_LEAST(6,2)
    s.strict_std_compliance = c->strict_std_compliance > 0;
#endif

#if FF_DAV1D_VERSION_AT_LEAST(6,0)
    if (dav1d->frame_threads || dav1d->tile_threads)
        s.n_threads = FFMAX(dav1d->frame_threads, dav1d->tile_threads);
    else
        s.n_threads = FFMIN(threads, DAV1D_MAX_THREADS);
    if (dav1d->max_frame_delay > 0 && (c->flags & AV_CODEC_FLAG_LOW_DELAY))
        av_log(c, AV_LOG_WARNING, "Low delay mode requested, forcing max_frame_delay 1\n");
    s.max_frame_delay = (c->flags & AV_CODEC_FLAG_LOW_DELAY) ? 1 : dav1d->max_frame_delay;
    av_log(c, AV_LOG_DEBUG, "Using %d threads, %d max_frame_delay\n",
           s.n_threads, s.max_frame_delay);
#else
    s.n_tile_threads = dav1d->tile_threads
                     ? dav1d->tile_threads
                     : FFMIN(floor(sqrt(threads)), DAV1D_MAX_TILE_THREADS);
    s.n_frame_threads = dav1d->frame_threads
                      ? dav1d->frame_threads
                      : FFMIN(ceil(threads / s.n_tile_threads), DAV1D_MAX_FRAME_THREADS);
    if (dav1d->max_frame_delay > 0)
        s.n_frame_threads = FFMIN(s.n_frame_threads, dav1d->max_frame_delay);
    av_log(c, AV_LOG_DEBUG, "Using %d frame threads, %d tile threads\n",
           s.n_frame_threads, s.n_tile_threads);
#endif

#if FF_DAV1D_VERSION_AT_LEAST(6,8)
    if (c->skip_frame >= AVDISCARD_NONKEY)
        s.decode_frame_type = DAV1D_DECODEFRAMETYPE_KEY;
    else if (c->skip_frame >= AVDISCARD_NONINTRA)
        s.decode_frame_type = DAV1D_DECODEFRAMETYPE_INTRA;
    else if (c->skip_frame >= AVDISCARD_NONREF)
        s.decode_frame_type = DAV1D_DECODEFRAMETYPE_REFERENCE;
#endif

    res = libdav1d_parse_extradata(c);
    if (res < 0)
        return res;

    res = dav1d_open(&dav1d->c, &s);
    if (res < 0)
        return AVERROR(ENOMEM);

    return 0;
}

static void libdav1d_flush(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;

    dav1d_data_unref(&dav1d->data);
    dav1d_flush(dav1d->c);
}

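/* Per-packet state carried through the decoder. The AVPacket itself is
 * attached to the Dav1dData as user data, so the caller's pkt->opaque (and,
 * while it still exists, the legacy reordered_opaque) is stashed here and
 * restored on the output frame. */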
typedef struct OpaqueData {
    void    *pkt_orig_opaque;
#if FF_API_REORDERED_OPAQUE
    int64_t  reordered_opaque;
#endif
} OpaqueData;

static void libdav1d_data_free(const uint8_t *data, void *opaque) {
    AVBufferRef *buf = opaque;

    av_buffer_unref(&buf);
}

static void libdav1d_user_data_free(const uint8_t *data, void *opaque) {
    AVPacket *pkt = opaque;
    av_assert0(data == opaque);
    av_free(pkt->opaque);
    av_packet_free(&pkt);
}

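/* receive_frame callback: pull a packet from the decode queue, hand it to
 * dav1d as a zero-copy Dav1dData reference, then try to fetch a decoded
 * picture. Both dav1d_send_data() and dav1d_get_picture() use EAGAIN to
 * signal that more input is needed or that output is not ready yet. */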
static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
{
    Libdav1dContext *dav1d = c->priv_data;
    Dav1dData *data = &dav1d->data;
    Dav1dPicture pic = { 0 }, *p = &pic;
    AVPacket *pkt;
    OpaqueData *od = NULL;
#if FF_DAV1D_VERSION_AT_LEAST(5,1)
    enum Dav1dEventFlags event_flags = 0;
#endif
    int res;

    if (!data->sz) {
        pkt = av_packet_alloc();

        if (!pkt)
            return AVERROR(ENOMEM);

        res = ff_decode_get_packet(c, pkt);
        if (res < 0 && res != AVERROR_EOF) {
            av_packet_free(&pkt);
            return res;
        }

        if (pkt->size) {
            res = dav1d_data_wrap(data, pkt->data, pkt->size,
                                  libdav1d_data_free, pkt->buf);
            if (res < 0) {
                av_packet_free(&pkt);
                return res;
            }

            pkt->buf = NULL;

FF_DISABLE_DEPRECATION_WARNINGS
            if (
#if FF_API_REORDERED_OPAQUE
                c->reordered_opaque != AV_NOPTS_VALUE ||
#endif
                (pkt->opaque && (c->flags & AV_CODEC_FLAG_COPY_OPAQUE))) {
                od = av_mallocz(sizeof(*od));
                if (!od) {
                    av_packet_free(&pkt);
                    dav1d_data_unref(data);
                    return AVERROR(ENOMEM);
                }
                od->pkt_orig_opaque = pkt->opaque;
#if FF_API_REORDERED_OPAQUE
                od->reordered_opaque = c->reordered_opaque;
#endif
FF_ENABLE_DEPRECATION_WARNINGS
            }
            pkt->opaque = od;

            res = dav1d_data_wrap_user_data(data, (const uint8_t *)pkt,
                                            libdav1d_user_data_free, pkt);
            if (res < 0) {
                av_free(pkt->opaque);
                av_packet_free(&pkt);
                dav1d_data_unref(data);
                return res;
            }
            pkt = NULL;
        } else {
            av_packet_free(&pkt);
            if (res >= 0)
                return AVERROR(EAGAIN);
        }
    }

    res = dav1d_send_data(dav1d->c, data);
    if (res < 0) {
        if (res == AVERROR(EINVAL))
            res = AVERROR_INVALIDDATA;
        if (res != AVERROR(EAGAIN)) {
            dav1d_data_unref(data);
            return res;
        }
    }

    res = dav1d_get_picture(dav1d->c, p);
    if (res < 0) {
        if (res == AVERROR(EINVAL))
            res = AVERROR_INVALIDDATA;
        else if (res == AVERROR(EAGAIN) && c->internal->draining)
            res = AVERROR_EOF;

        return res;
    }

    av_assert0(p->data[0] && p->allocator_data);

    // This requires the custom allocator above
    frame->buf[0] = av_buffer_ref(p->allocator_data);
    if (!frame->buf[0]) {
        dav1d_picture_unref(p);
        return AVERROR(ENOMEM);
    }

    frame->data[0] = p->data[0];
    frame->data[1] = p->data[1];
    frame->data[2] = p->data[2];
    frame->linesize[0] = p->stride[0];
    frame->linesize[1] = p->stride[1];
    frame->linesize[2] = p->stride[1];

#if FF_DAV1D_VERSION_AT_LEAST(5,1)
    dav1d_get_event_flags(dav1d->c, &event_flags);
    if (c->pix_fmt == AV_PIX_FMT_NONE ||
        event_flags & DAV1D_EVENT_FLAG_NEW_SEQUENCE)
#endif
    libdav1d_init_params(c, p->seq_hdr);
    res = ff_decode_frame_props(c, frame);
    if (res < 0)
        goto fail;

    frame->width = p->p.w;
    frame->height = p->p.h;
    if (c->width != p->p.w || c->height != p->p.h) {
        res = ff_set_dimensions(c, p->p.w, p->p.h);
        if (res < 0)
            goto fail;
    }

    av_reduce(&frame->sample_aspect_ratio.num,
              &frame->sample_aspect_ratio.den,
              frame->height * (int64_t)p->frame_hdr->render_width,
              frame->width  * (int64_t)p->frame_hdr->render_height,
              INT_MAX);
    ff_set_sar(c, frame->sample_aspect_ratio);

    pkt = (AVPacket *)p->m.user_data.data;
    od  = pkt->opaque;
#if FF_API_REORDERED_OPAQUE
FF_DISABLE_DEPRECATION_WARNINGS
    if (od && od->reordered_opaque != AV_NOPTS_VALUE)
        frame->reordered_opaque = od->reordered_opaque;
    else
        frame->reordered_opaque = AV_NOPTS_VALUE;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    // restore the original user opaque value for
    // ff_decode_frame_props_from_pkt()
    pkt->opaque = od ? od->pkt_orig_opaque : NULL;
    av_freep(&od);

    // match timestamps and packet size
    res = ff_decode_frame_props_from_pkt(c, frame, pkt);
    pkt->opaque = NULL;
    if (res < 0)
        goto fail;

    frame->pkt_dts = pkt->pts;
    frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;

    switch (p->frame_hdr->frame_type) {
    case DAV1D_FRAME_TYPE_KEY:
    case DAV1D_FRAME_TYPE_INTRA:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case DAV1D_FRAME_TYPE_INTER:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case DAV1D_FRAME_TYPE_SWITCH:
        frame->pict_type = AV_PICTURE_TYPE_SP;
        break;
    default:
        res = AVERROR_INVALIDDATA;
        goto fail;
    }

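    /* Export HDR metadata as frame side data. dav1d hands the mastering
     * display values over in fixed point: chromaticities in 0.16, max
     * luminance in 24.8 and min luminance in 18.14, hence the divisors
     * below. */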
    if (p->mastering_display) {
        AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
        if (!mastering) {
            res = AVERROR(ENOMEM);
            goto fail;
        }

        for (int i = 0; i < 3; i++) {
            mastering->display_primaries[i][0] = av_make_q(p->mastering_display->primaries[i][0], 1 << 16);
            mastering->display_primaries[i][1] = av_make_q(p->mastering_display->primaries[i][1], 1 << 16);
        }
        mastering->white_point[0] = av_make_q(p->mastering_display->white_point[0], 1 << 16);
        mastering->white_point[1] = av_make_q(p->mastering_display->white_point[1], 1 << 16);

        mastering->max_luminance = av_make_q(p->mastering_display->max_luminance, 1 << 8);
        mastering->min_luminance = av_make_q(p->mastering_display->min_luminance, 1 << 14);

        mastering->has_primaries = 1;
        mastering->has_luminance = 1;
    }
    if (p->content_light) {
        AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
        if (!light) {
            res = AVERROR(ENOMEM);
            goto fail;
        }
        light->MaxCLL = p->content_light->max_content_light_level;
        light->MaxFALL = p->content_light->max_frame_average_light_level;
    }
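    /* ITU-T T.35 metadata: only ATSC A/53 closed captions (user identifier
     * "GA94") are handled here and attached as AV_FRAME_DATA_A53_CC side
     * data; other identifiers are ignored. */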
    if (p->itut_t35) {
        GetByteContext gb;
        unsigned int user_identifier;

        bytestream2_init(&gb, p->itut_t35->payload, p->itut_t35->payload_size);
        bytestream2_skip(&gb, 1); // terminal provider code
        bytestream2_skip(&gb, 1); // terminal provider oriented code
        user_identifier = bytestream2_get_be32(&gb);
        switch (user_identifier) {
        case MKBETAG('G', 'A', '9', '4'): { // closed captions
            AVBufferRef *buf = NULL;

            res = ff_parse_a53_cc(&buf, gb.buffer, bytestream2_get_bytes_left(&gb));
            if (res < 0)
                goto fail;
            if (!res)
                break;

            if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_A53_CC, buf))
                av_buffer_unref(&buf);

            c->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
            break;
        }
        default: // ignore unsupported identifiers
            break;
        }
    }
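    /* Export the AV1 film grain parameters as side data when grain is
     * signalled but not applied by the decoder, or when the caller asked
     * for them explicitly via AV_CODEC_EXPORT_DATA_FILM_GRAIN. */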
    if (p->frame_hdr->film_grain.present && (!dav1d->apply_grain ||
        (c->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN))) {
        AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(frame);
        if (!fgp) {
            res = AVERROR(ENOMEM);
            goto fail;
        }

        fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
        fgp->seed = p->frame_hdr->film_grain.data.seed;
        fgp->codec.aom.num_y_points = p->frame_hdr->film_grain.data.num_y_points;
        fgp->codec.aom.chroma_scaling_from_luma = p->frame_hdr->film_grain.data.chroma_scaling_from_luma;
        fgp->codec.aom.scaling_shift = p->frame_hdr->film_grain.data.scaling_shift;
        fgp->codec.aom.ar_coeff_lag = p->frame_hdr->film_grain.data.ar_coeff_lag;
        fgp->codec.aom.ar_coeff_shift = p->frame_hdr->film_grain.data.ar_coeff_shift;
        fgp->codec.aom.grain_scale_shift = p->frame_hdr->film_grain.data.grain_scale_shift;
        fgp->codec.aom.overlap_flag = p->frame_hdr->film_grain.data.overlap_flag;
        fgp->codec.aom.limit_output_range = p->frame_hdr->film_grain.data.clip_to_restricted_range;

        memcpy(&fgp->codec.aom.y_points, &p->frame_hdr->film_grain.data.y_points,
               sizeof(fgp->codec.aom.y_points));
        memcpy(&fgp->codec.aom.num_uv_points, &p->frame_hdr->film_grain.data.num_uv_points,
               sizeof(fgp->codec.aom.num_uv_points));
        memcpy(&fgp->codec.aom.uv_points, &p->frame_hdr->film_grain.data.uv_points,
               sizeof(fgp->codec.aom.uv_points));
        memcpy(&fgp->codec.aom.ar_coeffs_y, &p->frame_hdr->film_grain.data.ar_coeffs_y,
               sizeof(fgp->codec.aom.ar_coeffs_y));
        memcpy(&fgp->codec.aom.ar_coeffs_uv[0], &p->frame_hdr->film_grain.data.ar_coeffs_uv[0],
               sizeof(fgp->codec.aom.ar_coeffs_uv[0]));
        memcpy(&fgp->codec.aom.ar_coeffs_uv[1], &p->frame_hdr->film_grain.data.ar_coeffs_uv[1],
               sizeof(fgp->codec.aom.ar_coeffs_uv[1]));
        memcpy(&fgp->codec.aom.uv_mult, &p->frame_hdr->film_grain.data.uv_mult,
               sizeof(fgp->codec.aom.uv_mult));
        memcpy(&fgp->codec.aom.uv_mult_luma, &p->frame_hdr->film_grain.data.uv_luma_mult,
               sizeof(fgp->codec.aom.uv_mult_luma));
        memcpy(&fgp->codec.aom.uv_offset, &p->frame_hdr->film_grain.data.uv_offset,
               sizeof(fgp->codec.aom.uv_offset));
    }

    res = 0;
fail:
    dav1d_picture_unref(p);
    if (res < 0)
        av_frame_unref(frame);
    return res;
}

static av_cold int libdav1d_close(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;

    av_buffer_pool_uninit(&dav1d->pool);
    dav1d_data_unref(&dav1d->data);
    dav1d_close(&dav1d->c);

    return 0;
}

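/* Not every dav1d release provides these limits in its headers; define
 * fallbacks so the option ranges below are always valid. */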
#ifndef DAV1D_MAX_FRAME_THREADS
#define DAV1D_MAX_FRAME_THREADS DAV1D_MAX_THREADS
#endif
#ifndef DAV1D_MAX_TILE_THREADS
#define DAV1D_MAX_TILE_THREADS DAV1D_MAX_THREADS
#endif
#ifndef DAV1D_MAX_FRAME_DELAY
#define DAV1D_MAX_FRAME_DELAY DAV1D_MAX_FRAME_THREADS
#endif

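/* Decoder AVOptions. The per-pool thread options and "filmgrain" are kept
 * for compatibility but flagged deprecated; threading is normally driven by
 * the generic thread_count setting and grain export by export_side_data. */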
#define OFFSET(x) offsetof(Libdav1dContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption libdav1d_options[] = {
    { "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS, VD | AV_OPT_FLAG_DEPRECATED },
    { "framethreads", "Frame threads", OFFSET(frame_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD | AV_OPT_FLAG_DEPRECATED },
    { "max_frame_delay", "Max frame delay", OFFSET(max_frame_delay), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_DELAY, VD },
    { "filmgrain", "Apply Film Grain", OFFSET(apply_grain), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD | AV_OPT_FLAG_DEPRECATED },
    { "oppoint", "Select an operating point of the scalable bitstream", OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31, VD },
    { "alllayers", "Output all spatial layers", OFFSET(all_layers), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
    { NULL }
};

static const AVClass libdav1d_class = {
    .class_name = "libdav1d decoder",
    .item_name  = av_default_item_name,
    .option     = libdav1d_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const FFCodec ff_libdav1d_decoder = {
    .p.name         = "libdav1d",
    CODEC_LONG_NAME("dav1d AV1 decoder by VideoLAN"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_AV1,
    .priv_data_size = sizeof(Libdav1dContext),
    .init           = libdav1d_init,
    .close          = libdav1d_close,
    FF_CODEC_RECEIVE_FRAME_CB(libdav1d_receive_frame),
    .flush          = libdav1d_flush,
    .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
    .caps_internal  = FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_AUTO_THREADS |
                      FF_CODEC_CAP_SETS_FRAME_PROPS,
    .p.priv_class   = &libdav1d_class,
    .p.wrapper_name = "libdav1d",
};
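
/*
 * Minimal usage sketch (not part of the original file): selecting this
 * wrapper explicitly instead of whichever AV1 decoder libavcodec would pick
 * by default. The "oppoint" option is the one registered above; error
 * handling is omitted for brevity.
 *
 *     const AVCodec *codec = avcodec_find_decoder_by_name("libdav1d");
 *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
 *     av_opt_set_int(avctx->priv_data, "oppoint", 0, 0); // pick operating point 0
 *     avcodec_open2(avctx, codec, NULL);
 *     // ...then feed packets with avcodec_send_packet() and drain decoded
 *     // frames with avcodec_receive_frame()...
 */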