FFmpeg
libdav1d.c
/*
 * Copyright (c) 2018 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (c) 2018 James Almer <jamrial gmail com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <dav1d/dav1d.h>

#include "libavutil/avassert.h"
#include "libavutil/film_grain_params.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

#include "atsc_a53.h"
#include "avcodec.h"
#include "bytestream.h"
#include "decode.h"
#include "internal.h"

typedef struct Libdav1dContext {
    AVClass *class;
    Dav1dContext *c;
    AVBufferPool *pool;
    int pool_size;

    Dav1dData data;
    int tile_threads;
    int frame_threads;
    int apply_grain;
    int operating_point;
    int all_layers;
} Libdav1dContext;

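// Map dav1d's pixel layout and seq_hdr->hbd value (0 = 8 bit, 1 = 10 bit,
// 2 = 12 bit) to the corresponding FFmpeg pixel format.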
static const enum AVPixelFormat pix_fmt[][3] = {
    [DAV1D_PIXEL_LAYOUT_I400] = { AV_PIX_FMT_GRAY8,   AV_PIX_FMT_GRAY10,    AV_PIX_FMT_GRAY12 },
    [DAV1D_PIXEL_LAYOUT_I420] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12 },
    [DAV1D_PIXEL_LAYOUT_I422] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12 },
    [DAV1D_PIXEL_LAYOUT_I444] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12 },
};

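// GBR planar formats are selected instead when a 4:4:4 stream signals the
// identity matrix with BT.709 primaries and an sRGB transfer (see the
// receive_frame callback below).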
static const enum AVPixelFormat pix_fmt_rgb[3] = {
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
};

static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
{
    AVCodecContext *c = opaque;

    av_vlog(c, AV_LOG_ERROR, fmt, vl);
}

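// Custom picture allocator used by dav1d. Buffers are drawn from an
// AVBufferPool so decoded frames can later be exported zero-copy by taking a
// new reference in libdav1d_receive_frame(). Plane sizes are computed for
// dimensions rounded up to a multiple of 128 and DAV1D_PICTURE_ALIGNMENT-
// aligned data pointers, matching dav1d's picture allocation requirements.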
static int libdav1d_picture_allocator(Dav1dPicture *p, void *cookie)
{
    Libdav1dContext *dav1d = cookie;
    enum AVPixelFormat format = pix_fmt[p->p.layout][p->seq_hdr->hbd];
    int ret, linesize[4], h = FFALIGN(p->p.h, 128), w = FFALIGN(p->p.w, 128);
    uint8_t *aligned_ptr, *data[4];
    AVBufferRef *buf;

    ret = av_image_get_buffer_size(format, w, h, DAV1D_PICTURE_ALIGNMENT);
    if (ret < 0)
        return ret;

    if (ret != dav1d->pool_size) {
        av_buffer_pool_uninit(&dav1d->pool);
        // Use twice the amount of required padding bytes for aligned_ptr below.
        dav1d->pool = av_buffer_pool_init(ret + DAV1D_PICTURE_ALIGNMENT * 2, NULL);
        if (!dav1d->pool) {
            dav1d->pool_size = 0;
            return AVERROR(ENOMEM);
        }
        dav1d->pool_size = ret;
    }
    buf = av_buffer_pool_get(dav1d->pool);
    if (!buf)
        return AVERROR(ENOMEM);

    // libdav1d requires DAV1D_PICTURE_ALIGNMENT aligned buffers, which av_malloc()
    // doesn't guarantee for example when AVX is disabled at configure time.
    // Use the extra DAV1D_PICTURE_ALIGNMENT padding bytes in the buffer to align it
    // if required.
    aligned_ptr = (uint8_t *)FFALIGN((uintptr_t)buf->data, DAV1D_PICTURE_ALIGNMENT);
    ret = av_image_fill_arrays(data, linesize, aligned_ptr, format, w, h,
                               DAV1D_PICTURE_ALIGNMENT);
    if (ret < 0) {
        av_buffer_unref(&buf);
        return ret;
    }

    p->data[0] = data[0];
    p->data[1] = data[1];
    p->data[2] = data[2];
    p->stride[0] = linesize[0];
    p->stride[1] = linesize[1];
    p->allocator_data = buf;

    return 0;
}

static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
{
    AVBufferRef *buf = p->allocator_data;

    av_buffer_unref(&buf);
}

static av_cold int libdav1d_init(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;
    Dav1dSettings s;
    int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2;
    int res;

    av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version());

    dav1d_default_settings(&s);
    s.logger.cookie = c;
    s.logger.callback = libdav1d_log_callback;
    s.allocator.cookie = dav1d;
    s.allocator.alloc_picture_callback = libdav1d_picture_allocator;
    s.allocator.release_picture_callback = libdav1d_picture_release;
    s.frame_size_limit = c->max_pixels;
    if (dav1d->apply_grain >= 0)
        s.apply_grain = dav1d->apply_grain;
    else if (c->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN)
        s.apply_grain = 0;

    s.all_layers = dav1d->all_layers;
    if (dav1d->operating_point >= 0)
        s.operating_point = dav1d->operating_point;

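    // Unless overridden via the private options, split roughly 1.5x the CPU
    // count between tile and frame threads: tile threads ~ sqrt(threads),
    // frame threads take the remainder, each capped at dav1d's maximums.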
    s.n_tile_threads = dav1d->tile_threads
                     ? dav1d->tile_threads
                     : FFMIN(floor(sqrt(threads)), DAV1D_MAX_TILE_THREADS);
    s.n_frame_threads = dav1d->frame_threads
                      ? dav1d->frame_threads
                      : FFMIN(ceil(threads / s.n_tile_threads), DAV1D_MAX_FRAME_THREADS);
    av_log(c, AV_LOG_DEBUG, "Using %d frame threads, %d tile threads\n",
           s.n_frame_threads, s.n_tile_threads);

    res = dav1d_open(&dav1d->c, &s);
    if (res < 0)
        return AVERROR(ENOMEM);

    return 0;
}

static void libdav1d_flush(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;

    dav1d_data_unref(&dav1d->data);
    dav1d_flush(dav1d->c);
}

static void libdav1d_data_free(const uint8_t *data, void *opaque) {
    AVBufferRef *buf = opaque;

    av_buffer_unref(&buf);
}

static void libdav1d_user_data_free(const uint8_t *data, void *opaque) {
    av_assert0(data == opaque);
    av_free(opaque);
}

static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
{
    Libdav1dContext *dav1d = c->priv_data;
    Dav1dData *data = &dav1d->data;
    Dav1dPicture pic = { 0 }, *p = &pic;
    int res;

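    // Fetch a new packet only once the previous Dav1dData has been fully
    // consumed; if dav1d_send_data() returned EAGAIN, the pending data is
    // resubmitted on the next call.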
    if (!data->sz) {
        AVPacket pkt = { 0 };

        res = ff_decode_get_packet(c, &pkt);
        if (res < 0 && res != AVERROR_EOF)
            return res;

        if (pkt.size) {
            res = dav1d_data_wrap(data, pkt.data, pkt.size, libdav1d_data_free, pkt.buf);
            if (res < 0) {
                av_packet_unref(&pkt);
                return res;
            }

            data->m.timestamp = pkt.pts;
            data->m.offset = pkt.pos;
            data->m.duration = pkt.duration;

            pkt.buf = NULL;
            av_packet_unref(&pkt);

            if (c->reordered_opaque != AV_NOPTS_VALUE) {
                uint8_t *reordered_opaque = av_malloc(sizeof(c->reordered_opaque));
                if (!reordered_opaque) {
                    dav1d_data_unref(data);
                    return AVERROR(ENOMEM);
                }

                memcpy(reordered_opaque, &c->reordered_opaque, sizeof(c->reordered_opaque));
                res = dav1d_data_wrap_user_data(data, reordered_opaque,
                                                libdav1d_user_data_free, reordered_opaque);
                if (res < 0) {
                    av_free(reordered_opaque);
                    dav1d_data_unref(data);
                    return res;
                }
            }
        }
    }

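    // Decoupled send/receive: feed any pending data to dav1d, then try to
    // pull a decoded picture. EAGAIN from dav1d_send_data() is not fatal
    // here, since a picture may still be available for output.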
    res = dav1d_send_data(dav1d->c, data);
    if (res < 0) {
        if (res == AVERROR(EINVAL))
            res = AVERROR_INVALIDDATA;
        if (res != AVERROR(EAGAIN))
            return res;
    }

    res = dav1d_get_picture(dav1d->c, p);
    if (res < 0) {
        if (res == AVERROR(EINVAL))
            res = AVERROR_INVALIDDATA;
        else if (res == AVERROR(EAGAIN) && c->internal->draining)
            res = AVERROR_EOF;

        return res;
    }

    av_assert0(p->data[0] && p->allocator_data);

    // This requires the custom allocator above
    frame->buf[0] = av_buffer_ref(p->allocator_data);
    if (!frame->buf[0]) {
        dav1d_picture_unref(p);
        return AVERROR(ENOMEM);
    }

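    // Dav1dPicture carries one stride for luma and a single stride shared by
    // both chroma planes, hence stride[1] is used for linesize[1] and [2].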
    frame->data[0] = p->data[0];
    frame->data[1] = p->data[1];
    frame->data[2] = p->data[2];
    frame->linesize[0] = p->stride[0];
    frame->linesize[1] = p->stride[1];
    frame->linesize[2] = p->stride[1];

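    // For c->level: AV1 packs seq_level_idx as (major_level - 2) * 4 + minor_level.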
    c->profile = p->seq_hdr->profile;
    c->level = ((p->seq_hdr->operating_points[0].major_level - 2) << 2)
               | p->seq_hdr->operating_points[0].minor_level;
    frame->width = p->p.w;
    frame->height = p->p.h;
    if (c->width != p->p.w || c->height != p->p.h) {
        res = ff_set_dimensions(c, p->p.w, p->p.h);
        if (res < 0)
            goto fail;
    }

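    // Derive the sample aspect ratio from the ratio between the render
    // (display) dimensions and the coded dimensions.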
    av_reduce(&frame->sample_aspect_ratio.num,
              &frame->sample_aspect_ratio.den,
              frame->height * (int64_t)p->frame_hdr->render_width,
              frame->width  * (int64_t)p->frame_hdr->render_height,
              INT_MAX);
    ff_set_sar(c, frame->sample_aspect_ratio);

    switch (p->seq_hdr->chr) {
    case DAV1D_CHR_VERTICAL:
        frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case DAV1D_CHR_COLOCATED:
        frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
        break;
    }
    frame->colorspace = c->colorspace = (enum AVColorSpace) p->seq_hdr->mtrx;
    frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p->seq_hdr->pri;
    frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p->seq_hdr->trc;
    frame->color_range = c->color_range = p->seq_hdr->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;

    if (p->p.layout == DAV1D_PIXEL_LAYOUT_I444 &&
        p->seq_hdr->mtrx == DAV1D_MC_IDENTITY &&
        p->seq_hdr->pri  == DAV1D_COLOR_PRI_BT709 &&
        p->seq_hdr->trc  == DAV1D_TRC_SRGB)
        frame->format = c->pix_fmt = pix_fmt_rgb[p->seq_hdr->hbd];
    else
        frame->format = c->pix_fmt = pix_fmt[p->p.layout][p->seq_hdr->hbd];

    if (p->m.user_data.data)
        memcpy(&frame->reordered_opaque, p->m.user_data.data, sizeof(frame->reordered_opaque));
    else
        frame->reordered_opaque = AV_NOPTS_VALUE;

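    // Derive the nominal frame rate from the sequence header timing info,
    // when the stream carries it.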
    if (p->seq_hdr->num_units_in_tick && p->seq_hdr->time_scale) {
        av_reduce(&c->framerate.den, &c->framerate.num,
                  p->seq_hdr->num_units_in_tick, p->seq_hdr->time_scale, INT_MAX);
        if (p->seq_hdr->equal_picture_interval)
            c->ticks_per_frame = p->seq_hdr->num_ticks_per_picture;
    }

    // match timestamps and packet size
    frame->pts = p->m.timestamp;
    frame->pkt_dts = p->m.timestamp;
    frame->pkt_pos = p->m.offset;
    frame->pkt_size = p->m.size;
    frame->pkt_duration = p->m.duration;
    frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;

    switch (p->frame_hdr->frame_type) {
    case DAV1D_FRAME_TYPE_KEY:
    case DAV1D_FRAME_TYPE_INTRA:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case DAV1D_FRAME_TYPE_INTER:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case DAV1D_FRAME_TYPE_SWITCH:
        frame->pict_type = AV_PICTURE_TYPE_SP;
        break;
    default:
        res = AVERROR_INVALIDDATA;
        goto fail;
    }

    if (p->mastering_display) {
        AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
        if (!mastering) {
            res = AVERROR(ENOMEM);
            goto fail;
        }

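        // dav1d exposes the chromaticities and white point in 0.16 fixed
        // point, max_luminance in 24.8 and min_luminance in 18.14 fixed
        // point; convert them to rationals accordingly.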
        for (int i = 0; i < 3; i++) {
            mastering->display_primaries[i][0] = av_make_q(p->mastering_display->primaries[i][0], 1 << 16);
            mastering->display_primaries[i][1] = av_make_q(p->mastering_display->primaries[i][1], 1 << 16);
        }
        mastering->white_point[0] = av_make_q(p->mastering_display->white_point[0], 1 << 16);
        mastering->white_point[1] = av_make_q(p->mastering_display->white_point[1], 1 << 16);

        mastering->max_luminance = av_make_q(p->mastering_display->max_luminance, 1 << 8);
        mastering->min_luminance = av_make_q(p->mastering_display->min_luminance, 1 << 14);

        mastering->has_primaries = 1;
        mastering->has_luminance = 1;
    }
    if (p->content_light) {
        AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
        if (!light) {
            res = AVERROR(ENOMEM);
            goto fail;
        }
        light->MaxCLL = p->content_light->max_content_light_level;
        light->MaxFALL = p->content_light->max_frame_average_light_level;
    }
    if (p->itut_t35) {
        GetByteContext gb;
        unsigned int user_identifier;

        bytestream2_init(&gb, p->itut_t35->payload, p->itut_t35->payload_size);
        bytestream2_skip(&gb, 1); // terminal provider code
        bytestream2_skip(&gb, 1); // terminal provider oriented code
        user_identifier = bytestream2_get_be32(&gb);
        switch (user_identifier) {
        case MKBETAG('G', 'A', '9', '4'): { // closed captions
            AVBufferRef *buf = NULL;

            res = ff_parse_a53_cc(&buf, gb.buffer, bytestream2_get_bytes_left(&gb));
            if (res < 0)
                goto fail;
            if (!res)
                break;

            if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_A53_CC, buf))
                av_buffer_unref(&buf);

            c->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
            break;
        }
        default: // ignore unsupported identifiers
            break;
        }
    }
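    // Export the AV1 film grain parameters as frame side data when grain
    // synthesis is left to the caller (apply_grain disabled or film grain
    // export requested).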
    if (p->frame_hdr->film_grain.present && (!dav1d->apply_grain ||
        (c->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN))) {
        AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(frame);
        if (!fgp) {
            res = AVERROR(ENOMEM);
            goto fail;
        }

        fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
        fgp->seed = p->frame_hdr->film_grain.data.seed;
        fgp->codec.aom.num_y_points = p->frame_hdr->film_grain.data.num_y_points;
        fgp->codec.aom.chroma_scaling_from_luma = p->frame_hdr->film_grain.data.chroma_scaling_from_luma;
        fgp->codec.aom.scaling_shift = p->frame_hdr->film_grain.data.scaling_shift;
        fgp->codec.aom.ar_coeff_lag = p->frame_hdr->film_grain.data.ar_coeff_lag;
        fgp->codec.aom.ar_coeff_shift = p->frame_hdr->film_grain.data.ar_coeff_shift;
        fgp->codec.aom.grain_scale_shift = p->frame_hdr->film_grain.data.grain_scale_shift;
        fgp->codec.aom.overlap_flag = p->frame_hdr->film_grain.data.overlap_flag;
        fgp->codec.aom.limit_output_range = p->frame_hdr->film_grain.data.clip_to_restricted_range;

        memcpy(&fgp->codec.aom.y_points, &p->frame_hdr->film_grain.data.y_points,
               sizeof(fgp->codec.aom.y_points));
        memcpy(&fgp->codec.aom.num_uv_points, &p->frame_hdr->film_grain.data.num_uv_points,
               sizeof(fgp->codec.aom.num_uv_points));
        memcpy(&fgp->codec.aom.uv_points, &p->frame_hdr->film_grain.data.uv_points,
               sizeof(fgp->codec.aom.uv_points));
        memcpy(&fgp->codec.aom.ar_coeffs_y, &p->frame_hdr->film_grain.data.ar_coeffs_y,
               sizeof(fgp->codec.aom.ar_coeffs_y));
        memcpy(&fgp->codec.aom.ar_coeffs_uv[0], &p->frame_hdr->film_grain.data.ar_coeffs_uv[0],
               sizeof(fgp->codec.aom.ar_coeffs_uv[0]));
        memcpy(&fgp->codec.aom.ar_coeffs_uv[1], &p->frame_hdr->film_grain.data.ar_coeffs_uv[1],
               sizeof(fgp->codec.aom.ar_coeffs_uv[1]));
        memcpy(&fgp->codec.aom.uv_mult, &p->frame_hdr->film_grain.data.uv_mult,
               sizeof(fgp->codec.aom.uv_mult));
        memcpy(&fgp->codec.aom.uv_mult_luma, &p->frame_hdr->film_grain.data.uv_luma_mult,
               sizeof(fgp->codec.aom.uv_mult_luma));
        memcpy(&fgp->codec.aom.uv_offset, &p->frame_hdr->film_grain.data.uv_offset,
               sizeof(fgp->codec.aom.uv_offset));
    }

    res = 0;
fail:
    dav1d_picture_unref(p);
    if (res < 0)
        av_frame_unref(frame);
    return res;
}

static av_cold int libdav1d_close(AVCodecContext *c)
{
    Libdav1dContext *dav1d = c->priv_data;

    av_buffer_pool_uninit(&dav1d->pool);
    dav1d_data_unref(&dav1d->data);
    dav1d_close(&dav1d->c);

    return 0;
}

#define OFFSET(x) offsetof(Libdav1dContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption libdav1d_options[] = {
    { "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS, VD },
    { "framethreads", "Frame threads", OFFSET(frame_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD },
    { "filmgrain", "Apply Film Grain", OFFSET(apply_grain), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD | AV_OPT_FLAG_DEPRECATED },
    { "oppoint", "Select an operating point of the scalable bitstream", OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31, VD },
    { "alllayers", "Output all spatial layers", OFFSET(all_layers), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
    { NULL }
};

static const AVClass libdav1d_class = {
    .class_name = "libdav1d decoder",
    .item_name  = av_default_item_name,
    .option     = libdav1d_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const AVCodec ff_libdav1d_decoder = {
    .name           = "libdav1d",
    .long_name      = NULL_IF_CONFIG_SMALL("dav1d AV1 decoder by VideoLAN"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .priv_data_size = sizeof(Libdav1dContext),
    .init           = libdav1d_init,
    .close          = libdav1d_close,
    .flush          = libdav1d_flush,
    .receive_frame  = libdav1d_receive_frame,
    .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_SETS_PKT_DTS |
                      FF_CODEC_CAP_AUTO_THREADS,
    .priv_class     = &libdav1d_class,
    .wrapper_name   = "libdav1d",
};