FFmpeg
librav1e.c
/*
 * librav1e encoder
 *
 * Copyright (c) 2019 Derek Buitenhuis
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <rav1e.h>

#include "libavutil/buffer.h"
#include "libavutil/internal.h"
#include "libavutil/avassert.h"
#include "libavutil/base64.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "encode.h"
#include "internal.h"

typedef struct librav1eContext {
    const AVClass *class;

    RaContext *ctx;
    AVFrame *frame;
    RaFrame *rframe;

    uint8_t *pass_data;
    size_t pass_pos;
    int pass_size;

    AVDictionary *rav1e_opts;
    int quantizer;
    int speed;
    int tiles;
    int tile_rows;
    int tile_cols;
} librav1eContext;

typedef struct FrameData {
    int64_t pts;
    int64_t duration;
#if FF_API_REORDERED_OPAQUE
    int64_t reordered_opaque;
#endif

    void *frame_opaque;
    AVBufferRef *frame_opaque_ref;
} FrameData;

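/*
 * Map an FFmpeg pixel format and color range onto rav1e's pixel range.
 * The deprecated full-range ("J") pixel formats are always full range.
 */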
static inline RaPixelRange range_map(enum AVPixelFormat pix_fmt, enum AVColorRange range)
{
    switch (pix_fmt) {
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUVJ444P:
        return RA_PIXEL_RANGE_FULL;
    }

    switch (range) {
    case AVCOL_RANGE_JPEG:
        return RA_PIXEL_RANGE_FULL;
    case AVCOL_RANGE_MPEG:
    default:
        return RA_PIXEL_RANGE_LIMITED;
    }
}

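/* Map a supported planar YUV pixel format onto rav1e's chroma subsampling. */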
static inline RaChromaSampling pix_fmt_map(enum AVPixelFormat pix_fmt)
{
    switch (pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV420P12:
        return RA_CHROMA_SAMPLING_CS420;
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV422P12:
        return RA_CHROMA_SAMPLING_CS422;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVJ444P:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV444P12:
        return RA_CHROMA_SAMPLING_CS444;
    default:
        av_assert0(0);
    }
}

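/* Map FFmpeg's chroma sample location onto rav1e's chroma sample position. */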
static inline RaChromaSamplePosition chroma_loc_map(enum AVChromaLocation chroma_loc)
{
    switch (chroma_loc) {
    case AVCHROMA_LOC_LEFT:
        return RA_CHROMA_SAMPLE_POSITION_VERTICAL;
    case AVCHROMA_LOC_TOPLEFT:
        return RA_CHROMA_SAMPLE_POSITION_COLOCATED;
    default:
        return RA_CHROMA_SAMPLE_POSITION_UNKNOWN;
    }
}

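/*
 * Collect first-pass statistics from rav1e. While encoding, each call appends
 * the buffered stats to ctx->pass_data; at end of stream the final summary
 * block overwrites the start of the buffer and the accumulated data is
 * base64-encoded into avctx->stats_out.
 */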
static int get_stats(AVCodecContext *avctx, int eos)
{
    librav1eContext *ctx = avctx->priv_data;
    RaData* buf = rav1e_twopass_out(ctx->ctx);
    if (!buf)
        return 0;

    if (!eos) {
        uint8_t *tmp = av_fast_realloc(ctx->pass_data, &ctx->pass_size,
                                       ctx->pass_pos + buf->len);
        if (!tmp) {
            rav1e_data_unref(buf);
            return AVERROR(ENOMEM);
        }

        ctx->pass_data = tmp;
        memcpy(ctx->pass_data + ctx->pass_pos, buf->data, buf->len);
        ctx->pass_pos += buf->len;
    } else {
        size_t b64_size = AV_BASE64_SIZE(ctx->pass_pos);

        memcpy(ctx->pass_data, buf->data, buf->len);

        avctx->stats_out = av_malloc(b64_size);
        if (!avctx->stats_out) {
            rav1e_data_unref(buf);
            return AVERROR(ENOMEM);
        }

        av_base64_encode(avctx->stats_out, b64_size, ctx->pass_data, ctx->pass_pos);

        av_freep(&ctx->pass_data);
    }

    rav1e_data_unref(buf);

    return 0;
}

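/*
 * Feed the base64-decoded first-pass statistics back into rav1e until it
 * stops consuming data or the buffer is exhausted.
 */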
static int set_stats(AVCodecContext *avctx)
{
    librav1eContext *ctx = avctx->priv_data;
    int ret = 1;

    while (ret > 0 && ctx->pass_size - ctx->pass_pos > 0) {
        ret = rav1e_twopass_in(ctx->ctx, ctx->pass_data + ctx->pass_pos, ctx->pass_size);
        if (ret < 0)
            return AVERROR_EXTERNAL;
        ctx->pass_pos += ret;
    }

    return 0;
}

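/*
 * Free the rav1e context, any queued RaFrame, the scratch AVFrame and the
 * two-pass stats buffer.
 */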
static av_cold int librav1e_encode_close(AVCodecContext *avctx)
{
    librav1eContext *ctx = avctx->priv_data;

    if (ctx->ctx) {
        rav1e_context_unref(ctx->ctx);
        ctx->ctx = NULL;
    }
    if (ctx->rframe) {
        rav1e_frame_unref(ctx->rframe);
        ctx->rframe = NULL;
    }

    av_frame_free(&ctx->frame);
    av_freep(&ctx->pass_data);

    return 0;
}

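/*
 * Translate the AVCodecContext settings into a RaConfig, decode any two-pass
 * stats, create the rav1e encoder context and, when global headers are
 * requested, export the sequence header as extradata.
 */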
static av_cold int librav1e_encode_init(AVCodecContext *avctx)
{
    librav1eContext *ctx = avctx->priv_data;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    RaConfig *cfg = NULL;
    int rret;
    int ret = 0;

    ctx->frame = av_frame_alloc();
    if (!ctx->frame)
        return AVERROR(ENOMEM);

    cfg = rav1e_config_default();
    if (!cfg) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate rav1e config.\n");
        return AVERROR_EXTERNAL;
    }

    /*
     * Rav1e currently uses the time base given to it only for ratecontrol... where
     * the inverse is taken and used as a framerate. So, do what we do in other wrappers
     * and use the framerate if we can.
     */
    if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
        rav1e_config_set_time_base(cfg, (RaRational) {
                                   avctx->framerate.den, avctx->framerate.num
                                   });
    } else {
        rav1e_config_set_time_base(cfg, (RaRational) {
                                   avctx->time_base.num * avctx->ticks_per_frame,
                                   avctx->time_base.den
                                   });
    }

    if ((avctx->flags & AV_CODEC_FLAG_PASS1 || avctx->flags & AV_CODEC_FLAG_PASS2) && !avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "A bitrate must be set to use two pass mode.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    if (avctx->flags & AV_CODEC_FLAG_PASS2) {
        if (!avctx->stats_in) {
            av_log(avctx, AV_LOG_ERROR, "No stats file provided for second pass.\n");
            ret = AVERROR(EINVAL);
            goto end;
        }

        ctx->pass_size = (strlen(avctx->stats_in) * 3) / 4;
        ctx->pass_data = av_malloc(ctx->pass_size);
        if (!ctx->pass_data) {
            av_log(avctx, AV_LOG_ERROR, "Could not allocate stats buffer.\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }

        ctx->pass_size = av_base64_decode(ctx->pass_data, avctx->stats_in, ctx->pass_size);
        if (ctx->pass_size < 0) {
            av_log(avctx, AV_LOG_ERROR, "Invalid pass file.\n");
            ret = AVERROR(EINVAL);
            goto end;
        }
    }

    {
        const AVDictionaryEntry *en = NULL;
        while ((en = av_dict_iterate(ctx->rav1e_opts, en))) {
            if (rav1e_config_parse(cfg, en->key, en->value) < 0)
                av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value);
        }
    }

    rret = rav1e_config_parse_int(cfg, "width", avctx->width);
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid width passed to rav1e.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    rret = rav1e_config_parse_int(cfg, "height", avctx->height);
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid height passed to rav1e.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0)
        rav1e_config_set_sample_aspect_ratio(cfg, (RaRational) {
                                             avctx->sample_aspect_ratio.num,
                                             avctx->sample_aspect_ratio.den
                                             });

    rret = rav1e_config_parse_int(cfg, "threads", avctx->thread_count);
    if (rret < 0)
        av_log(avctx, AV_LOG_WARNING, "Invalid number of threads, defaulting to auto.\n");

    if (ctx->speed >= 0) {
        rret = rav1e_config_parse_int(cfg, "speed", ctx->speed);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set speed preset.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    /* rav1e handles precedence between 'tiles' and cols/rows for us. */
    if (ctx->tiles > 0) {
        rret = rav1e_config_parse_int(cfg, "tiles", ctx->tiles);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tiles to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }
    if (ctx->tile_rows > 0) {
        rret = rav1e_config_parse_int(cfg, "tile_rows", ctx->tile_rows);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tile rows to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }
    if (ctx->tile_cols > 0) {
        rret = rav1e_config_parse_int(cfg, "tile_cols", ctx->tile_cols);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tile cols to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (avctx->gop_size > 0) {
        rret = rav1e_config_parse_int(cfg, "key_frame_interval", avctx->gop_size);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set max keyint.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (avctx->keyint_min > 0) {
        rret = rav1e_config_parse_int(cfg, "min_key_frame_interval", avctx->keyint_min);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set min keyint.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (avctx->bit_rate && ctx->quantizer < 0) {
        int max_quantizer = avctx->qmax >= 0 ? avctx->qmax : 255;

        rret = rav1e_config_parse_int(cfg, "quantizer", max_quantizer);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set max quantizer.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }

        if (avctx->qmin >= 0) {
            rret = rav1e_config_parse_int(cfg, "min_quantizer", avctx->qmin);
            if (rret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Could not set min quantizer.\n");
                ret = AVERROR_EXTERNAL;
                goto end;
            }
        }

        rret = rav1e_config_parse_int(cfg, "bitrate", avctx->bit_rate);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set bitrate.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    } else if (ctx->quantizer >= 0) {
        if (avctx->bit_rate)
            av_log(avctx, AV_LOG_WARNING, "Both bitrate and quantizer specified. Using quantizer mode.");

        rret = rav1e_config_parse_int(cfg, "quantizer", ctx->quantizer);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set quantizer.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    rret = rav1e_config_set_pixel_format(cfg, desc->comp[0].depth,
                                         pix_fmt_map(avctx->pix_fmt),
                                         chroma_loc_map(avctx->chroma_sample_location),
                                         range_map(avctx->pix_fmt, avctx->color_range));
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to set pixel format properties.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    /* rav1e's colorspace enums match standard values. */
    rret = rav1e_config_set_color_description(cfg, (RaMatrixCoefficients) avctx->colorspace,
                                              (RaColorPrimaries) avctx->color_primaries,
                                              (RaTransferCharacteristics) avctx->color_trc);
    if (rret < 0) {
        av_log(avctx, AV_LOG_WARNING, "Failed to set color properties.\n");
        if (avctx->err_recognition & AV_EF_EXPLODE) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
    }

    ctx->ctx = rav1e_context_new(cfg);
    if (!ctx->ctx) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create rav1e encode context.\n");
        ret = AVERROR_EXTERNAL;
        goto end;
    }

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        RaData *seq_hdr = rav1e_container_sequence_header(ctx->ctx);

        if (seq_hdr)
            avctx->extradata = av_mallocz(seq_hdr->len + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!seq_hdr || !avctx->extradata) {
            rav1e_data_unref(seq_hdr);
            av_log(avctx, AV_LOG_ERROR, "Failed to get extradata.\n");
            ret = seq_hdr ? AVERROR(ENOMEM) : AVERROR_EXTERNAL;
            goto end;
        }

        memcpy(avctx->extradata, seq_hdr->data, seq_hdr->len);
        avctx->extradata_size = seq_hdr->len;
        rav1e_data_unref(seq_hdr);
    }

    ret = 0;

end:

    rav1e_config_unref(cfg);

    return ret;
}

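/* Destructor for the FrameData attached to each RaFrame as opaque data. */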
static void frame_data_free(void *data)
{
    FrameData *fd = data;

    if (!fd)
        return;

    av_buffer_unref(&fd->frame_opaque_ref);
    av_free(data);
}

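/*
 * Standard receive_packet callback: pull the next AVFrame from the encode
 * queue, wrap it in a RaFrame (with its timestamps and opaque data stored in
 * a FrameData), send it to rav1e, exchange two-pass stats, and copy the next
 * available RaPacket into the output AVPacket. The reconstructed frame is
 * also extracted when AV_CODEC_FLAG_RECON_FRAME is set.
 */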
static int librav1e_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    librav1eContext *ctx = avctx->priv_data;
    RaFrame *rframe = ctx->rframe;
    RaPacket *rpkt = NULL;
    FrameData *fd;
    int ret;

    if (!rframe) {
        AVFrame *frame = ctx->frame;

        ret = ff_encode_get_frame(avctx, frame);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;

        if (frame->buf[0]) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);

            fd = av_mallocz(sizeof(*fd));
            if (!fd) {
                av_log(avctx, AV_LOG_ERROR, "Could not allocate PTS buffer.\n");
                return AVERROR(ENOMEM);
            }
            fd->pts = frame->pts;
            fd->duration = frame->duration;
#if FF_API_REORDERED_OPAQUE
FF_DISABLE_DEPRECATION_WARNINGS
            fd->reordered_opaque = frame->reordered_opaque;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

            if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
                fd->frame_opaque = frame->opaque;
                ret = av_buffer_replace(&fd->frame_opaque_ref, frame->opaque_ref);
                if (ret < 0) {
                    frame_data_free(fd);
                    av_frame_unref(frame);
                    return ret;
                }
            }

            rframe = rav1e_frame_new(ctx->ctx);
            if (!rframe) {
                av_log(avctx, AV_LOG_ERROR, "Could not allocate new rav1e frame.\n");
                av_frame_unref(frame);
                frame_data_free(fd);
                return AVERROR(ENOMEM);
            }

            for (int i = 0; i < desc->nb_components; i++) {
                int shift = i ? desc->log2_chroma_h : 0;
                int bytes = desc->comp[0].depth == 8 ? 1 : 2;
                rav1e_frame_fill_plane(rframe, i, frame->data[i],
                                       (frame->height >> shift) * frame->linesize[i],
                                       frame->linesize[i], bytes);
            }
            av_frame_unref(frame);
            rav1e_frame_set_opaque(rframe, fd, frame_data_free);
        }
    }

    ret = rav1e_send_frame(ctx->ctx, rframe);
    if (rframe)
        if (ret == RA_ENCODER_STATUS_ENOUGH_DATA) {
            ctx->rframe = rframe; /* Queue is full. Store the RaFrame to retry next call */
        } else {
            rav1e_frame_unref(rframe); /* No need to unref if flushing. */
            ctx->rframe = NULL;
        }

    switch (ret) {
    case RA_ENCODER_STATUS_SUCCESS:
    case RA_ENCODER_STATUS_ENOUGH_DATA:
        break;
    case RA_ENCODER_STATUS_FAILURE:
        av_log(avctx, AV_LOG_ERROR, "Could not send frame: %s\n", rav1e_status_to_str(ret));
        return AVERROR_EXTERNAL;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown return code %d from rav1e_send_frame: %s\n", ret, rav1e_status_to_str(ret));
        return AVERROR_UNKNOWN;
    }

retry:

    if (avctx->flags & AV_CODEC_FLAG_PASS1) {
        int sret = get_stats(avctx, 0);
        if (sret < 0)
            return sret;
    } else if (avctx->flags & AV_CODEC_FLAG_PASS2) {
        int sret = set_stats(avctx);
        if (sret < 0)
            return sret;
    }

    ret = rav1e_receive_packet(ctx->ctx, &rpkt);
    switch (ret) {
    case RA_ENCODER_STATUS_SUCCESS:
        break;
    case RA_ENCODER_STATUS_LIMIT_REACHED:
        if (avctx->flags & AV_CODEC_FLAG_PASS1) {
            int sret = get_stats(avctx, 1);
            if (sret < 0)
                return sret;
        }
        return AVERROR_EOF;
    case RA_ENCODER_STATUS_ENCODED:
        goto retry;
    case RA_ENCODER_STATUS_NEED_MORE_DATA:
        if (avctx->internal->draining) {
            av_log(avctx, AV_LOG_ERROR, "Unexpected error when receiving packet after EOF.\n");
            return AVERROR_EXTERNAL;
        }
        return AVERROR(EAGAIN);
    case RA_ENCODER_STATUS_FAILURE:
        av_log(avctx, AV_LOG_ERROR, "Could not encode frame: %s\n", rav1e_status_to_str(ret));
        return AVERROR_EXTERNAL;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown return code %d from rav1e_receive_packet: %s\n", ret, rav1e_status_to_str(ret));
        return AVERROR_UNKNOWN;
    }

    ret = ff_get_encode_buffer(avctx, pkt, rpkt->len, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate packet.\n");
        rav1e_packet_unref(rpkt);
        return ret;
    }

    memcpy(pkt->data, rpkt->data, rpkt->len);

    if (rpkt->frame_type == RA_FRAME_TYPE_KEY)
        pkt->flags |= AV_PKT_FLAG_KEY;

    fd = rpkt->opaque;
    pkt->pts = pkt->dts = fd->pts;
    pkt->duration = fd->duration;
#if FF_API_REORDERED_OPAQUE
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->reordered_opaque = fd->reordered_opaque;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
        pkt->opaque = fd->frame_opaque;
        pkt->opaque_ref = fd->frame_opaque_ref;
        fd->frame_opaque_ref = NULL;
    }

    frame_data_free(fd);

    if (avctx->flags & AV_CODEC_FLAG_RECON_FRAME) {
        AVCodecInternal *avci = avctx->internal;
        AVFrame *frame = avci->recon_frame;
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);

        av_frame_unref(frame);

        frame->format = avctx->pix_fmt;
        frame->width = avctx->width;
        frame->height = avctx->height;

        ret = ff_encode_alloc_frame(avctx, frame);
        if (ret < 0) {
            rav1e_packet_unref(rpkt);
            return ret;
        }

        for (int i = 0; i < desc->nb_components; i++) {
            int shift = i ? desc->log2_chroma_h : 0;
            rav1e_frame_extract_plane(rpkt->rec, i, frame->data[i],
                                      (frame->height >> shift) * frame->linesize[i],
                                      frame->linesize[i], desc->comp[i].step);
        }
    }

    rav1e_packet_unref(rpkt);

    return 0;
}

#define OFFSET(x) offsetof(librav1eContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

static const AVOption options[] = {
    { "qp", "use constant quantizer mode", OFFSET(quantizer), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 255, VE },
    { "speed", "what speed preset to use", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 10, VE },
    { "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
    { "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
    { "tile-columns", "number of tiles columns to encode with", OFFSET(tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
    { "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
    { NULL }
};

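/*
 * The options above are translated into rav1e settings in
 * librav1e_encode_init(). An illustrative command line (hypothetical input
 * and output names) would be:
 *
 *   ffmpeg -i input.mkv -c:v librav1e -speed 6 -qp 100 -tiles 4 output.mkv
 *
 * while "-rav1e-params key=value:key=value" forwards arbitrary pairs
 * directly to rav1e_config_parse().
 */
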
static const FFCodecDefault librav1e_defaults[] = {
    { "b", "0" },
    { "g", "0" },
    { "keyint_min", "0" },
    { "qmax", "-1" },
    { "qmin", "-1" },
    { NULL }
};

enum AVPixelFormat librav1e_pix_fmts[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUV420P10,
    AV_PIX_FMT_YUV420P12,
    AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUV422P10,
    AV_PIX_FMT_YUV422P12,
    AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_NONE
};

static const AVClass class = {
    .class_name = "librav1e",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const FFCodec ff_librav1e_encoder = {
    .p.name         = "librav1e",
    CODEC_LONG_NAME("librav1e AV1"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_AV1,
    .init           = librav1e_encode_init,
    FF_CODEC_RECEIVE_PACKET_CB(librav1e_receive_packet),
    .close          = librav1e_encode_close,
    .priv_data_size = sizeof(librav1eContext),
    .p.priv_class   = &class,
    .defaults       = librav1e_defaults,
    .p.pix_fmts     = librav1e_pix_fmts,
    .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS |
                      AV_CODEC_CAP_ENCODER_RECON_FRAME |
                      AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE | AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_NOT_INIT_THREADSAFE |
                      FF_CODEC_CAP_AUTO_THREADS | FF_CODEC_CAP_INIT_CLEANUP,
    .p.wrapper_name = "librav1e",
};