FFmpeg
av1dec.c
/*
 * AV1 video decoder
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "av1dec.h"
#include "bytestream.h"
#include "hwconfig.h"
#include "internal.h"
#include "profiles.h"

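/**
 * Reset the loop filter deltas of a frame to the defaults the specification
 * mandates when no usable primary reference frame exists
 * (setup_past_independence() in the AV1 spec).
 */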
static void setup_past_independence(AV1Frame *f)
{
    f->loop_filter_delta_enabled = 1;

    f->loop_filter_ref_deltas[AV1_REF_FRAME_INTRA]   = 1;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_LAST]    = 0;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_LAST2]   = 0;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_LAST3]   = 0;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_GOLDEN]  = -1;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_BWDREF]  = 0;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF2] = -1;
    f->loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF]  = -1;

    f->loop_filter_mode_deltas[0] = 0;
    f->loop_filter_mode_deltas[1] = 0;
}

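/**
 * Inherit the loop filter deltas from the primary reference frame and apply
 * the per-frame updates signalled in the frame header (load_previous() in the
 * AV1 spec).
 */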
static void load_previous_and_update(AV1DecContext *s)
{
    uint8_t primary_frame, prev_frame;

    primary_frame = s->raw_frame_header->primary_ref_frame;
    prev_frame = s->raw_frame_header->ref_frame_idx[primary_frame];
    memcpy(s->cur_frame.loop_filter_ref_deltas,
           s->ref[prev_frame].loop_filter_ref_deltas,
           AV1_NUM_REF_FRAMES * sizeof(int8_t));
    memcpy(s->cur_frame.loop_filter_mode_deltas,
           s->ref[prev_frame].loop_filter_mode_deltas,
           2 * sizeof(int8_t));

    if (s->raw_frame_header->loop_filter_delta_update) {
        for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
            if (s->raw_frame_header->update_ref_delta[i])
                s->cur_frame.loop_filter_ref_deltas[i] =
                    s->raw_frame_header->loop_filter_ref_deltas[i];
        }

        for (int i = 0; i < 2; i++) {
            if (s->raw_frame_header->update_mode_delta[i])
                s->cur_frame.loop_filter_mode_deltas[i] =
                    s->raw_frame_header->loop_filter_mode_deltas[i];
        }
    }

    s->cur_frame.loop_filter_delta_enabled =
        s->raw_frame_header->loop_filter_delta_enabled;
}

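/*
 * Helpers for the inverse sub-exponential coding used by the global motion
 * parameters; they follow inverse_recenter(), decode_unsigned_subexp_with_ref()
 * and decode_signed_subexp_with_ref() from the AV1 spec.
 */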
static uint32_t inverse_recenter(int r, uint32_t v)
{
    if (v > 2 * r)
        return v;
    else if (v & 1)
        return r - ((v + 1) >> 1);
    else
        return r + (v >> 1);
}

static uint32_t decode_unsigned_subexp_with_ref(uint32_t sub_exp,
                                                int mx, int r)
{
    if ((r << 1) <= mx) {
        return inverse_recenter(r, sub_exp);
    } else {
        return mx - 1 - inverse_recenter(mx - 1 - r, sub_exp);
    }
}

static int32_t decode_signed_subexp_with_ref(uint32_t sub_exp, int low,
                                             int high, int r)
{
    int32_t x = decode_unsigned_subexp_with_ref(sub_exp, high - low, r - low);
    return x + low;
}

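/**
 * Dequantize a single global motion parameter: the raw value parsed by CBS is
 * decoded relative to the corresponding parameter of the primary reference
 * frame and rescaled to AV1_WARPEDMODEL_PREC_BITS precision.
 */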
static void read_global_param(AV1DecContext *s, int type, int ref, int idx)
{
    uint8_t primary_frame, prev_frame;
    uint32_t abs_bits, prec_bits, round, prec_diff, sub, mx;
    int32_t r;

    primary_frame = s->raw_frame_header->primary_ref_frame;
    prev_frame = s->raw_frame_header->ref_frame_idx[primary_frame];
    abs_bits = AV1_GM_ABS_ALPHA_BITS;
    prec_bits = AV1_GM_ALPHA_PREC_BITS;

    if (idx < 2) {
        if (type == AV1_WARP_MODEL_TRANSLATION) {
            abs_bits = AV1_GM_ABS_TRANS_ONLY_BITS -
                       !s->raw_frame_header->allow_high_precision_mv;
            prec_bits = AV1_GM_TRANS_ONLY_PREC_BITS -
                        !s->raw_frame_header->allow_high_precision_mv;
        } else {
            abs_bits = AV1_GM_ABS_TRANS_BITS;
            prec_bits = AV1_GM_TRANS_PREC_BITS;
        }
    }
    round = (idx % 3) == 2 ? (1 << AV1_WARPEDMODEL_PREC_BITS) : 0;
    prec_diff = AV1_WARPEDMODEL_PREC_BITS - prec_bits;
    sub = (idx % 3) == 2 ? (1 << prec_bits) : 0;
    mx = 1 << abs_bits;
    r = (s->ref[prev_frame].gm_params[ref][idx] >> prec_diff) - sub;

    s->cur_frame.gm_params[ref][idx] =
        (decode_signed_subexp_with_ref(s->raw_frame_header->gm_params[ref][idx],
                                       -mx, mx + 1, r) << prec_diff) + round;
}

/**
 * Update the global motion type/params. CBS already implements part of this,
 * so the spec does not need to be implemented in full here.
 */
static void global_motion_params(AV1DecContext *s)
{
    const AV1RawFrameHeader *header = s->raw_frame_header;
    int type, ref;

    for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
        s->cur_frame.gm_type[ref] = AV1_WARP_MODEL_IDENTITY;
        for (int i = 0; i < 6; i++)
            s->cur_frame.gm_params[ref][i] = (i % 3 == 2) ?
                                             1 << AV1_WARPEDMODEL_PREC_BITS : 0;
    }
    if (header->frame_type == AV1_FRAME_KEY ||
        header->frame_type == AV1_FRAME_INTRA_ONLY)
        return;

    for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
        if (header->is_global[ref]) {
            if (header->is_rot_zoom[ref]) {
                type = AV1_WARP_MODEL_ROTZOOM;
            } else {
                type = header->is_translation[ref] ? AV1_WARP_MODEL_TRANSLATION
                                                   : AV1_WARP_MODEL_AFFINE;
            }
        } else {
            type = AV1_WARP_MODEL_IDENTITY;
        }
        s->cur_frame.gm_type[ref] = type;

        if (type >= AV1_WARP_MODEL_ROTZOOM) {
            read_global_param(s, type, ref, 2);
            read_global_param(s, type, ref, 3);
            if (type == AV1_WARP_MODEL_AFFINE) {
                read_global_param(s, type, ref, 4);
                read_global_param(s, type, ref, 5);
            } else {
                s->cur_frame.gm_params[ref][4] = -s->cur_frame.gm_params[ref][3];
                s->cur_frame.gm_params[ref][5] =  s->cur_frame.gm_params[ref][2];
            }
        }
        if (type >= AV1_WARP_MODEL_TRANSLATION) {
            read_global_param(s, type, ref, 0);
            read_global_param(s, type, ref, 1);
        }
    }
}

static int init_tile_data(AV1DecContext *s)
{
    int cur_tile_num =
        s->raw_frame_header->tile_cols * s->raw_frame_header->tile_rows;
    if (s->tile_num < cur_tile_num) {
        int ret = av_reallocp_array(&s->tile_group_info, cur_tile_num,
                                    sizeof(TileGroupInfo));
        if (ret < 0) {
            s->tile_num = 0;
            return ret;
        }
    }
    s->tile_num = cur_tile_num;

    return 0;
}

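/**
 * Record the offset, size and position of each tile in a tile group, so that
 * the tiles can be located later inside the bitstream data passed to the
 * hwaccel.
 */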
static int get_tiles_info(AVCodecContext *avctx, const AV1RawTileGroup *tile_group)
{
    AV1DecContext *s = avctx->priv_data;
    GetByteContext gb;
    uint16_t tile_num, tile_row, tile_col;
    uint32_t size = 0, size_bytes = 0;

    bytestream2_init(&gb, tile_group->tile_data.data,
                     tile_group->tile_data.data_size);
    s->tg_start = tile_group->tg_start;
    s->tg_end = tile_group->tg_end;

    for (tile_num = tile_group->tg_start; tile_num <= tile_group->tg_end; tile_num++) {
        tile_row = tile_num / s->raw_frame_header->tile_cols;
        tile_col = tile_num % s->raw_frame_header->tile_cols;

        if (tile_num == tile_group->tg_end) {
            s->tile_group_info[tile_num].tile_size = bytestream2_get_bytes_left(&gb);
            s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
            s->tile_group_info[tile_num].tile_row = tile_row;
            s->tile_group_info[tile_num].tile_column = tile_col;
            return 0;
        }
        size_bytes = s->raw_frame_header->tile_size_bytes_minus1 + 1;
        if (bytestream2_get_bytes_left(&gb) < size_bytes)
            return AVERROR_INVALIDDATA;
        size = 0;
        for (int i = 0; i < size_bytes; i++)
            size |= bytestream2_get_byteu(&gb) << 8 * i;
        if (bytestream2_get_bytes_left(&gb) <= size)
            return AVERROR_INVALIDDATA;
        size++;

        s->tile_group_info[tile_num].tile_size = size;
        s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
        s->tile_group_info[tile_num].tile_row = tile_row;
        s->tile_group_info[tile_num].tile_column = tile_col;

        bytestream2_skipu(&gb, size);
    }

    return 0;
}

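/**
 * Map the sequence header's profile, bit depth and chroma subsampling to an
 * AVPixelFormat and negotiate it with the hwaccel; there is no native
 * decoding path, so a usable hwaccel is required.
 */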
static int get_pixel_format(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    uint8_t bit_depth;
    int ret;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
#define HWACCEL_MAX (0)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;

    if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
        bit_depth = seq->color_config.twelve_bit ? 12 : 10;
    else if (seq->seq_profile <= 2)
        bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
    else {
        av_log(avctx, AV_LOG_ERROR,
               "Unknown AV1 profile %d.\n", seq->seq_profile);
        return -1;
    }

    if (!seq->color_config.mono_chrome) {
        // 4:4:4 x:0 y:0, 4:2:2 x:1 y:0, 4:2:0 x:1 y:1
        if (seq->color_config.subsampling_x == 0 &&
            seq->color_config.subsampling_y == 0) {
            if (bit_depth == 8)
                pix_fmt = AV_PIX_FMT_YUV444P;
            else if (bit_depth == 10)
                pix_fmt = AV_PIX_FMT_YUV444P10;
            else if (bit_depth == 12)
                pix_fmt = AV_PIX_FMT_YUV444P12;
            else
                av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
        } else if (seq->color_config.subsampling_x == 1 &&
                   seq->color_config.subsampling_y == 0) {
            if (bit_depth == 8)
                pix_fmt = AV_PIX_FMT_YUV422P;
            else if (bit_depth == 10)
                pix_fmt = AV_PIX_FMT_YUV422P10;
            else if (bit_depth == 12)
                pix_fmt = AV_PIX_FMT_YUV422P12;
            else
                av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
        } else if (seq->color_config.subsampling_x == 1 &&
                   seq->color_config.subsampling_y == 1) {
            if (bit_depth == 8)
                pix_fmt = AV_PIX_FMT_YUV420P;
            else if (bit_depth == 10)
                pix_fmt = AV_PIX_FMT_YUV420P10;
            else if (bit_depth == 12)
                pix_fmt = AV_PIX_FMT_YUV420P12;
            else
                av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
        }
    } else {
        if (seq->color_config.subsampling_x == 1 &&
            seq->color_config.subsampling_y == 1)
            pix_fmt = AV_PIX_FMT_YUV440P;
        else
            av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
    }

    av_log(avctx, AV_LOG_DEBUG, "AV1 decode get format: %s.\n",
           av_get_pix_fmt_name(pix_fmt));

    if (pix_fmt == AV_PIX_FMT_NONE)
        return -1;
    s->pix_fmt = pix_fmt;

    *fmtp++ = s->pix_fmt;
    *fmtp = AV_PIX_FMT_NONE;

    ret = ff_thread_get_format(avctx, pix_fmts);
    if (ret < 0)
        return ret;

    /**
     * Check whether the hwaccel was initialized correctly; if not, report the
     * feature as unimplemented. The AV1 decoder currently has no native
     * decoding path, so this check must be removed if one is ever added.
     */
    if (!avctx->hwaccel) {
        av_log(avctx, AV_LOG_ERROR, "Your platform doesn't support"
               " hardware accelerated AV1 decoding.\n");
        return AVERROR(ENOSYS);
    }

    avctx->pix_fmt = ret;

    return 0;
}

static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
{
    ff_thread_release_buffer(avctx, &f->tf);
    av_buffer_unref(&f->hwaccel_priv_buf);
    f->hwaccel_picture_private = NULL;
    f->spatial_id = f->temporal_id = 0;
}

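/**
 * Make dst a new reference to src: the frame buffer and the hwaccel private
 * buffer are re-referenced, and the per-frame state (loop filter deltas,
 * global motion) is copied.
 */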
static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *src)
{
    int ret;

    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    dst->spatial_id = src->spatial_id;
    dst->temporal_id = src->temporal_id;
    dst->loop_filter_delta_enabled = src->loop_filter_delta_enabled;
    memcpy(dst->loop_filter_ref_deltas,
           src->loop_filter_ref_deltas,
           AV1_NUM_REF_FRAMES * sizeof(int8_t));
    memcpy(dst->loop_filter_mode_deltas,
           src->loop_filter_mode_deltas,
           2 * sizeof(int8_t));
    memcpy(dst->gm_type,
           src->gm_type,
           AV1_NUM_REF_FRAMES * sizeof(uint8_t));
    memcpy(dst->gm_params,
           src->gm_params,
           AV1_NUM_REF_FRAMES * 6 * sizeof(int32_t));

    return 0;

fail:
    av1_frame_unref(avctx, dst);
    return AVERROR(ENOMEM);
}

static av_cold int av1_decode_free(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;

    for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
        av1_frame_unref(avctx, &s->ref[i]);
        av_frame_free(&s->ref[i].tf.f);
    }
    av1_frame_unref(avctx, &s->cur_frame);
    av_frame_free(&s->cur_frame.tf.f);

    av_buffer_unref(&s->seq_ref);
    av_buffer_unref(&s->header_ref);
    av_freep(&s->tile_group_info);

    ff_cbs_fragment_free(&s->current_obu);
    ff_cbs_close(&s->cbc);

    return 0;
}

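/**
 * Export the parameters of a parsed sequence header (profile, level, color
 * properties, dimensions, frame rate) to the AVCodecContext.
 */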
static int set_context_with_sequence(AVCodecContext *avctx,
                                     const AV1RawSequenceHeader *seq)
{
    int width = seq->max_frame_width_minus_1 + 1;
    int height = seq->max_frame_height_minus_1 + 1;

    avctx->profile = seq->seq_profile;
    avctx->level = seq->seq_level_idx[0];

    avctx->color_range =
        seq->color_config.color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
    avctx->color_primaries = seq->color_config.color_primaries;
    avctx->colorspace = seq->color_config.matrix_coefficients;
    avctx->color_trc = seq->color_config.transfer_characteristics;

    switch (seq->color_config.chroma_sample_position) {
    case AV1_CSP_VERTICAL:
        avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case AV1_CSP_COLOCATED:
        avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
        break;
    }

    if (avctx->width != width || avctx->height != height) {
        int ret = ff_set_dimensions(avctx, width, height);
        if (ret < 0)
            return ret;
    }
    avctx->sample_aspect_ratio = (AVRational) { 1, 1 };

    if (seq->timing_info.num_units_in_display_tick &&
        seq->timing_info.time_scale) {
        av_reduce(&avctx->framerate.den, &avctx->framerate.num,
                  seq->timing_info.num_units_in_display_tick,
                  seq->timing_info.time_scale,
                  INT_MAX);
        if (seq->timing_info.equal_picture_interval)
            avctx->ticks_per_frame =
                seq->timing_info.num_ticks_per_picture_minus_1 + 1;
    }

    return 0;
}

static int update_context_with_frame_header(AVCodecContext *avctx,
                                            const AV1RawFrameHeader *header)
{
    AVRational aspect_ratio;
    int width = header->frame_width_minus_1 + 1;
    int height = header->frame_height_minus_1 + 1;
    int r_width = header->render_width_minus_1 + 1;
    int r_height = header->render_height_minus_1 + 1;
    int ret;

    if (avctx->width != width || avctx->height != height) {
        ret = ff_set_dimensions(avctx, width, height);
        if (ret < 0)
            return ret;
    }

    av_reduce(&aspect_ratio.num, &aspect_ratio.den,
              (int64_t)height * r_width,
              (int64_t)width * r_height,
              INT_MAX);

    if (av_cmp_q(avctx->sample_aspect_ratio, aspect_ratio)) {
        ret = ff_set_sar(avctx, aspect_ratio);
        if (ret < 0)
            return ret;
    }

    return 0;
}

static av_cold int av1_decode_init(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    AV1RawSequenceHeader *seq;
    int ret;

    s->avctx = avctx;
    s->pix_fmt = AV_PIX_FMT_NONE;

    for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
        s->ref[i].tf.f = av_frame_alloc();
        if (!s->ref[i].tf.f) {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to allocate reference frame buffer %d.\n", i);
            return AVERROR(ENOMEM);
        }
    }

    s->cur_frame.tf.f = av_frame_alloc();
    if (!s->cur_frame.tf.f) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate current frame buffer.\n");
        return AVERROR(ENOMEM);
    }

    ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_AV1, avctx);
    if (ret < 0)
        return ret;

    if (avctx->extradata && avctx->extradata_size) {
        ret = ff_cbs_read(s->cbc, &s->current_obu, avctx->extradata,
                          avctx->extradata_size);
        if (ret < 0) {
            av_log(avctx, AV_LOG_WARNING, "Failed to read extradata.\n");
            return ret;
        }

        seq = &((AV1RawOBU *)s->current_obu.units[0].content)->obu.sequence_header;
        if (!seq) {
            av_log(avctx, AV_LOG_WARNING, "No sequence header available.\n");
            goto end;
        }

        ret = set_context_with_sequence(avctx, seq);
        if (ret < 0) {
            av_log(avctx, AV_LOG_WARNING, "Failed to set decoder context.\n");
            goto end;
        }

    end:
        ff_cbs_fragment_reset(&s->current_obu);
    }

    return ret;
}

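/**
 * Allocate the buffers for the current frame, including the per-frame hwaccel
 * private data, and propagate the frame header's dimensions, key-frame flag
 * and picture type to the AVFrame.
 */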
static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
{
    AV1DecContext *s = avctx->priv_data;
    AV1RawFrameHeader *header = s->raw_frame_header;
    AVFrame *frame;
    int ret;

    ret = update_context_with_frame_header(avctx, header);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to update context with frame header\n");
        return ret;
    }

    if ((ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    frame = f->tf.f;
    frame->key_frame = header->frame_type == AV1_FRAME_KEY;

    switch (header->frame_type) {
    case AV1_FRAME_KEY:
    case AV1_FRAME_INTRA_ONLY:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case AV1_FRAME_INTER:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case AV1_FRAME_SWITCH:
        frame->pict_type = AV_PICTURE_TYPE_SP;
        break;
    }

    if (avctx->hwaccel) {
        const AVHWAccel *hwaccel = avctx->hwaccel;
        if (hwaccel->frame_priv_data_size) {
            f->hwaccel_priv_buf =
                av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!f->hwaccel_priv_buf)
                goto fail;
            f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
        }
    }
    return 0;

fail:
    av1_frame_unref(avctx, f);
    return AVERROR(ENOMEM);
}

static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
                            const AVPacket *pkt, int *got_frame)
{
    AV1DecContext *s = avctx->priv_data;
    const AVFrame *srcframe = s->cur_frame.tf.f;
    int ret;

    ret = av_frame_ref(frame, srcframe);
    if (ret < 0)
        return ret;

    frame->pts = pkt->pts;
    frame->pkt_dts = pkt->dts;
    frame->pkt_size = pkt->size;

    *got_frame = 1;

    return 0;
}

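/**
 * Store a reference to the current frame in every reference slot selected by
 * refresh_frame_flags in the frame header.
 */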
static int update_reference_list(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawFrameHeader *header = s->raw_frame_header;
    int ret;

    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (header->refresh_frame_flags & (1 << i)) {
            if (s->ref[i].tf.f->buf[0])
                av1_frame_unref(avctx, &s->ref[i]);
            if ((ret = av1_frame_ref(avctx, &s->ref[i], &s->cur_frame)) < 0) {
                av_log(avctx, AV_LOG_ERROR,
                       "Failed to update frame %d in reference list\n", i);
                return ret;
            }
        }
    }
    return 0;
}

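/**
 * Prepare decoding of a new frame: allocate the current frame, (re)allocate
 * the tile bookkeeping and derive the loop filter deltas and global motion
 * parameters for this frame.
 */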
static int get_current_frame(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    int ret;

    if (s->cur_frame.tf.f->buf[0])
        av1_frame_unref(avctx, &s->cur_frame);

    ret = av1_frame_alloc(avctx, &s->cur_frame);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate space for current frame.\n");
        return ret;
    }

    ret = init_tile_data(s);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to init tile data.\n");
        return ret;
    }

    if (s->raw_frame_header->primary_ref_frame == AV1_PRIMARY_REF_NONE)
        setup_past_independence(&s->cur_frame);
    else
        load_previous_and_update(s);

    global_motion_params(s);

    return ret;
}

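/**
 * Split the packet into OBUs with CBS and walk them: sequence headers update
 * the decoder context, frame (header) OBUs start a new hwaccel frame, tile
 * groups are forwarded as slices, and the frame is output once the last tile
 * group of the frame has been processed.
 */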
static int av1_decode_frame(AVCodecContext *avctx, void *frame,
                            int *got_frame, AVPacket *pkt)
{
    AV1DecContext *s = avctx->priv_data;
    AV1RawTileGroup *raw_tile_group = NULL;
    int ret;

    ret = ff_cbs_read_packet(s->cbc, &s->current_obu, pkt);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n");
        goto end;
    }
    av_log(avctx, AV_LOG_DEBUG, "Total obu for this frame:%d.\n",
           s->current_obu.nb_units);

    for (int i = 0; i < s->current_obu.nb_units; i++) {
        CodedBitstreamUnit *unit = &s->current_obu.units[i];
        AV1RawOBU *obu = unit->content;
        const AV1RawOBUHeader *header;

        if (!obu)
            continue;

        header = &obu->header;
        av_log(avctx, AV_LOG_DEBUG, "Obu idx:%d, obu type:%d.\n", i, unit->type);

        switch (unit->type) {
        case AV1_OBU_SEQUENCE_HEADER:
            av_buffer_unref(&s->seq_ref);
            s->seq_ref = av_buffer_ref(unit->content_ref);
            if (!s->seq_ref) {
                ret = AVERROR(ENOMEM);
                goto end;
            }

            s->raw_seq = &obu->obu.sequence_header;

            ret = set_context_with_sequence(avctx, s->raw_seq);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to set context.\n");
                s->raw_seq = NULL;
                goto end;
            }

            if (s->pix_fmt == AV_PIX_FMT_NONE) {
                ret = get_pixel_format(avctx);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Failed to get pixel format.\n");
                    s->raw_seq = NULL;
                    goto end;
                }
            }

            if (avctx->hwaccel && avctx->hwaccel->decode_params) {
                ret = avctx->hwaccel->decode_params(avctx, unit->type, unit->data,
                                                    unit->data_size);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "HW accel decode params fail.\n");
                    s->raw_seq = NULL;
                    goto end;
                }
            }
            break;
        case AV1_OBU_REDUNDANT_FRAME_HEADER:
            if (s->raw_frame_header)
                break;
        // fall-through
        case AV1_OBU_FRAME:
        case AV1_OBU_FRAME_HEADER:
            if (!s->raw_seq) {
                av_log(avctx, AV_LOG_ERROR, "Missing Sequence Header.\n");
                ret = AVERROR_INVALIDDATA;
                goto end;
            }

            av_buffer_unref(&s->header_ref);
            s->header_ref = av_buffer_ref(unit->content_ref);
            if (!s->header_ref) {
                ret = AVERROR(ENOMEM);
                goto end;
            }

            if (unit->type == AV1_OBU_FRAME)
                s->raw_frame_header = &obu->obu.frame.header;
            else
                s->raw_frame_header = &obu->obu.frame_header;

            if (s->raw_frame_header->show_existing_frame) {
                if (s->cur_frame.tf.f->buf[0])
                    av1_frame_unref(avctx, &s->cur_frame);

                ret = av1_frame_ref(avctx, &s->cur_frame,
                                    &s->ref[s->raw_frame_header->frame_to_show_map_idx]);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Failed to get reference frame.\n");
                    goto end;
                }

                ret = update_reference_list(avctx);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
                    goto end;
                }

                ret = set_output_frame(avctx, frame, pkt, got_frame);
                if (ret < 0)
                    av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");

                s->raw_frame_header = NULL;

                goto end;
            }

            ret = get_current_frame(avctx);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Get current frame error\n");
                goto end;
            }

            s->cur_frame.spatial_id = header->spatial_id;
            s->cur_frame.temporal_id = header->temporal_id;

            if (avctx->hwaccel) {
                ret = avctx->hwaccel->start_frame(avctx, unit->data,
                                                  unit->data_size);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "HW accel start frame fail.\n");
                    goto end;
                }
            }
            if (unit->type != AV1_OBU_FRAME)
                break;
        // fall-through
        case AV1_OBU_TILE_GROUP:
            if (!s->raw_frame_header) {
                av_log(avctx, AV_LOG_ERROR, "Missing Frame Header.\n");
                ret = AVERROR_INVALIDDATA;
                goto end;
            }

            if (unit->type == AV1_OBU_FRAME)
                raw_tile_group = &obu->obu.frame.tile_group;
            else
                raw_tile_group = &obu->obu.tile_group;

            ret = get_tiles_info(avctx, raw_tile_group);
            if (ret < 0)
                goto end;

            if (avctx->hwaccel) {
                ret = avctx->hwaccel->decode_slice(avctx,
                                                   raw_tile_group->tile_data.data,
                                                   raw_tile_group->tile_data.data_size);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR,
                           "HW accel decode slice fail.\n");
                    goto end;
                }
            }
            break;
        case AV1_OBU_TILE_LIST:
        case AV1_OBU_TEMPORAL_DELIMITER:
        case AV1_OBU_PADDING:
        case AV1_OBU_METADATA:
            break;
        default:
            av_log(avctx, AV_LOG_DEBUG,
                   "Unknown obu type: %d (%"SIZE_SPECIFIER" bytes).\n",
                   unit->type, unit->data_size);
        }

        if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
            if (avctx->hwaccel) {
                ret = avctx->hwaccel->end_frame(avctx);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
                    goto end;
                }
            }

            ret = update_reference_list(avctx);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
                goto end;
            }

            if (s->raw_frame_header->show_frame) {
                ret = set_output_frame(avctx, frame, pkt, got_frame);
                if (ret < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
                    goto end;
                }
            }
            raw_tile_group = NULL;
            s->raw_frame_header = NULL;
        }
    }

end:
    ff_cbs_fragment_reset(&s->current_obu);
    if (ret < 0)
        s->raw_frame_header = NULL;
    return ret;
}

static void av1_decode_flush(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;

    for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++)
        av1_frame_unref(avctx, &s->ref[i]);

    av1_frame_unref(avctx, &s->cur_frame);
    s->raw_frame_header = NULL;
    s->raw_seq = NULL;

    ff_cbs_flush(s->cbc);
}

AVCodec ff_av1_decoder = {
    .name                  = "av1",
    .long_name             = NULL_IF_CONFIG_SMALL("Alliance for Open Media AV1"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_AV1,
    .priv_data_size        = sizeof(AV1DecContext),
    .init                  = av1_decode_init,
    .close                 = av1_decode_free,
    .decode                = av1_decode_frame,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE |
                             FF_CODEC_CAP_INIT_CLEANUP |
                             FF_CODEC_CAP_SETS_PKT_DTS,
    .flush                 = av1_decode_flush,
    .profiles              = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
    .hw_configs            = (const AVCodecHWConfigInternal * []) {
        NULL
    },
};