vaapi_av1.c
/*
 * AV1 HW decode acceleration through VA API
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/frame.h"
#include "hwaccel_internal.h"
#include "vaapi_decode.h"
#include "internal.h"
#include "av1dec.h"
#include "thread.h"

typedef struct VAAPIAV1FrameRef {
    AVFrame *frame;
    int valid;
} VAAPIAV1FrameRef;

typedef struct VAAPIAV1DecContext {
    VAAPIDecodeContext base;

    /**
     * In the film grain case VAAPI produces two outputs for each frame:
     * current_frame does not have film grain applied and is used as the
     * reference for subsequent frames, so the reference list kept here is
     * maintained without film grain, while current_display_picture has
     * film grain applied and is the picture pushed downstream.
     */
    VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
    AVFrame *tmp_frame;
} VAAPIAV1DecContext;

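/* Map an AV1Frame to the VASurfaceID backing it, or VA_INVALID_SURFACE when
 * no frame is available. */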
static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
{
    if (vf)
        return ff_vaapi_get_surface_id(vf->f);
    else
        return VA_INVALID_SURFACE;
}

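/* Translate the sequence header into the VA-API bit_depth_idx:
 * 0 for 8-bit, 1 for 10-bit, 2 for 12-bit content. */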
static int8_t vaapi_av1_get_bit_depth_idx(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    int8_t bit_depth = 8;

    if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
        bit_depth = seq->color_config.twelve_bit ? 12 : 10;
    else if (seq->seq_profile <= 2)
        bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
    else {
        av_log(avctx, AV_LOG_ERROR,
               "Couldn't get bit depth from profile:%d.\n", seq->seq_profile);
        return -1;
    }
    return bit_depth == 8 ? 0 : bit_depth == 10 ? 1 : 2;
}

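/* Allocate the temporary output frame and the reference-table frames before
 * running the common VAAPI decoder initialisation. */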
static int vaapi_av1_decode_init(AVCodecContext *avctx)
{
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    ctx->tmp_frame = av_frame_alloc();
    if (!ctx->tmp_frame) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
        ctx->ref_tab[i].frame = av_frame_alloc();
        if (!ctx->ref_tab[i].frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to allocate reference table frame %d.\n", i);
            return AVERROR(ENOMEM);
        }
        ctx->ref_tab[i].valid = 0;
    }

    return ff_vaapi_decode_init(avctx);
}

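/* Drop any buffers still held in the temporary frame and the reference
 * table, then run the common VAAPI decoder teardown. */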
static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
{
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    if (ctx->tmp_frame->buf[0])
        ff_thread_release_buffer(avctx, ctx->tmp_frame);
    av_frame_free(&ctx->tmp_frame);

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
        if (ctx->ref_tab[i].frame->buf[0])
            ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
        av_frame_free(&ctx->ref_tab[i].frame);
    }

    return ff_vaapi_decode_uninit(avctx);
}

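/* Build and submit the picture parameter buffer for the current frame.
 * When film grain is applied, the reconstruction is decoded into a separate
 * temporary surface so the grain-free picture remains usable as a reference. */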
static int vaapi_av1_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    const AV1RawFrameHeader *frame_header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
    VADecPictureParameterBufferAV1 pic_param;
    int8_t bit_depth_idx;
    int err = 0;
    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
    uint8_t remap_lr_type[4] = {AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ};
    uint8_t segmentation_feature_signed[AV1_SEG_LVL_MAX] = {1, 1, 1, 1, 1, 0, 0, 0};
    uint8_t segmentation_feature_max[AV1_SEG_LVL_MAX] = {255, AV1_MAX_LOOP_FILTER,
        AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER, 7, 0, 0};

    bit_depth_idx = vaapi_av1_get_bit_depth_idx(avctx);
    if (bit_depth_idx < 0)
        goto fail;

    if (apply_grain) {
        if (ctx->tmp_frame->buf[0])
            ff_thread_release_buffer(avctx, ctx->tmp_frame);
        err = ff_thread_get_buffer(avctx, ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
        if (err < 0)
            goto fail;
        pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame);
    } else {
        pic->output_surface = vaapi_av1_surface_id(&s->cur_frame);
    }

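    /* Translate the sequence header, frame header and film grain parameters
     * into the VA-API picture parameter buffer. */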
    memset(&pic_param, 0, sizeof(VADecPictureParameterBufferAV1));
    pic_param = (VADecPictureParameterBufferAV1) {
        .profile = seq->seq_profile,
        .order_hint_bits_minus_1 = seq->order_hint_bits_minus_1,
        .bit_depth_idx = bit_depth_idx,
        .matrix_coefficients = seq->color_config.matrix_coefficients,
        .current_frame = pic->output_surface,
        .current_display_picture = vaapi_av1_surface_id(&s->cur_frame),
        .frame_width_minus1 = frame_header->frame_width_minus_1,
        .frame_height_minus1 = frame_header->frame_height_minus_1,
        .primary_ref_frame = frame_header->primary_ref_frame,
        .order_hint = frame_header->order_hint,
        .tile_cols = frame_header->tile_cols,
        .tile_rows = frame_header->tile_rows,
        .context_update_tile_id = frame_header->context_update_tile_id,
        .superres_scale_denominator = frame_header->use_superres ?
                                      frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN :
                                      AV1_SUPERRES_NUM,
        .interp_filter = frame_header->interpolation_filter,
        .filter_level[0] = frame_header->loop_filter_level[0],
        .filter_level[1] = frame_header->loop_filter_level[1],
        .filter_level_u = frame_header->loop_filter_level[2],
        .filter_level_v = frame_header->loop_filter_level[3],
        .base_qindex = frame_header->base_q_idx,
        .y_dc_delta_q = frame_header->delta_q_y_dc,
        .u_dc_delta_q = frame_header->delta_q_u_dc,
        .u_ac_delta_q = frame_header->delta_q_u_ac,
        .v_dc_delta_q = frame_header->delta_q_v_dc,
        .v_ac_delta_q = frame_header->delta_q_v_ac,
        .cdef_damping_minus_3 = frame_header->cdef_damping_minus_3,
        .cdef_bits = frame_header->cdef_bits,
        .seq_info_fields.fields = {
            .still_picture = seq->still_picture,
            .use_128x128_superblock = seq->use_128x128_superblock,
            .enable_filter_intra = seq->enable_filter_intra,
            .enable_intra_edge_filter = seq->enable_intra_edge_filter,
            .enable_interintra_compound = seq->enable_interintra_compound,
            .enable_masked_compound = seq->enable_masked_compound,
            .enable_dual_filter = seq->enable_dual_filter,
            .enable_order_hint = seq->enable_order_hint,
            .enable_jnt_comp = seq->enable_jnt_comp,
            .enable_cdef = seq->enable_cdef,
            .mono_chrome = seq->color_config.mono_chrome,
            .color_range = seq->color_config.color_range,
            .subsampling_x = seq->color_config.subsampling_x,
            .subsampling_y = seq->color_config.subsampling_y,
            .chroma_sample_position = seq->color_config.chroma_sample_position,
            .film_grain_params_present = seq->film_grain_params_present &&
                                         !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN),
        },
        .seg_info.segment_info_fields.bits = {
            .enabled = frame_header->segmentation_enabled,
            .update_map = frame_header->segmentation_update_map,
            .temporal_update = frame_header->segmentation_temporal_update,
            .update_data = frame_header->segmentation_update_data,
        },
        .film_grain_info = {
            .film_grain_info_fields.bits = {
                .apply_grain = apply_grain,
                .chroma_scaling_from_luma = film_grain->chroma_scaling_from_luma,
                .grain_scaling_minus_8 = film_grain->grain_scaling_minus_8,
                .ar_coeff_lag = film_grain->ar_coeff_lag,
                .ar_coeff_shift_minus_6 = film_grain->ar_coeff_shift_minus_6,
                .grain_scale_shift = film_grain->grain_scale_shift,
                .overlap_flag = film_grain->overlap_flag,
                .clip_to_restricted_range = film_grain->clip_to_restricted_range,
            },
            .grain_seed = film_grain->grain_seed,
            .num_y_points = film_grain->num_y_points,
            .num_cb_points = film_grain->num_cb_points,
            .num_cr_points = film_grain->num_cr_points,
            .cb_mult = film_grain->cb_mult,
            .cb_luma_mult = film_grain->cb_luma_mult,
            .cb_offset = film_grain->cb_offset,
            .cr_mult = film_grain->cr_mult,
            .cr_luma_mult = film_grain->cr_luma_mult,
            .cr_offset = film_grain->cr_offset,
        },
        .pic_info_fields.bits = {
            .frame_type = frame_header->frame_type,
            .show_frame = frame_header->show_frame,
            .showable_frame = frame_header->showable_frame,
            .error_resilient_mode = frame_header->error_resilient_mode,
            .disable_cdf_update = frame_header->disable_cdf_update,
            .allow_screen_content_tools = frame_header->allow_screen_content_tools,
            .force_integer_mv = frame_header->force_integer_mv,
            .allow_intrabc = frame_header->allow_intrabc,
            .use_superres = frame_header->use_superres,
            .allow_high_precision_mv = frame_header->allow_high_precision_mv,
            .is_motion_mode_switchable = frame_header->is_motion_mode_switchable,
            .use_ref_frame_mvs = frame_header->use_ref_frame_mvs,
            .disable_frame_end_update_cdf = frame_header->disable_frame_end_update_cdf,
            .uniform_tile_spacing_flag = frame_header->uniform_tile_spacing_flag,
            .allow_warped_motion = frame_header->allow_warped_motion,
        },
        .loop_filter_info_fields.bits = {
            .sharpness_level = frame_header->loop_filter_sharpness,
            .mode_ref_delta_enabled = frame_header->loop_filter_delta_enabled,
            .mode_ref_delta_update = frame_header->loop_filter_delta_update,
        },
        .mode_control_fields.bits = {
            .delta_q_present_flag = frame_header->delta_q_present,
            .log2_delta_q_res = frame_header->delta_q_res,
            .delta_lf_present_flag = frame_header->delta_lf_present,
            .log2_delta_lf_res = frame_header->delta_lf_res,
            .delta_lf_multi = frame_header->delta_lf_multi,
            .tx_mode = frame_header->tx_mode,
            .reference_select = frame_header->reference_select,
            .reduced_tx_set_used = frame_header->reduced_tx_set,
            .skip_mode_present = frame_header->skip_mode_present,
        },
        .loop_restoration_fields.bits = {
            .yframe_restoration_type = remap_lr_type[frame_header->lr_type[0]],
            .cbframe_restoration_type = remap_lr_type[frame_header->lr_type[1]],
            .crframe_restoration_type = remap_lr_type[frame_header->lr_type[2]],
            .lr_unit_shift = frame_header->lr_unit_shift,
            .lr_uv_shift = frame_header->lr_uv_shift,
        },
        .qmatrix_fields.bits = {
            .using_qmatrix = frame_header->using_qmatrix,
            .qm_y = frame_header->qm_y,
            .qm_u = frame_header->qm_u,
            .qm_v = frame_header->qm_v,
        }
    };

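    /* Shown key frames reset the reference state, so every slot is marked
     * invalid; otherwise each slot uses the locally kept grain-free reference
     * when one is valid, falling back to the decoder's own reference frame. */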
    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (pic_param.pic_info_fields.bits.frame_type == AV1_FRAME_KEY && frame_header->show_frame)
            pic_param.ref_frame_map[i] = VA_INVALID_ID;
        else
            pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
                                         ff_vaapi_get_surface_id(ctx->ref_tab[i].frame) :
                                         vaapi_av1_surface_id(&s->ref[i]);
    }
    for (int i = 0; i < AV1_REFS_PER_FRAME; i++) {
        pic_param.ref_frame_idx[i] = frame_header->ref_frame_idx[i];
    }
    for (int i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
        pic_param.ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
    }
    for (int i = 0; i < 2; i++) {
        pic_param.mode_deltas[i] = frame_header->loop_filter_mode_deltas[i];
    }
    for (int i = 0; i < (1 << frame_header->cdef_bits); i++) {
        pic_param.cdef_y_strengths[i] =
            (frame_header->cdef_y_pri_strength[i] << 2) +
             frame_header->cdef_y_sec_strength[i];
        pic_param.cdef_uv_strengths[i] =
            (frame_header->cdef_uv_pri_strength[i] << 2) +
             frame_header->cdef_uv_sec_strength[i];
    }
    for (int i = 0; i < frame_header->tile_cols; i++) {
        pic_param.width_in_sbs_minus_1[i] =
            frame_header->width_in_sbs_minus_1[i];
    }
    for (int i = 0; i < frame_header->tile_rows; i++) {
        pic_param.height_in_sbs_minus_1[i] =
            frame_header->height_in_sbs_minus_1[i];
    }
    for (int i = AV1_REF_FRAME_LAST; i <= AV1_REF_FRAME_ALTREF; i++) {
        pic_param.wm[i - 1].invalid = s->cur_frame.gm_invalid[i];
        pic_param.wm[i - 1].wmtype = s->cur_frame.gm_type[i];
        for (int j = 0; j < 6; j++)
            pic_param.wm[i - 1].wmmat[j] = s->cur_frame.gm_params[i][j];
    }
    for (int i = 0; i < AV1_MAX_SEGMENTS; i++) {
        for (int j = 0; j < AV1_SEG_LVL_MAX; j++) {
            pic_param.seg_info.feature_mask[i] |= (frame_header->feature_enabled[i][j] << j);
            if (segmentation_feature_signed[j])
                pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j],
                                                                -segmentation_feature_max[j], segmentation_feature_max[j]);
            else
                pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j],
                                                                0, segmentation_feature_max[j]);
        }
    }
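    /* The grain scaling points and autoregression coefficients only need to
     * be passed when the driver is expected to synthesize film grain. */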
    if (apply_grain) {
        for (int i = 0; i < film_grain->num_y_points; i++) {
            pic_param.film_grain_info.point_y_value[i] =
                film_grain->point_y_value[i];
            pic_param.film_grain_info.point_y_scaling[i] =
                film_grain->point_y_scaling[i];
        }
        for (int i = 0; i < film_grain->num_cb_points; i++) {
            pic_param.film_grain_info.point_cb_value[i] =
                film_grain->point_cb_value[i];
            pic_param.film_grain_info.point_cb_scaling[i] =
                film_grain->point_cb_scaling[i];
        }
        for (int i = 0; i < film_grain->num_cr_points; i++) {
            pic_param.film_grain_info.point_cr_value[i] =
                film_grain->point_cr_value[i];
            pic_param.film_grain_info.point_cr_scaling[i] =
                film_grain->point_cr_scaling[i];
        }
        for (int i = 0; i < 24; i++) {
            pic_param.film_grain_info.ar_coeffs_y[i] =
                film_grain->ar_coeffs_y_plus_128[i] - 128;
        }
        for (int i = 0; i < 25; i++) {
            pic_param.film_grain_info.ar_coeffs_cb[i] =
                film_grain->ar_coeffs_cb_plus_128[i] - 128;
            pic_param.film_grain_info.ar_coeffs_cr[i] =
                film_grain->ar_coeffs_cr_plus_128[i] - 128;
        }
    }
    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pic_param, sizeof(pic_param));
    if (err < 0)
        goto fail;

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}

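/* Issue the decode, then refresh the local reference table: slots selected by
 * refresh_frame_flags hold a reference to the grain-free output when film
 * grain is applied, and are otherwise invalidated so the decoder's reference
 * frames are used directly. */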
static int vaapi_av1_end_frame(AVCodecContext *avctx)
{
    const AV1DecContext *s = avctx->priv_data;
    const AV1RawFrameHeader *header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
    int ret;
    ret = ff_vaapi_decode_issue(avctx, pic);
    if (ret < 0)
        return ret;

    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (header->refresh_frame_flags & (1 << i)) {
            if (ctx->ref_tab[i].frame->buf[0])
                ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);

            if (apply_grain) {
                ret = av_frame_ref(ctx->ref_tab[i].frame, ctx->tmp_frame);
                if (ret < 0)
                    return ret;
                ctx->ref_tab[i].valid = 1;
            } else {
                ctx->ref_tab[i].valid = 0;
            }
        }
    }

    return 0;
}

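/* Submit one slice parameter buffer per tile in the current tile group,
 * each pointing at that tile's offset and size within the packet buffer. */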
static int vaapi_av1_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const AV1DecContext *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VASliceParameterBufferAV1 slice_param;
    int err = 0;

    for (int i = s->tg_start; i <= s->tg_end; i++) {
        memset(&slice_param, 0, sizeof(VASliceParameterBufferAV1));

        slice_param = (VASliceParameterBufferAV1) {
            .slice_data_size = s->tile_group_info[i].tile_size,
            .slice_data_offset = s->tile_group_info[i].tile_offset,
            .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
            .tile_row = s->tile_group_info[i].tile_row,
            .tile_column = s->tile_group_info[i].tile_column,
            .tg_start = s->tg_start,
            .tg_end = s->tg_end,
        };

        err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &slice_param,
                                                sizeof(VASliceParameterBufferAV1),
                                                buffer,
                                                size);
        if (err) {
            ff_vaapi_decode_cancel(avctx, pic);
            return err;
        }
    }

    return 0;
}

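/* Hardware acceleration entry for AV1 decoding through VA-API. */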
const FFHWAccel ff_av1_vaapi_hwaccel = {
    .p.name = "av1_vaapi",
    .p.type = AVMEDIA_TYPE_VIDEO,
    .p.id = AV_CODEC_ID_AV1,
    .p.pix_fmt = AV_PIX_FMT_VAAPI,
    .start_frame = vaapi_av1_start_frame,
    .end_frame = vaapi_av1_end_frame,
    .decode_slice = vaapi_av1_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init = vaapi_av1_decode_init,
    .uninit = vaapi_av1_decode_uninit,
    .frame_params = ff_vaapi_common_frame_params,
    .priv_data_size = sizeof(VAAPIAV1DecContext),
    .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
};