vaapi_av1.c
/*
 * AV1 HW decode acceleration through VA API
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/pixdesc.h"
#include "hwconfig.h"
#include "vaapi_decode.h"
#include "internal.h"
#include "av1dec.h"
typedef struct VAAPIAV1FrameRef {
    ThreadFrame frame;
    int valid;
} VAAPIAV1FrameRef;

typedef struct VAAPIAV1DecContext {
    VAAPIDecodeContext base;

    /**
     * For the film grain case, VAAPI generates two outputs for each frame:
     * current_frame does not have film grain applied and is used as the
     * reference for later frames, so the reference list kept here is
     * maintained without film grain; current_display_picture has film grain
     * applied and is the picture pushed downstream.
     */
    VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
    ThreadFrame tmp_frame;
} VAAPIAV1DecContext;

static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
{
    if (vf)
        return ff_vaapi_get_surface_id(vf->tf.f);
    else
        return VA_INVALID_SURFACE;
}

static int8_t vaapi_av1_get_bit_depth_idx(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    int8_t bit_depth = 8;

    if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
        bit_depth = seq->color_config.twelve_bit ? 12 : 10;
    else if (seq->seq_profile <= 2)
        bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
    else {
        av_log(avctx, AV_LOG_ERROR,
               "Couldn't get bit depth from profile:%d.\n", seq->seq_profile);
        return -1;
    }
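    /* VA-API takes a bit depth index rather than the bit depth itself:
     * 0 = 8-bit, 1 = 10-bit, 2 = 12-bit. */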
    return bit_depth == 8 ? 0 : bit_depth == 10 ? 1 : 2;
}

static int vaapi_av1_decode_init(AVCodecContext *avctx)
{
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    ctx->tmp_frame.f = av_frame_alloc();
    if (!ctx->tmp_frame.f) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
        ctx->ref_tab[i].frame.f = av_frame_alloc();
        if (!ctx->ref_tab[i].frame.f) {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to allocate reference table frame %d.\n", i);
            return AVERROR(ENOMEM);
        }
        ctx->ref_tab[i].valid = 0;
    }

    return ff_vaapi_decode_init(avctx);
}

static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
{
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    if (ctx->tmp_frame.f->buf[0])
        ff_thread_release_buffer(avctx, &ctx->tmp_frame);
    av_frame_free(&ctx->tmp_frame.f);

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
        if (ctx->ref_tab[i].frame.f->buf[0])
            ff_thread_release_buffer(avctx, &ctx->ref_tab[i].frame);
        av_frame_free(&ctx->ref_tab[i].frame.f);
    }

    return ff_vaapi_decode_uninit(avctx);
}

static int vaapi_av1_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    const AV1RawFrameHeader *frame_header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
    VADecPictureParameterBufferAV1 pic_param;
    int8_t bit_depth_idx;
    int err = 0;
    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
    uint8_t remap_lr_type[4] = {AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE,
                                AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ};
    uint8_t segmentation_feature_signed[AV1_SEG_LVL_MAX] = {1, 1, 1, 1, 1, 0, 0, 0};
    uint8_t segmentation_feature_max[AV1_SEG_LVL_MAX] = {255, AV1_MAX_LOOP_FILTER,
                                                         AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER, 7, 0, 0, 0};

    bit_depth_idx = vaapi_av1_get_bit_depth_idx(avctx);
    if (bit_depth_idx < 0)
        goto fail;

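    /* When the driver applies film grain, decode into a separate temporary
     * surface: that surface stays grain-free (current_frame) and is what
     * later frames reference, while the surface of s->cur_frame receives the
     * grained picture (current_display_picture) that goes downstream.
     * Without film grain, both roles are served by the same surface. */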
    if (apply_grain) {
        if (ctx->tmp_frame.f->buf[0])
            ff_thread_release_buffer(avctx, &ctx->tmp_frame);
        err = ff_thread_get_buffer(avctx, &ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
        if (err < 0)
            goto fail;
        pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame.f);
    } else {
        pic->output_surface = vaapi_av1_surface_id(&s->cur_frame);
    }

    memset(&pic_param, 0, sizeof(VADecPictureParameterBufferAV1));
    pic_param = (VADecPictureParameterBufferAV1) {
        .profile                    = seq->seq_profile,
        .order_hint_bits_minus_1    = seq->order_hint_bits_minus_1,
        .bit_depth_idx              = bit_depth_idx,
        .matrix_coefficients        = seq->color_config.matrix_coefficients,
        .current_frame              = pic->output_surface,
        .current_display_picture    = vaapi_av1_surface_id(&s->cur_frame),
        .frame_width_minus1         = frame_header->frame_width_minus_1,
        .frame_height_minus1        = frame_header->frame_height_minus_1,
        .primary_ref_frame          = frame_header->primary_ref_frame,
        .order_hint                 = frame_header->order_hint,
        .tile_cols                  = frame_header->tile_cols,
        .tile_rows                  = frame_header->tile_rows,
        .context_update_tile_id     = frame_header->context_update_tile_id,
        .superres_scale_denominator = frame_header->use_superres ?
                                      frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN :
                                      AV1_SUPERRES_NUM,
        .interp_filter              = frame_header->interpolation_filter,
        .filter_level[0]            = frame_header->loop_filter_level[0],
        .filter_level[1]            = frame_header->loop_filter_level[1],
        .filter_level_u             = frame_header->loop_filter_level[2],
        .filter_level_v             = frame_header->loop_filter_level[3],
        .base_qindex                = frame_header->base_q_idx,
        .y_dc_delta_q               = frame_header->delta_q_y_dc,
        .u_dc_delta_q               = frame_header->delta_q_u_dc,
        .u_ac_delta_q               = frame_header->delta_q_u_ac,
        .v_dc_delta_q               = frame_header->delta_q_v_dc,
        .v_ac_delta_q               = frame_header->delta_q_v_ac,
        .cdef_damping_minus_3       = frame_header->cdef_damping_minus_3,
        .cdef_bits                  = frame_header->cdef_bits,
        .seq_info_fields.fields = {
            .still_picture              = seq->still_picture,
            .use_128x128_superblock     = seq->use_128x128_superblock,
            .enable_filter_intra        = seq->enable_filter_intra,
            .enable_intra_edge_filter   = seq->enable_intra_edge_filter,
            .enable_interintra_compound = seq->enable_interintra_compound,
            .enable_masked_compound     = seq->enable_masked_compound,
            .enable_dual_filter         = seq->enable_dual_filter,
            .enable_order_hint          = seq->enable_order_hint,
            .enable_jnt_comp            = seq->enable_jnt_comp,
            .enable_cdef                = seq->enable_cdef,
            .mono_chrome                = seq->color_config.mono_chrome,
            .color_range                = seq->color_config.color_range,
            .subsampling_x              = seq->color_config.subsampling_x,
            .subsampling_y              = seq->color_config.subsampling_y,
            .chroma_sample_position     = seq->color_config.chroma_sample_position,
            .film_grain_params_present  = seq->film_grain_params_present &&
                                          !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN),
        },
        .seg_info.segment_info_fields.bits = {
            .enabled         = frame_header->segmentation_enabled,
            .update_map      = frame_header->segmentation_update_map,
            .temporal_update = frame_header->segmentation_temporal_update,
            .update_data     = frame_header->segmentation_update_data,
        },
        .film_grain_info = {
            .film_grain_info_fields.bits = {
                .apply_grain              = apply_grain,
                .chroma_scaling_from_luma = film_grain->chroma_scaling_from_luma,
                .grain_scaling_minus_8    = film_grain->grain_scaling_minus_8,
                .ar_coeff_lag             = film_grain->ar_coeff_lag,
                .ar_coeff_shift_minus_6   = film_grain->ar_coeff_shift_minus_6,
                .grain_scale_shift        = film_grain->grain_scale_shift,
                .overlap_flag             = film_grain->overlap_flag,
                .clip_to_restricted_range = film_grain->clip_to_restricted_range,
            },
            .grain_seed    = film_grain->grain_seed,
            .num_y_points  = film_grain->num_y_points,
            .num_cb_points = film_grain->num_cb_points,
            .num_cr_points = film_grain->num_cr_points,
            .cb_mult       = film_grain->cb_mult,
            .cb_luma_mult  = film_grain->cb_luma_mult,
            .cb_offset     = film_grain->cb_offset,
            .cr_mult       = film_grain->cr_mult,
            .cr_luma_mult  = film_grain->cr_luma_mult,
            .cr_offset     = film_grain->cr_offset,
        },
        .pic_info_fields.bits = {
            .frame_type                   = frame_header->frame_type,
            .show_frame                   = frame_header->show_frame,
            .showable_frame               = frame_header->showable_frame,
            .error_resilient_mode         = frame_header->error_resilient_mode,
            .disable_cdf_update           = frame_header->disable_cdf_update,
            .allow_screen_content_tools   = frame_header->allow_screen_content_tools,
            .force_integer_mv             = frame_header->force_integer_mv,
            .allow_intrabc                = frame_header->allow_intrabc,
            .use_superres                 = frame_header->use_superres,
            .allow_high_precision_mv      = frame_header->allow_high_precision_mv,
            .is_motion_mode_switchable    = frame_header->is_motion_mode_switchable,
            .use_ref_frame_mvs            = frame_header->use_ref_frame_mvs,
            .disable_frame_end_update_cdf = frame_header->disable_frame_end_update_cdf,
            .uniform_tile_spacing_flag    = frame_header->uniform_tile_spacing_flag,
            .allow_warped_motion          = frame_header->allow_warped_motion,
        },
        .loop_filter_info_fields.bits = {
            .sharpness_level        = frame_header->loop_filter_sharpness,
            .mode_ref_delta_enabled = frame_header->loop_filter_delta_enabled,
            .mode_ref_delta_update  = frame_header->loop_filter_delta_update,
        },
        .mode_control_fields.bits = {
            .delta_q_present_flag  = frame_header->delta_q_present,
            .log2_delta_q_res      = frame_header->delta_q_res,
            .delta_lf_present_flag = frame_header->delta_lf_present,
            .log2_delta_lf_res     = frame_header->delta_lf_res,
            .delta_lf_multi        = frame_header->delta_lf_multi,
            .tx_mode               = frame_header->tx_mode,
            .reference_select      = frame_header->reference_select,
            .reduced_tx_set_used   = frame_header->reduced_tx_set,
            .skip_mode_present     = frame_header->skip_mode_present,
        },
        .loop_restoration_fields.bits = {
            .yframe_restoration_type  = remap_lr_type[frame_header->lr_type[0]],
            .cbframe_restoration_type = remap_lr_type[frame_header->lr_type[1]],
            .crframe_restoration_type = remap_lr_type[frame_header->lr_type[2]],
            .lr_unit_shift            = frame_header->lr_unit_shift,
            .lr_uv_shift              = frame_header->lr_uv_shift,
        },
        .qmatrix_fields.bits = {
            .using_qmatrix = frame_header->using_qmatrix,
            .qm_y          = frame_header->qm_y,
            .qm_u          = frame_header->qm_u,
            .qm_v          = frame_header->qm_v,
        }
    };

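    /* Key frames use no references; otherwise prefer the grain-free copies
     * kept in ref_tab and fall back to the decoder's own reference frames. */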
    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (pic_param.pic_info_fields.bits.frame_type == AV1_FRAME_KEY)
            pic_param.ref_frame_map[i] = VA_INVALID_ID;
        else
            pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
                                         ff_vaapi_get_surface_id(ctx->ref_tab[i].frame.f) :
                                         vaapi_av1_surface_id(&s->ref[i]);
    }
    for (int i = 0; i < AV1_REFS_PER_FRAME; i++) {
        pic_param.ref_frame_idx[i] = frame_header->ref_frame_idx[i];
    }
    for (int i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
        pic_param.ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
    }
    for (int i = 0; i < 2; i++) {
        pic_param.mode_deltas[i] = frame_header->loop_filter_mode_deltas[i];
    }
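    /* CDEF strengths are packed with the primary strength in the upper bits
     * and the 2-bit secondary strength in the two low bits. */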
    for (int i = 0; i < (1 << frame_header->cdef_bits); i++) {
        pic_param.cdef_y_strengths[i] =
            (frame_header->cdef_y_pri_strength[i] << 2) +
             frame_header->cdef_y_sec_strength[i];
        pic_param.cdef_uv_strengths[i] =
            (frame_header->cdef_uv_pri_strength[i] << 2) +
             frame_header->cdef_uv_sec_strength[i];
    }
    for (int i = 0; i < frame_header->tile_cols; i++) {
        pic_param.width_in_sbs_minus_1[i] =
            frame_header->width_in_sbs_minus_1[i];
    }
    for (int i = 0; i < frame_header->tile_rows; i++) {
        pic_param.height_in_sbs_minus_1[i] =
            frame_header->height_in_sbs_minus_1[i];
    }
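    /* Global motion parameters: wm[] has no entry for INTRA_FRAME, hence
     * the i - 1 when copying LAST..ALTREF. */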
    for (int i = AV1_REF_FRAME_LAST; i <= AV1_REF_FRAME_ALTREF; i++) {
        pic_param.wm[i - 1].invalid = s->cur_frame.gm_invalid[i];
        pic_param.wm[i - 1].wmtype  = s->cur_frame.gm_type[i];
        for (int j = 0; j < 6; j++)
            pic_param.wm[i - 1].wmmat[j] = s->cur_frame.gm_params[i][j];
    }
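    /* Clamp each segmentation feature value to its legal range; the
     * quantizer and loop-filter deltas are signed, the rest unsigned. */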
    for (int i = 0; i < AV1_MAX_SEGMENTS; i++) {
        for (int j = 0; j < AV1_SEG_LVL_MAX; j++) {
            pic_param.seg_info.feature_mask[i] |= (frame_header->feature_enabled[i][j] << j);
            if (segmentation_feature_signed[j])
                pic_param.seg_info.feature_data[i][j] =
                    av_clip(frame_header->feature_value[i][j],
                            -segmentation_feature_max[j], segmentation_feature_max[j]);
            else
                pic_param.seg_info.feature_data[i][j] =
                    av_clip(frame_header->feature_value[i][j],
                            0, segmentation_feature_max[j]);
        }
    }
    if (apply_grain) {
        for (int i = 0; i < film_grain->num_y_points; i++) {
            pic_param.film_grain_info.point_y_value[i] =
                film_grain->point_y_value[i];
            pic_param.film_grain_info.point_y_scaling[i] =
                film_grain->point_y_scaling[i];
        }
        for (int i = 0; i < film_grain->num_cb_points; i++) {
            pic_param.film_grain_info.point_cb_value[i] =
                film_grain->point_cb_value[i];
            pic_param.film_grain_info.point_cb_scaling[i] =
                film_grain->point_cb_scaling[i];
        }
        for (int i = 0; i < film_grain->num_cr_points; i++) {
            pic_param.film_grain_info.point_cr_value[i] =
                film_grain->point_cr_value[i];
            pic_param.film_grain_info.point_cr_scaling[i] =
                film_grain->point_cr_scaling[i];
        }
        for (int i = 0; i < 24; i++) {
            pic_param.film_grain_info.ar_coeffs_y[i] =
                film_grain->ar_coeffs_y_plus_128[i] - 128;
        }
        for (int i = 0; i < 25; i++) {
            pic_param.film_grain_info.ar_coeffs_cb[i] =
                film_grain->ar_coeffs_cb_plus_128[i] - 128;
            pic_param.film_grain_info.ar_coeffs_cr[i] =
                film_grain->ar_coeffs_cr_plus_128[i] - 128;
        }
    }
    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pic_param, sizeof(pic_param));
    if (err < 0)
        goto fail;

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}

static int vaapi_av1_end_frame(AVCodecContext *avctx)
{
    const AV1DecContext *s = avctx->priv_data;
    const AV1RawFrameHeader *header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
    int ret;
    ret = ff_vaapi_decode_issue(avctx, pic);
    if (ret < 0)
        return ret;

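    /* Update the reference table for every slot selected by
     * refresh_frame_flags: when the driver applied film grain, keep a
     * reference to the grain-free tmp_frame; otherwise mark the slot invalid
     * so the decoder's own (already grain-free) reference is used instead. */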
    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (header->refresh_frame_flags & (1 << i)) {
            if (ctx->ref_tab[i].frame.f->buf[0])
                ff_thread_release_buffer(avctx, &ctx->ref_tab[i].frame);

            if (apply_grain) {
                ret = ff_thread_ref_frame(&ctx->ref_tab[i].frame, &ctx->tmp_frame);
                if (ret < 0)
                    return ret;
                ctx->ref_tab[i].valid = 1;
            } else {
                ctx->ref_tab[i].valid = 0;
            }
        }
    }

    return 0;
}

static int vaapi_av1_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const AV1DecContext *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VASliceParameterBufferAV1 slice_param;
    int err = 0;

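    /* One slice parameter buffer is submitted per tile in the current tile
     * group; tile offsets and sizes are relative to the buffer passed in. */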
    for (int i = s->tg_start; i <= s->tg_end; i++) {
        memset(&slice_param, 0, sizeof(VASliceParameterBufferAV1));

        slice_param = (VASliceParameterBufferAV1) {
            .slice_data_size   = s->tile_group_info[i].tile_size,
            .slice_data_offset = s->tile_group_info[i].tile_offset,
            .slice_data_flag   = VA_SLICE_DATA_FLAG_ALL,
            .tile_row          = s->tile_group_info[i].tile_row,
            .tile_column       = s->tile_group_info[i].tile_column,
            .tg_start          = s->tg_start,
            .tg_end            = s->tg_end,
        };

        err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &slice_param,
                                                sizeof(VASliceParameterBufferAV1),
                                                buffer,
                                                size);
        if (err) {
            ff_vaapi_decode_cancel(avctx, pic);
            return err;
        }
    }

    return 0;
}

const AVHWAccel ff_av1_vaapi_hwaccel = {
    .name                 = "av1_vaapi",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_AV1,
    .pix_fmt              = AV_PIX_FMT_VAAPI,
    .start_frame          = vaapi_av1_start_frame,
    .end_frame            = vaapi_av1_end_frame,
    .decode_slice         = vaapi_av1_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = vaapi_av1_decode_init,
    .uninit               = vaapi_av1_decode_uninit,
    .frame_params         = ff_vaapi_common_frame_params,
    .priv_data_size       = sizeof(VAAPIAV1DecContext),
    .caps_internal        = HWACCEL_CAP_ASYNC_SAFE,
};