FFmpeg
vaapi_av1.c
/*
 * AV1 HW decode acceleration through VA API
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/frame.h"
#include "hwaccel_internal.h"
#include "vaapi_decode.h"
#include "internal.h"
#include "av1dec.h"
#include "thread.h"

typedef struct VAAPIAV1FrameRef {
    AVFrame *frame;
    int valid;
} VAAPIAV1FrameRef;

typedef struct VAAPIAV1DecContext {
    VAAPIDecodeContext base;

    /**
     * In the film grain case, VAAPI generates two outputs for each frame:
     * current_frame has no film grain applied and is used as a reference
     * for subsequent frames, so the reference list is maintained here
     * without film grain; current_display_picture has film grain applied
     * and is pushed downstream.
     */
    VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
    AVFrame *tmp_frame;
} VAAPIAV1DecContext;

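/* Helper: return the VASurfaceID backing an AV1Frame, or VA_INVALID_SURFACE
 * when no frame is present. */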
static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
{
    if (vf)
        return ff_vaapi_get_surface_id(vf->f);
    else
        return VA_INVALID_SURFACE;
}

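/* Map the sequence-header bit depth onto the VA-API bit_depth_idx
 * convention (0 = 8 bit, 1 = 10 bit, 2 = 12 bit); a negative value is
 * returned for profiles whose bit depth cannot be determined. */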
static int8_t vaapi_av1_get_bit_depth_idx(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    int8_t bit_depth = 8;

    if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
        bit_depth = seq->color_config.twelve_bit ? 12 : 10;
    else if (seq->seq_profile <= 2)
        bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
    else {
        av_log(avctx, AV_LOG_ERROR,
               "Couldn't get bit depth from profile:%d.\n", seq->seq_profile);
        return -1;
    }
    return bit_depth == 8 ? 0 : bit_depth == 10 ? 1 : 2;
}

static int vaapi_av1_decode_init(AVCodecContext *avctx)
{
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    ctx->tmp_frame = av_frame_alloc();
    if (!ctx->tmp_frame)
        return AVERROR(ENOMEM);

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
        ctx->ref_tab[i].frame = av_frame_alloc();
        if (!ctx->ref_tab[i].frame)
            return AVERROR(ENOMEM);
        ctx->ref_tab[i].valid = 0;
    }

    return ff_vaapi_decode_init(avctx);
}

static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
{
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    av_frame_free(&ctx->tmp_frame);

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++)
        av_frame_free(&ctx->ref_tab[i].frame);

    return ff_vaapi_decode_uninit(avctx);
}

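/* Fill and submit the picture parameter buffer.  When the driver applies
 * film grain, the grain-free reconstruction is written into tmp_frame
 * (current_frame, later kept in ref_tab for referencing), while the frame
 * pushed downstream (current_display_picture) receives the grain. */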
static int vaapi_av1_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    const AV1RawFrameHeader *frame_header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
    VADecPictureParameterBufferAV1 pic_param;
    int8_t bit_depth_idx;
    int err = 0;
    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
    uint8_t remap_lr_type[4] = {AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE,
                                AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ};
    uint8_t segmentation_feature_signed[AV1_SEG_LVL_MAX] = {1, 1, 1, 1, 1, 0, 0, 0};
    uint8_t segmentation_feature_max[AV1_SEG_LVL_MAX] = {255, AV1_MAX_LOOP_FILTER,
                                                         AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER,
                                                         7, 0, 0, 0};

    bit_depth_idx = vaapi_av1_get_bit_depth_idx(avctx);
    if (bit_depth_idx < 0)
        goto fail;

    if (apply_grain) {
        if (ctx->tmp_frame->buf[0])
            av_frame_unref(ctx->tmp_frame);
        err = ff_thread_get_buffer(avctx, ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
        if (err < 0)
            goto fail;
        pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame);
    } else {
        pic->output_surface = vaapi_av1_surface_id(&s->cur_frame);
    }

    memset(&pic_param, 0, sizeof(VADecPictureParameterBufferAV1));
    pic_param = (VADecPictureParameterBufferAV1) {
        .profile                 = seq->seq_profile,
        .order_hint_bits_minus_1 = seq->order_hint_bits_minus_1,
        .bit_depth_idx           = bit_depth_idx,
        .matrix_coefficients     = seq->color_config.matrix_coefficients,
        .current_frame           = pic->output_surface,
        .current_display_picture = vaapi_av1_surface_id(&s->cur_frame),
        .frame_width_minus1      = frame_header->frame_width_minus_1,
        .frame_height_minus1     = frame_header->frame_height_minus_1,
        .primary_ref_frame       = frame_header->primary_ref_frame,
        .order_hint              = frame_header->order_hint,
        .tile_cols               = frame_header->tile_cols,
        .tile_rows               = frame_header->tile_rows,
        .context_update_tile_id  = frame_header->context_update_tile_id,
        .superres_scale_denominator = frame_header->use_superres ?
                                          frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN :
                                          AV1_SUPERRES_NUM,
        .interp_filter           = frame_header->interpolation_filter,
        .filter_level[0]         = frame_header->loop_filter_level[0],
        .filter_level[1]         = frame_header->loop_filter_level[1],
        .filter_level_u          = frame_header->loop_filter_level[2],
        .filter_level_v          = frame_header->loop_filter_level[3],
        .base_qindex             = frame_header->base_q_idx,
        .y_dc_delta_q            = frame_header->delta_q_y_dc,
        .u_dc_delta_q            = frame_header->delta_q_u_dc,
        .u_ac_delta_q            = frame_header->delta_q_u_ac,
        .v_dc_delta_q            = frame_header->delta_q_v_dc,
        .v_ac_delta_q            = frame_header->delta_q_v_ac,
        .cdef_damping_minus_3    = frame_header->cdef_damping_minus_3,
        .cdef_bits               = frame_header->cdef_bits,
        .seq_info_fields.fields = {
            .still_picture              = seq->still_picture,
            .use_128x128_superblock     = seq->use_128x128_superblock,
            .enable_filter_intra        = seq->enable_filter_intra,
            .enable_intra_edge_filter   = seq->enable_intra_edge_filter,
            .enable_interintra_compound = seq->enable_interintra_compound,
            .enable_masked_compound     = seq->enable_masked_compound,
            .enable_dual_filter         = seq->enable_dual_filter,
            .enable_order_hint          = seq->enable_order_hint,
            .enable_jnt_comp            = seq->enable_jnt_comp,
            .enable_cdef                = seq->enable_cdef,
            .mono_chrome                = seq->color_config.mono_chrome,
            .color_range                = seq->color_config.color_range,
            .subsampling_x              = seq->color_config.subsampling_x,
            .subsampling_y              = seq->color_config.subsampling_y,
            .chroma_sample_position     = seq->color_config.chroma_sample_position,
            .film_grain_params_present  = seq->film_grain_params_present &&
                                          !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN),
        },
        .seg_info.segment_info_fields.bits = {
            .enabled         = frame_header->segmentation_enabled,
            .update_map      = frame_header->segmentation_update_map,
            .temporal_update = frame_header->segmentation_temporal_update,
            .update_data     = frame_header->segmentation_update_data,
        },
        .film_grain_info = {
            .film_grain_info_fields.bits = {
                .apply_grain              = apply_grain,
                .chroma_scaling_from_luma = film_grain->chroma_scaling_from_luma,
                .grain_scaling_minus_8    = film_grain->grain_scaling_minus_8,
                .ar_coeff_lag             = film_grain->ar_coeff_lag,
                .ar_coeff_shift_minus_6   = film_grain->ar_coeff_shift_minus_6,
                .grain_scale_shift        = film_grain->grain_scale_shift,
                .overlap_flag             = film_grain->overlap_flag,
                .clip_to_restricted_range = film_grain->clip_to_restricted_range,
            },
            .grain_seed    = film_grain->grain_seed,
            .num_y_points  = film_grain->num_y_points,
            .num_cb_points = film_grain->num_cb_points,
            .num_cr_points = film_grain->num_cr_points,
            .cb_mult       = film_grain->cb_mult,
            .cb_luma_mult  = film_grain->cb_luma_mult,
            .cb_offset     = film_grain->cb_offset,
            .cr_mult       = film_grain->cr_mult,
            .cr_luma_mult  = film_grain->cr_luma_mult,
            .cr_offset     = film_grain->cr_offset,
        },
        .pic_info_fields.bits = {
            .frame_type                   = frame_header->frame_type,
            .show_frame                   = frame_header->show_frame,
            .showable_frame               = frame_header->showable_frame,
            .error_resilient_mode         = frame_header->error_resilient_mode,
            .disable_cdf_update           = frame_header->disable_cdf_update,
            .allow_screen_content_tools   = frame_header->allow_screen_content_tools,
            .force_integer_mv             = frame_header->force_integer_mv,
            .allow_intrabc                = frame_header->allow_intrabc,
            .use_superres                 = frame_header->use_superres,
            .allow_high_precision_mv      = frame_header->allow_high_precision_mv,
            .is_motion_mode_switchable    = frame_header->is_motion_mode_switchable,
            .use_ref_frame_mvs            = frame_header->use_ref_frame_mvs,
            .disable_frame_end_update_cdf = frame_header->disable_frame_end_update_cdf,
            .uniform_tile_spacing_flag    = frame_header->uniform_tile_spacing_flag,
            .allow_warped_motion          = frame_header->allow_warped_motion,
        },
        .loop_filter_info_fields.bits = {
            .sharpness_level        = frame_header->loop_filter_sharpness,
            .mode_ref_delta_enabled = frame_header->loop_filter_delta_enabled,
            .mode_ref_delta_update  = frame_header->loop_filter_delta_update,
        },
        .mode_control_fields.bits = {
            .delta_q_present_flag  = frame_header->delta_q_present,
            .log2_delta_q_res      = frame_header->delta_q_res,
            .delta_lf_present_flag = frame_header->delta_lf_present,
            .log2_delta_lf_res     = frame_header->delta_lf_res,
            .delta_lf_multi        = frame_header->delta_lf_multi,
            .tx_mode               = frame_header->tx_mode,
            .reference_select      = frame_header->reference_select,
            .reduced_tx_set_used   = frame_header->reduced_tx_set,
            .skip_mode_present     = frame_header->skip_mode_present,
        },
        .loop_restoration_fields.bits = {
            .yframe_restoration_type  = remap_lr_type[frame_header->lr_type[0]],
            .cbframe_restoration_type = remap_lr_type[frame_header->lr_type[1]],
            .crframe_restoration_type = remap_lr_type[frame_header->lr_type[2]],
            .lr_unit_shift            = frame_header->lr_unit_shift,
            .lr_uv_shift              = frame_header->lr_uv_shift,
        },
        .qmatrix_fields.bits = {
            .using_qmatrix = frame_header->using_qmatrix,
            .qm_y          = frame_header->qm_y,
            .qm_u          = frame_header->qm_u,
            .qm_v          = frame_header->qm_v,
        }
    };

    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (pic_param.pic_info_fields.bits.frame_type == AV1_FRAME_KEY && frame_header->show_frame)
            pic_param.ref_frame_map[i] = VA_INVALID_ID;
        else
            pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
                                         ff_vaapi_get_surface_id(ctx->ref_tab[i].frame) :
                                         vaapi_av1_surface_id(&s->ref[i]);
    }
    for (int i = 0; i < AV1_REFS_PER_FRAME; i++) {
        pic_param.ref_frame_idx[i] = frame_header->ref_frame_idx[i];
    }
    for (int i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
        pic_param.ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
    }
    for (int i = 0; i < 2; i++) {
        pic_param.mode_deltas[i] = frame_header->loop_filter_mode_deltas[i];
    }
    for (int i = 0; i < (1 << frame_header->cdef_bits); i++) {
        pic_param.cdef_y_strengths[i] =
            (frame_header->cdef_y_pri_strength[i] << 2) +
             frame_header->cdef_y_sec_strength[i];
        pic_param.cdef_uv_strengths[i] =
            (frame_header->cdef_uv_pri_strength[i] << 2) +
             frame_header->cdef_uv_sec_strength[i];
    }
    for (int i = 0; i < frame_header->tile_cols; i++) {
        pic_param.width_in_sbs_minus_1[i] =
            frame_header->width_in_sbs_minus_1[i];
    }
    for (int i = 0; i < frame_header->tile_rows; i++) {
        pic_param.height_in_sbs_minus_1[i] =
            frame_header->height_in_sbs_minus_1[i];
    }
    for (int i = AV1_REF_FRAME_LAST; i <= AV1_REF_FRAME_ALTREF; i++) {
        pic_param.wm[i - 1].invalid = s->cur_frame.gm_invalid[i];
        pic_param.wm[i - 1].wmtype  = s->cur_frame.gm_type[i];
        for (int j = 0; j < 6; j++)
            pic_param.wm[i - 1].wmmat[j] = s->cur_frame.gm_params[i][j];
    }
    for (int i = 0; i < AV1_MAX_SEGMENTS; i++) {
        for (int j = 0; j < AV1_SEG_LVL_MAX; j++) {
            pic_param.seg_info.feature_mask[i] |= (frame_header->feature_enabled[i][j] << j);
            if (segmentation_feature_signed[j])
                pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j],
                                                                -segmentation_feature_max[j], segmentation_feature_max[j]);
            else
                pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j],
                                                                0, segmentation_feature_max[j]);
        }
    }
    if (apply_grain) {
        for (int i = 0; i < film_grain->num_y_points; i++) {
            pic_param.film_grain_info.point_y_value[i] =
                film_grain->point_y_value[i];
            pic_param.film_grain_info.point_y_scaling[i] =
                film_grain->point_y_scaling[i];
        }
        for (int i = 0; i < film_grain->num_cb_points; i++) {
            pic_param.film_grain_info.point_cb_value[i] =
                film_grain->point_cb_value[i];
            pic_param.film_grain_info.point_cb_scaling[i] =
                film_grain->point_cb_scaling[i];
        }
        for (int i = 0; i < film_grain->num_cr_points; i++) {
            pic_param.film_grain_info.point_cr_value[i] =
                film_grain->point_cr_value[i];
            pic_param.film_grain_info.point_cr_scaling[i] =
                film_grain->point_cr_scaling[i];
        }
        for (int i = 0; i < 24; i++) {
            pic_param.film_grain_info.ar_coeffs_y[i] =
                film_grain->ar_coeffs_y_plus_128[i] - 128;
        }
        for (int i = 0; i < 25; i++) {
            pic_param.film_grain_info.ar_coeffs_cb[i] =
                film_grain->ar_coeffs_cb_plus_128[i] - 128;
            pic_param.film_grain_info.ar_coeffs_cr[i] =
                film_grain->ar_coeffs_cr_plus_128[i] - 128;
        }
    }
    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pic_param, sizeof(pic_param));
    if (err < 0)
        goto fail;

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}

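/* After the decode has been issued, refresh the local reference table:
 * slots named in refresh_frame_flags keep the grain-free reconstruction
 * (tmp_frame) when grain was applied, otherwise they are marked invalid so
 * that start_frame falls back to the decoder's own reference frames. */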
static int vaapi_av1_end_frame(AVCodecContext *avctx)
{
    const AV1DecContext *s = avctx->priv_data;
    const AV1RawFrameHeader *header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
    int ret;
    ret = ff_vaapi_decode_issue(avctx, pic);
    if (ret < 0)
        return ret;

    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (header->refresh_frame_flags & (1 << i)) {
            if (ctx->ref_tab[i].frame->buf[0])
                av_frame_unref(ctx->ref_tab[i].frame);

            if (apply_grain) {
                ret = av_frame_ref(ctx->ref_tab[i].frame, ctx->tmp_frame);
                if (ret < 0)
                    return ret;
                ctx->ref_tab[i].valid = 1;
            } else {
                ctx->ref_tab[i].valid = 0;
            }
        }
    }

    return 0;
}

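/* One VASliceParameterBufferAV1 is submitted per tile of the current tile
 * group; slice_data_offset/slice_data_size address each tile inside the
 * tile group data passed in buffer/size. */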
static int vaapi_av1_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const AV1DecContext *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VASliceParameterBufferAV1 slice_param;
    int err = 0;

    for (int i = s->tg_start; i <= s->tg_end; i++) {
        memset(&slice_param, 0, sizeof(VASliceParameterBufferAV1));

        slice_param = (VASliceParameterBufferAV1) {
            .slice_data_size   = s->tile_group_info[i].tile_size,
            .slice_data_offset = s->tile_group_info[i].tile_offset,
            .slice_data_flag   = VA_SLICE_DATA_FLAG_ALL,
            .tile_row          = s->tile_group_info[i].tile_row,
            .tile_column       = s->tile_group_info[i].tile_column,
            .tg_start          = s->tg_start,
            .tg_end            = s->tg_end,
        };

        err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &slice_param,
                                                sizeof(VASliceParameterBufferAV1),
                                                buffer,
                                                size);
        if (err) {
            ff_vaapi_decode_cancel(avctx, pic);
            return err;
        }
    }

    return 0;
}

const FFHWAccel ff_av1_vaapi_hwaccel = {
    .p.name               = "av1_vaapi",
    .p.type               = AVMEDIA_TYPE_VIDEO,
    .p.id                 = AV_CODEC_ID_AV1,
    .p.pix_fmt            = AV_PIX_FMT_VAAPI,
    .start_frame          = vaapi_av1_start_frame,
    .end_frame            = vaapi_av1_end_frame,
    .decode_slice         = vaapi_av1_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = vaapi_av1_decode_init,
    .uninit               = vaapi_av1_decode_uninit,
    .frame_params         = ff_vaapi_common_frame_params,
    .priv_data_size       = sizeof(VAAPIAV1DecContext),
    .caps_internal        = HWACCEL_CAP_ASYNC_SAFE,
};
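The hwaccel above is selected through libavcodec's generic hardware decoding path rather than being called directly. The following minimal sketch (not part of vaapi_av1.c) shows how an application might enable it with the native "av1" decoder; the render node path /dev/dri/renderD128, the helper names get_vaapi_format/open_av1_vaapi_decoder, and the omission of error handling are assumptions for illustration only.

#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>
#include <libavutil/hwcontext.h>

static enum AVPixelFormat get_vaapi_format(AVCodecContext *avctx,
                                           const enum AVPixelFormat *fmts)
{
    /* Pick AV_PIX_FMT_VAAPI so that ff_av1_vaapi_hwaccel is used. */
    for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++)
        if (*p == AV_PIX_FMT_VAAPI)
            return *p;
    return AV_PIX_FMT_NONE;
}

static AVCodecContext *open_av1_vaapi_decoder(void)
{
    const AVCodec *codec = avcodec_find_decoder_by_name("av1");
    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    AVBufferRef *device = NULL;

    /* Create a VAAPI device; the render node path is an assumption. */
    av_hwdevice_ctx_create(&device, AV_HWDEVICE_TYPE_VAAPI,
                           "/dev/dri/renderD128", NULL, 0);
    avctx->hw_device_ctx = av_buffer_ref(device);
    avctx->get_format    = get_vaapi_format;

    /* Optionally export film grain as frame side data instead of letting
     * the driver apply it (see the apply_grain handling above):
     * avctx->export_side_data |= AV_CODEC_EXPORT_DATA_FILM_GRAIN; */

    avcodec_open2(avctx, codec, NULL);
    av_buffer_unref(&device);
    return avctx;
}

After this setup, packets are fed with avcodec_send_packet() and decoded frames arrive via avcodec_receive_frame() with format AV_PIX_FMT_VAAPI, where data[3] carries the VASurfaceID.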