vaapi_h264.c
/*
 * H.264 HW decode acceleration through VA API
 *
 * Copyright (C) 2008-2009 Splitted-Desktop Systems
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "h264dec.h"
#include "h264_ps.h"
#include "hwaccel.h"
#include "vaapi_decode.h"

/**
 * @file
 * This file implements the glue code between FFmpeg's and VA API's
 * structures for H.264 decoding.
 */

/**
 * Initialize an empty VA API picture.
 *
 * VA API requires a fixed-size reference picture array.
 */
static void init_vaapi_pic(VAPictureH264 *va_pic)
{
    va_pic->picture_id          = VA_INVALID_ID;
    va_pic->flags               = VA_PICTURE_H264_INVALID;
    va_pic->TopFieldOrderCnt    = 0;
    va_pic->BottomFieldOrderCnt = 0;
}

/**
 * Translate an FFmpeg Picture into its VA API form.
 *
 * @param[out] va_pic        A pointer to VA API's own picture struct
 * @param[in]  pic           A pointer to the FFmpeg picture struct to convert
 * @param[in]  pic_structure The picture field type (as defined in mpegvideo.h),
 *                           supersedes pic's field type if nonzero.
 */
static void fill_vaapi_pic(VAPictureH264 *va_pic,
                           const H264Picture *pic,
                           int pic_structure)
{
    if (pic_structure == 0)
        pic_structure = pic->reference;
    pic_structure &= PICT_FRAME; /* PICT_TOP_FIELD|PICT_BOTTOM_FIELD */

    va_pic->picture_id = ff_vaapi_get_surface_id(pic->f);
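    /* frame_idx carries the long-term frame index for long-term references
     * and frame_num for short-term references, as expected by VAPictureH264. */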
    va_pic->frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;

    va_pic->flags = 0;
    if (pic_structure != PICT_FRAME)
        va_pic->flags |= (pic_structure & PICT_TOP_FIELD) ? VA_PICTURE_H264_TOP_FIELD : VA_PICTURE_H264_BOTTOM_FIELD;
    if (pic->reference)
        va_pic->flags |= pic->long_ref ? VA_PICTURE_H264_LONG_TERM_REFERENCE : VA_PICTURE_H264_SHORT_TERM_REFERENCE;

    va_pic->TopFieldOrderCnt = 0;
    if (pic->field_poc[0] != INT_MAX)
        va_pic->TopFieldOrderCnt = pic->field_poc[0];

    va_pic->BottomFieldOrderCnt = 0;
    if (pic->field_poc[1] != INT_MAX)
        va_pic->BottomFieldOrderCnt = pic->field_poc[1];
}

/** Decoded Picture Buffer (DPB). */
typedef struct DPB {
    int            size;     ///< Current number of reference frames in the DPB
    int            max_size; ///< Max number of reference frames. This is FF_ARRAY_ELEMS(VAPictureParameterBufferH264.ReferenceFrames)
    VAPictureH264 *va_pics;  ///< Pointer to VAPictureParameterBufferH264.ReferenceFrames array
} DPB;

/**
 * Append picture to the decoded picture buffer, in a VA API form that
 * merges the second field picture attributes with the first, if
 * available. The decoded picture buffer's size must be large enough
 * to receive the new VA API picture object.
 */
static int dpb_add(DPB *dpb, const H264Picture *pic)
{
    int i;

    if (dpb->size >= dpb->max_size)
        return -1;

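    /* If this surface is already in the DPB (typically as the first field of
     * a field pair), merge the new field's flags and POC into that entry
     * instead of adding a duplicate. */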
    for (i = 0; i < dpb->size; i++) {
        VAPictureH264 * const va_pic = &dpb->va_pics[i];
        if (va_pic->picture_id == ff_vaapi_get_surface_id(pic->f)) {
            VAPictureH264 temp_va_pic;
            fill_vaapi_pic(&temp_va_pic, pic, 0);

            if ((temp_va_pic.flags ^ va_pic->flags) & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD)) {
                va_pic->flags |= temp_va_pic.flags & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD);
                /* Merge second field */
                if (temp_va_pic.flags & VA_PICTURE_H264_TOP_FIELD) {
                    va_pic->TopFieldOrderCnt = temp_va_pic.TopFieldOrderCnt;
                } else {
                    va_pic->BottomFieldOrderCnt = temp_va_pic.BottomFieldOrderCnt;
                }
            }
            return 0;
        }
    }

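    /* Not in the DPB yet: append it as a new entry. */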
    fill_vaapi_pic(&dpb->va_pics[dpb->size++], pic, 0);
    return 0;
}

/** Fill in VA API reference frames array. */
static int fill_vaapi_ReferenceFrames(VAPictureParameterBufferH264 *pic_param,
                                      const H264Context *h)
{
    DPB dpb;
    int i;

    dpb.size     = 0;
    dpb.max_size = FF_ARRAY_ELEMS(pic_param->ReferenceFrames);
    dpb.va_pics  = pic_param->ReferenceFrames;
    for (i = 0; i < dpb.max_size; i++)
        init_vaapi_pic(&dpb.va_pics[i]);

    for (i = 0; i < h->short_ref_count; i++) {
        const H264Picture *pic = h->short_ref[i];
        if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
            return -1;
    }

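    /* Long-term references (at most 16 per the H.264 spec); the array may be
     * sparse, hence the NULL check. */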
    for (i = 0; i < 16; i++) {
        const H264Picture *pic = h->long_ref[i];
        if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
            return -1;
    }
    return 0;
}

/**
 * Fill in VA API reference picture lists from the FFmpeg reference
 * picture list.
 *
 * @param[out] RefPicList VA API internal reference picture list
 * @param[in]  ref_list   A pointer to the FFmpeg reference list
 * @param[in]  ref_count  The number of reference pictures in ref_list
 */
static void fill_vaapi_RefPicList(VAPictureH264 RefPicList[32],
                                  const H264Ref *ref_list,
                                  unsigned int ref_count)
{
    unsigned int i, n = 0;
    for (i = 0; i < ref_count; i++)
        if (ref_list[i].reference)
            fill_vaapi_pic(&RefPicList[n++], ref_list[i].parent,
                           ref_list[i].reference);

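    /* Pad the remaining entries with invalid pictures. */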
    for (; n < 32; n++)
        init_vaapi_pic(&RefPicList[n]);
}

/**
 * Fill in prediction weight table.
 *
 * VA API requires a plain prediction weight table as it does not infer
 * any value.
 *
 * @param[in]  h                  A pointer to the current H.264 context
 * @param[in]  list               The reference frame list index to use
 * @param[out] luma_weight_flag   VA API plain luma weight flag
 * @param[out] luma_weight        VA API plain luma weight table
 * @param[out] luma_offset        VA API plain luma offset table
 * @param[out] chroma_weight_flag VA API plain chroma weight flag
 * @param[out] chroma_weight      VA API plain chroma weight table
 * @param[out] chroma_offset      VA API plain chroma offset table
 */
static void fill_vaapi_plain_pred_weight_table(const H264Context *h,
                                               int list,
                                               unsigned char *luma_weight_flag,
                                               short luma_weight[32],
                                               short luma_offset[32],
                                               unsigned char *chroma_weight_flag,
                                               short chroma_weight[32][2],
                                               short chroma_offset[32][2])
{
    const H264SliceContext *sl = &h->slice_ctx[0];
    unsigned int i, j;

    *luma_weight_flag   = sl->pwt.luma_weight_flag[list];
    *chroma_weight_flag = sl->pwt.chroma_weight_flag[list];

    for (i = 0; i < sl->ref_count[list]; i++) {
        /* VA API also wants the inferred (default) values, not
           only what is available in the bitstream (7.4.3.2). */
        if (sl->pwt.luma_weight_flag[list]) {
            luma_weight[i] = sl->pwt.luma_weight[i][list][0];
            luma_offset[i] = sl->pwt.luma_weight[i][list][1];
        } else {
            luma_weight[i] = 1 << sl->pwt.luma_log2_weight_denom;
            luma_offset[i] = 0;
        }
        for (j = 0; j < 2; j++) {
            if (sl->pwt.chroma_weight_flag[list]) {
                chroma_weight[i][j] = sl->pwt.chroma_weight[i][list][j][0];
                chroma_offset[i][j] = sl->pwt.chroma_weight[i][list][j][1];
            } else {
                chroma_weight[i][j] = 1 << sl->pwt.chroma_log2_weight_denom;
                chroma_offset[i][j] = 0;
            }
        }
    }
}

/** Initialize and start decoding a frame with VA API. */
static int vaapi_h264_start_frame(AVCodecContext          *avctx,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t       size)
{
    const H264Context *h = avctx->priv_data;
    VAAPIDecodePicture *pic = h->cur_pic_ptr->hwaccel_picture_private;
    const PPS *pps = h->ps.pps;
    const SPS *sps = h->ps.sps;
    VAPictureParameterBufferH264 pic_param;
    VAIQMatrixBufferH264 iq_matrix;
    int err;

    pic->output_surface = ff_vaapi_get_surface_id(h->cur_pic_ptr->f);

    pic_param = (VAPictureParameterBufferH264) {
        .picture_width_in_mbs_minus1                = h->mb_width - 1,
        .picture_height_in_mbs_minus1               = h->mb_height - 1,
        .bit_depth_luma_minus8                      = sps->bit_depth_luma - 8,
        .bit_depth_chroma_minus8                    = sps->bit_depth_chroma - 8,
        .num_ref_frames                             = sps->ref_frame_count,
        .seq_fields.bits = {
            .chroma_format_idc                      = sps->chroma_format_idc,
            .residual_colour_transform_flag         = sps->residual_color_transform_flag,
            .gaps_in_frame_num_value_allowed_flag   = sps->gaps_in_frame_num_allowed_flag,
            .frame_mbs_only_flag                    = sps->frame_mbs_only_flag,
            .mb_adaptive_frame_field_flag           = sps->mb_aff,
            .direct_8x8_inference_flag              = sps->direct_8x8_inference_flag,
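            /* For levels 3.1 and above, bi-prediction is restricted to
             * partitions of at least 8x8 luma samples (A.3.3.2). */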
            .MinLumaBiPredSize8x8                   = sps->level_idc >= 31, /* A.3.3.2 */
            .log2_max_frame_num_minus4              = sps->log2_max_frame_num - 4,
            .pic_order_cnt_type                     = sps->poc_type,
            .log2_max_pic_order_cnt_lsb_minus4      = sps->log2_max_poc_lsb - 4,
            .delta_pic_order_always_zero_flag       = sps->delta_pic_order_always_zero_flag,
        },
        .pic_init_qp_minus26                        = pps->init_qp - 26,
        .pic_init_qs_minus26                        = pps->init_qs - 26,
        .chroma_qp_index_offset                     = pps->chroma_qp_index_offset[0],
        .second_chroma_qp_index_offset              = pps->chroma_qp_index_offset[1],
        .pic_fields.bits = {
            .entropy_coding_mode_flag               = pps->cabac,
            .weighted_pred_flag                     = pps->weighted_pred,
            .weighted_bipred_idc                    = pps->weighted_bipred_idc,
            .transform_8x8_mode_flag                = pps->transform_8x8_mode,
            .field_pic_flag                         = h->picture_structure != PICT_FRAME,
            .constrained_intra_pred_flag            = pps->constrained_intra_pred,
            .pic_order_present_flag                 = pps->pic_order_present,
            .deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present,
            .redundant_pic_cnt_present_flag         = pps->redundant_pic_cnt_present,
            .reference_pic_flag                     = h->nal_ref_idc != 0,
        },
        .frame_num                                  = h->poc.frame_num,
    };

    fill_vaapi_pic(&pic_param.CurrPic, h->cur_pic_ptr, h->picture_structure);
    err = fill_vaapi_ReferenceFrames(&pic_param, h);
    if (err < 0)
        goto fail;

    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pic_param, sizeof(pic_param));
    if (err < 0)
        goto fail;

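    /* FFmpeg stores six 8x8 scaling lists, but VAIQMatrixBufferH264 only
     * carries the two luma ones: intra at pps->scaling_matrix8[0] and inter
     * at pps->scaling_matrix8[3]. */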
    memcpy(iq_matrix.ScalingList4x4,
           pps->scaling_matrix4, sizeof(iq_matrix.ScalingList4x4));
    memcpy(iq_matrix.ScalingList8x8[0],
           pps->scaling_matrix8[0], sizeof(iq_matrix.ScalingList8x8[0]));
    memcpy(iq_matrix.ScalingList8x8[1],
           pps->scaling_matrix8[3], sizeof(iq_matrix.ScalingList8x8[0]));

    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAIQMatrixBufferType,
                                            &iq_matrix, sizeof(iq_matrix));
    if (err < 0)
        goto fail;

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}

/** End a hardware decoding based frame. */
static int vaapi_h264_end_frame(AVCodecContext *avctx)
{
    const H264Context *h = avctx->priv_data;
    VAAPIDecodePicture *pic = h->cur_pic_ptr->hwaccel_picture_private;
    H264SliceContext *sl = &h->slice_ctx[0];
    int ret;

    ret = ff_vaapi_decode_issue(avctx, pic);
    if (ret < 0)
        goto finish;

    ff_h264_draw_horiz_band(h, sl, 0, h->avctx->height);

finish:
    return ret;
}

/** Decode the given H.264 slice with VA API. */
static int vaapi_h264_decode_slice(AVCodecContext *avctx,
                                   const uint8_t  *buffer,
                                   uint32_t        size)
{
    const H264Context *h = avctx->priv_data;
    VAAPIDecodePicture *pic = h->cur_pic_ptr->hwaccel_picture_private;
    const H264SliceContext *sl = &h->slice_ctx[0];
    VASliceParameterBufferH264 slice_param;
    int err;

    slice_param = (VASliceParameterBufferH264) {
        .slice_data_size               = size,
        .slice_data_offset             = 0,
        .slice_data_flag               = VA_SLICE_DATA_FLAG_ALL,
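        /* Number of bits of the slice header already consumed by FFmpeg's
         * parser; the driver uses this to locate the start of slice_data(). */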
        .slice_data_bit_offset         = get_bits_count(&sl->gb),
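        /* sl->mb_y is doubled for field and MBAFF pictures, hence the shift
         * when recomputing first_mb_in_slice. */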
        .first_mb_in_slice             = (sl->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + sl->mb_x,
        .slice_type                    = ff_h264_get_slice_type(sl),
        .direct_spatial_mv_pred_flag   = sl->slice_type == AV_PICTURE_TYPE_B ? sl->direct_spatial_mv_pred : 0,
        .num_ref_idx_l0_active_minus1  = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0,
        .num_ref_idx_l1_active_minus1  = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0,
        .cabac_init_idc                = sl->cabac_init_idc,
        .slice_qp_delta                = sl->qscale - h->ps.pps->init_qp,
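        /* sl->deblocking_filter stores disable_deblocking_filter_idc with the
         * values 0 and 1 swapped; map it back to the bitstream value. */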
        .disable_deblocking_filter_idc = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter,
        .slice_alpha_c0_offset_div2    = sl->slice_alpha_c0_offset / 2,
        .slice_beta_offset_div2        = sl->slice_beta_offset / 2,
        .luma_log2_weight_denom        = sl->pwt.luma_log2_weight_denom,
        .chroma_log2_weight_denom      = sl->pwt.chroma_log2_weight_denom,
    };

    fill_vaapi_RefPicList(slice_param.RefPicList0, sl->ref_list[0],
                          sl->list_count > 0 ? sl->ref_count[0] : 0);
    fill_vaapi_RefPicList(slice_param.RefPicList1, sl->ref_list[1],
                          sl->list_count > 1 ? sl->ref_count[1] : 0);

    fill_vaapi_plain_pred_weight_table(h, 0,
                                       &slice_param.luma_weight_l0_flag,
                                       slice_param.luma_weight_l0,
                                       slice_param.luma_offset_l0,
                                       &slice_param.chroma_weight_l0_flag,
                                       slice_param.chroma_weight_l0,
                                       slice_param.chroma_offset_l0);
    fill_vaapi_plain_pred_weight_table(h, 1,
                                       &slice_param.luma_weight_l1_flag,
                                       slice_param.luma_weight_l1,
                                       slice_param.luma_offset_l1,
                                       &slice_param.chroma_weight_l1_flag,
                                       slice_param.chroma_weight_l1,
                                       slice_param.chroma_offset_l1);

    err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
                                            &slice_param, sizeof(slice_param),
                                            buffer, size);
    if (err) {
        ff_vaapi_decode_cancel(avctx, pic);
        return err;
    }

    return 0;
}

const AVHWAccel ff_h264_vaapi_hwaccel = {
    .name                 = "h264_vaapi",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_H264,
    .pix_fmt              = AV_PIX_FMT_VAAPI,
    .start_frame          = &vaapi_h264_start_frame,
    .end_frame            = &vaapi_h264_end_frame,
    .decode_slice         = &vaapi_h264_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = &ff_vaapi_decode_init,
    .uninit               = &ff_vaapi_decode_uninit,
    .frame_params         = &ff_vaapi_common_frame_params,
    .priv_data_size       = sizeof(VAAPIDecodeContext),
    .caps_internal        = HWACCEL_CAP_ASYNC_SAFE,
};