FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
vdpau.c
Go to the documentation of this file.
1 /*
2  * Video Decode and Presentation API for UNIX (VDPAU) is used for
3  * HW decode acceleration for MPEG-1/2, MPEG-4 ASP, H.264 and VC-1.
4  *
5  * Copyright (c) 2008 NVIDIA
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <limits.h>
25 #include "libavutil/avassert.h"
26 #include "avcodec.h"
27 #include "internal.h"
28 #include "h264.h"
29 #include "vc1.h"
30 
31 #undef NDEBUG
32 #include <assert.h>
33 
34 #include "vdpau.h"
35 #include "vdpau_compat.h"
36 #include "vdpau_internal.h"
37 
38 /**
39  * @addtogroup VDPAU_Decoding
40  *
41  * @{
42  */
43 
44 static int vdpau_error(VdpStatus status)
45 {
46  switch (status) {
47  case VDP_STATUS_OK:
48  return 0;
49  case VDP_STATUS_NO_IMPLEMENTATION:
50  return AVERROR(ENOSYS);
51  case VDP_STATUS_DISPLAY_PREEMPTED:
52  return AVERROR(EIO);
53  case VDP_STATUS_INVALID_HANDLE:
54  return AVERROR(EBADF);
55  case VDP_STATUS_INVALID_POINTER:
56  return AVERROR(EFAULT);
57  case VDP_STATUS_RESOURCES:
58  return AVERROR(ENOBUFS);
59  case VDP_STATUS_HANDLE_DEVICE_MISMATCH:
60  return AVERROR(EXDEV);
61  case VDP_STATUS_ERROR:
62  return AVERROR(EIO);
63  default:
64  return AVERROR(EINVAL);
65  }
66 }
67 
69 {
70  return av_vdpau_alloc_context();
71 }
72 
73 MAKE_ACCESSORS(AVVDPAUContext, vdpau_hwaccel, AVVDPAU_Render2, render2)
74 
76  VdpChromaType *type,
77  uint32_t *width, uint32_t *height)
78 {
79  VdpChromaType t;
80  uint32_t w = avctx->coded_width;
81  uint32_t h = avctx->coded_height;
82 
83  /* See <vdpau/vdpau.h> for per-type alignment constraints. */
84  switch (avctx->sw_pix_fmt) {
85  case AV_PIX_FMT_YUV420P:
87  t = VDP_CHROMA_TYPE_420;
88  w = (w + 1) & ~1;
89  h = (h + 3) & ~3;
90  break;
91  case AV_PIX_FMT_YUV422P:
93  t = VDP_CHROMA_TYPE_422;
94  w = (w + 1) & ~1;
95  h = (h + 1) & ~1;
96  break;
97  case AV_PIX_FMT_YUV444P:
99  t = VDP_CHROMA_TYPE_444;
100  h = (h + 1) & ~1;
101  break;
102  default:
103  return AVERROR(ENOSYS);
104  }
105 
106  if (type)
107  *type = t;
108  if (width)
109  *width = w;
110  if (height)
111  *height = h;
112  return 0;
113 }
114 
/**
 * Initialize the per-stream VDPAU decoder for the given profile/level.
 *
 * Queries the VDPAU implementation (through the user-supplied
 * get_proc_address) for video-surface and decoder capabilities,
 * validates them against the coded dimensions and the requested level,
 * and finally creates a VdpDecoder.
 *
 * @param avctx   codec context; hwaccel_context must point to a
 *                VDPAUHWContext set up by the application
 * @param profile VDPAU decoder profile to create
 * @param level   codec level; ignored when the application set
 *                AV_HWACCEL_FLAG_IGNORE_LEVEL
 * @return 0 on success (also when a user-created decoder is reused, or
 *         when no hwaccel_context was provided at all), a negative
 *         AVERROR code on failure.
 */
int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile,
                         int level)
{
    VDPAUHWContext *hwctx = avctx->hwaccel_context;
    VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
    VdpVideoSurfaceQueryCapabilities *surface_query_caps;
    VdpDecoderQueryCapabilities *decoder_query_caps;
    VdpDecoderCreate *create;
    void *func;
    VdpStatus status;
    VdpBool supported;
    uint32_t max_level, max_mb, max_width, max_height;
    VdpChromaType type;
    uint32_t width;
    uint32_t height;

    /* UINT32_MAX in width/height marks "no decoder created by us";
     * ff_vdpau_common_uninit() uses it to skip VdpDecoderDestroy. */
    vdctx->width = UINT32_MAX;
    vdctx->height = UINT32_MAX;

    if (!hwctx) {
        /* Not a hard error: decoding is simply left uninitialized. */
        vdctx->device = VDP_INVALID_HANDLE;
        av_log(avctx, AV_LOG_WARNING, "hwaccel_context has not been setup by the user application, cannot initialize\n");
        return 0;
    }

    if (hwctx->context.decoder != VDP_INVALID_HANDLE) {
        /* Application supplied its own decoder and render callback;
         * VDP_INVALID_HANDLE as device signals "not ours to destroy". */
        vdctx->decoder = hwctx->context.decoder;
        vdctx->render = hwctx->context.render;
        vdctx->device = VDP_INVALID_HANDLE;
        return 0; /* Decoder created by user */
    }
    /* Acknowledge any pending reset request (see ff_vdpau_common_reinit). */
    hwctx->reset = 0;

    vdctx->device = hwctx->device;
    vdctx->get_proc_address = hwctx->get_proc_address;

    /* With IGNORE_LEVEL the level check below degenerates to level 0;
     * otherwise an unknown (negative) level is rejected outright, which
     * also guarantees level >= 0 for the unsigned comparison later. */
    if (hwctx->flags & AV_HWACCEL_FLAG_IGNORE_LEVEL)
        level = 0;
    else if (level < 0)
        return AVERROR(ENOTSUP);

    if (av_vdpau_get_surface_parameters(avctx, &type, &width, &height))
        return AVERROR(ENOSYS);

    /* Chroma types other than 4:2:0 require an explicit application
     * opt-in via AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH. */
    if (!(hwctx->flags & AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH) &&
        type != VDP_CHROMA_TYPE_420)
        return AVERROR(ENOSYS);

    /* Step 1: check that the implementation can allocate video surfaces
     * of the required chroma type and size. */
    status = vdctx->get_proc_address(vdctx->device,
                                     VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES,
                                     &func);
    if (status != VDP_STATUS_OK)
        return vdpau_error(status);
    else
        surface_query_caps = func;

    status = surface_query_caps(vdctx->device, type, &supported,
                                &max_width, &max_height);
    if (status != VDP_STATUS_OK)
        return vdpau_error(status);
    if (supported != VDP_TRUE ||
        max_width < width || max_height < height)
        return AVERROR(ENOTSUP);

    /* Step 2: check the decoder profile capabilities. */
    status = vdctx->get_proc_address(vdctx->device,
                                     VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES,
                                     &func);
    if (status != VDP_STATUS_OK)
        return vdpau_error(status);
    else
        decoder_query_caps = func;

    status = decoder_query_caps(vdctx->device, profile, &supported, &max_level,
                                &max_mb, &max_width, &max_height);
#ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
    if (status != VDP_STATUS_OK && profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE) {
        /* Run-time backward compatibility for libvdpau 0.8 and earlier */
        profile = VDP_DECODER_PROFILE_H264_MAIN;
        status = decoder_query_caps(vdctx->device, profile, &supported,
                                    &max_level, &max_mb,
                                    &max_width, &max_height);
    }
#endif
    if (status != VDP_STATUS_OK)
        return vdpau_error(status);

    if (supported != VDP_TRUE || max_level < level ||
        max_width < width || max_height < height)
        return AVERROR(ENOTSUP);

    /* Step 3: resolve the creation/render entry points and create the
     * decoder. */
    status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_CREATE,
                                     &func);
    if (status != VDP_STATUS_OK)
        return vdpau_error(status);
    else
        create = func;

    status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_RENDER,
                                     &func);
    if (status != VDP_STATUS_OK)
        return vdpau_error(status);
    else
        vdctx->render = func;

    /* avctx->refs is passed as the VDPAU max_references — presumably the
     * reference-frame (DPB) count needed by the stream; confirm against
     * the callers setting avctx->refs. */
    status = create(vdctx->device, profile, width, height, avctx->refs,
                    &vdctx->decoder);
    if (status == VDP_STATUS_OK) {
        /* Record the dimensions the decoder was created for; compared in
         * ff_vdpau_common_reinit() to detect resolution changes. */
        vdctx->width  = avctx->coded_width;
        vdctx->height = avctx->coded_height;
    }

    return vdpau_error(status);
}
228 
230 {
231  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
232  VdpDecoderDestroy *destroy;
233  void *func;
234  VdpStatus status;
235 
236  if (vdctx->device == VDP_INVALID_HANDLE)
237  return 0; /* Decoder created and destroyed by user */
238  if (vdctx->width == UINT32_MAX && vdctx->height == UINT32_MAX)
239  return 0;
240 
241  status = vdctx->get_proc_address(vdctx->device,
242  VDP_FUNC_ID_DECODER_DESTROY, &func);
243  if (status != VDP_STATUS_OK)
244  return vdpau_error(status);
245  else
246  destroy = func;
247 
248  status = destroy(vdctx->decoder);
249  return vdpau_error(status);
250 }
251 
253 {
254  VDPAUHWContext *hwctx = avctx->hwaccel_context;
255  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
256 
257  if (vdctx->device == VDP_INVALID_HANDLE)
258  return 0; /* Decoder created by user */
259  if (avctx->coded_width == vdctx->width &&
260  avctx->coded_height == vdctx->height && !hwctx->reset)
261  return 0;
262 
263  avctx->hwaccel->uninit(avctx);
264  return avctx->hwaccel->init(avctx);
265 }
266 
267 int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx,
268  av_unused const uint8_t *buffer,
269  av_unused uint32_t size)
270 {
271  pic_ctx->bitstream_buffers_allocated = 0;
272  pic_ctx->bitstream_buffers_used = 0;
273  pic_ctx->bitstream_buffers = NULL;
274  return 0;
275 }
276 
278  struct vdpau_picture_context *pic_ctx)
279 {
280  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
281  AVVDPAUContext *hwctx = avctx->hwaccel_context;
282  VdpVideoSurface surf = ff_vdpau_get_surface_id(frame);
283  VdpStatus status;
284  int val;
285 
286  val = ff_vdpau_common_reinit(avctx);
287  if (val < 0)
288  return val;
289 
290 #if FF_API_BUFS_VDPAU
292  av_assert0(sizeof(hwctx->info) <= sizeof(pic_ctx->info));
293  memcpy(&hwctx->info, &pic_ctx->info, sizeof(hwctx->info));
294  hwctx->bitstream_buffers = pic_ctx->bitstream_buffers;
295  hwctx->bitstream_buffers_used = pic_ctx->bitstream_buffers_used;
296  hwctx->bitstream_buffers_allocated = pic_ctx->bitstream_buffers_allocated;
298 #endif
299 
300  if (!hwctx->render && hwctx->render2) {
301  status = hwctx->render2(avctx, frame, (void *)&pic_ctx->info,
302  pic_ctx->bitstream_buffers_used, pic_ctx->bitstream_buffers);
303  } else
304  status = vdctx->render(vdctx->decoder, surf, (void *)&pic_ctx->info,
305  pic_ctx->bitstream_buffers_used,
306  pic_ctx->bitstream_buffers);
307 
308  av_freep(&pic_ctx->bitstream_buffers);
309 
310 #if FF_API_BUFS_VDPAU
312  hwctx->bitstream_buffers = NULL;
313  hwctx->bitstream_buffers_used = 0;
314  hwctx->bitstream_buffers_allocated = 0;
316 #endif
317 
318  return vdpau_error(status);
319 }
320 
321 #if CONFIG_H263_VDPAU_HWACCEL || CONFIG_MPEG1_VDPAU_HWACCEL || \
322  CONFIG_MPEG2_VDPAU_HWACCEL || CONFIG_MPEG4_VDPAU_HWACCEL || \
323  CONFIG_VC1_VDPAU_HWACCEL || CONFIG_WMV3_VDPAU_HWACCEL
325 {
326  MpegEncContext *s = avctx->priv_data;
327  Picture *pic = s->current_picture_ptr;
328  struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
329  int val;
330 
331  val = ff_vdpau_common_end_frame(avctx, pic->f, pic_ctx);
332  if (val < 0)
333  return val;
334 
336  return 0;
337 }
338 #endif
339 
340 int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx,
341  const uint8_t *buf, uint32_t size)
342 {
343  VdpBitstreamBuffer *buffers = pic_ctx->bitstream_buffers;
344 
345  buffers = av_fast_realloc(buffers, &pic_ctx->bitstream_buffers_allocated,
346  (pic_ctx->bitstream_buffers_used + 1) * sizeof(*buffers));
347  if (!buffers)
348  return AVERROR(ENOMEM);
349 
350  pic_ctx->bitstream_buffers = buffers;
351  buffers += pic_ctx->bitstream_buffers_used++;
352 
353  buffers->struct_version = VDP_BITSTREAM_BUFFER_VERSION;
354  buffers->bitstream = buf;
355  buffers->bitstream_bytes = size;
356  return 0;
357 }
358 
359 /* Obsolete non-hwaccel VDPAU support below... */
360 
362 {
363  struct vdpau_render_state *render, *render_ref;
364  VdpReferenceFrameH264 *rf, *rf2;
365  H264Picture *pic;
366  int i, list, pic_frame_idx;
367 
368  render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
369  assert(render);
370 
371  rf = &render->info.h264.referenceFrames[0];
372 #define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames)
373 
374  for (list = 0; list < 2; ++list) {
375  H264Picture **lp = list ? h->long_ref : h->short_ref;
376  int ls = list ? 16 : h->short_ref_count;
377 
378  for (i = 0; i < ls; ++i) {
379  pic = lp[i];
380  if (!pic || !pic->reference)
381  continue;
382  pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
383 
384  render_ref = (struct vdpau_render_state *)pic->f->data[0];
385  assert(render_ref);
386 
387  rf2 = &render->info.h264.referenceFrames[0];
388  while (rf2 != rf) {
389  if (
390  (rf2->surface == render_ref->surface)
391  && (rf2->is_long_term == pic->long_ref)
392  && (rf2->frame_idx == pic_frame_idx)
393  )
394  break;
395  ++rf2;
396  }
397  if (rf2 != rf) {
398  rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
399  rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
400  continue;
401  }
402 
403  if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
404  continue;
405 
406  rf->surface = render_ref->surface;
407  rf->is_long_term = pic->long_ref;
408  rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
409  rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
410  rf->field_order_cnt[0] = pic->field_poc[0];
411  rf->field_order_cnt[1] = pic->field_poc[1];
412  rf->frame_idx = pic_frame_idx;
413 
414  ++rf;
415  }
416  }
417 
418  for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
419  rf->surface = VDP_INVALID_HANDLE;
420  rf->is_long_term = 0;
421  rf->top_is_reference = 0;
422  rf->bottom_is_reference = 0;
423  rf->field_order_cnt[0] = 0;
424  rf->field_order_cnt[1] = 0;
425  rf->frame_idx = 0;
426  }
427 }
428 
429 void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
430 {
431  struct vdpau_render_state *render = (struct vdpau_render_state*)data;
432  assert(render);
433 
435  render->bitstream_buffers,
437  sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1)
438  );
439 
440  render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
441  render->bitstream_buffers[render->bitstream_buffers_used].bitstream = buf;
442  render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
443  render->bitstream_buffers_used++;
444 }
445 
446 #if CONFIG_H264_VDPAU_DECODER
448 {
449  struct vdpau_render_state *render;
450  int i;
451 
452  render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
453  assert(render);
454 
455  for (i = 0; i < 2; ++i) {
456  int foc = h->cur_pic_ptr->field_poc[i];
457  if (foc == INT_MAX)
458  foc = 0;
459  render->info.h264.field_order_cnt[i] = foc;
460  }
461 
462  render->info.h264.frame_num = h->frame_num;
463 }
464 
466 {
467  struct vdpau_render_state *render;
468 
469  render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
470  assert(render);
471 
472  render->info.h264.slice_count = h->current_slice;
473  if (render->info.h264.slice_count < 1)
474  return;
475 
476  render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
477  render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME;
478  render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
479  render->info.h264.num_ref_frames = h->sps.ref_frame_count;
480  render->info.h264.mb_adaptive_frame_field_flag = h->sps.mb_aff && !render->info.h264.field_pic_flag;
481  render->info.h264.constrained_intra_pred_flag = h->pps.constrained_intra_pred;
482  render->info.h264.weighted_pred_flag = h->pps.weighted_pred;
483  render->info.h264.weighted_bipred_idc = h->pps.weighted_bipred_idc;
484  render->info.h264.frame_mbs_only_flag = h->sps.frame_mbs_only_flag;
485  render->info.h264.transform_8x8_mode_flag = h->pps.transform_8x8_mode;
486  render->info.h264.chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0];
487  render->info.h264.second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
488  render->info.h264.pic_init_qp_minus26 = h->pps.init_qp - 26;
489  render->info.h264.num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1;
490  render->info.h264.num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1;
491  render->info.h264.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4;
492  render->info.h264.pic_order_cnt_type = h->sps.poc_type;
493  render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
494  render->info.h264.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
495  render->info.h264.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag;
496  render->info.h264.entropy_coding_mode_flag = h->pps.cabac;
497  render->info.h264.pic_order_present_flag = h->pps.pic_order_present;
498  render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
499  render->info.h264.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present;
500  memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
501  memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
502  memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
503 
504  ff_h264_draw_horiz_band(h, &h->slice_ctx[0], 0, h->avctx->height);
505  render->bitstream_buffers_used = 0;
506 }
507 #endif /* CONFIG_H264_VDPAU_DECODER */
508 
509 #if CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER
511  int buf_size, int slice_count)
512 {
513  struct vdpau_render_state *render, *last, *next;
514  int i;
515 
516  if (!s->current_picture_ptr) return;
517 
518  render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
519  assert(render);
520 
521  /* fill VdpPictureInfoMPEG1Or2 struct */
522  render->info.mpeg.picture_structure = s->picture_structure;
523  render->info.mpeg.picture_coding_type = s->pict_type;
524  render->info.mpeg.intra_dc_precision = s->intra_dc_precision;
525  render->info.mpeg.frame_pred_frame_dct = s->frame_pred_frame_dct;
526  render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
527  render->info.mpeg.intra_vlc_format = s->intra_vlc_format;
528  render->info.mpeg.alternate_scan = s->alternate_scan;
529  render->info.mpeg.q_scale_type = s->q_scale_type;
530  render->info.mpeg.top_field_first = s->top_field_first;
531  render->info.mpeg.full_pel_forward_vector = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2
532  render->info.mpeg.full_pel_backward_vector = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2
533  render->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
534  render->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1];
535  render->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0];
536  render->info.mpeg.f_code[1][1] = s->mpeg_f_code[1][1];
537  for (i = 0; i < 64; ++i) {
538  render->info.mpeg.intra_quantizer_matrix[i] = s->intra_matrix[i];
539  render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
540  }
541 
542  render->info.mpeg.forward_reference = VDP_INVALID_HANDLE;
543  render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;
544 
545  switch(s->pict_type){
546  case AV_PICTURE_TYPE_B:
547  next = (struct vdpau_render_state *)s->next_picture.f->data[0];
548  assert(next);
549  render->info.mpeg.backward_reference = next->surface;
550  // no return here, going to set forward prediction
551  case AV_PICTURE_TYPE_P:
552  last = (struct vdpau_render_state *)s->last_picture.f->data[0];
553  if (!last) // FIXME: Does this test make sense?
554  last = render; // predict second field from the first
555  render->info.mpeg.forward_reference = last->surface;
556  }
557 
558  ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
559 
560  render->info.mpeg.slice_count = slice_count;
561 
562  if (slice_count)
564  render->bitstream_buffers_used = 0;
565 }
566 #endif /* CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER */
567 
568 #if CONFIG_VC1_VDPAU_DECODER
570  int buf_size)
571 {
572  VC1Context *v = s->avctx->priv_data;
573  struct vdpau_render_state *render, *last, *next;
574 
575  render = (struct vdpau_render_state *)s->current_picture.f->data[0];
576  assert(render);
577 
578  /* fill LvPictureInfoVC1 struct */
579  render->info.vc1.frame_coding_mode = v->fcm ? v->fcm + 1 : 0;
580  render->info.vc1.postprocflag = v->postprocflag;
581  render->info.vc1.pulldown = v->broadcast;
582  render->info.vc1.interlace = v->interlace;
583  render->info.vc1.tfcntrflag = v->tfcntrflag;
584  render->info.vc1.finterpflag = v->finterpflag;
585  render->info.vc1.psf = v->psf;
586  render->info.vc1.dquant = v->dquant;
587  render->info.vc1.panscan_flag = v->panscanflag;
588  render->info.vc1.refdist_flag = v->refdist_flag;
589  render->info.vc1.quantizer = v->quantizer_mode;
590  render->info.vc1.extended_mv = v->extended_mv;
591  render->info.vc1.extended_dmv = v->extended_dmv;
592  render->info.vc1.overlap = v->overlap;
593  render->info.vc1.vstransform = v->vstransform;
594  render->info.vc1.loopfilter = v->s.loop_filter;
595  render->info.vc1.fastuvmc = v->fastuvmc;
596  render->info.vc1.range_mapy_flag = v->range_mapy_flag;
597  render->info.vc1.range_mapy = v->range_mapy;
598  render->info.vc1.range_mapuv_flag = v->range_mapuv_flag;
599  render->info.vc1.range_mapuv = v->range_mapuv;
600  /* Specific to simple/main profile only */
601  render->info.vc1.multires = v->multires;
602  render->info.vc1.syncmarker = v->resync_marker;
603  render->info.vc1.rangered = v->rangered | (v->rangeredfrm << 1);
604  render->info.vc1.maxbframes = v->s.max_b_frames;
605 
606  render->info.vc1.deblockEnable = v->postprocflag & 1;
607  render->info.vc1.pquant = v->pq;
608 
609  render->info.vc1.forward_reference = VDP_INVALID_HANDLE;
610  render->info.vc1.backward_reference = VDP_INVALID_HANDLE;
611 
612  if (v->bi_type)
613  render->info.vc1.picture_type = 4;
614  else
615  render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;
616 
617  switch(s->pict_type){
618  case AV_PICTURE_TYPE_B:
619  next = (struct vdpau_render_state *)s->next_picture.f->data[0];
620  assert(next);
621  render->info.vc1.backward_reference = next->surface;
622  // no break here, going to set forward prediction
623  case AV_PICTURE_TYPE_P:
624  last = (struct vdpau_render_state *)s->last_picture.f->data[0];
625  if (!last) // FIXME: Does this test make sense?
626  last = render; // predict second field from the first
627  render->info.vc1.forward_reference = last->surface;
628  }
629 
630  ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
631 
632  render->info.vc1.slice_count = 1;
633 
635  render->bitstream_buffers_used = 0;
636 }
637 #endif /* (CONFIG_VC1_VDPAU_DECODER */
638 
639 #if CONFIG_MPEG4_VDPAU_DECODER
641  int buf_size)
642 {
643  MpegEncContext *s = &ctx->m;
644  struct vdpau_render_state *render, *last, *next;
645  int i;
646 
647  if (!s->current_picture_ptr) return;
648 
649  render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
650  assert(render);
651 
652  /* fill VdpPictureInfoMPEG4Part2 struct */
653  render->info.mpeg4.trd[0] = s->pp_time;
654  render->info.mpeg4.trb[0] = s->pb_time;
655  render->info.mpeg4.trd[1] = s->pp_field_time >> 1;
656  render->info.mpeg4.trb[1] = s->pb_field_time >> 1;
657  render->info.mpeg4.vop_time_increment_resolution = s->avctx->time_base.den;
658  render->info.mpeg4.vop_coding_type = 0;
659  render->info.mpeg4.vop_fcode_forward = s->f_code;
660  render->info.mpeg4.vop_fcode_backward = s->b_code;
661  render->info.mpeg4.resync_marker_disable = !ctx->resync_marker;
662  render->info.mpeg4.interlaced = !s->progressive_sequence;
663  render->info.mpeg4.quant_type = s->mpeg_quant;
664  render->info.mpeg4.quarter_sample = s->quarter_sample;
665  render->info.mpeg4.short_video_header = s->avctx->codec->id == AV_CODEC_ID_H263;
666  render->info.mpeg4.rounding_control = s->no_rounding;
667  render->info.mpeg4.alternate_vertical_scan_flag = s->alternate_scan;
668  render->info.mpeg4.top_field_first = s->top_field_first;
669  for (i = 0; i < 64; ++i) {
670  render->info.mpeg4.intra_quantizer_matrix[i] = s->intra_matrix[i];
671  render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
672  }
673  render->info.mpeg4.forward_reference = VDP_INVALID_HANDLE;
674  render->info.mpeg4.backward_reference = VDP_INVALID_HANDLE;
675 
676  switch (s->pict_type) {
677  case AV_PICTURE_TYPE_B:
678  next = (struct vdpau_render_state *)s->next_picture.f->data[0];
679  assert(next);
680  render->info.mpeg4.backward_reference = next->surface;
681  render->info.mpeg4.vop_coding_type = 2;
682  // no break here, going to set forward prediction
683  case AV_PICTURE_TYPE_P:
684  last = (struct vdpau_render_state *)s->last_picture.f->data[0];
685  assert(last);
686  render->info.mpeg4.forward_reference = last->surface;
687  }
688 
689  ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
690 
692  render->bitstream_buffers_used = 0;
693 }
694 #endif /* CONFIG_MPEG4_VDPAU_DECODER */
695 
696 int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile)
697 {
698 #define PROFILE(prof) \
699 do { \
700  *profile = VDP_DECODER_PROFILE_##prof; \
701  return 0; \
702 } while (0)
703 
704  switch (avctx->codec_id) {
705  case AV_CODEC_ID_MPEG1VIDEO: PROFILE(MPEG1);
707  switch (avctx->profile) {
708  case FF_PROFILE_MPEG2_MAIN: PROFILE(MPEG2_MAIN);
709  case FF_PROFILE_MPEG2_SIMPLE: PROFILE(MPEG2_SIMPLE);
710  default: return AVERROR(EINVAL);
711  }
712  case AV_CODEC_ID_H263: PROFILE(MPEG4_PART2_ASP);
713  case AV_CODEC_ID_MPEG4:
714  switch (avctx->profile) {
715  case FF_PROFILE_MPEG4_SIMPLE: PROFILE(MPEG4_PART2_SP);
716  case FF_PROFILE_MPEG4_ADVANCED_SIMPLE: PROFILE(MPEG4_PART2_ASP);
717  default: return AVERROR(EINVAL);
718  }
719  case AV_CODEC_ID_H264:
720  switch (avctx->profile & ~FF_PROFILE_H264_INTRA) {
721  case FF_PROFILE_H264_BASELINE: PROFILE(H264_BASELINE);
723  case FF_PROFILE_H264_MAIN: PROFILE(H264_MAIN);
724  case FF_PROFILE_H264_HIGH: PROFILE(H264_HIGH);
725 #ifdef VDP_DECODER_PROFILE_H264_EXTENDED
726  case FF_PROFILE_H264_EXTENDED: PROFILE(H264_EXTENDED);
727 #endif
728  default: return AVERROR(EINVAL);
729  }
730  case AV_CODEC_ID_WMV3:
731  case AV_CODEC_ID_VC1:
732  switch (avctx->profile) {
733  case FF_PROFILE_VC1_SIMPLE: PROFILE(VC1_SIMPLE);
734  case FF_PROFILE_VC1_MAIN: PROFILE(VC1_MAIN);
735  case FF_PROFILE_VC1_ADVANCED: PROFILE(VC1_ADVANCED);
736  default: return AVERROR(EINVAL);
737  }
738  }
739  return AVERROR(EINVAL);
740 #undef PROFILE
741 }
742 
744 {
745  return av_mallocz(sizeof(AVVDPAUContext));
746 }
747 
748 int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device,
749  VdpGetProcAddress *get_proc, unsigned flags)
750 {
751  VDPAUHWContext *hwctx;
752 
754  return AVERROR(EINVAL);
755 
756  if (av_reallocp(&avctx->hwaccel_context, sizeof(*hwctx)))
757  return AVERROR(ENOMEM);
758 
759  hwctx = avctx->hwaccel_context;
760 
761  memset(hwctx, 0, sizeof(*hwctx));
762  hwctx->context.decoder = VDP_INVALID_HANDLE;
763  hwctx->device = device;
764  hwctx->get_proc_address = get_proc;
765  hwctx->flags = flags;
766  hwctx->reset = 1;
767  return 0;
768 }
769 
770 /* @}*/
static struct ResampleContext * create(struct ResampleContext *c, int out_rate, int in_rate, int filter_size, int phase_shift, int linear, double cutoff, enum AVSampleFormat format, enum SwrFilterType filter_type, int kaiser_beta, double precision, int cheby)
Definition: soxr_resample.c:32
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1261
const char const char void * val
Definition: avisynth_c.h:634
void ff_vdpau_h264_picture_complete(H264Context *h)
float v
int long_ref
1->long term reference 0->short term reference
Definition: h264.h:335
const char * s
Definition: avisynth_c.h:631
The VC1 Context.
Definition: vc1.h:173
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:3430
VdpDevice device
Definition: ffmpeg_vdpau.c:38
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1445
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:68
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int weighted_bipred_idc
Definition: h264.h:245
int chroma_qp_index_offset[2]
Definition: h264.h:248
int resync_marker
could this stream contain resync markers
Definition: mpeg4video.h:82
VdpDecoder decoder
Definition: ffmpeg_vdpau.c:39
int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
Definition: vdpau.c:267
int extended_mv
Ext MV in P/B (not in Simple)
Definition: vc1.h:223
#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE
Definition: avcodec.h:2927
int broadcast
TFF/RFF present.
Definition: vc1.h:200
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:243
uint8_t rangeredfrm
Frame decoding info for S/M profiles only.
Definition: vc1.h:302
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264.c:98
int frame_mbs_only_flag
Definition: h264.h:191
VdpPictureInfoMPEG1Or2 mpeg
Definition: vdpau.h:63
attribute_deprecated VdpBitstreamBuffer * bitstream_buffers
Table of bitstream buffers.
Definition: vdpau.h:137
H264Context.
Definition: h264.h:517
AVFrame * f
Definition: h264.h:310
Public libavcodec VDPAU header.
int fastuvmc
Rounding of qpel vector to hpel ? (not in Simple)
Definition: vc1.h:222
#define FF_PROFILE_H264_MAIN
Definition: avcodec.h:2895
H264Picture * long_ref[32]
Definition: h264.h:669
int profile
profile
Definition: avcodec.h:2861
int picture_structure
Definition: h264.h:590
AVVDPAUContext * av_vdpau_alloc_context(void)
Allocate an AVVDPAUContext.
Definition: vdpau.c:743
AVVDPAUContext * av_alloc_vdpaucontext(void)
allocation function for AVVDPAUContext
Definition: vdpau.c:68
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1380
int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type, uint32_t *width, uint32_t *height)
Gets the parameters to create an adequate VDPAU video surface for the codec context using VDPAU hardw...
Definition: vdpau.c:75
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2670
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2637
uint8_t scaling_matrix4[6][16]
Definition: h264.h:253
int deblocking_filter_parameters_present
deblocking_filter_parameters_present_flag
Definition: h264.h:249
int bi_type
Definition: vc1.h:381
if()
Definition: avfilter.c:975
int ff_vdpau_common_uninit(AVCodecContext *avctx)
Definition: vdpau.c:229
uint8_t
void * hwaccel_context
Hardware accelerator context.
Definition: avcodec.h:2682
int panscanflag
NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present.
Definition: vc1.h:203
#define FF_PROFILE_H264_EXTENDED
Definition: avcodec.h:2896
int interlace
Progressive/interlaced (RPTFTM syntax element)
Definition: vc1.h:201
#define FF_PROFILE_VC1_ADVANCED
Definition: avcodec.h:2910
void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *s, const uint8_t *buf, int buf_size)
int cabac
entropy_coding_mode_flag
Definition: h264.h:239
#define FF_PROFILE_MPEG2_MAIN
Definition: avcodec.h:2887
int no_rounding
apply no rounding to motion compensation (MPEG4, msmpeg4, ...) for b-frames rounding mode is always 0...
Definition: mpegvideo.h:288
int full_pel[2]
Definition: mpegvideo.h:478
VdpGetProcAddress * get_proc_address
Definition: ffmpeg_vdpau.c:40
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:186
int intra_dc_precision
Definition: mpegvideo.h:460
static AVFrame * frame
attribute_deprecated int bitstream_buffers_used
Useful bitstream buffers in the bitstream buffers table.
Definition: vdpau.h:128
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:34
int av_reallocp(void *ptr, size_t size)
Allocate or reallocate a block of memory.
Definition: mem.c:185
int refdist_flag
REFDIST syntax element present in II, IP, PI or PP field picture headers.
Definition: vc1.h:204
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:76
int redundant_pic_cnt_present
redundant_pic_cnt_present_flag
Definition: h264.h:251
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:392
ptrdiff_t size
Definition: opengl_enc.c:101
#define av_log(a,...)
void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, int buf_size, int slice_count)
int psf
Progressive Segmented Frame.
Definition: vc1.h:211
int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile)
Get a decoder profile that should be used for initializing a VDPAU decoder.
Definition: vdpau.c:696
MpegEncContext m
Definition: mpeg4video.h:66
H.264 / AVC / MPEG4 part10 codec.
int frame_num
Definition: h264.h:650
attribute_deprecated union AVVDPAUPictureInfo info
VDPAU picture information.
Definition: vdpau.h:112
enum AVCodecID id
Definition: avcodec.h:3220
#define AV_HWACCEL_FLAG_IGNORE_LEVEL
Hardware acceleration should be used for decoding even if the codec level used is unknown or higher t...
Definition: avcodec.h:3452
#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH
Hardware acceleration can output YUV pixel formats with a different chroma sampling than 4:2:0 and/or...
Definition: avcodec.h:3458
#define PROFILE(prof)
int mb_aff
mb_adaptive_frame_field_flag
Definition: h264.h:192
int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, int level)
Definition: vdpau.c:115
VdpBitstreamBuffer * bitstream_buffers
The user is responsible for freeing this buffer using av_freep().
Definition: vdpau.h:244
int overlap
overlapped transforms in use
Definition: vc1.h:226
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given block if it is not large enough, otherwise do nothing.
Definition: mem.c:478
This structure is used to share data between the libavcodec library and the client video application...
Definition: vdpau.h:90
int poc_type
pic_order_cnt_type
Definition: h264.h:181
int profile
Definition: mxfenc.c:1804
int constrained_intra_pred
constrained_intra_pred_flag
Definition: h264.h:250
#define MAKE_ACCESSORS(str, name, type, field)
Definition: internal.h:86
#define AVERROR(e)
Definition: error.h:43
#define FF_PROFILE_MPEG2_SIMPLE
Definition: avcodec.h:2888
PPS pps
current pps
Definition: h264.h:577
simple assert() macros that are a bit more flexible than ISO C assert().
int weighted_pred
weighted_pred_flag
Definition: h264.h:244
#define PICT_TOP_FIELD
Definition: mpegutils.h:33
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:401
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264.h:330
int resync_marker
could this stream contain resync markers
Definition: vc1.h:396
Libavcodec external API header.
#define FF_PROFILE_VC1_MAIN
Definition: avcodec.h:2908
int postprocflag
Per-frame processing suggestion flag present.
Definition: vc1.h:199
int delta_pic_order_always_zero_flag
Definition: h264.h:183
attribute_deprecated int bitstream_buffers_allocated
Allocated size of the bitstream_buffers table.
Definition: vdpau.h:120
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:67
uint8_t scaling_matrix8[6][64]
Definition: h264.h:254
int refs
number of reference frames
Definition: avcodec.h:1926
int intra_vlc_format
Definition: mpegvideo.h:465
void ff_vdpau_h264_picture_start(H264Context *h)
union AVVDPAUPictureInfo info
picture parameter information for all supported codecs
Definition: vdpau.h:248
int ref_frame_count
num_ref_frames
Definition: h264.h:187
int top_field_first
Definition: mpegvideo.h:462
#define FF_PROFILE_MPEG4_SIMPLE
Definition: avcodec.h:2912
int reference
Definition: h264.h:341
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:75
int tfcntrflag
TFCNTR present.
Definition: vc1.h:202
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:190
Picture.
Definition: mpegpicture.h:45
int alternate_scan
Definition: mpegvideo.h:466
void * hwaccel_picture_private
Hardware accelerator private data.
Definition: mpegpicture.h:77
SPS sps
current sps
Definition: h264.h:576
static int vdpau_error(VdpStatus status)
Definition: vdpau.c:44
int init_qp
pic_init_qp_minus26 + 26
Definition: h264.h:246
H264SliceContext * slice_ctx
Definition: h264.h:531
int direct_8x8_inference_flag
Definition: h264.h:193
uint8_t range_mapuv_flag
Definition: vc1.h:329
int mpeg_f_code[2][2]
Definition: mpegvideo.h:455
VdpPictureInfoMPEG4Part2 mpeg4
Definition: vdpau.h:65
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
int pic_order_present
pic_order_present_flag
Definition: h264.h:240
int rangered
RANGEREDFRM (range reduction) syntax element present at frame level.
Definition: vc1.h:189
int frame_pred_frame_dct
Definition: mpegvideo.h:461
static void destroy(struct ResampleContext **c)
Definition: soxr_resample.c:64
int finterpflag
INTERPFRM present.
Definition: vc1.h:228
uint16_t inter_matrix[64]
Definition: mpegvideo.h:306
AVCodecContext * avctx
Definition: h264.h:519
int concealment_motion_vectors
Definition: mpegvideo.h:463
enum AVCodecID codec_id
Definition: avcodec.h:1269
void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
Definition: vdpau.c:429
H264Picture * short_ref[32]
Definition: h264.h:668
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
int multires
frame-level RESPIC syntax element present
Definition: vc1.h:186
int field_poc[2]
top/bottom POC
Definition: h264.h:328
main external API structure.
Definition: avcodec.h:1252
int bitstream_buffers_used
Definition: vdpau.h:242
uint8_t range_mapy
Definition: vc1.h:330
int extended_dmv
Additional extended dmv range at P/B frame-level.
Definition: vc1.h:205
void * buf
Definition: avisynth_c.h:553
GLint GLenum type
Definition: opengl_enc.c:105
int progressive_sequence
Definition: mpegvideo.h:454
BYTE int const BYTE int int int height
Definition: avisynth_c.h:676
int bitstream_buffers_allocated
Describe size/location of the compressed video data.
Definition: vdpau.h:241
void ff_vdpau_h264_set_reference_frames(H264Context *h)
Definition: vdpau.c:361
int coded_height
Definition: avcodec.h:1445
struct AVFrame * f
Definition: mpegpicture.h:46
int(* func)(AVBPrint *dst, const char *in, const char *arg)
Definition: jacosubdec.c:67
H264Picture * cur_pic_ptr
Definition: h264.h:527
VdpDecoderRender * render
VDPAU decoder render callback.
Definition: vdpau.h:103
int quantizer_mode
2bits, quantizer mode used for sequence, see QUANT_*
Definition: vc1.h:227
int f_code
forward MV resolution
Definition: mpegvideo.h:244
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:182
#define FF_PROFILE_H264_INTRA
Definition: avcodec.h:2891
int max_b_frames
max number of b-frames for encoding
Definition: mpegvideo.h:121
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:218
int(* AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, const VdpPictureInfo *, uint32_t, const VdpBitstreamBuffer *)
Definition: vdpau.h:72
int vstransform
variable-size [48]x[48] transform type + info
Definition: vc1.h:225
int transform_8x8_mode
transform_8x8_mode_flag
Definition: h264.h:252
static int flags
Definition: cpu.c:47
uint8_t range_mapuv
Definition: vc1.h:331
#define FF_PROFILE_VC1_SIMPLE
Definition: avcodec.h:2907
uint16_t pb_field_time
like above, just for interlaced
Definition: mpegvideo.h:395
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device, VdpGetProcAddress *get_proc, unsigned flags)
Associate a VDPAU device with a codec context for hardware acceleration.
Definition: vdpau.c:748
uint8_t level
Definition: svq3.c:150
MpegEncContext s
Definition: vc1.h:174
MpegEncContext.
Definition: mpegvideo.h:87
struct AVCodecContext * avctx
Definition: mpegvideo.h:104
uint16_t pp_field_time
Definition: mpegvideo.h:394
uint8_t pq
Definition: vc1.h:238
#define FF_PROFILE_H264_HIGH
Definition: avcodec.h:2897
int pic_id
pic_num (short -> no wrap version of pic_num, pic_num & max_pic_num; long -> long_pic_num) ...
Definition: h264.h:333
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:63
AVVDPAU_Render2 render2
Definition: vdpau.h:139
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:79
common internal api header.
VdpPictureInfoH264 h264
Definition: vdpau.h:62
This structure is used as a callback between the FFmpeg decoder (vd_) and presentation (vo_) module...
Definition: vdpau.h:229
GLuint * buffers
Definition: opengl_enc.c:99
VdpPictureInfoVC1 vc1
Definition: vdpau.h:64
int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx, const uint8_t *buf, uint32_t size)
Definition: vdpau.c:340
enum FrameCodingMode fcm
Frame decoding info for Advanced profile.
Definition: vc1.h:308
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:180
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:3438
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:165
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:168
Bi-dir predicted.
Definition: avutil.h:276
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:77
int den
denominator
Definition: rational.h:45
int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame, struct vdpau_picture_context *pic_ctx)
Definition: vdpau.c:277
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
Definition: avcodec.h:2894
void * priv_data
Definition: avcodec.h:1294
#define PICT_FRAME
Definition: mpegutils.h:35
int picture_structure
Definition: mpegvideo.h:458
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:80
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1302
#define FF_PROFILE_H264_BASELINE
Definition: avcodec.h:2893
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:174
int current_slice
current slice number, used to initialize slice_num of each thread/context
Definition: h264.h:692
uint8_t range_mapy_flag
Definition: vc1.h:328
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:304
#define av_freep(p)
int dquant
How qscale varies with MBs, 2bits (not in Simple)
Definition: vc1.h:224
#define H264_RF_COUNT
int b_code
backward MV resolution for B Frames (mpeg4)
Definition: mpegvideo.h:245
void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, int buf_size)
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:250
static int ff_vdpau_common_reinit(AVCodecContext *avctx)
Definition: vdpau.c:252
VdpVideoSurface surface
Used as rendered surface, never changed.
Definition: vdpau.h:230
Predicted.
Definition: avutil.h:275
GLuint buffer
Definition: opengl_enc.c:102
#define av_unused
Definition: attributes.h:118
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:393
static uintptr_t ff_vdpau_get_surface_id(AVFrame *pic)
Extract VdpVideoSurface from an AVFrame.
int short_ref_count
number of actual short term references
Definition: h264.h:683
static int width