vdpau.c
1 /*
2  * Video Decode and Presentation API for UNIX (VDPAU) is used for
3  * HW decode acceleration for MPEG-1/2, MPEG-4 ASP, H.264 and VC-1.
4  *
5  * Copyright (c) 2008 NVIDIA
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <limits.h>
25 
26 #include "avcodec.h"
27 #include "internal.h"
28 #include "h264dec.h"
29 #include "vc1.h"
30 #include "vdpau.h"
31 #include "vdpau_compat.h"
32 #include "vdpau_internal.h"
33 
34 // XXX: at the time of adding this ifdefery, av_assert* wasn't used outside.
35 // When dropping it, make sure other av_assert* were not added since then.
36 #if FF_API_BUFS_VDPAU
37 #include "libavutil/avassert.h"
38 #endif
39 
40 #if FF_API_VDPAU
41 #undef NDEBUG
42 #include <assert.h>
43 #endif
44 
45 /**
46  * @addtogroup VDPAU_Decoding
47  *
48  * @{
49  */
50 
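/* Translate a VdpStatus error code into the closest matching AVERROR value. */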
51 static int vdpau_error(VdpStatus status)
52 {
53  switch (status) {
54  case VDP_STATUS_OK:
55  return 0;
56  case VDP_STATUS_NO_IMPLEMENTATION:
57  return AVERROR(ENOSYS);
58  case VDP_STATUS_DISPLAY_PREEMPTED:
59  return AVERROR(EIO);
60  case VDP_STATUS_INVALID_HANDLE:
61  return AVERROR(EBADF);
62  case VDP_STATUS_INVALID_POINTER:
63  return AVERROR(EFAULT);
64  case VDP_STATUS_RESOURCES:
65  return AVERROR(ENOBUFS);
66  case VDP_STATUS_HANDLE_DEVICE_MISMATCH:
67  return AVERROR(EXDEV);
68  case VDP_STATUS_ERROR:
69  return AVERROR(EIO);
70  default:
71  return AVERROR(EINVAL);
72  }
73 }
74 
75 AVVDPAUContext *av_alloc_vdpaucontext(void)
76 {
77  return av_vdpau_alloc_context();
78 }
79 
80 MAKE_ACCESSORS(AVVDPAUContext, vdpau_hwaccel, AVVDPAU_Render2, render2)
81 
82 int av_vdpau_get_surface_parameters(AVCodecContext *avctx,
83  VdpChromaType *type,
84  uint32_t *width, uint32_t *height)
85 {
86  VdpChromaType t;
87  uint32_t w = avctx->coded_width;
88  uint32_t h = avctx->coded_height;
89 
90  /* See <vdpau/vdpau.h> for per-type alignment constraints. */
91  switch (avctx->sw_pix_fmt) {
92  case AV_PIX_FMT_YUV420P:
93  case AV_PIX_FMT_YUVJ420P:
94  t = VDP_CHROMA_TYPE_420;
95  w = (w + 1) & ~1;
96  h = (h + 3) & ~3;
97  break;
98  case AV_PIX_FMT_YUV422P:
99  case AV_PIX_FMT_YUVJ422P:
100  t = VDP_CHROMA_TYPE_422;
101  w = (w + 1) & ~1;
102  h = (h + 1) & ~1;
103  break;
104  case AV_PIX_FMT_YUV444P:
105  case AV_PIX_FMT_YUVJ444P:
106  t = VDP_CHROMA_TYPE_444;
107  h = (h + 1) & ~1;
108  break;
109  default:
110  return AVERROR(ENOSYS);
111  }
112 
113  if (type)
114  *type = t;
115  if (width)
116  *width = w;
117  if (height)
118  *height = h;
119  return 0;
120 }
121 
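/* Resolve the VDPAU device and callbacks (from the user-supplied
 * hwaccel_context, or from a hardware frames/device context), check surface
 * and decoder capabilities, and create the VdpDecoder. */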
122 int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile,
123  int level)
124 {
125  VDPAUHWContext *hwctx = avctx->hwaccel_context;
126  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
127  VdpVideoSurfaceQueryCapabilities *surface_query_caps;
128  VdpDecoderQueryCapabilities *decoder_query_caps;
129  VdpDecoderCreate *create;
130  void *func;
131  VdpStatus status;
132  VdpBool supported;
133  uint32_t max_level, max_mb, max_width, max_height;
134  VdpChromaType type;
135  uint32_t width;
136  uint32_t height;
137 
138  vdctx->width = UINT32_MAX;
139  vdctx->height = UINT32_MAX;
140 
141  if (av_vdpau_get_surface_parameters(avctx, &type, &width, &height))
142  return AVERROR(ENOSYS);
143 
144  if (hwctx) {
145  hwctx->reset = 0;
146 
147  if (hwctx->context.decoder != VDP_INVALID_HANDLE) {
148  vdctx->decoder = hwctx->context.decoder;
149  vdctx->render = hwctx->context.render;
150  vdctx->device = VDP_INVALID_HANDLE;
151  return 0; /* Decoder created by user */
152  }
153 
154  vdctx->device = hwctx->device;
155  vdctx->get_proc_address = hwctx->get_proc_address;
156 
157  if (hwctx->flags & AV_HWACCEL_FLAG_IGNORE_LEVEL)
158  level = 0;
159 
160  if (!(hwctx->flags & AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH) &&
161  type != VDP_CHROMA_TYPE_420)
162  return AVERROR(ENOSYS);
163  } else {
164  AVHWFramesContext *frames_ctx = NULL;
165  AVVDPAUDeviceContext *dev_ctx;
166 
167  // We assume the hw_frames_ctx always survives until ff_vdpau_common_uninit
168  // is called. This holds true as the user is not allowed to touch
169  // hw_device_ctx, or hw_frames_ctx after get_format (and ff_get_format
170  // itself also uninits before unreffing hw_frames_ctx).
171  if (avctx->hw_frames_ctx) {
172  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
173  } else if (avctx->hw_device_ctx) {
174  int ret;
175 
176  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
177  if (!avctx->hw_frames_ctx)
178  return AVERROR(ENOMEM);
179 
180  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
181  frames_ctx->format = AV_PIX_FMT_VDPAU;
182  frames_ctx->sw_format = avctx->sw_pix_fmt;
183  frames_ctx->width = avctx->coded_width;
184  frames_ctx->height = avctx->coded_height;
185 
186  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
187  if (ret < 0) {
188  av_buffer_unref(&avctx->hw_frames_ctx);
189  return ret;
190  }
191  }
192 
193  if (!frames_ctx) {
194  av_log(avctx, AV_LOG_ERROR, "A hardware frames context is "
195  "required for VDPAU decoding.\n");
196  return AVERROR(EINVAL);
197  }
198 
199  dev_ctx = frames_ctx->device_ctx->hwctx;
200 
201  vdctx->device = dev_ctx->device;
202  vdctx->get_proc_address = dev_ctx->get_proc_address;
203 
204  if (avctx->hwaccel_flags & AV_HWACCEL_FLAG_IGNORE_LEVEL)
205  level = 0;
206  }
207 
208  if (level < 0)
209  return AVERROR(ENOTSUP);
210 
211  status = vdctx->get_proc_address(vdctx->device,
212  VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES,
213  &func);
214  if (status != VDP_STATUS_OK)
215  return vdpau_error(status);
216  else
217  surface_query_caps = func;
218 
219  status = surface_query_caps(vdctx->device, type, &supported,
220  &max_width, &max_height);
221  if (status != VDP_STATUS_OK)
222  return vdpau_error(status);
223  if (supported != VDP_TRUE ||
224  max_width < width || max_height < height)
225  return AVERROR(ENOTSUP);
226 
227  status = vdctx->get_proc_address(vdctx->device,
228  VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES,
229  &func);
230  if (status != VDP_STATUS_OK)
231  return vdpau_error(status);
232  else
233  decoder_query_caps = func;
234 
235  status = decoder_query_caps(vdctx->device, profile, &supported, &max_level,
236  &max_mb, &max_width, &max_height);
237 #ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
238  if ((status != VDP_STATUS_OK || supported != VDP_TRUE) && profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE) {
239  profile = VDP_DECODER_PROFILE_H264_MAIN;
240  status = decoder_query_caps(vdctx->device, profile, &supported,
241  &max_level, &max_mb,
242  &max_width, &max_height);
243  }
244 #endif
245  if (status != VDP_STATUS_OK)
246  return vdpau_error(status);
247 
248  if (supported != VDP_TRUE || max_level < level ||
249  max_width < width || max_height < height)
250  return AVERROR(ENOTSUP);
251 
252  status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_CREATE,
253  &func);
254  if (status != VDP_STATUS_OK)
255  return vdpau_error(status);
256  else
257  create = func;
258 
259  status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_RENDER,
260  &func);
261  if (status != VDP_STATUS_OK)
262  return vdpau_error(status);
263  else
264  vdctx->render = func;
265 
266  status = create(vdctx->device, profile, width, height, avctx->refs,
267  &vdctx->decoder);
268  if (status == VDP_STATUS_OK) {
269  vdctx->width = avctx->coded_width;
270  vdctx->height = avctx->coded_height;
271  }
272 
273  return vdpau_error(status);
274 }
275 
276 int ff_vdpau_common_uninit(AVCodecContext *avctx)
277 {
278  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
279  VdpDecoderDestroy *destroy;
280  void *func;
281  VdpStatus status;
282 
283  if (vdctx->device == VDP_INVALID_HANDLE)
284  return 0; /* Decoder created and destroyed by user */
285  if (vdctx->width == UINT32_MAX && vdctx->height == UINT32_MAX)
286  return 0;
287 
288  status = vdctx->get_proc_address(vdctx->device,
289  VDP_FUNC_ID_DECODER_DESTROY, &func);
290  if (status != VDP_STATUS_OK)
291  return vdpau_error(status);
292  else
293  destroy = func;
294 
295  status = destroy(vdctx->decoder);
296  return vdpau_error(status);
297 }
298 
299 static int ff_vdpau_common_reinit(AVCodecContext *avctx)
300 {
301  VDPAUHWContext *hwctx = avctx->hwaccel_context;
302  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
303 
304  if (vdctx->device == VDP_INVALID_HANDLE)
305  return 0; /* Decoder created by user */
306  if (avctx->coded_width == vdctx->width &&
307  avctx->coded_height == vdctx->height && (!hwctx || !hwctx->reset))
308  return 0;
309 
310  avctx->hwaccel->uninit(avctx);
311  return avctx->hwaccel->init(avctx);
312 }
313 
314 int ff_vdpau_common_start_frame(struct vdpau_picture_context *pic_ctx,
315  av_unused const uint8_t *buffer,
316  av_unused uint32_t size)
317 {
318  pic_ctx->bitstream_buffers_allocated = 0;
319  pic_ctx->bitstream_buffers_used = 0;
320  pic_ctx->bitstream_buffers = NULL;
321  return 0;
322 }
323 
324 int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame,
325  struct vdpau_picture_context *pic_ctx)
326 {
327  VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
328  AVVDPAUContext *hwctx = avctx->hwaccel_context;
329  VdpVideoSurface surf = ff_vdpau_get_surface_id(frame);
330  VdpStatus status;
331  int val;
332 
333  val = ff_vdpau_common_reinit(avctx);
334  if (val < 0)
335  return val;
336 
337 #if FF_API_BUFS_VDPAU
338 FF_DISABLE_DEPRECATION_WARNINGS
339  if (hwctx) {
340  av_assert0(sizeof(hwctx->info) <= sizeof(pic_ctx->info));
341  memcpy(&hwctx->info, &pic_ctx->info, sizeof(hwctx->info));
342  hwctx->bitstream_buffers = pic_ctx->bitstream_buffers;
343  hwctx->bitstream_buffers_used = pic_ctx->bitstream_buffers_used;
344  hwctx->bitstream_buffers_allocated = pic_ctx->bitstream_buffers_allocated;
345  }
346 FF_ENABLE_DEPRECATION_WARNINGS
347 #endif
348 
349  if (hwctx && !hwctx->render && hwctx->render2) {
350  status = hwctx->render2(avctx, frame, (void *)&pic_ctx->info,
351  pic_ctx->bitstream_buffers_used, pic_ctx->bitstream_buffers);
352  } else
353  status = vdctx->render(vdctx->decoder, surf, &pic_ctx->info,
354  pic_ctx->bitstream_buffers_used,
355  pic_ctx->bitstream_buffers);
356 
357  av_freep(&pic_ctx->bitstream_buffers);
358 
359 #if FF_API_BUFS_VDPAU
360 FF_DISABLE_DEPRECATION_WARNINGS
361  if (hwctx) {
362  hwctx->bitstream_buffers = NULL;
363  hwctx->bitstream_buffers_used = 0;
364  hwctx->bitstream_buffers_allocated = 0;
365  }
366 FF_ENABLE_DEPRECATION_WARNINGS
367 #endif
368 
369  return vdpau_error(status);
370 }
371 
372 #if CONFIG_MPEG1_VDPAU_HWACCEL || \
373  CONFIG_MPEG2_VDPAU_HWACCEL || CONFIG_MPEG4_VDPAU_HWACCEL || \
374  CONFIG_VC1_VDPAU_HWACCEL || CONFIG_WMV3_VDPAU_HWACCEL
375 int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
376 {
377  MpegEncContext *s = avctx->priv_data;
378  Picture *pic = s->current_picture_ptr;
379  struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
380  int val;
381 
382  val = ff_vdpau_common_end_frame(avctx, pic->f, pic_ctx);
383  if (val < 0)
384  return val;
385 
386  ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
387  return 0;
388 }
389 #endif
390 
391 int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx,
392  const uint8_t *buf, uint32_t size)
393 {
394  VdpBitstreamBuffer *buffers = pic_ctx->bitstream_buffers;
395 
396  buffers = av_fast_realloc(buffers, &pic_ctx->bitstream_buffers_allocated,
397  (pic_ctx->bitstream_buffers_used + 1) * sizeof(*buffers));
398  if (!buffers)
399  return AVERROR(ENOMEM);
400 
401  pic_ctx->bitstream_buffers = buffers;
402  buffers += pic_ctx->bitstream_buffers_used++;
403 
404  buffers->struct_version = VDP_BITSTREAM_BUFFER_VERSION;
405  buffers->bitstream = buf;
406  buffers->bitstream_bytes = size;
407  return 0;
408 }
409 
410 /* Obsolete non-hwaccel VDPAU support below... */
411 
412 #if FF_API_VDPAU
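/* Append a chunk of compressed data to the vdpau_render_state bitstream
 * buffer table used by the obsolete non-hwaccel decoders below. */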
413 void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
414 {
415  struct vdpau_render_state *render = (struct vdpau_render_state*)data;
416  assert(render);
417 
418  render->bitstream_buffers = av_fast_realloc(
419  render->bitstream_buffers,
420  &render->bitstream_buffers_allocated,
421  sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1)
422  );
423 
424  render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
425  render->bitstream_buffers[render->bitstream_buffers_used].bitstream = buf;
426  render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
427  render->bitstream_buffers_used++;
428 }
429 
430 #if CONFIG_H264_VDPAU_DECODER
431 void ff_vdpau_h264_set_reference_frames(H264Context *h)
432 {
433  struct vdpau_render_state *render, *render_ref;
434  VdpReferenceFrameH264 *rf, *rf2;
435  H264Picture *pic;
436  int i, list, pic_frame_idx;
437 
438  render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
439  assert(render);
440 
441  rf = &render->info.h264.referenceFrames[0];
442 #define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames)
443 
444  for (list = 0; list < 2; ++list) {
445  H264Picture **lp = list ? h->long_ref : h->short_ref;
446  int ls = list ? 16 : h->short_ref_count;
447 
448  for (i = 0; i < ls; ++i) {
449  pic = lp[i];
450  if (!pic || !pic->reference)
451  continue;
452  pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
453 
454  render_ref = (struct vdpau_render_state *)pic->f->data[0];
455  assert(render_ref);
456 
457  rf2 = &render->info.h264.referenceFrames[0];
458  while (rf2 != rf) {
459  if (
460  (rf2->surface == render_ref->surface)
461  && (rf2->is_long_term == pic->long_ref)
462  && (rf2->frame_idx == pic_frame_idx)
463  )
464  break;
465  ++rf2;
466  }
467  if (rf2 != rf) {
468  rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
469  rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
470  continue;
471  }
472 
473  if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
474  continue;
475 
476  rf->surface = render_ref->surface;
477  rf->is_long_term = pic->long_ref;
478  rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
479  rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
480  rf->field_order_cnt[0] = pic->field_poc[0];
481  rf->field_order_cnt[1] = pic->field_poc[1];
482  rf->frame_idx = pic_frame_idx;
483 
484  ++rf;
485  }
486  }
487 
488  for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
489  rf->surface = VDP_INVALID_HANDLE;
490  rf->is_long_term = 0;
491  rf->top_is_reference = 0;
492  rf->bottom_is_reference = 0;
493  rf->field_order_cnt[0] = 0;
494  rf->field_order_cnt[1] = 0;
495  rf->frame_idx = 0;
496  }
497 }
498 
499 void ff_vdpau_h264_picture_start(H264Context *h)
500 {
501  struct vdpau_render_state *render;
502  int i;
503 
504  render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
505  assert(render);
506 
507  for (i = 0; i < 2; ++i) {
508  int foc = h->cur_pic_ptr->field_poc[i];
509  if (foc == INT_MAX)
510  foc = 0;
511  render->info.h264.field_order_cnt[i] = foc;
512  }
513 
514  render->info.h264.frame_num = h->poc.frame_num;
515 }
516 
517 void ff_vdpau_h264_picture_complete(H264Context *h)
518 {
519  struct vdpau_render_state *render;
520 
521  render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
522  assert(render);
523 
524  render->info.h264.slice_count = h->current_slice;
525  if (render->info.h264.slice_count < 1)
526  return;
527 
528  render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
529  render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME;
530  render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
531  render->info.h264.num_ref_frames = h->ps.sps->ref_frame_count;
532  render->info.h264.mb_adaptive_frame_field_flag = h->ps.sps->mb_aff && !render->info.h264.field_pic_flag;
533  render->info.h264.constrained_intra_pred_flag = h->ps.pps->constrained_intra_pred;
534  render->info.h264.weighted_pred_flag = h->ps.pps->weighted_pred;
535  render->info.h264.weighted_bipred_idc = h->ps.pps->weighted_bipred_idc;
536  render->info.h264.frame_mbs_only_flag = h->ps.sps->frame_mbs_only_flag;
537  render->info.h264.transform_8x8_mode_flag = h->ps.pps->transform_8x8_mode;
538  render->info.h264.chroma_qp_index_offset = h->ps.pps->chroma_qp_index_offset[0];
539  render->info.h264.second_chroma_qp_index_offset = h->ps.pps->chroma_qp_index_offset[1];
540  render->info.h264.pic_init_qp_minus26 = h->ps.pps->init_qp - 26;
541  render->info.h264.num_ref_idx_l0_active_minus1 = h->ps.pps->ref_count[0] - 1;
542  render->info.h264.num_ref_idx_l1_active_minus1 = h->ps.pps->ref_count[1] - 1;
543  render->info.h264.log2_max_frame_num_minus4 = h->ps.sps->log2_max_frame_num - 4;
544  render->info.h264.pic_order_cnt_type = h->ps.sps->poc_type;
545  render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->ps.sps->poc_type ? 0 : h->ps.sps->log2_max_poc_lsb - 4;
546  render->info.h264.delta_pic_order_always_zero_flag = h->ps.sps->delta_pic_order_always_zero_flag;
547  render->info.h264.direct_8x8_inference_flag = h->ps.sps->direct_8x8_inference_flag;
548  render->info.h264.entropy_coding_mode_flag = h->ps.pps->cabac;
549  render->info.h264.pic_order_present_flag = h->ps.pps->pic_order_present;
550  render->info.h264.deblocking_filter_control_present_flag = h->ps.pps->deblocking_filter_parameters_present;
551  render->info.h264.redundant_pic_cnt_present_flag = h->ps.pps->redundant_pic_cnt_present;
552  memcpy(render->info.h264.scaling_lists_4x4, h->ps.pps->scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
553  memcpy(render->info.h264.scaling_lists_8x8[0], h->ps.pps->scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
554  memcpy(render->info.h264.scaling_lists_8x8[1], h->ps.pps->scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
555 
556  ff_h264_draw_horiz_band(h, &h->slice_ctx[0], 0, h->avctx->height);
557  render->bitstream_buffers_used = 0;
558 }
559 #endif /* CONFIG_H264_VDPAU_DECODER */
560 
561 #if CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER
562 void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
563  int buf_size, int slice_count)
564 {
565  struct vdpau_render_state *render, *last, *next;
566  int i;
567 
568  if (!s->current_picture_ptr) return;
569 
570  render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
571  assert(render);
572 
573  /* fill VdpPictureInfoMPEG1Or2 struct */
574  render->info.mpeg.picture_structure = s->picture_structure;
575  render->info.mpeg.picture_coding_type = s->pict_type;
576  render->info.mpeg.intra_dc_precision = s->intra_dc_precision;
577  render->info.mpeg.frame_pred_frame_dct = s->frame_pred_frame_dct;
578  render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
579  render->info.mpeg.intra_vlc_format = s->intra_vlc_format;
580  render->info.mpeg.alternate_scan = s->alternate_scan;
581  render->info.mpeg.q_scale_type = s->q_scale_type;
582  render->info.mpeg.top_field_first = s->top_field_first;
583  render->info.mpeg.full_pel_forward_vector = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2
584  render->info.mpeg.full_pel_backward_vector = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2
585  render->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
586  render->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1];
587  render->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0];
588  render->info.mpeg.f_code[1][1] = s->mpeg_f_code[1][1];
589  for (i = 0; i < 64; ++i) {
590  render->info.mpeg.intra_quantizer_matrix[i] = s->intra_matrix[i];
591  render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
592  }
593 
594  render->info.mpeg.forward_reference = VDP_INVALID_HANDLE;
595  render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;
596 
597  switch(s->pict_type){
598  case AV_PICTURE_TYPE_B:
599  next = (struct vdpau_render_state *)s->next_picture.f->data[0];
600  assert(next);
601  render->info.mpeg.backward_reference = next->surface;
602  // no break here, going to set forward prediction
603  case AV_PICTURE_TYPE_P:
604  last = (struct vdpau_render_state *)s->last_picture.f->data[0];
605  if (!last) // FIXME: Does this test make sense?
606  last = render; // predict second field from the first
607  render->info.mpeg.forward_reference = last->surface;
608  }
609 
610  ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
611 
612  render->info.mpeg.slice_count = slice_count;
613 
614  if (slice_count)
615  ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
616  render->bitstream_buffers_used = 0;
617 }
618 #endif /* CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER */
619 
620 #if CONFIG_VC1_VDPAU_DECODER
621 void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
622  int buf_size)
623 {
624  VC1Context *v = s->avctx->priv_data;
625  struct vdpau_render_state *render, *last, *next;
626 
627  render = (struct vdpau_render_state *)s->current_picture.f->data[0];
628  assert(render);
629 
630  /* fill VdpPictureInfoVC1 struct */
631  render->info.vc1.frame_coding_mode = v->fcm ? v->fcm + 1 : 0;
632  render->info.vc1.postprocflag = v->postprocflag;
633  render->info.vc1.pulldown = v->broadcast;
634  render->info.vc1.interlace = v->interlace;
635  render->info.vc1.tfcntrflag = v->tfcntrflag;
636  render->info.vc1.finterpflag = v->finterpflag;
637  render->info.vc1.psf = v->psf;
638  render->info.vc1.dquant = v->dquant;
639  render->info.vc1.panscan_flag = v->panscanflag;
640  render->info.vc1.refdist_flag = v->refdist_flag;
641  render->info.vc1.quantizer = v->quantizer_mode;
642  render->info.vc1.extended_mv = v->extended_mv;
643  render->info.vc1.extended_dmv = v->extended_dmv;
644  render->info.vc1.overlap = v->overlap;
645  render->info.vc1.vstransform = v->vstransform;
646  render->info.vc1.loopfilter = v->s.loop_filter;
647  render->info.vc1.fastuvmc = v->fastuvmc;
648  render->info.vc1.range_mapy_flag = v->range_mapy_flag;
649  render->info.vc1.range_mapy = v->range_mapy;
650  render->info.vc1.range_mapuv_flag = v->range_mapuv_flag;
651  render->info.vc1.range_mapuv = v->range_mapuv;
652  /* Specific to simple/main profile only */
653  render->info.vc1.multires = v->multires;
654  render->info.vc1.syncmarker = v->resync_marker;
655  render->info.vc1.rangered = v->rangered | (v->rangeredfrm << 1);
656  render->info.vc1.maxbframes = v->s.max_b_frames;
657 
658  render->info.vc1.deblockEnable = v->postprocflag & 1;
659  render->info.vc1.pquant = v->pq;
660 
661  render->info.vc1.forward_reference = VDP_INVALID_HANDLE;
662  render->info.vc1.backward_reference = VDP_INVALID_HANDLE;
663 
664  if (v->bi_type)
665  render->info.vc1.picture_type = 4;
666  else
667  render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;
668 
669  switch(s->pict_type){
670  case AV_PICTURE_TYPE_B:
671  next = (struct vdpau_render_state *)s->next_picture.f->data[0];
672  assert(next);
673  render->info.vc1.backward_reference = next->surface;
674  // no break here, going to set forward prediction
675  case AV_PICTURE_TYPE_P:
676  last = (struct vdpau_render_state *)s->last_picture.f->data[0];
677  if (!last) // FIXME: Does this test make sense?
678  last = render; // predict second field from the first
679  render->info.vc1.forward_reference = last->surface;
680  }
681 
682  ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
683 
684  render->info.vc1.slice_count = 1;
685 
686  ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
687  render->bitstream_buffers_used = 0;
688 }
689 #endif /* CONFIG_VC1_VDPAU_DECODER */
690 
691 #if CONFIG_MPEG4_VDPAU_DECODER
692 void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *ctx, const uint8_t *buf,
693  int buf_size)
694 {
695  MpegEncContext *s = &ctx->m;
696  struct vdpau_render_state *render, *last, *next;
697  int i;
698 
699  if (!s->current_picture_ptr) return;
700 
701  render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
702  assert(render);
703 
704  /* fill VdpPictureInfoMPEG4Part2 struct */
705  render->info.mpeg4.trd[0] = s->pp_time;
706  render->info.mpeg4.trb[0] = s->pb_time;
707  render->info.mpeg4.trd[1] = s->pp_field_time >> 1;
708  render->info.mpeg4.trb[1] = s->pb_field_time >> 1;
709  render->info.mpeg4.vop_time_increment_resolution = s->avctx->time_base.den;
710  render->info.mpeg4.vop_coding_type = 0;
711  render->info.mpeg4.vop_fcode_forward = s->f_code;
712  render->info.mpeg4.vop_fcode_backward = s->b_code;
713  render->info.mpeg4.resync_marker_disable = !ctx->resync_marker;
714  render->info.mpeg4.interlaced = !s->progressive_sequence;
715  render->info.mpeg4.quant_type = s->mpeg_quant;
716  render->info.mpeg4.quarter_sample = s->quarter_sample;
717  render->info.mpeg4.short_video_header = s->avctx->codec->id == AV_CODEC_ID_H263;
718  render->info.mpeg4.rounding_control = s->no_rounding;
719  render->info.mpeg4.alternate_vertical_scan_flag = s->alternate_scan;
720  render->info.mpeg4.top_field_first = s->top_field_first;
721  for (i = 0; i < 64; ++i) {
722  render->info.mpeg4.intra_quantizer_matrix[i] = s->intra_matrix[i];
723  render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
724  }
725  render->info.mpeg4.forward_reference = VDP_INVALID_HANDLE;
726  render->info.mpeg4.backward_reference = VDP_INVALID_HANDLE;
727 
728  switch (s->pict_type) {
729  case AV_PICTURE_TYPE_B:
730  next = (struct vdpau_render_state *)s->next_picture.f->data[0];
731  assert(next);
732  render->info.mpeg4.backward_reference = next->surface;
733  render->info.mpeg4.vop_coding_type = 2;
734  // no break here, going to set forward prediction
735  case AV_PICTURE_TYPE_P:
736  last = (struct vdpau_render_state *)s->last_picture.f->data[0];
737  assert(last);
738  render->info.mpeg4.forward_reference = last->surface;
739  }
740 
741  ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
742 
743  ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
744  render->bitstream_buffers_used = 0;
745 }
746 #endif /* CONFIG_MPEG4_VDPAU_DECODER */
747 #endif /* FF_API_VDPAU */
748 
749 #if FF_API_VDPAU_PROFILE
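/* Map the codec ID and profile of avctx to a VDPAU decoder profile;
 * returns AVERROR(EINVAL) when no suitable profile exists. */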
750 int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile)
751 {
752 #define PROFILE(prof) \
753 do { \
754  *profile = VDP_DECODER_PROFILE_##prof; \
755  return 0; \
756 } while (0)
757 
758  switch (avctx->codec_id) {
759  case AV_CODEC_ID_MPEG1VIDEO: PROFILE(MPEG1);
760  case AV_CODEC_ID_MPEG2VIDEO:
761  switch (avctx->profile) {
762  case FF_PROFILE_MPEG2_MAIN: PROFILE(MPEG2_MAIN);
763  case FF_PROFILE_MPEG2_SIMPLE: PROFILE(MPEG2_SIMPLE);
764  default: return AVERROR(EINVAL);
765  }
766  case AV_CODEC_ID_H263: PROFILE(MPEG4_PART2_ASP);
767  case AV_CODEC_ID_MPEG4:
768  switch (avctx->profile) {
769  case FF_PROFILE_MPEG4_SIMPLE: PROFILE(MPEG4_PART2_SP);
770  case FF_PROFILE_MPEG4_ADVANCED_SIMPLE: PROFILE(MPEG4_PART2_ASP);
771  default: return AVERROR(EINVAL);
772  }
773  case AV_CODEC_ID_H264:
774  switch (avctx->profile & ~FF_PROFILE_H264_INTRA) {
775  case FF_PROFILE_H264_BASELINE: PROFILE(H264_BASELINE);
776  case FF_PROFILE_H264_CONSTRAINED_BASELINE:
777  case FF_PROFILE_H264_MAIN: PROFILE(H264_MAIN);
778  case FF_PROFILE_H264_HIGH: PROFILE(H264_HIGH);
779 #ifdef VDP_DECODER_PROFILE_H264_EXTENDED
780  case FF_PROFILE_H264_EXTENDED: PROFILE(H264_EXTENDED);
781 #endif
782  default: return AVERROR(EINVAL);
783  }
784  case AV_CODEC_ID_WMV3:
785  case AV_CODEC_ID_VC1:
786  switch (avctx->profile) {
787  case FF_PROFILE_VC1_SIMPLE: PROFILE(VC1_SIMPLE);
788  case FF_PROFILE_VC1_MAIN: PROFILE(VC1_MAIN);
789  case FF_PROFILE_VC1_ADVANCED: PROFILE(VC1_ADVANCED);
790  default: return AVERROR(EINVAL);
791  }
792  }
793  return AVERROR(EINVAL);
794 #undef PROFILE
795 }
796 #endif /* FF_API_VDPAU_PROFILE */
797 
798 AVVDPAUContext *av_vdpau_alloc_context(void)
799 {
800  return av_mallocz(sizeof(AVVDPAUContext));
801 }
802 
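/* Typical usage (illustrative sketch): the caller obtains a VdpDevice and a
 * VdpGetProcAddress pointer from its own VDPAU window-system integration and
 * binds them to the codec context before opening the decoder, e.g.:
 *
 *     err = av_vdpau_bind_context(avctx, device, get_proc_address,
 *                                 AV_HWACCEL_FLAG_IGNORE_LEVEL);
 *     if (err < 0)
 *         return err;
 */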
803 int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device,
804  VdpGetProcAddress *get_proc, unsigned flags)
805 {
806  VDPAUHWContext *hwctx;
807 
808  if (avctx->hw_frames_ctx)
809  return AVERROR(EINVAL);
810 
811  if (av_reallocp(&avctx->hwaccel_context, sizeof(*hwctx)))
812  return AVERROR(ENOMEM);
813 
814  hwctx = avctx->hwaccel_context;
815 
816  memset(hwctx, 0, sizeof(*hwctx));
817  hwctx->context.decoder = VDP_INVALID_HANDLE;
818  hwctx->device = device;
819  hwctx->get_proc_address = get_proc;
820  hwctx->flags = flags;
821  hwctx->reset = 1;
822  return 0;
823 }
824 
825 /* @}*/
Definition: h264dec.h:477