hwcontext_qsv.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include <stdatomic.h>
20 #include <stdint.h>
21 #include <string.h>
22 
23 #include <mfxvideo.h>
24 
25 #include "config.h"
26 
27 #if HAVE_PTHREADS
28 #include <pthread.h>
29 #endif
30 
31 #define COBJMACROS
32 #if CONFIG_VAAPI
33 #include "hwcontext_vaapi.h"
34 #endif
35 #if CONFIG_D3D11VA
36 #include "hwcontext_d3d11va.h"
37 #endif
38 #if CONFIG_DXVA2
39 #include "hwcontext_dxva2.h"
40 #endif
41 
42 #include "buffer.h"
43 #include "common.h"
44 #include "hwcontext.h"
45 #include "hwcontext_internal.h"
46 #include "hwcontext_qsv.h"
47 #include "mem.h"
48 #include "pixfmt.h"
49 #include "pixdesc.h"
50 #include "time.h"
51 #include "imgutils.h"
52 #include "avassert.h"
53 
54 #define QSV_VERSION_ATLEAST(MAJOR, MINOR) \
55  (MFX_VERSION_MAJOR > (MAJOR) || \
56  MFX_VERSION_MAJOR == (MAJOR) && MFX_VERSION_MINOR >= (MINOR))
57 
58 #define MFX_IMPL_VIA_MASK(impl) (0x0f00 & (impl))
59 #define QSV_ONEVPL QSV_VERSION_ATLEAST(2, 0)
60 #define QSV_HAVE_OPAQUE !QSV_ONEVPL
61 
62 #if QSV_ONEVPL
63 #include <mfxdispatcher.h>
64 #else
65 #define MFXUnload(a) do { } while(0)
66 #endif
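/*
 * For example: building against oneVPL (API 2.x) makes QSV_VERSION_ATLEAST(2, 0)
 * true, so QSV_ONEVPL is 1, the dispatcher header above is pulled in and the
 * opaque-surface paths guarded by QSV_HAVE_OPAQUE are compiled out. With a
 * legacy Media SDK (API 1.x) the reverse holds and MFXUnload() is a no-op.
 */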
67 
68 typedef struct QSVDevicePriv {
69  AVBufferRef *child_device_ctx;
70 } QSVDevicePriv;
71 
72 typedef struct QSVDeviceContext {
73  mfxHDL handle;
74  mfxHandleType handle_type;
75  mfxVersion ver;
76  mfxIMPL impl;
77 
78  enum AVHWDeviceType child_device_type;
79  enum AVPixelFormat child_pix_fmt;
80 } QSVDeviceContext;
81 
82 typedef struct QSVFramesContext {
83  mfxSession session_download;
84  atomic_int session_download_init;
85  mfxSession session_upload;
86  atomic_int session_upload_init;
87 #if HAVE_PTHREADS
88  pthread_mutex_t session_lock;
89 #endif
90 
91  AVBufferRef *child_frames_ref;
92  mfxFrameSurface1 *surfaces_internal;
93  mfxHDLPair *handle_pairs_internal;
94  int nb_surfaces_used;
95 
96  // used in the frame allocator for non-opaque surfaces
97  mfxMemId *mem_ids;
98 #if QSV_HAVE_OPAQUE
99  // used in the opaque alloc request for opaque surfaces
100  mfxFrameSurface1 **surface_ptrs;
101 
102  mfxExtOpaqueSurfaceAlloc opaque_alloc;
103  mfxExtBuffer *ext_buffers[1];
104 #endif
105  AVFrame realigned_upload_frame;
106  AVFrame realigned_download_frame;
107 } QSVFramesContext;
108 
109 static const struct {
110  enum AVPixelFormat pix_fmt;
111  uint32_t fourcc;
112  uint16_t mfx_shift;
113 } supported_pixel_formats[] = {
114  { AV_PIX_FMT_NV12, MFX_FOURCC_NV12, 0 },
115  { AV_PIX_FMT_BGRA, MFX_FOURCC_RGB4, 0 },
116  { AV_PIX_FMT_P010, MFX_FOURCC_P010, 1 },
117  { AV_PIX_FMT_PAL8, MFX_FOURCC_P8, 0 },
118 #if CONFIG_VAAPI
119  { AV_PIX_FMT_YUYV422,
120  MFX_FOURCC_YUY2, 0 },
121  { AV_PIX_FMT_Y210,
122  MFX_FOURCC_Y210, 1 },
123  // VUYX is used for VAAPI child device,
124  // the SDK only declares support for AYUV
125  { AV_PIX_FMT_VUYX,
126  MFX_FOURCC_AYUV, 0 },
127  // XV30 is used for VAAPI child device,
128  // the SDK only declares support for Y410
129  { AV_PIX_FMT_XV30,
130  MFX_FOURCC_Y410, 0 },
131 #if QSV_VERSION_ATLEAST(1, 31)
132  // P012 is used for VAAPI child device,
133  // the SDK only declares support for P016
134  { AV_PIX_FMT_P012,
135  MFX_FOURCC_P016, 1 },
136  // Y212 is used for VAAPI child device,
137  // the SDK only declares support for Y216
138  { AV_PIX_FMT_Y212,
139  MFX_FOURCC_Y216, 1 },
140  // XV36 is used for VAAPI child device,
141  // the SDK only declares support for Y416
142  { AV_PIX_FMT_XV36,
143  MFX_FOURCC_Y416, 1 },
144 #endif
145 #endif
146 };
147 
148 extern int ff_qsv_get_surface_base_handle(mfxFrameSurface1 *surf,
149  enum AVHWDeviceType base_dev_type,
150  void **base_handle);
151 
152 /**
153  * The caller must allocate enough space in base_handle for the returned handle(s).
154  **/
155 int ff_qsv_get_surface_base_handle(mfxFrameSurface1 *surf,
156  enum AVHWDeviceType base_dev_type,
157  void **base_handle)
158 {
159  mfxHDLPair *handle_pair;
160  handle_pair = surf->Data.MemId;
161  switch (base_dev_type) {
162 #if CONFIG_VAAPI
163  case AV_HWDEVICE_TYPE_VAAPI:
164  base_handle[0] = handle_pair->first;
165  return 0;
166 #endif
167 #if CONFIG_D3D11VA
168  case AV_HWDEVICE_TYPE_D3D11VA:
169  base_handle[0] = handle_pair->first;
170  base_handle[1] = handle_pair->second;
171  return 0;
172 #endif
173 #if CONFIG_DXVA2
174  case AV_HWDEVICE_TYPE_DXVA2:
175  base_handle[0] = handle_pair->first;
176  return 0;
177 #endif
178  }
179  return AVERROR(EINVAL);
180 }
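/*
 * Minimal caller sketch (illustrative only; "surf", "handles" and "va_surface"
 * are hypothetical locals): to recover the VAAPI surface backing a QSV frame,
 * a caller could do
 *
 *     void *handles[2] = { NULL, NULL };
 *     if (!ff_qsv_get_surface_base_handle(surf, AV_HWDEVICE_TYPE_VAAPI, handles))
 *         va_surface = *(VASurfaceID*)handles[0];
 *
 * For AV_HWDEVICE_TYPE_D3D11VA both array entries are filled (texture pointer
 * plus array index), so base_handle must have room for two pointers.
 */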
181 
182 static uint32_t qsv_fourcc_from_pix_fmt(enum AVPixelFormat pix_fmt)
183 {
184  int i;
185  for (i = 0; i < FF_ARRAY_ELEMS(supported_pixel_formats); i++) {
186  if (supported_pixel_formats[i].pix_fmt == pix_fmt)
187  return supported_pixel_formats[i].fourcc;
188  }
189  return 0;
190 }
191 
192 static uint16_t qsv_shift_from_pix_fmt(enum AVPixelFormat pix_fmt)
193 {
194  for (int i = 0; i < FF_ARRAY_ELEMS(supported_pixel_formats); i++) {
195  if (supported_pixel_formats[i].pix_fmt == pix_fmt)
196  return supported_pixel_formats[i].mfx_shift;
197  }
198 
199  return 0;
200 }
201 
202 #if CONFIG_D3D11VA
203 static uint32_t qsv_get_d3d11va_bind_flags(int mem_type)
204 {
205  uint32_t bind_flags = 0;
206 
207  if ((mem_type & MFX_MEMTYPE_VIDEO_MEMORY_ENCODER_TARGET) && (mem_type & MFX_MEMTYPE_INTERNAL_FRAME))
208  bind_flags = D3D11_BIND_DECODER | D3D11_BIND_VIDEO_ENCODER;
209  else
210  bind_flags = D3D11_BIND_DECODER;
211 
212  if ((MFX_MEMTYPE_FROM_VPPOUT & mem_type) || (MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET & mem_type))
213  bind_flags = D3D11_BIND_RENDER_TARGET;
214 
215  return bind_flags;
216 }
217 #endif
218 
219 static int qsv_fill_border(AVFrame *dst, const AVFrame *src)
220 {
221  const AVPixFmtDescriptor *desc;
222  int i, planes_nb = 0;
223  if (dst->format != src->format)
224  return AVERROR(EINVAL);
225 
226  desc = av_pix_fmt_desc_get(dst->format);
227 
228  for (i = 0; i < desc->nb_components; i++)
229  planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);
230 
231  for (i = 0; i < planes_nb; i++) {
232  int sheight, dheight, y;
233  ptrdiff_t swidth = av_image_get_linesize(src->format,
234  src->width,
235  i);
236  ptrdiff_t dwidth = av_image_get_linesize(dst->format,
237  dst->width,
238  i);
239  const AVComponentDescriptor comp = desc->comp[i];
240  if (swidth < 0 || dwidth < 0) {
241  av_log(NULL, AV_LOG_ERROR, "av_image_get_linesize failed\n");
242  return AVERROR(EINVAL);
243  }
244  sheight = src->height;
245  dheight = dst->height;
246  if (i) {
247  sheight = AV_CEIL_RSHIFT(src->height, desc->log2_chroma_h);
248  dheight = AV_CEIL_RSHIFT(dst->height, desc->log2_chroma_h);
249  }
250  //fill right padding
251  for (y = 0; y < sheight; y++) {
252  void *line_ptr = dst->data[i] + y*dst->linesize[i] + swidth;
253  av_memcpy_backptr(line_ptr,
254  comp.depth > 8 ? 2 : 1,
255  dwidth - swidth);
256  }
257  //fill bottom padding
258  for (y = sheight; y < dheight; y++) {
259  memcpy(dst->data[i]+y*dst->linesize[i],
260  dst->data[i]+(sheight-1)*dst->linesize[i],
261  dwidth);
262  }
263  }
264  return 0;
265 }
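/*
 * Worked example (illustrative): copying a 720x480 NV12 frame into a 736x496
 * padded destination leaves 16 bytes of right padding and 16 rows of bottom
 * padding on the luma plane (8 rows on the chroma plane). The loops above
 * replicate the last valid sample of each row across the right padding with
 * av_memcpy_backptr() (back distance 1 byte for 8-bit formats, 2 otherwise)
 * and then duplicate the last valid row into the bottom padding with memcpy().
 */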
266 
267 static int qsv_device_init(AVHWDeviceContext *ctx)
268 {
269  AVQSVDeviceContext *hwctx = ctx->hwctx;
270  QSVDeviceContext *s = ctx->internal->priv;
271  int hw_handle_supported = 0;
272  mfxHandleType handle_type;
273  enum AVHWDeviceType device_type;
274  enum AVPixelFormat pix_fmt;
275  mfxStatus err;
276 
277  err = MFXQueryIMPL(hwctx->session, &s->impl);
278  if (err == MFX_ERR_NONE)
279  err = MFXQueryVersion(hwctx->session, &s->ver);
280  if (err != MFX_ERR_NONE) {
281  av_log(ctx, AV_LOG_ERROR, "Error querying the session attributes\n");
282  return AVERROR_UNKNOWN;
283  }
284 
285  if (MFX_IMPL_VIA_VAAPI == MFX_IMPL_VIA_MASK(s->impl)) {
286 #if CONFIG_VAAPI
287  handle_type = MFX_HANDLE_VA_DISPLAY;
288  device_type = AV_HWDEVICE_TYPE_VAAPI;
289  pix_fmt = AV_PIX_FMT_VAAPI;
290  hw_handle_supported = 1;
291 #endif
292  } else if (MFX_IMPL_VIA_D3D11 == MFX_IMPL_VIA_MASK(s->impl)) {
293 #if CONFIG_D3D11VA
294  handle_type = MFX_HANDLE_D3D11_DEVICE;
295  device_type = AV_HWDEVICE_TYPE_D3D11VA;
296  pix_fmt = AV_PIX_FMT_D3D11;
297  hw_handle_supported = 1;
298 #endif
299  } else if (MFX_IMPL_VIA_D3D9 == MFX_IMPL_VIA_MASK(s->impl)) {
300 #if CONFIG_DXVA2
301  handle_type = MFX_HANDLE_D3D9_DEVICE_MANAGER;
302  device_type = AV_HWDEVICE_TYPE_DXVA2;
303  pix_fmt = AV_PIX_FMT_DXVA2_VLD;
304  hw_handle_supported = 1;
305 #endif
306  }
307 
308  if (hw_handle_supported) {
309  err = MFXVideoCORE_GetHandle(hwctx->session, handle_type, &s->handle);
310  if (err == MFX_ERR_NONE) {
311  s->handle_type = handle_type;
312  s->child_device_type = device_type;
313  s->child_pix_fmt = pix_fmt;
314  }
315  }
316  if (!s->handle) {
317  av_log(ctx, AV_LOG_VERBOSE, "No supported hw handle could be retrieved "
318  "from the session\n");
319  }
320  return 0;
321 }
322 
323 static void qsv_frames_uninit(AVHWFramesContext *ctx)
324 {
325  QSVFramesContext *s = ctx->internal->priv;
326 
327  if (s->session_download) {
328  MFXVideoVPP_Close(s->session_download);
329  MFXClose(s->session_download);
330  }
331  s->session_download = NULL;
332  s->session_download_init = 0;
333 
334  if (s->session_upload) {
335  MFXVideoVPP_Close(s->session_upload);
336  MFXClose(s->session_upload);
337  }
338  s->session_upload = NULL;
339  s->session_upload_init = 0;
340 
341 #if HAVE_PTHREADS
342  pthread_mutex_destroy(&s->session_lock);
343 #endif
344 
345  av_freep(&s->mem_ids);
346 #if QSV_HAVE_OPAQUE
347  av_freep(&s->surface_ptrs);
348 #endif
349  av_freep(&s->surfaces_internal);
350  av_freep(&s->handle_pairs_internal);
351  av_frame_unref(&s->realigned_upload_frame);
352  av_frame_unref(&s->realigned_download_frame);
353  av_buffer_unref(&s->child_frames_ref);
354 }
355 
356 static void qsv_pool_release_dummy(void *opaque, uint8_t *data)
357 {
358 }
359 
360 static AVBufferRef *qsv_pool_alloc(void *opaque, size_t size)
361 {
362  AVHWFramesContext *ctx = (AVHWFramesContext*)opaque;
363  QSVFramesContext *s = ctx->internal->priv;
364  AVQSVFramesContext *hwctx = ctx->hwctx;
365 
366  if (s->nb_surfaces_used < hwctx->nb_surfaces) {
367  s->nb_surfaces_used++;
368  return av_buffer_create((uint8_t*)(s->surfaces_internal + s->nb_surfaces_used - 1),
369  sizeof(*hwctx->surfaces), qsv_pool_release_dummy, NULL, 0);
370  }
371 
372  return NULL;
373 }
374 
375 static int qsv_init_child_ctx(AVHWFramesContext *ctx)
376 {
377  AVQSVFramesContext *hwctx = ctx->hwctx;
378  QSVFramesContext *s = ctx->internal->priv;
379  QSVDeviceContext *device_priv = ctx->device_ctx->internal->priv;
380 
381  AVBufferRef *child_device_ref = NULL;
382  AVBufferRef *child_frames_ref = NULL;
383 
384  AVHWDeviceContext *child_device_ctx;
385  AVHWFramesContext *child_frames_ctx;
386 
387  int i, ret = 0;
388 
389  if (!device_priv->handle) {
391  "Cannot create a non-opaque internal surface pool without "
392  "a hardware handle\n");
393  return AVERROR(EINVAL);
394  }
395 
396  child_device_ref = av_hwdevice_ctx_alloc(device_priv->child_device_type);
397  if (!child_device_ref)
398  return AVERROR(ENOMEM);
399  child_device_ctx = (AVHWDeviceContext*)child_device_ref->data;
400 
401 #if CONFIG_VAAPI
402  if (child_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
403  AVVAAPIDeviceContext *child_device_hwctx = child_device_ctx->hwctx;
404  child_device_hwctx->display = (VADisplay)device_priv->handle;
405  }
406 #endif
407 #if CONFIG_D3D11VA
408  if (child_device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
409  AVD3D11VADeviceContext *child_device_hwctx = child_device_ctx->hwctx;
410  ID3D11Device_AddRef((ID3D11Device*)device_priv->handle);
411  child_device_hwctx->device = (ID3D11Device*)device_priv->handle;
412  }
413 #endif
414 #if CONFIG_DXVA2
415  if (child_device_ctx->type == AV_HWDEVICE_TYPE_DXVA2) {
416  AVDXVA2DeviceContext *child_device_hwctx = child_device_ctx->hwctx;
417  child_device_hwctx->devmgr = (IDirect3DDeviceManager9*)device_priv->handle;
418  }
419 #endif
420 
421  ret = av_hwdevice_ctx_init(child_device_ref);
422  if (ret < 0) {
423  av_log(ctx, AV_LOG_ERROR, "Error initializing a child device context\n");
424  goto fail;
425  }
426 
427  child_frames_ref = av_hwframe_ctx_alloc(child_device_ref);
428  if (!child_frames_ref) {
429  ret = AVERROR(ENOMEM);
430  goto fail;
431  }
432  child_frames_ctx = (AVHWFramesContext*)child_frames_ref->data;
433 
434  child_frames_ctx->format = device_priv->child_pix_fmt;
435  child_frames_ctx->sw_format = ctx->sw_format;
436  child_frames_ctx->initial_pool_size = ctx->initial_pool_size;
437  child_frames_ctx->width = FFALIGN(ctx->width, 16);
438  child_frames_ctx->height = FFALIGN(ctx->height, 16);
439 
440 #if CONFIG_D3D11VA
441  if (child_device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
442  AVD3D11VAFramesContext *child_frames_hwctx = child_frames_ctx->hwctx;
443  if (hwctx->frame_type == 0)
444  hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
445  if (hwctx->frame_type & MFX_MEMTYPE_SHARED_RESOURCE)
446  child_frames_hwctx->MiscFlags = D3D11_RESOURCE_MISC_SHARED;
447  child_frames_hwctx->BindFlags = qsv_get_d3d11va_bind_flags(hwctx->frame_type);
448  }
449 #endif
450 #if CONFIG_DXVA2
451  if (child_device_ctx->type == AV_HWDEVICE_TYPE_DXVA2) {
452  AVDXVA2FramesContext *child_frames_hwctx = child_frames_ctx->hwctx;
453  if (hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET)
454  child_frames_hwctx->surface_type = DXVA2_VideoProcessorRenderTarget;
455  else
456  child_frames_hwctx->surface_type = DXVA2_VideoDecoderRenderTarget;
457  }
458 #endif
459 
460  ret = av_hwframe_ctx_init(child_frames_ref);
461  if (ret < 0) {
462  av_log(ctx, AV_LOG_ERROR, "Error initializing a child frames context\n");
463  goto fail;
464  }
465 
466 #if CONFIG_VAAPI
467  if (child_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
468  AVVAAPIFramesContext *child_frames_hwctx = child_frames_ctx->hwctx;
469  for (i = 0; i < ctx->initial_pool_size; i++) {
470  s->handle_pairs_internal[i].first = child_frames_hwctx->surface_ids + i;
471  s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
472  s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
473  }
474  hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
475  }
476 #endif
477 #if CONFIG_D3D11VA
478  if (child_device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
479  AVD3D11VAFramesContext *child_frames_hwctx = child_frames_ctx->hwctx;
480  for (i = 0; i < ctx->initial_pool_size; i++) {
481  s->handle_pairs_internal[i].first = (mfxMemId)child_frames_hwctx->texture_infos[i].texture;
482  if(child_frames_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
483  s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
484  } else {
485  s->handle_pairs_internal[i].second = (mfxMemId)child_frames_hwctx->texture_infos[i].index;
486  }
487  s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
488  }
489  if (child_frames_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
490  hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
491  } else {
492  hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
493  }
494  }
495 #endif
496 #if CONFIG_DXVA2
497  if (child_device_ctx->type == AV_HWDEVICE_TYPE_DXVA2) {
498  AVDXVA2FramesContext *child_frames_hwctx = child_frames_ctx->hwctx;
499  for (i = 0; i < ctx->initial_pool_size; i++) {
500  s->handle_pairs_internal[i].first = (mfxMemId)child_frames_hwctx->surfaces[i];
501  s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
502  s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
503  }
504  if (child_frames_hwctx->surface_type == DXVA2_VideoProcessorRenderTarget)
505  hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
506  else
507  hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
508  }
509 #endif
510 
511  s->child_frames_ref = child_frames_ref;
512  child_frames_ref = NULL;
513 
514 fail:
515  av_buffer_unref(&child_device_ref);
516  av_buffer_unref(&child_frames_ref);
517  return ret;
518 }
519 
520 static int qsv_init_surface(AVHWFramesContext *ctx, mfxFrameSurface1 *surf)
521 {
522  const AVPixFmtDescriptor *desc;
523  uint32_t fourcc;
524 
525  desc = av_pix_fmt_desc_get(ctx->sw_format);
526  if (!desc)
527  return AVERROR(EINVAL);
528 
529  fourcc = qsv_fourcc_from_pix_fmt(ctx->sw_format);
530  if (!fourcc)
531  return AVERROR(EINVAL);
532 
533  surf->Info.BitDepthLuma = desc->comp[0].depth;
534  surf->Info.BitDepthChroma = desc->comp[0].depth;
535  surf->Info.Shift = qsv_shift_from_pix_fmt(ctx->sw_format);
536 
537  if (desc->log2_chroma_w && desc->log2_chroma_h)
538  surf->Info.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
539  else if (desc->log2_chroma_w)
540  surf->Info.ChromaFormat = MFX_CHROMAFORMAT_YUV422;
541  else
542  surf->Info.ChromaFormat = MFX_CHROMAFORMAT_YUV444;
543 
544  surf->Info.FourCC = fourcc;
545  surf->Info.Width = FFALIGN(ctx->width, 16);
546  surf->Info.CropW = ctx->width;
547  surf->Info.Height = FFALIGN(ctx->height, 16);
548  surf->Info.CropH = ctx->height;
549  surf->Info.FrameRateExtN = 25;
550  surf->Info.FrameRateExtD = 1;
551  surf->Info.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
552 
553  return 0;
554 }
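/*
 * Example outcome (for illustration): with ctx->sw_format == AV_PIX_FMT_NV12
 * and a 1920x1080 frames context this yields FourCC MFX_FOURCC_NV12,
 * BitDepthLuma/BitDepthChroma 8, Shift 0, ChromaFormat YUV420, Width 1920 and
 * Height 1088 (16-aligned), while CropW/CropH keep the real 1920x1080 size.
 */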
555 
556 static int qsv_init_pool(AVHWFramesContext *ctx, uint32_t fourcc_required)
557 {
558  QSVFramesContext *s = ctx->internal->priv;
559  AVQSVFramesContext *frames_hwctx = ctx->hwctx;
560 
561  int i, ret = 0;
562 
563  if (ctx->initial_pool_size <= 0) {
564  av_log(ctx, AV_LOG_ERROR, "QSV requires a fixed frame pool size\n");
565  return AVERROR(EINVAL);
566  }
567 
568  s->handle_pairs_internal = av_calloc(ctx->initial_pool_size,
569  sizeof(*s->handle_pairs_internal));
570  if (!s->handle_pairs_internal)
571  return AVERROR(ENOMEM);
572 
573  s->surfaces_internal = av_calloc(ctx->initial_pool_size,
574  sizeof(*s->surfaces_internal));
575  if (!s->surfaces_internal)
576  return AVERROR(ENOMEM);
577 
578  for (i = 0; i < ctx->initial_pool_size; i++) {
579  ret = qsv_init_surface(ctx, &s->surfaces_internal[i]);
580  if (ret < 0)
581  return ret;
582  }
583 
584 #if QSV_HAVE_OPAQUE
585  if (!(frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)) {
586  ret = qsv_init_child_ctx(ctx);
587  if (ret < 0)
588  return ret;
589  }
590 #else
591  ret = qsv_init_child_ctx(ctx);
592  if (ret < 0)
593  return ret;
594 #endif
595 
596  ctx->internal->pool_internal = av_buffer_pool_init2(sizeof(mfxFrameSurface1),
597  ctx, qsv_pool_alloc, NULL);
598  if (!ctx->internal->pool_internal)
599  return AVERROR(ENOMEM);
600 
601  frames_hwctx->surfaces = s->surfaces_internal;
602  frames_hwctx->nb_surfaces = ctx->initial_pool_size;
603 
604  return 0;
605 }
606 
607 static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
608  mfxFrameAllocResponse *resp)
609 {
610  AVHWFramesContext *ctx = pthis;
611  QSVFramesContext *s = ctx->internal->priv;
612  AVQSVFramesContext *hwctx = ctx->hwctx;
613  mfxFrameInfo *i = &req->Info;
614  mfxFrameInfo *i1 = &hwctx->surfaces[0].Info;
615 
616  if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
617  !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
618  !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
619  return MFX_ERR_UNSUPPORTED;
620  if (i->Width > i1->Width || i->Height > i1->Height ||
621  i->FourCC != i1->FourCC || i->ChromaFormat != i1->ChromaFormat) {
622  av_log(ctx, AV_LOG_ERROR, "Mismatching surface properties in an "
623  "allocation request: %dx%d %d %d vs %dx%d %d %d\n",
624  i->Width, i->Height, i->FourCC, i->ChromaFormat,
625  i1->Width, i1->Height, i1->FourCC, i1->ChromaFormat);
626  return MFX_ERR_UNSUPPORTED;
627  }
628 
629  resp->mids = s->mem_ids;
630  resp->NumFrameActual = hwctx->nb_surfaces;
631 
632  return MFX_ERR_NONE;
633 }
634 
635 static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
636 {
637  return MFX_ERR_NONE;
638 }
639 
640 static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
641 {
642  return MFX_ERR_UNSUPPORTED;
643 }
644 
645 static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
646 {
647  return MFX_ERR_UNSUPPORTED;
648 }
649 
650 static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
651 {
652  mfxHDLPair *pair_dst = (mfxHDLPair*)hdl;
653  mfxHDLPair *pair_src = (mfxHDLPair*)mid;
654 
655  pair_dst->first = pair_src->first;
656 
657  if (pair_src->second != (mfxMemId)MFX_INFINITE)
658  pair_dst->second = pair_src->second;
659  return MFX_ERR_NONE;
660 }
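/*
 * Note: a second handle equal to MFX_INFINITE means "no second handle" (e.g. a
 * VAAPI or DXVA2 surface, or a D3D11 render-target texture that is not an
 * array slice); in that case the destination pair's second member is
 * deliberately left untouched.
 */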
661 
662 #if QSV_ONEVPL
663 
664 static int qsv_d3d11_update_config(void *ctx, mfxHDL handle, mfxConfig cfg)
665 {
666 #if CONFIG_D3D11VA
667  mfxStatus sts;
668  IDXGIAdapter *pDXGIAdapter;
669  DXGI_ADAPTER_DESC adapterDesc;
670  IDXGIDevice *pDXGIDevice = NULL;
671  HRESULT hr;
672  ID3D11Device *device = handle;
673  mfxVariant impl_value;
674 
675  hr = ID3D11Device_QueryInterface(device, &IID_IDXGIDevice, (void**)&pDXGIDevice);
676  if (SUCCEEDED(hr)) {
677  hr = IDXGIDevice_GetAdapter(pDXGIDevice, &pDXGIAdapter);
678  if (FAILED(hr)) {
679  av_log(ctx, AV_LOG_ERROR, "Error IDXGIDevice_GetAdapter %d\n", hr);
680  goto fail;
681  }
682 
683  hr = IDXGIAdapter_GetDesc(pDXGIAdapter, &adapterDesc);
684  if (FAILED(hr)) {
685  av_log(ctx, AV_LOG_ERROR, "Error IDXGIAdapter_GetDesc %d\n", hr);
686  goto fail;
687  }
688  } else {
689  av_log(ctx, AV_LOG_ERROR, "Error ID3D11Device_QueryInterface %d\n", hr);
690  goto fail;
691  }
692 
693  impl_value.Type = MFX_VARIANT_TYPE_U16;
694  impl_value.Data.U16 = adapterDesc.DeviceId;
695  sts = MFXSetConfigFilterProperty(cfg,
696  (const mfxU8 *)"mfxExtendedDeviceId.DeviceID", impl_value);
697  if (sts != MFX_ERR_NONE) {
698  av_log(ctx, AV_LOG_ERROR, "Error adding a MFX configuration"
699  "DeviceID property: %d.\n", sts);
700  goto fail;
701  }
702 
703  impl_value.Type = MFX_VARIANT_TYPE_PTR;
704  impl_value.Data.Ptr = &adapterDesc.AdapterLuid;
705  sts = MFXSetConfigFilterProperty(cfg,
706  (const mfxU8 *)"mfxExtendedDeviceId.DeviceLUID", impl_value);
707  if (sts != MFX_ERR_NONE) {
708  av_log(ctx, AV_LOG_ERROR, "Error adding a MFX configuration"
709  "DeviceLUID property: %d.\n", sts);
710  goto fail;
711  }
712 
713  impl_value.Type = MFX_VARIANT_TYPE_U32;
714  impl_value.Data.U32 = 0x0001;
715  sts = MFXSetConfigFilterProperty(cfg,
716  (const mfxU8 *)"mfxExtendedDeviceId.LUIDDeviceNodeMask", impl_value);
717  if (sts != MFX_ERR_NONE) {
718  av_log(ctx, AV_LOG_ERROR, "Error adding a MFX configuration"
719  "LUIDDeviceNodeMask property: %d.\n", sts);
720  goto fail;
721  }
722 
723  return 0;
724 
725 fail:
726 #endif
727  return AVERROR_UNKNOWN;
728 }
729 
730 static int qsv_d3d9_update_config(void *ctx, mfxHDL handle, mfxConfig cfg)
731 {
732  int ret = AVERROR_UNKNOWN;
733 #if CONFIG_DXVA2
734  mfxStatus sts;
735  IDirect3DDeviceManager9* devmgr = handle;
736  IDirect3DDevice9Ex *device = NULL;
737  HANDLE device_handle = 0;
738  IDirect3D9Ex *d3d9ex = NULL;
739  LUID luid;
740  D3DDEVICE_CREATION_PARAMETERS params;
741  HRESULT hr;
742  mfxVariant impl_value;
743 
744  hr = IDirect3DDeviceManager9_OpenDeviceHandle(devmgr, &device_handle);
745  if (FAILED(hr)) {
746  av_log(ctx, AV_LOG_ERROR, "Error OpenDeviceHandle %d\n", hr);
747  goto fail;
748  }
749 
750  hr = IDirect3DDeviceManager9_LockDevice(devmgr, device_handle, &device, TRUE);
751  if (FAILED(hr)) {
752  av_log(ctx, AV_LOG_ERROR, "Error LockDevice %d\n", hr);
753  goto fail;
754  }
755 
756  hr = IDirect3DDevice9Ex_GetCreationParameters(device, &params);
757  if (FAILED(hr)) {
758  av_log(ctx, AV_LOG_ERROR, "Error IDirect3DDevice9_GetCreationParameters %d\n", hr);
759  goto unlock;
760  }
761 
762  hr = IDirect3DDevice9Ex_GetDirect3D(device, &d3d9ex);
763  if (FAILED(hr)) {
764  av_log(ctx, AV_LOG_ERROR, "Error IDirect3DDevice9Ex_GetAdapterLUID %d\n", hr);
765  goto unlock;
766  }
767 
768  hr = IDirect3D9Ex_GetAdapterLUID(d3d9ex, params.AdapterOrdinal, &luid);
769  if (FAILED(hr)) {
770  av_log(ctx, AV_LOG_ERROR, "Error IDirect3DDevice9Ex_GetAdapterLUID %d\n", hr);
771  goto unlock;
772  }
773 
774  impl_value.Type = MFX_VARIANT_TYPE_PTR;
775  impl_value.Data.Ptr = &luid;
776  sts = MFXSetConfigFilterProperty(cfg,
777  (const mfxU8 *)"mfxExtendedDeviceId.DeviceLUID", impl_value);
778  if (sts != MFX_ERR_NONE) {
779  av_log(ctx, AV_LOG_ERROR, "Error adding a MFX configuration"
780  "DeviceLUID property: %d.\n", sts);
781  goto unlock;
782  }
783 
784  ret = 0;
785 
786 unlock:
787  IDirect3DDeviceManager9_UnlockDevice(devmgr, device_handle, FALSE);
788 fail:
789 #endif
790  return ret;
791 }
792 
793 static int qsv_va_update_config(void *ctx, mfxHDL handle, mfxConfig cfg)
794 {
795 #if CONFIG_VAAPI
796 #if VA_CHECK_VERSION(1, 15, 0)
797  mfxStatus sts;
798  VADisplay dpy = handle;
799  VAStatus vas;
800  VADisplayAttribute attr = {
801  .type = VADisplayPCIID,
802  };
803  mfxVariant impl_value;
804 
805  vas = vaGetDisplayAttributes(dpy, &attr, 1);
806  if (vas == VA_STATUS_SUCCESS && attr.flags != VA_DISPLAY_ATTRIB_NOT_SUPPORTED) {
807  impl_value.Type = MFX_VARIANT_TYPE_U16;
808  impl_value.Data.U16 = (attr.value & 0xFFFF);
809  sts = MFXSetConfigFilterProperty(cfg,
810  (const mfxU8 *)"mfxExtendedDeviceId.DeviceID", impl_value);
811  if (sts != MFX_ERR_NONE) {
812  av_log(ctx, AV_LOG_ERROR, "Error adding a MFX configuration"
813  "DeviceID property: %d.\n", sts);
814  goto fail;
815  }
816  } else {
817  av_log(ctx, AV_LOG_ERROR, "libva: Failed to get device id from the driver. Please "
818  "consider to upgrade the driver to support VA-API 1.15.0\n");
819  goto fail;
820  }
821 
822  return 0;
823 
824 fail:
825 #else
826  av_log(ctx, AV_LOG_ERROR, "libva: This version of libva doesn't support retrieving "
827  "the device information from the driver. Please consider to upgrade libva to "
828  "support VA-API 1.15.0\n");
829 #endif
830 #endif
831  return AVERROR_UNKNOWN;
832 }
833 
834 static int qsv_new_mfx_loader(void *ctx,
835  mfxHDL handle,
836  mfxHandleType handle_type,
837  mfxIMPL implementation,
838  mfxVersion *pver,
839  void **ploader)
840 {
841  mfxStatus sts;
842  mfxLoader loader = NULL;
843  mfxConfig cfg;
844  mfxVariant impl_value;
845 
846  *ploader = NULL;
847  loader = MFXLoad();
848  if (!loader) {
849  av_log(ctx, AV_LOG_ERROR, "Error creating a MFX loader\n");
850  goto fail;
851  }
852 
853  /* Create configurations for implementation */
854  cfg = MFXCreateConfig(loader);
855  if (!cfg) {
856  av_log(ctx, AV_LOG_ERROR, "Error creating a MFX configuration\n");
857  goto fail;
858  }
859 
860  impl_value.Type = MFX_VARIANT_TYPE_U32;
861  impl_value.Data.U32 = (implementation == MFX_IMPL_SOFTWARE) ?
862  MFX_IMPL_TYPE_SOFTWARE : MFX_IMPL_TYPE_HARDWARE;
863  sts = MFXSetConfigFilterProperty(cfg,
864  (const mfxU8 *)"mfxImplDescription.Impl", impl_value);
865  if (sts != MFX_ERR_NONE) {
866  av_log(ctx, AV_LOG_ERROR, "Error adding a MFX configuration "
867  "property: %d.\n", sts);
868  goto fail;
869  }
870 
871  impl_value.Type = MFX_VARIANT_TYPE_U32;
872  impl_value.Data.U32 = pver->Version;
873  sts = MFXSetConfigFilterProperty(cfg,
874  (const mfxU8 *)"mfxImplDescription.ApiVersion.Version",
875  impl_value);
876  if (sts != MFX_ERR_NONE) {
877  av_log(ctx, AV_LOG_ERROR, "Error adding a MFX configuration "
878  "property: %d.\n", sts);
879  goto fail;
880  }
881 
882  impl_value.Type = MFX_VARIANT_TYPE_U16;
883  impl_value.Data.U16 = 0x8086; // Intel device only
884  sts = MFXSetConfigFilterProperty(cfg,
885  (const mfxU8 *)"mfxExtendedDeviceId.VendorID", impl_value);
886  if (sts != MFX_ERR_NONE) {
887  av_log(ctx, AV_LOG_ERROR, "Error adding a MFX configuration"
888  "VendorID property: %d.\n", sts);
889  goto fail;
890  }
891 
892  if (MFX_HANDLE_VA_DISPLAY == handle_type) {
893  if (handle && qsv_va_update_config(ctx, handle, cfg))
894  goto fail;
895 
896  impl_value.Data.U32 = MFX_ACCEL_MODE_VIA_VAAPI;
897  } else if (MFX_HANDLE_D3D9_DEVICE_MANAGER == handle_type) {
898  if (handle && qsv_d3d9_update_config(ctx, handle, cfg))
899  goto fail;
900 
901  impl_value.Data.U32 = MFX_ACCEL_MODE_VIA_D3D9;
902  } else {
903  if (handle && qsv_d3d11_update_config(ctx, handle, cfg))
904  goto fail;
905 
906  impl_value.Data.U32 = MFX_ACCEL_MODE_VIA_D3D11;
907  }
908 
909  impl_value.Type = MFX_VARIANT_TYPE_U32;
910  sts = MFXSetConfigFilterProperty(cfg,
911  (const mfxU8 *)"mfxImplDescription.AccelerationMode", impl_value);
912  if (sts != MFX_ERR_NONE) {
913  av_log(ctx, AV_LOG_ERROR, "Error adding a MFX configuration"
914  "AccelerationMode property: %d.\n", sts);
915  goto fail;
916  }
917 
918  *ploader = loader;
919 
920  return 0;
921 
922 fail:
923  if (loader)
924  MFXUnload(loader);
925 
926  return AVERROR_UNKNOWN;
927 }
928 
929 static int qsv_create_mfx_session_from_loader(void *ctx, mfxLoader loader, mfxSession *psession)
930 {
931  mfxStatus sts;
932  mfxSession session = NULL;
933  uint32_t impl_idx = 0;
934  mfxVersion ver;
935 
936  while (1) {
937  /* Enumerate all implementations */
938  mfxImplDescription *impl_desc;
939 
940  sts = MFXEnumImplementations(loader, impl_idx,
941  MFX_IMPLCAPS_IMPLDESCSTRUCTURE,
942  (mfxHDL *)&impl_desc);
943  /* Failed to find an available implementation */
944  if (sts == MFX_ERR_NOT_FOUND)
945  break;
946  else if (sts != MFX_ERR_NONE) {
947  impl_idx++;
948  continue;
949  }
950 
951  sts = MFXCreateSession(loader, impl_idx, &session);
952  MFXDispReleaseImplDescription(loader, impl_desc);
953  if (sts == MFX_ERR_NONE)
954  break;
955 
956  impl_idx++;
957  }
958 
959  if (sts != MFX_ERR_NONE) {
960  av_log(ctx, AV_LOG_ERROR, "Error creating a MFX session: %d.\n", sts);
961  goto fail;
962  }
963 
964  sts = MFXQueryVersion(session, &ver);
965  if (sts != MFX_ERR_NONE) {
966  av_log(ctx, AV_LOG_ERROR, "Error querying a MFX session: %d.\n", sts);
967  goto fail;
968  }
969 
970  av_log(ctx, AV_LOG_VERBOSE, "Initialize MFX session: implementation "
971  "version is %d.%d\n", ver.Major, ver.Minor);
972 
973  *psession = session;
974 
975  return 0;
976 
977 fail:
978  if (session)
979  MFXClose(session);
980 
981  return AVERROR_UNKNOWN;
982 }
983 
984 static int qsv_create_mfx_session(void *ctx,
985  mfxHDL handle,
986  mfxHandleType handle_type,
987  mfxIMPL implementation,
988  mfxVersion *pver,
989  mfxSession *psession,
990  void **ploader)
991 {
992  mfxLoader loader = NULL;
993 
995  "Use Intel(R) oneVPL to create MFX session, API version is "
996  "%d.%d, the required implementation version is %d.%d\n",
997  MFX_VERSION_MAJOR, MFX_VERSION_MINOR, pver->Major, pver->Minor);
998 
999  if (handle_type != MFX_HANDLE_VA_DISPLAY &&
1000  handle_type != MFX_HANDLE_D3D9_DEVICE_MANAGER &&
1001  handle_type != MFX_HANDLE_D3D11_DEVICE) {
1003  "Invalid MFX device handle type\n");
1004  return AVERROR(EXDEV);
1005  }
1006 
1007  *psession = NULL;
1008 
1009  if (!*ploader) {
1010  if (qsv_new_mfx_loader(ctx, handle, handle_type, implementation, pver, (void **)&loader))
1011  goto fail;
1012 
1013  av_assert0(loader);
1014  } else
1015  loader = *ploader; // Use the input mfxLoader to create mfx session
1016 
1017  if (qsv_create_mfx_session_from_loader(ctx, loader, psession))
1018  goto fail;
1019 
1020  if (!*ploader)
1021  *ploader = loader;
1022 
1023  return 0;
1024 
1025 fail:
1026  if (!*ploader && loader)
1027  MFXUnload(loader);
1028 
1029  return AVERROR_UNKNOWN;
1030 }
1031 
1032 #else
1033 
1034 static int qsv_create_mfx_session(void *ctx,
1035  mfxHDL handle,
1036  mfxHandleType handle_type,
1037  mfxIMPL implementation,
1038  mfxVersion *pver,
1039  mfxSession *psession,
1040  void **ploader)
1041 {
1042  mfxVersion ver;
1043  mfxStatus sts;
1044  mfxSession session = NULL;
1045 
1047  "Use Intel(R) Media SDK to create MFX session, API version is "
1048  "%d.%d, the required implementation version is %d.%d\n",
1049  MFX_VERSION_MAJOR, MFX_VERSION_MINOR, pver->Major, pver->Minor);
1050 
1051  *ploader = NULL;
1052  *psession = NULL;
1053  ver = *pver;
1054  sts = MFXInit(implementation, &ver, &session);
1055  if (sts != MFX_ERR_NONE) {
1056  av_log(ctx, AV_LOG_ERROR, "Error initializing an MFX session: "
1057  "%d.\n", sts);
1058  goto fail;
1059  }
1060 
1061  sts = MFXQueryVersion(session, &ver);
1062  if (sts != MFX_ERR_NONE) {
1063  av_log(ctx, AV_LOG_ERROR, "Error querying an MFX session: "
1064  "%d.\n", sts);
1065  goto fail;
1066  }
1067 
1068  av_log(ctx, AV_LOG_VERBOSE, "Initialize MFX session: implementation "
1069  "version is %d.%d\n", ver.Major, ver.Minor);
1070 
1071  MFXClose(session);
1072 
1073  sts = MFXInit(implementation, &ver, &session);
1074  if (sts != MFX_ERR_NONE) {
1075  av_log(ctx, AV_LOG_ERROR, "Error initializing an MFX session: "
1076  "%d.\n", sts);
1077  goto fail;
1078  }
1079 
1080  *psession = session;
1081 
1082  return 0;
1083 
1084 fail:
1085  if (session)
1086  MFXClose(session);
1087 
1088  return AVERROR_UNKNOWN;
1089 }
1090 
1091 #endif
1092 
1093 static int qsv_init_internal_session(AVHWFramesContext *ctx,
1094  mfxSession *session, int upload)
1095 {
1096  AVQSVFramesContext *frames_hwctx = ctx->hwctx;
1097  QSVDeviceContext *device_priv = ctx->device_ctx->internal->priv;
1098  int opaque = 0;
1099 
1100  mfxFrameAllocator frame_allocator = {
1101  .pthis = ctx,
1102  .Alloc = frame_alloc,
1103  .Lock = frame_lock,
1104  .Unlock = frame_unlock,
1105  .GetHDL = frame_get_hdl,
1106  .Free = frame_free,
1107  };
1108 
1109  mfxVideoParam par;
1110  mfxStatus err;
1111  int ret = AVERROR_UNKNOWN;
1112  AVQSVDeviceContext *hwctx = ctx->device_ctx->hwctx;
1113  /* hwctx->loader is non-NULL for oneVPL user and NULL for non-oneVPL user */
1114  void **loader = &hwctx->loader;
1115 
1116 #if QSV_HAVE_OPAQUE
1117  QSVFramesContext *s = ctx->internal->priv;
1118  opaque = !!(frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);
1119 #endif
1120 
1121  ret = qsv_create_mfx_session(ctx, device_priv->handle, device_priv->handle_type,
1122  device_priv->impl, &device_priv->ver, session, loader);
1123  if (ret)
1124  goto fail;
1125 
1126  if (device_priv->handle) {
1127  err = MFXVideoCORE_SetHandle(*session, device_priv->handle_type,
1128  device_priv->handle);
1129  if (err != MFX_ERR_NONE) {
1130  ret = AVERROR_UNKNOWN;
1131  goto fail;
1132  }
1133  }
1134 
1135  if (!opaque) {
1136  err = MFXVideoCORE_SetFrameAllocator(*session, &frame_allocator);
1137  if (err != MFX_ERR_NONE) {
1138  ret = AVERROR_UNKNOWN;
1139  goto fail;
1140  }
1141  }
1142 
1143  memset(&par, 0, sizeof(par));
1144 
1145  if (!opaque) {
1146  par.IOPattern = upload ? MFX_IOPATTERN_OUT_VIDEO_MEMORY :
1147  MFX_IOPATTERN_IN_VIDEO_MEMORY;
1148  }
1149 #if QSV_HAVE_OPAQUE
1150  else {
1151  par.ExtParam = s->ext_buffers;
1152  par.NumExtParam = FF_ARRAY_ELEMS(s->ext_buffers);
1153  par.IOPattern = upload ? MFX_IOPATTERN_OUT_OPAQUE_MEMORY :
1154  MFX_IOPATTERN_IN_OPAQUE_MEMORY;
1155  }
1156 #endif
1157 
1158  par.IOPattern |= upload ? MFX_IOPATTERN_IN_SYSTEM_MEMORY :
1159  MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
1160  par.AsyncDepth = 1;
1161 
1162  par.vpp.In = frames_hwctx->surfaces[0].Info;
1163 
1164  /* Apparently VPP requires the frame rate to be set to some value, otherwise
1165  * init will fail (probably for the framerate conversion filter). Since we
1166  * are only doing data upload/download here, we just invent an arbitrary
1167  * value */
1168  par.vpp.In.FrameRateExtN = 25;
1169  par.vpp.In.FrameRateExtD = 1;
1170  par.vpp.Out = par.vpp.In;
1171 
1172  err = MFXVideoVPP_Init(*session, &par);
1173  if (err != MFX_ERR_NONE) {
1174  av_log(ctx, AV_LOG_VERBOSE, "Error opening the internal VPP session. "
1175  "Surface upload/download will not be possible\n");
1176 
1177  ret = AVERROR_UNKNOWN;
1178  goto fail;
1179  }
1180 
1181  return 0;
1182 
1183 fail:
1184  if (*session)
1185  MFXClose(*session);
1186 
1187  *session = NULL;
1188 
1189  return ret;
1190 }
1191 
1192 static int qsv_frames_init(AVHWFramesContext *ctx)
1193 {
1194  QSVFramesContext *s = ctx->internal->priv;
1195  AVQSVFramesContext *frames_hwctx = ctx->hwctx;
1196 
1197  int opaque = 0;
1198 
1199  uint32_t fourcc;
1200  int i, ret;
1201 
1202 #if QSV_HAVE_OPAQUE
1203  opaque = !!(frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);
1204 #endif
1205 
1206  fourcc = qsv_fourcc_from_pix_fmt(ctx->sw_format);
1207  if (!fourcc) {
1208  av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format\n");
1209  return AVERROR(ENOSYS);
1210  }
1211 
1212  if (!ctx->pool) {
1213  ret = qsv_init_pool(ctx, fourcc);
1214  if (ret < 0) {
1215  av_log(ctx, AV_LOG_ERROR, "Error creating an internal frame pool\n");
1216  return ret;
1217  }
1218  }
1219 
1220  if (!opaque) {
1221  s->mem_ids = av_calloc(frames_hwctx->nb_surfaces, sizeof(*s->mem_ids));
1222  if (!s->mem_ids)
1223  return AVERROR(ENOMEM);
1224 
1225  for (i = 0; i < frames_hwctx->nb_surfaces; i++)
1226  s->mem_ids[i] = frames_hwctx->surfaces[i].Data.MemId;
1227  }
1228 #if QSV_HAVE_OPAQUE
1229  else {
1230  s->surface_ptrs = av_calloc(frames_hwctx->nb_surfaces,
1231  sizeof(*s->surface_ptrs));
1232  if (!s->surface_ptrs)
1233  return AVERROR(ENOMEM);
1234 
1235  for (i = 0; i < frames_hwctx->nb_surfaces; i++)
1236  s->surface_ptrs[i] = frames_hwctx->surfaces + i;
1237 
1238  s->opaque_alloc.In.Surfaces = s->surface_ptrs;
1239  s->opaque_alloc.In.NumSurface = frames_hwctx->nb_surfaces;
1240  s->opaque_alloc.In.Type = frames_hwctx->frame_type;
1241 
1242  s->opaque_alloc.Out = s->opaque_alloc.In;
1243 
1244  s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
1245  s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);
1246 
1247  s->ext_buffers[0] = (mfxExtBuffer*)&s->opaque_alloc;
1248  }
1249 #endif
1250 
1251  s->session_download = NULL;
1252  s->session_upload = NULL;
1253 
1254  s->session_download_init = 0;
1255  s->session_upload_init = 0;
1256 
1257 #if HAVE_PTHREADS
1258  pthread_mutex_init(&s->session_lock, NULL);
1259 #endif
1260 
1261  return 0;
1262 }
1263 
1264 static int qsv_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
1265 {
1266  frame->buf[0] = av_buffer_pool_get(ctx->pool);
1267  if (!frame->buf[0])
1268  return AVERROR(ENOMEM);
1269 
1270  frame->data[3] = frame->buf[0]->data;
1271  frame->format = AV_PIX_FMT_QSV;
1272  frame->width = ctx->width;
1273  frame->height = ctx->height;
1274 
1275  return 0;
1276 }
1277 
1278 static int qsv_transfer_get_formats(AVHWFramesContext *ctx,
1279  enum AVHWFrameTransferDirection dir,
1280  enum AVPixelFormat **formats)
1281 {
1282  enum AVPixelFormat *fmts;
1283 
1284  fmts = av_malloc_array(2, sizeof(*fmts));
1285  if (!fmts)
1286  return AVERROR(ENOMEM);
1287 
1288  fmts[0] = ctx->sw_format;
1289  fmts[1] = AV_PIX_FMT_NONE;
1290 
1291  *formats = fmts;
1292 
1293  return 0;
1294 }
1295 
1296 static int qsv_frames_derive_from(AVHWFramesContext *dst_ctx,
1297  AVHWFramesContext *src_ctx, int flags)
1298 {
1299  AVQSVFramesContext *src_hwctx = src_ctx->hwctx;
1300  int i;
1301 
1302  switch (dst_ctx->device_ctx->type) {
1303 #if CONFIG_VAAPI
1304  case AV_HWDEVICE_TYPE_VAAPI:
1305  {
1306  AVVAAPIFramesContext *dst_hwctx = dst_ctx->hwctx;
1307  dst_hwctx->surface_ids = av_calloc(src_hwctx->nb_surfaces,
1308  sizeof(*dst_hwctx->surface_ids));
1309  if (!dst_hwctx->surface_ids)
1310  return AVERROR(ENOMEM);
1311  for (i = 0; i < src_hwctx->nb_surfaces; i++) {
1312  mfxHDLPair *pair = (mfxHDLPair*)src_hwctx->surfaces[i].Data.MemId;
1313  dst_hwctx->surface_ids[i] = *(VASurfaceID*)pair->first;
1314  }
1315  dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
1316  }
1317  break;
1318 #endif
1319 #if CONFIG_D3D11VA
1320  case AV_HWDEVICE_TYPE_D3D11VA:
1321  {
1322  D3D11_TEXTURE2D_DESC texDesc;
1323  dst_ctx->initial_pool_size = src_ctx->initial_pool_size;
1324  AVD3D11VAFramesContext *dst_hwctx = dst_ctx->hwctx;
1325  dst_hwctx->texture_infos = av_calloc(src_hwctx->nb_surfaces,
1326  sizeof(*dst_hwctx->texture_infos));
1327  if (!dst_hwctx->texture_infos)
1328  return AVERROR(ENOMEM);
1329  if (src_hwctx->frame_type & MFX_MEMTYPE_SHARED_RESOURCE)
1330  dst_hwctx->MiscFlags = D3D11_RESOURCE_MISC_SHARED;
1331  for (i = 0; i < src_hwctx->nb_surfaces; i++) {
1332  mfxHDLPair *pair = (mfxHDLPair*)src_hwctx->surfaces[i].Data.MemId;
1333  dst_hwctx->texture_infos[i].texture = (ID3D11Texture2D*)pair->first;
1334  dst_hwctx->texture_infos[i].index = pair->second == (mfxMemId)MFX_INFINITE ? (intptr_t)0 : (intptr_t)pair->second;
1335  }
1336  ID3D11Texture2D_GetDesc(dst_hwctx->texture_infos[0].texture, &texDesc);
1337  dst_hwctx->BindFlags = texDesc.BindFlags;
1338  }
1339  break;
1340 #endif
1341 #if CONFIG_DXVA2
1342  case AV_HWDEVICE_TYPE_DXVA2:
1343  {
1344  AVDXVA2FramesContext *dst_hwctx = dst_ctx->hwctx;
1345  dst_hwctx->surfaces = av_calloc(src_hwctx->nb_surfaces,
1346  sizeof(*dst_hwctx->surfaces));
1347  if (!dst_hwctx->surfaces)
1348  return AVERROR(ENOMEM);
1349  for (i = 0; i < src_hwctx->nb_surfaces; i++) {
1350  mfxHDLPair *pair = (mfxHDLPair*)src_hwctx->surfaces[i].Data.MemId;
1351  dst_hwctx->surfaces[i] = (IDirect3DSurface9*)pair->first;
1352  }
1353  dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
1354  if (src_hwctx->frame_type == MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
1355  dst_hwctx->surface_type = DXVA2_VideoDecoderRenderTarget;
1356  else
1357  dst_hwctx->surface_type = DXVA2_VideoProcessorRenderTarget;
1358  }
1359  break;
1360 #endif
1361  default:
1362  return AVERROR(ENOSYS);
1363  }
1364 
1365  return 0;
1366 }
1367 
1368 static int qsv_map_from(AVHWFramesContext *ctx,
1369  AVFrame *dst, const AVFrame *src, int flags)
1370 {
1371  QSVFramesContext *s = ctx->internal->priv;
1372  mfxFrameSurface1 *surf = (mfxFrameSurface1*)src->data[3];
1373  AVHWFramesContext *child_frames_ctx;
1374  const AVPixFmtDescriptor *desc;
1375  uint8_t *child_data;
1376  AVFrame *dummy;
1377  int ret = 0;
1378 
1379  if (!s->child_frames_ref)
1380  return AVERROR(ENOSYS);
1381  child_frames_ctx = (AVHWFramesContext*)s->child_frames_ref->data;
1382 
1383  switch (child_frames_ctx->device_ctx->type) {
1384 #if CONFIG_VAAPI
1385  case AV_HWDEVICE_TYPE_VAAPI:
1386  {
1387  mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
1388  /* pair->first is *VASurfaceID while data[3] in vaapi frame is VASurfaceID, so
1389  * we need this casting for vaapi.
1390  * Add intptr_t to force cast from VASurfaceID(uint) type to pointer(long) type
1391  * to avoid compile warning */
1392  child_data = (uint8_t*)(intptr_t)*(VASurfaceID*)pair->first;
1393  break;
1394  }
1395 #endif
1396 #if CONFIG_D3D11VA
1397  case AV_HWDEVICE_TYPE_D3D11VA:
1398  {
1399  mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
1400  child_data = pair->first;
1401  break;
1402  }
1403 #endif
1404 #if CONFIG_DXVA2
1405  case AV_HWDEVICE_TYPE_DXVA2:
1406  {
1407  mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
1408  child_data = pair->first;
1409  break;
1410  }
1411 #endif
1412  default:
1413  return AVERROR(ENOSYS);
1414  }
1415 
1416  if (dst->format == child_frames_ctx->format) {
1417  ret = ff_hwframe_map_create(s->child_frames_ref,
1418  dst, src, NULL, NULL);
1419  if (ret < 0)
1420  return ret;
1421 
1422  dst->width = src->width;
1423  dst->height = src->height;
1424 
1425  if (child_frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
1426  mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
1427  dst->data[0] = pair->first;
1428  dst->data[1] = pair->second == (mfxMemId)MFX_INFINITE ? (uint8_t *)0 : pair->second;
1429  } else {
1430  dst->data[3] = child_data;
1431  }
1432 
1433  return 0;
1434  }
1435 
1436  desc = av_pix_fmt_desc_get(dst->format);
1437  if (desc && desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1438  // This only supports mapping to software.
1439  return AVERROR(ENOSYS);
1440  }
1441 
1442  dummy = av_frame_alloc();
1443  if (!dummy)
1444  return AVERROR(ENOMEM);
1445 
1446  dummy->buf[0] = av_buffer_ref(src->buf[0]);
1447  dummy->hw_frames_ctx = av_buffer_ref(s->child_frames_ref);
1448  if (!dummy->buf[0] || !dummy->hw_frames_ctx)
1449  goto fail;
1450 
1451  dummy->format = child_frames_ctx->format;
1452  dummy->width = src->width;
1453  dummy->height = src->height;
1454 
1455  if (child_frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
1456  mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
1457  dummy->data[0] = pair->first;
1458  dummy->data[1] = pair->second == (mfxMemId)MFX_INFINITE ? (uint8_t *)0 : pair->second;
1459  } else {
1460  dummy->data[3] = child_data;
1461  }
1462 
1463  ret = av_hwframe_map(dst, dummy, flags);
1464 
1465 fail:
1466  av_frame_free(&dummy);
1467 
1468  return ret;
1469 }
1470 
1471 static int qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
1472  const AVFrame *src)
1473 {
1474  QSVFramesContext *s = ctx->internal->priv;
1475  AVHWFramesContext *child_frames_ctx = (AVHWFramesContext*)s->child_frames_ref->data;
1476  int download = !!src->hw_frames_ctx;
1477  mfxFrameSurface1 *surf = (mfxFrameSurface1*)(download ? src->data[3] : dst->data[3]);
1478 
1479  AVFrame *dummy;
1480  int ret;
1481 
1482  dummy = av_frame_alloc();
1483  if (!dummy)
1484  return AVERROR(ENOMEM);
1485 
1486  dummy->format = child_frames_ctx->format;
1487  dummy->width = src->width;
1488  dummy->height = src->height;
1489  dummy->buf[0] = download ? src->buf[0] : dst->buf[0];
1490  dummy->data[3] = surf->Data.MemId;
1491  dummy->hw_frames_ctx = s->child_frames_ref;
1492 
1493  ret = download ? av_hwframe_transfer_data(dst, dummy, 0) :
1494  av_hwframe_transfer_data(dummy, src, 0);
1495 
1496  dummy->buf[0] = NULL;
1497  dummy->data[3] = NULL;
1498  dummy->hw_frames_ctx = NULL;
1499 
1500  av_frame_free(&dummy);
1501 
1502  return ret;
1503 }
1504 
1505 static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
1506 {
1507  switch (frame->format) {
1508  case AV_PIX_FMT_NV12:
1509  case AV_PIX_FMT_P010:
1510  case AV_PIX_FMT_P012:
1511  surface->Data.Y = frame->data[0];
1512  surface->Data.UV = frame->data[1];
1513  break;
1514 
1515  case AV_PIX_FMT_YUV420P:
1516  surface->Data.Y = frame->data[0];
1517  surface->Data.U = frame->data[1];
1518  surface->Data.V = frame->data[2];
1519  break;
1520 
1521  case AV_PIX_FMT_BGRA:
1522  surface->Data.B = frame->data[0];
1523  surface->Data.G = frame->data[0] + 1;
1524  surface->Data.R = frame->data[0] + 2;
1525  surface->Data.A = frame->data[0] + 3;
1526  break;
1527 #if CONFIG_VAAPI
1528  case AV_PIX_FMT_YUYV422:
1529  surface->Data.Y = frame->data[0];
1530  surface->Data.U = frame->data[0] + 1;
1531  surface->Data.V = frame->data[0] + 3;
1532  break;
1533 
1534  case AV_PIX_FMT_Y210:
1535  case AV_PIX_FMT_Y212:
1536  surface->Data.Y16 = (mfxU16 *)frame->data[0];
1537  surface->Data.U16 = (mfxU16 *)frame->data[0] + 1;
1538  surface->Data.V16 = (mfxU16 *)frame->data[0] + 3;
1539  break;
1540  case AV_PIX_FMT_VUYX:
1541  surface->Data.V = frame->data[0];
1542  surface->Data.U = frame->data[0] + 1;
1543  surface->Data.Y = frame->data[0] + 2;
1544  // Only set Data.A to a valid address, the SDK doesn't
1545  // use the value from the frame.
1546  surface->Data.A = frame->data[0] + 3;
1547  break;
1548  case AV_PIX_FMT_XV30:
1549  surface->Data.U = frame->data[0];
1550  break;
1551  case AV_PIX_FMT_XV36:
1552  surface->Data.U = frame->data[0];
1553  surface->Data.Y = frame->data[0] + 2;
1554  surface->Data.V = frame->data[0] + 4;
1555  // Only set Data.A to a valid address, the SDK doesn't
1556  // use the value from the frame.
1557  surface->Data.A = frame->data[0] + 6;
1558  break;
1559 #endif
1560  default:
1561  return MFX_ERR_UNSUPPORTED;
1562  }
1563  surface->Data.Pitch = frame->linesize[0];
1564  surface->Data.TimeStamp = frame->pts;
1565 
1566  return 0;
1567 }
1568 
1569 static int qsv_internal_session_check_init(AVHWFramesContext *ctx, int upload)
1570 {
1571  QSVFramesContext *s = ctx->internal->priv;
1572  atomic_int *inited = upload ? &s->session_upload_init : &s->session_download_init;
1573  mfxSession *session = upload ? &s->session_upload : &s->session_download;
1574  int ret = 0;
1575 
1576  if (atomic_load(inited))
1577  return 0;
1578 
1579 #if HAVE_PTHREADS
1580  pthread_mutex_lock(&s->session_lock);
1581 #endif
1582 
1583  if (!atomic_load(inited)) {
1584  ret = qsv_init_internal_session(ctx, session, upload);
1585  atomic_store(inited, 1);
1586  }
1587 
1588 #if HAVE_PTHREADS
1589  pthread_mutex_unlock(&s->session_lock);
1590 #endif
1591 
1592  return ret;
1593 }
1594 
1595 static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
1596  const AVFrame *src)
1597 {
1598  QSVFramesContext *s = ctx->internal->priv;
1599  mfxFrameSurface1 out = {{ 0 }};
1600  mfxFrameSurface1 *in = (mfxFrameSurface1*)src->data[3];
1601 
1602  mfxSyncPoint sync = NULL;
1603  mfxStatus err;
1604  int ret = 0;
1605  /* download to temp frame if the output is not padded as libmfx requires */
1606  AVFrame *tmp_frame = &s->realigned_download_frame;
1607  AVFrame *dst_frame;
1608  int realigned = 0;
1609 
1610  ret = qsv_internal_session_check_init(ctx, 0);
1611  if (ret < 0)
1612  return ret;
1613 
1614  /* According to MSDK spec for mfxframeinfo, "Width must be a multiple of 16.
1615  * Height must be a multiple of 16 for progressive frame sequence and a
1616  * multiple of 32 otherwise.", so align all frames to 16 before downloading. */
1617  if (dst->height & 15 || dst->linesize[0] & 15) {
1618  realigned = 1;
1619  if (tmp_frame->format != dst->format ||
1620  tmp_frame->width != FFALIGN(dst->linesize[0], 16) ||
1621  tmp_frame->height != FFALIGN(dst->height, 16)) {
1622  av_frame_unref(tmp_frame);
1623 
1624  tmp_frame->format = dst->format;
1625  tmp_frame->width = FFALIGN(dst->linesize[0], 16);
1626  tmp_frame->height = FFALIGN(dst->height, 16);
1627  ret = av_frame_get_buffer(tmp_frame, 0);
1628  if (ret < 0)
1629  return ret;
1630  }
1631  }
1632 
1633  dst_frame = realigned ? tmp_frame : dst;
1634 
1635  if (!s->session_download) {
1636  if (s->child_frames_ref)
1637  return qsv_transfer_data_child(ctx, dst_frame, src);
1638 
1639  av_log(ctx, AV_LOG_ERROR, "Surface download not possible\n");
1640  return AVERROR(ENOSYS);
1641  }
1642 
1643  out.Info = in->Info;
1644  map_frame_to_surface(dst_frame, &out);
1645 
1646  do {
1647  err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync);
1648  if (err == MFX_WRN_DEVICE_BUSY)
1649  av_usleep(1);
1650  } while (err == MFX_WRN_DEVICE_BUSY);
1651 
1652  if (err < 0 || !sync) {
1653  av_log(ctx, AV_LOG_ERROR, "Error downloading the surface\n");
1654  return AVERROR_UNKNOWN;
1655  }
1656 
1657  do {
1658  err = MFXVideoCORE_SyncOperation(s->session_download, sync, 1000);
1659  } while (err == MFX_WRN_IN_EXECUTION);
1660  if (err < 0) {
1661  av_log(ctx, AV_LOG_ERROR, "Error synchronizing the operation: %d\n", err);
1662  return AVERROR_UNKNOWN;
1663  }
1664 
1665  if (realigned) {
1666  tmp_frame->width = dst->width;
1667  tmp_frame->height = dst->height;
1668  ret = av_frame_copy(dst, tmp_frame);
1669  tmp_frame->width = FFALIGN(dst->linesize[0], 16);
1670  tmp_frame->height = FFALIGN(dst->height, 16);
1671  if (ret < 0)
1672  return ret;
1673  }
1674 
1675  return 0;
1676 }
1677 
1678 static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
1679  const AVFrame *src)
1680 {
1681  QSVFramesContext *s = ctx->internal->priv;
1682  mfxFrameSurface1 in = {{ 0 }};
1683  mfxFrameSurface1 *out = (mfxFrameSurface1*)dst->data[3];
1684  mfxFrameInfo tmp_info;
1685 
1686  mfxSyncPoint sync = NULL;
1687  mfxStatus err;
1688  int ret = 0;
1689  /* make a copy if the input is not padded as libmfx requires */
1690  AVFrame *tmp_frame = &s->realigned_upload_frame;
1691  const AVFrame *src_frame;
1692  int realigned = 0;
1693 
1694  ret = qsv_internal_session_check_init(ctx, 1);
1695  if (ret < 0)
1696  return ret;
1697 
1698  /* According to MSDK spec for mfxframeinfo, "Width must be a multiple of 16.
1699  * Height must be a multiple of 16 for progressive frame sequence and a
1700  * multiple of 32 otherwise.", so align all frames to 16 before uploading. */
1701  if (src->height & 15 || src->linesize[0] & 15) {
1702  realigned = 1;
1703  if (tmp_frame->format != src->format ||
1704  tmp_frame->width != FFALIGN(src->width, 16) ||
1705  tmp_frame->height != FFALIGN(src->height, 16)) {
1706  av_frame_unref(tmp_frame);
1707 
1708  tmp_frame->format = src->format;
1709  tmp_frame->width = FFALIGN(src->width, 16);
1710  tmp_frame->height = FFALIGN(src->height, 16);
1711  ret = av_frame_get_buffer(tmp_frame, 0);
1712  if (ret < 0)
1713  return ret;
1714  }
1715  ret = av_frame_copy(tmp_frame, src);
1716  if (ret < 0) {
1717  av_frame_unref(tmp_frame);
1718  return ret;
1719  }
1720  ret = qsv_fill_border(tmp_frame, src);
1721  if (ret < 0) {
1722  av_frame_unref(tmp_frame);
1723  return ret;
1724  }
1725 
1726  tmp_info = out->Info;
1727  out->Info.CropW = FFMIN(out->Info.Width, tmp_frame->width);
1728  out->Info.CropH = FFMIN(out->Info.Height, tmp_frame->height);
1729  }
1730 
1731  src_frame = realigned ? tmp_frame : src;
1732 
1733  if (!s->session_upload) {
1734  if (s->child_frames_ref)
1735  return qsv_transfer_data_child(ctx, dst, src_frame);
1736 
1737  av_log(ctx, AV_LOG_ERROR, "Surface upload not possible\n");
1738  return AVERROR(ENOSYS);
1739  }
1740 
1741  in.Info = out->Info;
1742  map_frame_to_surface(src_frame, &in);
1743 
1744  do {
1745  err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
1746  if (err == MFX_WRN_DEVICE_BUSY)
1747  av_usleep(1);
1748  } while (err == MFX_WRN_DEVICE_BUSY);
1749 
1750  if (err < 0 || !sync) {
1751  av_log(ctx, AV_LOG_ERROR, "Error uploading the surface\n");
1752  return AVERROR_UNKNOWN;
1753  }
1754 
1755  do {
1756  err = MFXVideoCORE_SyncOperation(s->session_upload, sync, 1000);
1757  } while (err == MFX_WRN_IN_EXECUTION);
1758  if (err < 0) {
1759  av_log(ctx, AV_LOG_ERROR, "Error synchronizing the operation\n");
1760  return AVERROR_UNKNOWN;
1761  }
1762 
1763  if (realigned) {
1764  out->Info.CropW = tmp_info.CropW;
1765  out->Info.CropH = tmp_info.CropH;
1766  }
1767 
1768  return 0;
1769 }
1770 
1771 static int qsv_frames_derive_to(AVHWFramesContext *dst_ctx,
1772  AVHWFramesContext *src_ctx, int flags)
1773 {
1774  QSVFramesContext *s = dst_ctx->internal->priv;
1775  AVQSVFramesContext *dst_hwctx = dst_ctx->hwctx;
1776  int i;
1777 
1778  if (src_ctx->initial_pool_size == 0) {
1779  av_log(dst_ctx, AV_LOG_ERROR, "Only fixed-size pools can be "
1780  "mapped to QSV frames.\n");
1781  return AVERROR(EINVAL);
1782  }
1783 
1784  switch (src_ctx->device_ctx->type) {
1785 #if CONFIG_VAAPI
1786  case AV_HWDEVICE_TYPE_VAAPI:
1787  {
1788  AVVAAPIFramesContext *src_hwctx = src_ctx->hwctx;
1789  s->handle_pairs_internal = av_calloc(src_ctx->initial_pool_size,
1790  sizeof(*s->handle_pairs_internal));
1791  if (!s->handle_pairs_internal)
1792  return AVERROR(ENOMEM);
1793  s->surfaces_internal = av_calloc(src_hwctx->nb_surfaces,
1794  sizeof(*s->surfaces_internal));
1795  if (!s->surfaces_internal)
1796  return AVERROR(ENOMEM);
1797  for (i = 0; i < src_hwctx->nb_surfaces; i++) {
1798  qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
1799  s->handle_pairs_internal[i].first = src_hwctx->surface_ids + i;
1800  s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
1801  s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
1802  }
1803  dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
1804  dst_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
1805  }
1806  break;
1807 #endif
1808 #if CONFIG_D3D11VA
1809  case AV_HWDEVICE_TYPE_D3D11VA:
1810  {
1811  AVD3D11VAFramesContext *src_hwctx = src_ctx->hwctx;
1812  s->handle_pairs_internal = av_calloc(src_ctx->initial_pool_size,
1813  sizeof(*s->handle_pairs_internal));
1814  if (!s->handle_pairs_internal)
1815  return AVERROR(ENOMEM);
1816  s->surfaces_internal = av_calloc(src_ctx->initial_pool_size,
1817  sizeof(*s->surfaces_internal));
1818  if (!s->surfaces_internal)
1819  return AVERROR(ENOMEM);
1820  for (i = 0; i < src_ctx->initial_pool_size; i++) {
1821  qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
1822  s->handle_pairs_internal[i].first = (mfxMemId)src_hwctx->texture_infos[i].texture;
1823  if (src_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
1824  s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
1825  } else {
1826  s->handle_pairs_internal[i].second = (mfxMemId)src_hwctx->texture_infos[i].index;
1827  }
1828  s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
1829  }
1830  dst_hwctx->nb_surfaces = src_ctx->initial_pool_size;
1831  if (src_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
1832  dst_hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
1833  } else {
1834  dst_hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
1835  }
1836  }
1837  break;
1838 #endif
1839 #if CONFIG_DXVA2
1840  case AV_HWDEVICE_TYPE_DXVA2:
1841  {
1842  AVDXVA2FramesContext *src_hwctx = src_ctx->hwctx;
1843  s->handle_pairs_internal = av_calloc(src_ctx->initial_pool_size,
1844  sizeof(*s->handle_pairs_internal));
1845  if (!s->handle_pairs_internal)
1846  return AVERROR(ENOMEM);
1847  s->surfaces_internal = av_calloc(src_hwctx->nb_surfaces,
1848  sizeof(*s->surfaces_internal));
1849  if (!s->surfaces_internal)
1850  return AVERROR(ENOMEM);
1851  for (i = 0; i < src_hwctx->nb_surfaces; i++) {
1852  qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
1853  s->handle_pairs_internal[i].first = (mfxMemId)src_hwctx->surfaces[i];
1854  s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
1855  s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
1856  }
1857  dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
1858  if (src_hwctx->surface_type == DXVA2_VideoProcessorRenderTarget)
1859  dst_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
1860  else
1861  dst_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
1862  }
1863  break;
1864 #endif
1865  default:
1866  return AVERROR(ENOSYS);
1867  }
1868 
1869  dst_hwctx->surfaces = s->surfaces_internal;
1870 
1871  return 0;
1872 }
1873 
1874 static int qsv_map_to(AVHWFramesContext *dst_ctx,
1875  AVFrame *dst, const AVFrame *src, int flags)
1876 {
1877  AVQSVFramesContext *hwctx = dst_ctx->hwctx;
1878  int i, err, index = -1;
1879 
1880  for (i = 0; i < hwctx->nb_surfaces && index < 0; i++) {
1881  switch(src->format) {
1882 #if CONFIG_VAAPI
1883  case AV_PIX_FMT_VAAPI:
1884  {
1885  mfxHDLPair *pair = (mfxHDLPair*)hwctx->surfaces[i].Data.MemId;
1886  if (*(VASurfaceID*)pair->first == (VASurfaceID)src->data[3]) {
1887  index = i;
1888  break;
1889  }
1890  }
1891 #endif
1892 #if CONFIG_D3D11VA
1893  case AV_PIX_FMT_D3D11:
1894  {
1895  mfxHDLPair *pair = (mfxHDLPair*)hwctx->surfaces[i].Data.MemId;
1896  if (pair->first == src->data[0]
1897  && (pair->second == src->data[1]
1898  || (pair->second == (mfxMemId)MFX_INFINITE && src->data[1] == (uint8_t *)0))) {
1899  index = i;
1900  break;
1901  }
1902  }
1903 #endif
1904 #if CONFIG_DXVA2
1905  case AV_PIX_FMT_DXVA2_VLD:
1906  {
1907  mfxHDLPair *pair = (mfxHDLPair*)hwctx->surfaces[i].Data.MemId;
1908  if (pair->first == src->data[3]) {
1909  index = i;
1910  break;
1911  }
1912  }
1913 #endif
1914  }
1915  }
1916  if (index < 0) {
1917  av_log(dst_ctx, AV_LOG_ERROR, "Trying to map from a surface which "
1918  "is not in the mapped frames context.\n");
1919  return AVERROR(EINVAL);
1920  }
1921 
1922  err = ff_hwframe_map_create(dst->hw_frames_ctx,
1923  dst, src, NULL, NULL);
1924  if (err)
1925  return err;
1926 
1927  dst->width = src->width;
1928  dst->height = src->height;
1929  dst->data[3] = (uint8_t*)&hwctx->surfaces[index];
1930 
1931  return 0;
1932 }
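
/* Usage sketch (not part of the original file): qsv_map_to() is invoked via
 * av_hwframe_map() when the destination frame carries a QSV frames context
 * derived from the child frames context that owns src. Assumed names:
 * qsv_frames_ref is such a derived context, dst a blank frame from
 * av_frame_alloc(). */
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

static int map_child_frame_to_qsv_sketch(AVFrame *dst, const AVFrame *src,
                                         AVBufferRef *qsv_frames_ref)
{
    int ret;

    dst->format        = AV_PIX_FMT_QSV;
    dst->hw_frames_ctx = av_buffer_ref(qsv_frames_ref);
    if (!dst->hw_frames_ctx)
        return AVERROR(ENOMEM);

    /* Fails with EINVAL if src is not one of the surfaces in the pool,
     * as the error message above indicates. */
    ret = av_hwframe_map(dst, src, AV_HWFRAME_MAP_DIRECT);
    if (ret < 0)
        av_frame_unref(dst);
    return ret;
}
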
1933 
1934 static int qsv_frames_get_constraints(AVHWDeviceContext *ctx,
1935  const void *hwconfig,
1936  AVHWFramesConstraints *constraints)
1937 {
1938  int i;
1939 
1940  constraints->valid_sw_formats = av_malloc_array(FF_ARRAY_ELEMS(supported_pixel_formats) + 1,
1941  sizeof(*constraints->valid_sw_formats));
1942  if (!constraints->valid_sw_formats)
1943  return AVERROR(ENOMEM);
1944 
1945  for (i = 0; i < FF_ARRAY_ELEMS(supported_pixel_formats); i++)
1946  constraints->valid_sw_formats[i] = supported_pixel_formats[i].pix_fmt;
1947  constraints->valid_sw_formats[FF_ARRAY_ELEMS(supported_pixel_formats)] = AV_PIX_FMT_NONE;
1948 
1949  constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
1950  if (!constraints->valid_hw_formats)
1951  return AVERROR(ENOMEM);
1952 
1953  constraints->valid_hw_formats[0] = AV_PIX_FMT_QSV;
1954  constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;
1955 
1956  return 0;
1957 }
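
/* Usage sketch (not part of the original file): the constraints filled in
 * above are reported to callers through av_hwdevice_get_hwframe_constraints().
 * Example of picking a supported software format; names are illustrative. */
#include <libavutil/hwcontext.h>

static enum AVPixelFormat first_qsv_sw_format_sketch(AVBufferRef *qsv_device_ref)
{
    AVHWFramesConstraints *cst =
        av_hwdevice_get_hwframe_constraints(qsv_device_ref, NULL);
    enum AVPixelFormat fmt = AV_PIX_FMT_NONE;

    if (cst) {
        if (cst->valid_sw_formats)
            fmt = cst->valid_sw_formats[0]; /* e.g. AV_PIX_FMT_NV12 */
        av_hwframe_constraints_free(&cst);
    }
    return fmt;
}
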
1958 
1959 static void qsv_device_free(AVHWDeviceContext *ctx)
1960 {
1961  AVQSVDeviceContext *hwctx = ctx->hwctx;
1962  QSVDevicePriv *priv = ctx->user_opaque;
1963 
1964  if (hwctx->session)
1965  MFXClose(hwctx->session);
1966 
1967  if (hwctx->loader)
1968  MFXUnload(hwctx->loader);
1969  av_buffer_unref(&priv->child_device_ctx);
1970  av_freep(&priv);
1971 }
1972 
1973 static mfxIMPL choose_implementation(const char *device, enum AVHWDeviceType child_device_type)
1974 {
1975  static const struct {
1976  const char *name;
1977  mfxIMPL impl;
1978  } impl_map[] = {
1979  { "auto", MFX_IMPL_AUTO },
1980  { "sw", MFX_IMPL_SOFTWARE },
1981  { "hw", MFX_IMPL_HARDWARE },
1982  { "auto_any", MFX_IMPL_AUTO_ANY },
1983  { "hw_any", MFX_IMPL_HARDWARE_ANY },
1984  { "hw2", MFX_IMPL_HARDWARE2 },
1985  { "hw3", MFX_IMPL_HARDWARE3 },
1986  { "hw4", MFX_IMPL_HARDWARE4 },
1987  };
1988 
1989  mfxIMPL impl = MFX_IMPL_AUTO_ANY;
1990  int i;
1991 
1992  if (device) {
1993  for (i = 0; i < FF_ARRAY_ELEMS(impl_map); i++)
1994  if (!strcmp(device, impl_map[i].name)) {
1995  impl = impl_map[i].impl;
1996  break;
1997  }
1998  if (i == FF_ARRAY_ELEMS(impl_map))
1999  impl = strtol(device, NULL, 0);
2000  }
2001 
2002  if (impl != MFX_IMPL_SOFTWARE) {
2003  if (child_device_type == AV_HWDEVICE_TYPE_D3D11VA)
2004  impl |= MFX_IMPL_VIA_D3D11;
2005  else if (child_device_type == AV_HWDEVICE_TYPE_DXVA2)
2006  impl |= MFX_IMPL_VIA_D3D9;
2007  }
2008 
2009  return impl;
2010 }
2011 
2012 static int qsv_device_derive_from_child(AVHWDeviceContext *ctx,
2013  mfxIMPL implementation,
2014  AVHWDeviceContext *child_device_ctx,
2015  int flags)
2016 {
2017  AVQSVDeviceContext *hwctx = ctx->hwctx;
2018 
2019  mfxVersion ver = { { 3, 1 } };
2020  mfxHDL handle;
2021  mfxHandleType handle_type;
2022  mfxStatus err;
2023  int ret;
2024 
2025  switch (child_device_ctx->type) {
2026 #if CONFIG_VAAPI
2027  case AV_HWDEVICE_TYPE_VAAPI:
2028  {
2029  AVVAAPIDeviceContext *child_device_hwctx = child_device_ctx->hwctx;
2030  handle_type = MFX_HANDLE_VA_DISPLAY;
2031  handle = (mfxHDL)child_device_hwctx->display;
2032  }
2033  break;
2034 #endif
2035 #if CONFIG_D3D11VA
2036  case AV_HWDEVICE_TYPE_D3D11VA:
2037  {
2038  AVD3D11VADeviceContext *child_device_hwctx = child_device_ctx->hwctx;
2039  handle_type = MFX_HANDLE_D3D11_DEVICE;
2040  handle = (mfxHDL)child_device_hwctx->device;
2041  }
2042  break;
2043 #endif
2044 #if CONFIG_DXVA2
2045  case AV_HWDEVICE_TYPE_DXVA2:
2046  {
2047  AVDXVA2DeviceContext *child_device_hwctx = child_device_ctx->hwctx;
2048  handle_type = MFX_HANDLE_D3D9_DEVICE_MANAGER;
2049  handle = (mfxHDL)child_device_hwctx->devmgr;
2050  }
2051  break;
2052 #endif
2053  default:
2054  ret = AVERROR(ENOSYS);
2055  goto fail;
2056  }
2057 
2058  ret = qsv_create_mfx_session(ctx, handle, handle_type, implementation, &ver,
2059  &hwctx->session, &hwctx->loader);
2060  if (ret)
2061  goto fail;
2062 
2063  err = MFXVideoCORE_SetHandle(hwctx->session, handle_type, handle);
2064  if (err != MFX_ERR_NONE) {
2065  av_log(ctx, AV_LOG_ERROR, "Error setting child device handle: "
2066  "%d\n", err);
2067  ret = AVERROR_UNKNOWN;
2068  goto fail;
2069  }
2070 
2071  return 0;
2072 
2073 fail:
2074  if (hwctx->session)
2075  MFXClose(hwctx->session);
2076 
2077  if (hwctx->loader)
2078  MFXUnload(hwctx->loader);
2079 
2080  hwctx->session = NULL;
2081  hwctx->loader = NULL;
2082  return ret;
2083 }
2084 
2085 static int qsv_device_derive(AVHWDeviceContext *ctx,
2086  AVHWDeviceContext *child_device_ctx,
2087  AVDictionary *opts, int flags)
2088 {
2089  mfxIMPL impl;
2090  impl = choose_implementation("hw_any", child_device_ctx->type);
2091  return qsv_device_derive_from_child(ctx, impl,
2092  child_device_ctx, flags);
2093 }
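
/* Usage sketch (not part of the original file): qsv_device_derive() is what
 * runs when a QSV device is derived from an existing child device through the
 * public API, e.g. from an already-open VAAPI device: */
#include <libavutil/hwcontext.h>

static int derive_qsv_device_sketch(AVBufferRef **qsv_device_ref,
                                    AVBufferRef *vaapi_device_ref)
{
    return av_hwdevice_ctx_create_derived(qsv_device_ref, AV_HWDEVICE_TYPE_QSV,
                                          vaapi_device_ref, 0);
}
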
2094 
2095 static int qsv_device_create(AVHWDeviceContext *ctx, const char *device,
2096  AVDictionary *opts, int flags)
2097 {
2098  QSVDevicePriv *priv;
2099  enum AVHWDeviceType child_device_type;
2100  AVHWDeviceContext *child_device;
2101  AVDictionary *child_device_opts;
2102  AVDictionaryEntry *e;
2103 
2104  mfxIMPL impl;
2105  int ret;
2106 
2107  priv = av_mallocz(sizeof(*priv));
2108  if (!priv)
2109  return AVERROR(ENOMEM);
2110 
2111  ctx->user_opaque = priv;
2112  ctx->free = qsv_device_free;
2113 
2114  e = av_dict_get(opts, "child_device_type", NULL, 0);
2115  if (e) {
2116  child_device_type = av_hwdevice_find_type_by_name(e->value);
2117  if (child_device_type == AV_HWDEVICE_TYPE_NONE) {
2118  av_log(ctx, AV_LOG_ERROR, "Unknown child device type "
2119  "\"%s\".\n", e->value);
2120  return AVERROR(EINVAL);
2121  }
2122  } else if (CONFIG_VAAPI) {
2123  child_device_type = AV_HWDEVICE_TYPE_VAAPI;
2124 #if QSV_ONEVPL
2125  } else if (CONFIG_D3D11VA) { // Use D3D11 by default if d3d11va is enabled
2126  av_log(ctx, AV_LOG_VERBOSE,
2127  "Defaulting child_device_type to AV_HWDEVICE_TYPE_D3D11VA for oneVPL."
2128  "Please explicitly set child device type via \"-init_hw_device\" "
2129  "option if needed.\n");
2130  child_device_type = AV_HWDEVICE_TYPE_D3D11VA;
2131  } else if (CONFIG_DXVA2) {
2132  child_device_type = AV_HWDEVICE_TYPE_DXVA2;
2133 #else
2134  } else if (CONFIG_DXVA2) {
2135  av_log(ctx, AV_LOG_WARNING,
2136  "WARNING: defaulting child_device_type to AV_HWDEVICE_TYPE_DXVA2 for compatibility "
2137  "with old commandlines. This behaviour will be removed "
2138  "in the future. Please explicitly set device type via \"-init_hw_device\" option.\n");
2139  child_device_type = AV_HWDEVICE_TYPE_DXVA2;
2140  } else if (CONFIG_D3D11VA) {
2141  child_device_type = AV_HWDEVICE_TYPE_D3D11VA;
2142 #endif
2143  } else {
2144  av_log(ctx, AV_LOG_ERROR, "No supported child device type is enabled\n");
2145  return AVERROR(ENOSYS);
2146  }
2147 
2148  child_device_opts = NULL;
2149  switch (child_device_type) {
2150 #if CONFIG_VAAPI
2151  case AV_HWDEVICE_TYPE_VAAPI:
2152  {
2153  // libmfx does not actually implement VAAPI properly, rather it
2154  // depends on the specific behaviour of a matching iHD driver when
2155  // used on recent Intel hardware. Set options to the VAAPI device
2156  // creation so that we should pick a usable setup by default if
2157  // possible, even when multiple devices and drivers are available.
2158  av_dict_set(&child_device_opts, "kernel_driver", "i915", 0);
2159  av_dict_set(&child_device_opts, "driver", "iHD", 0);
2160  }
2161  break;
2162 #endif
2163 #if CONFIG_D3D11VA
2164  case AV_HWDEVICE_TYPE_D3D11VA:
2165  break;
2166 #endif
2167 #if CONFIG_DXVA2
2168  case AV_HWDEVICE_TYPE_DXVA2:
2169 #if QSV_ONEVPL
2170  {
2171  av_log(ctx, AV_LOG_WARNING,
2172  "d3d11va is not available or child device type is set to dxva2 "
2173  "explicitly for oneVPL.\n");
2174  }
2175 #endif
2176  break;
2177 #endif
2178  default:
2179  {
2180  av_log(ctx, AV_LOG_ERROR, "No supported child device type is enabled\n");
2181  return AVERROR(ENOSYS);
2182  }
2183  break;
2184  }
2185 
2186  e = av_dict_get(opts, "child_device", NULL, 0);
2187  ret = av_hwdevice_ctx_create(&priv->child_device_ctx, child_device_type,
2188  e ? e->value : NULL, child_device_opts, 0);
2189 
2190  av_dict_free(&child_device_opts);
2191  if (ret < 0)
2192  return ret;
2193 
2194  child_device = (AVHWDeviceContext*)priv->child_device_ctx->data;
2195 
2196  impl = choose_implementation(device, child_device_type);
2197 
2198  return qsv_device_derive_from_child(ctx, impl, child_device, 0);
2199 }
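
/* Usage sketch (not part of the original file): qsv_device_create() is
 * reached through av_hwdevice_ctx_create() with type AV_HWDEVICE_TYPE_QSV.
 * The "child_device_type" and "child_device" dictionary options and the
 * device string ("auto", "hw", "sw", ...) are the ones parsed above; the
 * render-node path below is only an example value. */
#include <libavutil/dict.h>
#include <libavutil/hwcontext.h>

static int open_qsv_device_sketch(AVBufferRef **device_ref)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "child_device_type", "vaapi", 0);
    av_dict_set(&opts, "child_device", "/dev/dri/renderD128", 0);

    /* "hw" selects MFX_IMPL_HARDWARE in choose_implementation(). */
    ret = av_hwdevice_ctx_create(device_ref, AV_HWDEVICE_TYPE_QSV,
                                 "hw", opts, 0);
    av_dict_free(&opts);
    return ret;
}
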
2200 
2201 const HWContextType ff_hwcontext_type_qsv = {
2202  .type = AV_HWDEVICE_TYPE_QSV,
2203  .name = "QSV",
2204 
2205  .device_hwctx_size = sizeof(AVQSVDeviceContext),
2206  .device_priv_size = sizeof(QSVDeviceContext),
2207  .frames_hwctx_size = sizeof(AVQSVFramesContext),
2208  .frames_priv_size = sizeof(QSVFramesContext),
2209 
2210  .device_create = qsv_device_create,
2211  .device_derive = qsv_device_derive,
2212  .device_init = qsv_device_init,
2213  .frames_get_constraints = qsv_frames_get_constraints,
2214  .frames_init = qsv_frames_init,
2215  .frames_uninit = qsv_frames_uninit,
2216  .frames_get_buffer = qsv_get_buffer,
2217  .transfer_get_formats = qsv_transfer_get_formats,
2218  .transfer_data_to = qsv_transfer_data_to,
2219  .transfer_data_from = qsv_transfer_data_from,
2220  .map_to = qsv_map_to,
2221  .map_from = qsv_map_from,
2222  .frames_derive_to = qsv_frames_derive_to,
2223  .frames_derive_from = qsv_frames_derive_from,
2224 
2225  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_QSV, AV_PIX_FMT_NONE },
2226 };