/* FFmpeg — qsvdec.c (Intel MediaSDK / QSV codec-independent decode code) */
1 /*
2  * Intel MediaSDK QSV codec-independent code
3  *
4  * copyright (c) 2013 Luca Barbato
5  * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include <stdint.h>
27 #include <string.h>
28 #include <sys/types.h>
29 
30 #include <mfxvideo.h>
31 
32 #include "libavutil/common.h"
33 #include "libavutil/fifo.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
37 #include "libavutil/mem.h"
38 #include "libavutil/log.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/pixfmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/imgutils.h"
45 #include "libavutil/avassert.h"
46 
47 #include "avcodec.h"
48 #include "codec_internal.h"
49 #include "internal.h"
50 #include "decode.h"
51 #include "hwconfig.h"
52 #include "qsv.h"
53 #include "qsv_internal.h"
54 #include "libavutil/refstruct.h"
55 
56 #if QSV_ONEVPL
57 #include <mfxdispatcher.h>
58 #else
59 #define MFXUnload(a) do { } while(0)
60 #endif
61 
/* The MediaSDK timestamp clock is a fixed 90 kHz tick (MPEG system clock). */
static const AVRational mfx_tb = { 1, 90000 };

/* Convert an AVPacket pts (in pts_tb units) to the mfx 90 kHz clock.
 * AV_NOPTS_VALUE maps to MFX_TIMESTAMP_UNKNOWN; an unset timebase
 * (pts_tb.num == 0) passes the value through unchanged. */
#define PTS_TO_MFX_PTS(pts, pts_tb) ((pts) == AV_NOPTS_VALUE ? \
                                     MFX_TIMESTAMP_UNKNOWN : pts_tb.num ? \
                                     av_rescale_q(pts, pts_tb, mfx_tb) : pts)

/* Inverse of PTS_TO_MFX_PTS: mfx 90 kHz timestamp back to pts_tb units. */
#define MFX_PTS_TO_PTS(mfx_pts, pts_tb) ((mfx_pts) == MFX_TIMESTAMP_UNKNOWN ? \
                                         AV_NOPTS_VALUE : pts_tb.num ? \
                                         av_rescale_q(mfx_pts, mfx_tb, pts_tb) : mfx_pts)

/* Extract the "via" (device interop) nibble of an mfxIMPL value. */
#define MFX_IMPL_VIA_MASK(impl) (0x0f00 & (impl))
73 
74 typedef struct QSVAsyncFrame {
75  mfxSyncPoint *sync;
78 
79 typedef struct QSVContext {
80  // the session used for decoding
81  mfxSession session;
82  mfxVersion ver;
83  mfxHandleType handle_type;
84 
85  // the session we allocated internally, in case the caller did not provide
86  // one
88 
90 
91  /**
92  * a linked list of frames currently being used by QSV
93  */
95 
99 
101  uint32_t fourcc;
102  mfxFrameInfo frame_info;
106 
107  // options set by the caller
110  int gpu_copy;
111 
113 
114  mfxExtBuffer **ext_buffers;
116 } QSVContext;
117 
118 static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
119  &(const AVCodecHWConfigInternal) {
120  .public = {
124  .device_type = AV_HWDEVICE_TYPE_QSV,
125  },
126  .hwaccel = NULL,
127  },
128  NULL
129 };
130 
132  AVBufferPool *pool)
133 {
134  int ret = 0;
135 
136  ret = ff_decode_frame_props(avctx, frame);
137  if (ret < 0)
138  return ret;
139 
140  frame->width = avctx->coded_width;
141  frame->height = avctx->coded_height;
142 
143  switch (avctx->pix_fmt) {
144  case AV_PIX_FMT_NV12:
145  frame->linesize[0] = FFALIGN(avctx->coded_width, 128);
146  break;
147  case AV_PIX_FMT_P010:
148  case AV_PIX_FMT_P012:
149  case AV_PIX_FMT_YUYV422:
150  frame->linesize[0] = 2 * FFALIGN(avctx->coded_width, 128);
151  break;
152  case AV_PIX_FMT_Y210:
153  case AV_PIX_FMT_VUYX:
154  case AV_PIX_FMT_XV30:
155  case AV_PIX_FMT_Y212:
156  frame->linesize[0] = 4 * FFALIGN(avctx->coded_width, 128);
157  break;
158  case AV_PIX_FMT_XV36:
159  frame->linesize[0] = 8 * FFALIGN(avctx->coded_width, 128);
160  break;
161  default:
162  av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
163  return AVERROR(EINVAL);
164  }
165 
166  frame->buf[0] = av_buffer_pool_get(pool);
167  if (!frame->buf[0])
168  return AVERROR(ENOMEM);
169 
170  frame->data[0] = frame->buf[0]->data;
171  if (avctx->pix_fmt == AV_PIX_FMT_NV12 ||
172  avctx->pix_fmt == AV_PIX_FMT_P010 ||
173  avctx->pix_fmt == AV_PIX_FMT_P012) {
174  frame->linesize[1] = frame->linesize[0];
175  frame->data[1] = frame->data[0] +
176  frame->linesize[0] * FFALIGN(avctx->coded_height, 64);
177  }
178 
180  if (ret < 0)
181  return ret;
182 
183  return 0;
184 }
185 
186 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
187  AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
188 {
189  int ret;
190  mfxIMPL impl;
191 
192  if (q->gpu_copy == MFX_GPUCOPY_ON &&
193  !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
194  av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
195  "only works in system memory mode.\n");
196  q->gpu_copy = MFX_GPUCOPY_OFF;
197  }
198  if (session) {
199  q->session = session;
200  } else if (hw_frames_ref) {
201  if (q->internal_qs.session) {
202  MFXClose(q->internal_qs.session);
203  q->internal_qs.session = NULL;
204  }
207 
208  q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
209  if (!q->frames_ctx.hw_frames_ctx)
210  return AVERROR(ENOMEM);
211 
213  &q->frames_ctx, q->load_plugins,
214 #if QSV_HAVE_OPAQUE
215  q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY,
216 #else
217  0,
218 #endif
219  q->gpu_copy);
220  if (ret < 0) {
222  return ret;
223  }
224 
225  q->session = q->internal_qs.session;
226  } else if (hw_device_ref) {
227  if (q->internal_qs.session) {
228  MFXClose(q->internal_qs.session);
229  q->internal_qs.session = NULL;
230  }
231 
233  hw_device_ref, q->load_plugins, q->gpu_copy);
234  if (ret < 0)
235  return ret;
236 
237  q->session = q->internal_qs.session;
238  } else {
239  if (!q->internal_qs.session) {
241  q->load_plugins, q->gpu_copy);
242  if (ret < 0)
243  return ret;
244  }
245 
246  q->session = q->internal_qs.session;
247  }
248 
249  if (MFXQueryIMPL(q->session, &impl) == MFX_ERR_NONE) {
250  switch (MFX_IMPL_VIA_MASK(impl)) {
251  case MFX_IMPL_VIA_VAAPI:
252  q->handle_type = MFX_HANDLE_VA_DISPLAY;
253  break;
254 
255  case MFX_IMPL_VIA_D3D11:
256  q->handle_type = MFX_HANDLE_D3D11_DEVICE;
257  break;
258 
259  case MFX_IMPL_VIA_D3D9:
260  q->handle_type = MFX_HANDLE_D3D9_DEVICE_MANAGER;
261  break;
262 
263  default:
264  av_assert0(!"should not reach here");
265  }
266  } else {
267  av_log(avctx, AV_LOG_ERROR, "Error querying the implementation. \n");
268  goto fail;
269  }
270 
271  if (MFXQueryVersion(q->session, &q->ver) != MFX_ERR_NONE) {
272  av_log(avctx, AV_LOG_ERROR, "Error querying the session version. \n");
273  goto fail;
274  }
275 
276  /* make sure the decoder is uninitialized */
277  MFXVideoDECODE_Close(q->session);
278 
279  return 0;
280 
281 fail:
282  q->session = NULL;
283 
284  if (q->internal_qs.session) {
285  MFXClose(q->internal_qs.session);
286  q->internal_qs.session = NULL;
287  }
288 
289  if (q->internal_qs.loader) {
291  q->internal_qs.loader = NULL;
292  }
293 
294  return AVERROR_EXTERNAL;
295 }
296 
297 static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
298 {
299  mfxSession session = NULL;
300  int iopattern = 0;
301  int ret;
302  enum AVPixelFormat pix_fmts[3] = {
303  AV_PIX_FMT_QSV, /* opaque format in case of video memory output */
304  pix_fmt, /* system memory format obtained from bitstream parser */
305  AV_PIX_FMT_NONE };
306 
307  ret = ff_get_format(avctx, pix_fmts);
308  if (ret < 0) {
309  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
310  return ret;
311  }
312 
313  if (!q->async_fifo) {
314  q->async_fifo = av_fifo_alloc2(q->async_depth, sizeof(QSVAsyncFrame), 0);
315  if (!q->async_fifo)
316  return AVERROR(ENOMEM);
317  }
318 
319  if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
320  AVQSVContext *user_ctx = avctx->hwaccel_context;
321  session = user_ctx->session;
322  iopattern = user_ctx->iopattern;
323  q->ext_buffers = user_ctx->ext_buffers;
324  q->nb_ext_buffers = user_ctx->nb_ext_buffers;
325  }
326 
327  if (avctx->hw_device_ctx && !avctx->hw_frames_ctx && ret == AV_PIX_FMT_QSV) {
328  AVHWFramesContext *hwframes_ctx;
329  AVQSVFramesContext *frames_hwctx;
330 
332 
333  if (!avctx->hw_frames_ctx) {
334  av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
335  return AVERROR(ENOMEM);
336  }
337 
338  hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
339  frames_hwctx = hwframes_ctx->hwctx;
340  hwframes_ctx->width = FFALIGN(avctx->coded_width, 32);
341  hwframes_ctx->height = FFALIGN(avctx->coded_height, 32);
342  hwframes_ctx->format = AV_PIX_FMT_QSV;
343  hwframes_ctx->sw_format = avctx->sw_pix_fmt;
344  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 2, 9) && q->handle_type != MFX_HANDLE_D3D9_DEVICE_MANAGER)
345  hwframes_ctx->initial_pool_size = 0;
346  else
347  hwframes_ctx->initial_pool_size = q->suggest_pool_size + 16 + avctx->extra_hw_frames;
348  frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
349 
351 
352  if (ret < 0) {
353  av_log(NULL, AV_LOG_ERROR, "Error initializing a QSV frame pool\n");
355  return ret;
356  }
357  }
358 
359  if (avctx->hw_frames_ctx) {
360  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
361  AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
362 
363  if (!iopattern) {
364 #if QSV_HAVE_OPAQUE
365  if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
366  iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
367  else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
368  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
369 #else
370  if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
371  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
372 #endif
373  }
374  }
375 
376  if (!iopattern)
377  iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
378  q->iopattern = iopattern;
379 
380  ff_qsv_print_iopattern(avctx, q->iopattern, "Decoder");
381 
382  ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
383  if (ret < 0) {
384  av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
385  return ret;
386  }
387 
388  param->IOPattern = q->iopattern;
389  param->AsyncDepth = q->async_depth;
390  param->ExtParam = q->ext_buffers;
391  param->NumExtParam = q->nb_ext_buffers;
392 
393  return 0;
394  }
395 
396 static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
397 {
398  int ret;
399 
400  avctx->width = param->mfx.FrameInfo.CropW;
401  avctx->height = param->mfx.FrameInfo.CropH;
402  avctx->coded_width = param->mfx.FrameInfo.Width;
403  avctx->coded_height = param->mfx.FrameInfo.Height;
404  avctx->level = param->mfx.CodecLevel;
405  avctx->profile = param->mfx.CodecProfile;
406  avctx->field_order = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct);
407  avctx->pix_fmt = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC);
408 
409  ret = MFXVideoDECODE_Init(q->session, param);
410  if (ret < 0)
411  return ff_qsv_print_error(avctx, ret,
412  "Error initializing the MFX video decoder");
413 
414  q->frame_info = param->mfx.FrameInfo;
415 
416  if (!avctx->hw_frames_ctx) {
417  ret = av_image_get_buffer_size(avctx->pix_fmt, FFALIGN(avctx->coded_width, 128), FFALIGN(avctx->coded_height, 64), 1);
418  if (ret < 0)
419  return ret;
421  }
422  return 0;
423 }
424 
426  const AVPacket *avpkt, enum AVPixelFormat pix_fmt,
427  mfxVideoParam *param)
428 {
429  int ret;
430  mfxExtVideoSignalInfo video_signal_info = { 0 };
431  mfxExtBuffer *header_ext_params[1] = { (mfxExtBuffer *)&video_signal_info };
432  mfxBitstream bs = { 0 };
433 
434  if (avpkt->size) {
435  bs.Data = avpkt->data;
436  bs.DataLength = avpkt->size;
437  bs.MaxLength = bs.DataLength;
438  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
439  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
440  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
441  } else
442  return AVERROR_INVALIDDATA;
443 
444 
445  if(!q->session) {
446  ret = qsv_decode_preinit(avctx, q, pix_fmt, param);
447  if (ret < 0)
448  return ret;
449  }
450 
452  if (ret < 0)
453  return ret;
454 
455  param->mfx.CodecId = ret;
456  video_signal_info.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
457  video_signal_info.Header.BufferSz = sizeof(video_signal_info);
458  // The SDK doesn't support other ext buffers when calling MFXVideoDECODE_DecodeHeader,
459  // so do not append this buffer to the existent buffer array
460  param->ExtParam = header_ext_params;
461  param->NumExtParam = 1;
462  ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
463  if (MFX_ERR_MORE_DATA == ret) {
464  return AVERROR(EAGAIN);
465  }
466  if (ret < 0)
467  return ff_qsv_print_error(avctx, ret,
468  "Error decoding stream header");
469 
470  avctx->color_range = video_signal_info.VideoFullRange ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
471 
472  if (video_signal_info.ColourDescriptionPresent) {
473  avctx->color_primaries = video_signal_info.ColourPrimaries;
474  avctx->color_trc = video_signal_info.TransferCharacteristics;
475  avctx->colorspace = video_signal_info.MatrixCoefficients;
476  }
477 
478  param->ExtParam = q->ext_buffers;
479  param->NumExtParam = q->nb_ext_buffers;
480 
481  if (param->mfx.FrameInfo.FrameRateExtN == 0 || param->mfx.FrameInfo.FrameRateExtD == 0) {
482  param->mfx.FrameInfo.FrameRateExtN = 25;
483  param->mfx.FrameInfo.FrameRateExtD = 1;
484  }
485 
486 #if QSV_VERSION_ATLEAST(1, 34)
487  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1)
488  param->mfx.FilmGrain = (avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) ? 0 : param->mfx.FilmGrain;
489 #endif
490 
491  return 0;
492 }
493 
495 {
496  int ret;
497 
498  if (q->pool)
499  ret = qsv_get_continuous_buffer(avctx, frame->frame, q->pool);
500  else
501  ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
502 
503  if (ret < 0)
504  return ret;
505 
506  if (frame->frame->format == AV_PIX_FMT_QSV) {
507  frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
508  } else {
509  ret = ff_qsv_map_frame_to_surface(frame->frame, &frame->surface);
510  if (ret < 0) {
511  av_log(avctx, AV_LOG_ERROR, "map frame to surface failed.\n");
512  return ret;
513  }
514  }
515 
516  frame->surface.Info = q->frame_info;
517 
518  if (q->frames_ctx.mids) {
520  if (ret < 0)
521  return ret;
522 
523  frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
524  }
525 
526  frame->surface.Data.ExtParam = frame->ext_param;
527  frame->surface.Data.NumExtParam = 0;
528  frame->num_ext_params = 0;
529  frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
530  frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
531  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->dec_info);
532 #if QSV_VERSION_ATLEAST(1, 34)
533  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1) {
534  frame->av1_film_grain_param.Header.BufferId = MFX_EXTBUFF_AV1_FILM_GRAIN_PARAM;
535  frame->av1_film_grain_param.Header.BufferSz = sizeof(frame->av1_film_grain_param);
536  frame->av1_film_grain_param.FilmGrainFlags = 0;
537  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->av1_film_grain_param);
538  }
539 #endif
540 
541 #if QSV_VERSION_ATLEAST(1, 35)
542  if ((QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 35) && avctx->codec_id == AV_CODEC_ID_HEVC) ||
543  (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 2, 9) && avctx->codec_id == AV_CODEC_ID_AV1)) {
544  frame->mdcv.Header.BufferId = MFX_EXTBUFF_MASTERING_DISPLAY_COLOUR_VOLUME;
545  frame->mdcv.Header.BufferSz = sizeof(frame->mdcv);
546  // The data in mdcv is valid when this flag is 1
547  frame->mdcv.InsertPayloadToggle = 0;
548  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->mdcv);
549 
550  frame->clli.Header.BufferId = MFX_EXTBUFF_CONTENT_LIGHT_LEVEL_INFO;
551  frame->clli.Header.BufferSz = sizeof(frame->clli);
552  // The data in clli is valid when this flag is 1
553  frame->clli.InsertPayloadToggle = 0;
554  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->clli);
555  }
556 #endif
557 
558  frame->used = 1;
559 
560  return 0;
561 }
562 
564 {
565  QSVFrame *cur = q->work_frames;
566  while (cur) {
567  if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
568  cur->used = 0;
569  av_frame_unref(cur->frame);
570  }
571  cur = cur->next;
572  }
573 }
574 
575 static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
576 {
577  QSVFrame *frame, **last;
578  int ret;
579 
581 
582  frame = q->work_frames;
583  last = &q->work_frames;
584  while (frame) {
585  if (!frame->used) {
586  ret = alloc_frame(avctx, q, frame);
587  if (ret < 0)
588  return ret;
589  *surf = &frame->surface;
590  return 0;
591  }
592 
593  last = &frame->next;
594  frame = frame->next;
595  }
596 
597  frame = av_mallocz(sizeof(*frame));
598  if (!frame)
599  return AVERROR(ENOMEM);
600  frame->frame = av_frame_alloc();
601  if (!frame->frame) {
602  av_freep(&frame);
603  return AVERROR(ENOMEM);
604  }
605  *last = frame;
606 
607  ret = alloc_frame(avctx, q, frame);
608  if (ret < 0)
609  return ret;
610 
611  *surf = &frame->surface;
612 
613  return 0;
614 }
615 
616 static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
617 {
618  QSVFrame *cur = q->work_frames;
619  while (cur) {
620  if (surf == &cur->surface)
621  return cur;
622  cur = cur->next;
623  }
624  return NULL;
625 }
626 
627 #if QSV_VERSION_ATLEAST(1, 34)
628 static int qsv_export_film_grain(AVCodecContext *avctx, mfxExtAV1FilmGrainParam *ext_param, AVFrame *frame)
629 {
630  AVFilmGrainParams *fgp;
632  int i;
633 
634  if (!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_APPLY))
635  return 0;
636 
638 
639  if (!fgp)
640  return AVERROR(ENOMEM);
641 
643  fgp->seed = ext_param->GrainSeed;
644  aom = &fgp->codec.aom;
645 
646  aom->chroma_scaling_from_luma = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CHROMA_SCALING_FROM_LUMA);
647  aom->scaling_shift = ext_param->GrainScalingMinus8 + 8;
648  aom->ar_coeff_lag = ext_param->ArCoeffLag;
649  aom->ar_coeff_shift = ext_param->ArCoeffShiftMinus6 + 6;
650  aom->grain_scale_shift = ext_param->GrainScaleShift;
651  aom->overlap_flag = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_OVERLAP);
652  aom->limit_output_range = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CLIP_TO_RESTRICTED_RANGE);
653 
654  aom->num_y_points = ext_param->NumYPoints;
655 
656  for (i = 0; i < aom->num_y_points; i++) {
657  aom->y_points[i][0] = ext_param->PointY[i].Value;
658  aom->y_points[i][1] = ext_param->PointY[i].Scaling;
659  }
660 
661  aom->num_uv_points[0] = ext_param->NumCbPoints;
662 
663  for (i = 0; i < aom->num_uv_points[0]; i++) {
664  aom->uv_points[0][i][0] = ext_param->PointCb[i].Value;
665  aom->uv_points[0][i][1] = ext_param->PointCb[i].Scaling;
666  }
667 
668  aom->num_uv_points[1] = ext_param->NumCrPoints;
669 
670  for (i = 0; i < aom->num_uv_points[1]; i++) {
671  aom->uv_points[1][i][0] = ext_param->PointCr[i].Value;
672  aom->uv_points[1][i][1] = ext_param->PointCr[i].Scaling;
673  }
674 
675  for (i = 0; i < 24; i++)
676  aom->ar_coeffs_y[i] = ext_param->ArCoeffsYPlus128[i] - 128;
677 
678  for (i = 0; i < 25; i++) {
679  aom->ar_coeffs_uv[0][i] = ext_param->ArCoeffsCbPlus128[i] - 128;
680  aom->ar_coeffs_uv[1][i] = ext_param->ArCoeffsCrPlus128[i] - 128;
681  }
682 
683  aom->uv_mult[0] = ext_param->CbMult;
684  aom->uv_mult[1] = ext_param->CrMult;
685  aom->uv_mult_luma[0] = ext_param->CbLumaMult;
686  aom->uv_mult_luma[1] = ext_param->CrLumaMult;
687  aom->uv_offset[0] = ext_param->CbOffset;
688  aom->uv_offset[1] = ext_param->CrOffset;
689 
690  return 0;
691 }
692 #endif
693 
694 #if QSV_VERSION_ATLEAST(1, 35)
695 static int qsv_export_hdr_side_data(AVCodecContext *avctx, mfxExtMasteringDisplayColourVolume *mdcv,
696  mfxExtContentLightLevelInfo *clli, AVFrame *frame)
697 {
698  int ret;
699 
700  // The SDK reuses this flag for HDR SEI parsing
701  if (mdcv->InsertPayloadToggle) {
702  AVMasteringDisplayMetadata *mastering;
703  const int mapping[3] = {2, 0, 1};
704  const int chroma_den = 50000;
705  const int luma_den = 10000;
706  int i;
707 
708  ret = ff_decode_mastering_display_new(avctx, frame, &mastering);
709  if (ret < 0)
710  return ret;
711 
712  if (mastering) {
713  for (i = 0; i < 3; i++) {
714  const int j = mapping[i];
715  mastering->display_primaries[i][0] = av_make_q(mdcv->DisplayPrimariesX[j], chroma_den);
716  mastering->display_primaries[i][1] = av_make_q(mdcv->DisplayPrimariesY[j], chroma_den);
717  }
718 
719  mastering->white_point[0] = av_make_q(mdcv->WhitePointX, chroma_den);
720  mastering->white_point[1] = av_make_q(mdcv->WhitePointY, chroma_den);
721 
722  mastering->max_luminance = av_make_q(mdcv->MaxDisplayMasteringLuminance, luma_den);
723  mastering->min_luminance = av_make_q(mdcv->MinDisplayMasteringLuminance, luma_den);
724 
725  mastering->has_luminance = 1;
726  mastering->has_primaries = 1;
727  }
728  }
729 
730  // The SDK reuses this flag for HDR SEI parsing
731  if (clli->InsertPayloadToggle) {
732  AVContentLightMetadata *light;
733 
734  ret = ff_decode_content_light_new(avctx, frame, &light);
735  if (ret < 0)
736  return ret;
737 
738  if (light) {
739  light->MaxCLL = clli->MaxContentLightLevel;
740  light->MaxFALL = clli->MaxPicAverageLightLevel;
741  }
742  }
743 
744  return 0;
745 }
746 
747 static int qsv_export_hdr_side_data_av1(AVCodecContext *avctx, mfxExtMasteringDisplayColourVolume *mdcv,
748  mfxExtContentLightLevelInfo *clli, AVFrame *frame)
749 {
750  if (mdcv->InsertPayloadToggle) {
752  const int chroma_den = 1 << 16;
753  const int max_luma_den = 1 << 8;
754  const int min_luma_den = 1 << 14;
755 
756  if (!mastering)
757  return AVERROR(ENOMEM);
758 
759  for (int i = 0; i < 3; i++) {
760  mastering->display_primaries[i][0] = av_make_q(mdcv->DisplayPrimariesX[i], chroma_den);
761  mastering->display_primaries[i][1] = av_make_q(mdcv->DisplayPrimariesY[i], chroma_den);
762  }
763 
764  mastering->white_point[0] = av_make_q(mdcv->WhitePointX, chroma_den);
765  mastering->white_point[1] = av_make_q(mdcv->WhitePointY, chroma_den);
766 
767  mastering->max_luminance = av_make_q(mdcv->MaxDisplayMasteringLuminance, max_luma_den);
768  mastering->min_luminance = av_make_q(mdcv->MinDisplayMasteringLuminance, min_luma_den);
769 
770  mastering->has_luminance = 1;
771  mastering->has_primaries = 1;
772  }
773 
774  if (clli->InsertPayloadToggle) {
776  if (!light)
777  return AVERROR(ENOMEM);
778 
779  light->MaxCLL = clli->MaxContentLightLevel;
780  light->MaxFALL = clli->MaxPicAverageLightLevel;
781  }
782 
783  return 0;
784 }
785 
786 #endif
787 
788 static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
789  AVFrame *frame, int *got_frame,
790  const AVPacket *avpkt)
791 {
792  mfxFrameSurface1 *insurf;
793  mfxFrameSurface1 *outsurf;
794  mfxSyncPoint *sync;
795  mfxBitstream bs = { { { 0 } } };
796  int ret;
797 
798  if (avpkt->size) {
799  bs.Data = avpkt->data;
800  bs.DataLength = avpkt->size;
801  bs.MaxLength = bs.DataLength;
802  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
803  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
804  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
805  }
806 
807  sync = av_mallocz(sizeof(*sync));
808  if (!sync) {
809  av_freep(&sync);
810  return AVERROR(ENOMEM);
811  }
812 
813  do {
814  ret = get_surface(avctx, q, &insurf);
815  if (ret < 0) {
816  av_freep(&sync);
817  return ret;
818  }
819 
820  ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
821  insurf, &outsurf, sync);
822  if (ret == MFX_WRN_DEVICE_BUSY)
823  av_usleep(500);
824 
825  } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
826 
827  if (ret == MFX_ERR_INCOMPATIBLE_VIDEO_PARAM) {
828  q->reinit_flag = 1;
829  av_log(avctx, AV_LOG_DEBUG, "Video parameter change\n");
830  av_freep(&sync);
831  return 0;
832  }
833 
834  if (ret != MFX_ERR_NONE &&
835  ret != MFX_ERR_MORE_DATA &&
836  ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
837  ret != MFX_ERR_MORE_SURFACE) {
838  av_freep(&sync);
839  return ff_qsv_print_error(avctx, ret,
840  "Error during QSV decoding.");
841  }
842 
843  /* make sure we do not enter an infinite loop if the SDK
844  * did not consume any data and did not return anything */
845  if (!*sync && !bs.DataOffset) {
846  bs.DataOffset = avpkt->size;
847  ++q->zero_consume_run;
848  if (q->zero_consume_run > 1 &&
849  (avpkt->size ||
850  ret != MFX_ERR_MORE_DATA))
851  ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
852  } else {
853  q->zero_consume_run = 0;
854  }
855 
856  if (*sync) {
857  QSVAsyncFrame aframe;
858  QSVFrame *out_frame = find_frame(q, outsurf);
859 
860  if (!out_frame) {
861  av_log(avctx, AV_LOG_ERROR,
862  "The returned surface does not correspond to any frame\n");
863  av_freep(&sync);
864  return AVERROR_BUG;
865  }
866 
867  out_frame->queued += 1;
868 
869  aframe = (QSVAsyncFrame){ sync, out_frame };
870  av_fifo_write(q->async_fifo, &aframe, 1);
871  } else {
872  av_freep(&sync);
873  }
874 
875  if ((av_fifo_can_read(q->async_fifo) >= q->async_depth) ||
876  (!avpkt->size && av_fifo_can_read(q->async_fifo))) {
877  QSVAsyncFrame aframe;
878  AVFrame *src_frame;
879 
880  av_fifo_read(q->async_fifo, &aframe, 1);
881  aframe.frame->queued -= 1;
882 
883  if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
884  do {
885  ret = MFXVideoCORE_SyncOperation(q->session, *aframe.sync, 1000);
886  } while (ret == MFX_WRN_IN_EXECUTION);
887  }
888 
889  av_freep(&aframe.sync);
890 
891  src_frame = aframe.frame->frame;
892 
893  ret = av_frame_ref(frame, src_frame);
894  if (ret < 0)
895  return ret;
896 
897  outsurf = &aframe.frame->surface;
898 
899  frame->pts = MFX_PTS_TO_PTS(outsurf->Data.TimeStamp, avctx->pkt_timebase);
900 #if QSV_VERSION_ATLEAST(1, 34)
902  QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) &&
903  avctx->codec_id == AV_CODEC_ID_AV1) {
904  ret = qsv_export_film_grain(avctx, &aframe.frame->av1_film_grain_param, frame);
905 
906  if (ret < 0)
907  return ret;
908  }
909 #endif
910 
911 #if QSV_VERSION_ATLEAST(1, 35)
912  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 35) && avctx->codec_id == AV_CODEC_ID_HEVC) {
913  ret = qsv_export_hdr_side_data(avctx, &aframe.frame->mdcv, &aframe.frame->clli, frame);
914 
915  if (ret < 0)
916  return ret;
917  }
918 
919  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 2, 9) && avctx->codec_id == AV_CODEC_ID_AV1) {
920  ret = qsv_export_hdr_side_data_av1(avctx, &aframe.frame->mdcv, &aframe.frame->clli, frame);
921  if (ret < 0)
922  return ret;
923  }
924 #endif
925 
926  frame->repeat_pict =
927  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
928  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
929  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
931  !!(outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF);
932  frame->flags |= AV_FRAME_FLAG_INTERLACED *
933  !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
934  frame->pict_type = ff_qsv_map_pictype(aframe.frame->dec_info.FrameType);
935 
936  if (avctx->codec_id == AV_CODEC_ID_H264 ||
937  avctx->codec_id == AV_CODEC_ID_HEVC ||
938  avctx->codec_id == AV_CODEC_ID_VVC) {
939  if (aframe.frame->dec_info.FrameType & MFX_FRAMETYPE_IDR)
940  frame->flags |= AV_FRAME_FLAG_KEY;
941  else
942  frame->flags &= ~AV_FRAME_FLAG_KEY;
943  } else {
944  if (aframe.frame->dec_info.FrameType & MFX_FRAMETYPE_I)
945  frame->flags |= AV_FRAME_FLAG_KEY;
946  else
947  frame->flags &= ~AV_FRAME_FLAG_KEY;
948  }
949  frame->crop_left = outsurf->Info.CropX;
950  frame->crop_top = outsurf->Info.CropY;
951  frame->crop_right = outsurf->Info.Width - (outsurf->Info.CropX + outsurf->Info.CropW);
952  frame->crop_bottom = outsurf->Info.Height - (outsurf->Info.CropY + outsurf->Info.CropH);
953 
954  /* update the surface properties */
955  if (avctx->pix_fmt == AV_PIX_FMT_QSV)
956  ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
957 
958  *got_frame = 1;
959  }
960 
961  return bs.DataOffset;
962 }
963 
965 {
966  QSVFrame *cur = q->work_frames;
967 
968  if (q->session)
969  MFXVideoDECODE_Close(q->session);
970 
971  if (q->async_fifo) {
972  QSVAsyncFrame aframe;
973  while (av_fifo_read(q->async_fifo, &aframe, 1) >= 0)
974  av_freep(&aframe.sync);
976  }
977 
978  while (cur) {
979  q->work_frames = cur->next;
980  av_frame_free(&cur->frame);
981  av_freep(&cur);
982  cur = q->work_frames;
983  }
984 
986 
990 }
991 
993  AVFrame *frame, int *got_frame, const AVPacket *pkt)
994 {
995  int ret;
996  mfxVideoParam param = { 0 };
998 
999  if (!pkt->size)
1000  return qsv_decode(avctx, q, frame, got_frame, pkt);
1001 
1002  /* TODO: flush delayed frames on reinit */
1003 
1004  // sw_pix_fmt, coded_width/height should be set for ff_get_format(),
1005  // assume sw_pix_fmt is NV12 and coded_width/height to be 1280x720,
1006  // the assumption may be not correct but will be updated after header decoded if not true.
1007  if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
1008  pix_fmt = q->orig_pix_fmt;
1009  if (!avctx->coded_width)
1010  avctx->coded_width = 1280;
1011  if (!avctx->coded_height)
1012  avctx->coded_height = 720;
1013 
1014  /* decode zero-size pkt to flush the buffered pkt before reinit */
1015  if (q->reinit_flag) {
1016  AVPacket zero_pkt = {0};
1017  ret = qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
1018  if (ret < 0 || *got_frame)
1019  return ret;
1020  }
1021 
1022  if (q->reinit_flag || !q->session || !q->initialized) {
1023  mfxFrameAllocRequest request;
1024  memset(&request, 0, sizeof(request));
1025 
1026  q->reinit_flag = 0;
1027  ret = qsv_decode_header(avctx, q, pkt, pix_fmt, &param);
1028  if (ret < 0) {
1029  if (ret == AVERROR(EAGAIN))
1030  av_log(avctx, AV_LOG_VERBOSE, "More data is required to decode header\n");
1031  else
1032  av_log(avctx, AV_LOG_ERROR, "Error decoding header\n");
1033  goto reinit_fail;
1034  }
1035  param.IOPattern = q->iopattern;
1036 
1037  q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC);
1038 
1039  avctx->coded_width = param.mfx.FrameInfo.Width;
1040  avctx->coded_height = param.mfx.FrameInfo.Height;
1041 
1042  ret = MFXVideoDECODE_QueryIOSurf(q->session, &param, &request);
1043  if (ret < 0)
1044  return ff_qsv_print_error(avctx, ret, "Error querying IO surface");
1045 
1046  q->suggest_pool_size = request.NumFrameSuggested;
1047 
1048  ret = qsv_decode_preinit(avctx, q, pix_fmt, &param);
1049  if (ret < 0)
1050  goto reinit_fail;
1051  q->initialized = 0;
1052  }
1053 
1054  if (!q->initialized) {
1055  ret = qsv_decode_init_context(avctx, q, &param);
1056  if (ret < 0)
1057  goto reinit_fail;
1058  q->initialized = 1;
1059  }
1060 
1061  return qsv_decode(avctx, q, frame, got_frame, pkt);
1062 
1063 reinit_fail:
1064  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
1065  return ret;
1066 }
1067 
1072 };
1073 
1074 typedef struct QSVDecContext {
1075  AVClass *class;
1077 
1079 
1081 
1083 } QSVDecContext;
1084 
1086 {
1087  AVPacket pkt;
1088  while (av_fifo_read(s->packet_fifo, &pkt, 1) >= 0)
1089  av_packet_unref(&pkt);
1090 
1091  av_packet_unref(&s->buffer_pkt);
1092 }
1093 
1095 {
1096  QSVDecContext *s = avctx->priv_data;
1097 
1099 
1101 
1102  av_fifo_freep2(&s->packet_fifo);
1103 
1104  return 0;
1105 }
1106 
1108 {
1109  QSVDecContext *s = avctx->priv_data;
1110  int ret;
1111  const char *uid = NULL;
1112 
1113  if (avctx->codec_id == AV_CODEC_ID_VP8) {
1114  uid = "f622394d8d87452f878c51f2fc9b4131";
1115  } else if (avctx->codec_id == AV_CODEC_ID_VP9) {
1116  uid = "a922394d8d87452f878c51f2fc9b4131";
1117  }
1118  else if (avctx->codec_id == AV_CODEC_ID_HEVC && s->load_plugin != LOAD_PLUGIN_NONE) {
1119  static const char * const uid_hevcdec_sw = "15dd936825ad475ea34e35f3f54217a6";
1120  static const char * const uid_hevcdec_hw = "33a61c0b4c27454ca8d85dde757c6f8e";
1121 
1122  if (s->qsv.load_plugins[0]) {
1123  av_log(avctx, AV_LOG_WARNING,
1124  "load_plugins is not empty, but load_plugin is not set to 'none'."
1125  "The load_plugin value will be ignored.\n");
1126  } else {
1127  if (s->load_plugin == LOAD_PLUGIN_HEVC_SW)
1128  uid = uid_hevcdec_sw;
1129  else
1130  uid = uid_hevcdec_hw;
1131  }
1132  }
1133  if (uid) {
1134  av_freep(&s->qsv.load_plugins);
1135  s->qsv.load_plugins = av_strdup(uid);
1136  if (!s->qsv.load_plugins)
1137  return AVERROR(ENOMEM);
1138  }
1139 
1140  s->qsv.orig_pix_fmt = AV_PIX_FMT_NV12;
1141  s->packet_fifo = av_fifo_alloc2(1, sizeof(AVPacket),
1143  if (!s->packet_fifo) {
1144  ret = AVERROR(ENOMEM);
1145  goto fail;
1146  }
1147 
1148  if (!avctx->pkt_timebase.num)
1149  av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");
1150 
1151  return 0;
1152 fail:
1153  qsv_decode_close(avctx);
1154  return ret;
1155 }
1156 
1158  int *got_frame, AVPacket *avpkt)
1159 {
1160  QSVDecContext *s = avctx->priv_data;
1161  int ret;
1162 
1163  /* buffer the input packet */
1164  if (avpkt->size) {
1165  AVPacket input_ref;
1166 
1167  ret = av_packet_ref(&input_ref, avpkt);
1168  if (ret < 0)
1169  return ret;
1170  av_fifo_write(s->packet_fifo, &input_ref, 1);
1171  }
1172 
1173  /* process buffered data */
1174  while (!*got_frame) {
1175  /* prepare the input data */
1176  if (s->buffer_pkt.size <= 0) {
1177  /* no more data */
1178  if (!av_fifo_can_read(s->packet_fifo))
1179  return avpkt->size ? avpkt->size : qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt);
1180  /* in progress of reinit, no read from fifo and keep the buffer_pkt */
1181  if (!s->qsv.reinit_flag) {
1182  av_packet_unref(&s->buffer_pkt);
1183  av_fifo_read(s->packet_fifo, &s->buffer_pkt, 1);
1184  }
1185  }
1186 
1187  ret = qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->buffer_pkt);
1188  if (ret < 0){
1189  if (ret == AVERROR(EAGAIN))
1190  ret = 0;
1191 
1192  /* Drop buffer_pkt when failed to decode the packet. Otherwise,
1193  the decoder will keep decoding the failure packet. */
1194  av_packet_unref(&s->buffer_pkt);
1195  return ret;
1196  }
1197  if (s->qsv.reinit_flag)
1198  continue;
1199 
1200  s->buffer_pkt.size -= ret;
1201  s->buffer_pkt.data += ret;
1202  }
1203 
1204  return avpkt->size;
1205 }
1206 
1208 {
1209  QSVDecContext *s = avctx->priv_data;
1210 
1212 
1213  s->qsv.orig_pix_fmt = AV_PIX_FMT_NONE;
1214  s->qsv.initialized = 0;
1215 }
1216 
/* AVOption helpers: field offset within QSVDecContext, and the common
 * video-decoding option flags. */
#define OFFSET(x) offsetof(QSVDecContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1219 
/* Stamp out a complete QSV decoder: an AVClass named "<x>_qsv" bound to the
 * given option table, and an FFCodec wiring the shared init/decode/flush/close
 * callbacks, the optional bitstream filter, and the QSV hw configs. */
#define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt) \
static const AVClass x##_qsv_class = { \
    .class_name = #x "_qsv", \
    .item_name  = av_default_item_name, \
    .option     = opt, \
    .version    = LIBAVUTIL_VERSION_INT, \
}; \
const FFCodec ff_##x##_qsv_decoder = { \
    .p.name         = #x "_qsv", \
    CODEC_LONG_NAME(#X " video (Intel Quick Sync Video acceleration)"), \
    .priv_data_size = sizeof(QSVDecContext), \
    .p.type         = AVMEDIA_TYPE_VIDEO, \
    .p.id           = AV_CODEC_ID_##X, \
    .init           = qsv_decode_init, \
    FF_CODEC_DECODE_CB(qsv_decode_frame), \
    .flush          = qsv_decode_flush, \
    .close          = qsv_decode_close, \
    .bsfs           = bsf_name, \
    .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HYBRID, \
    .p.priv_class   = &x##_qsv_class, \
    .hw_configs     = qsv_hw_configs, \
    .p.wrapper_name = "qsv", \
    .caps_internal  = FF_CODEC_CAP_NOT_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING, \
}; \

/* Convenience wrapper using the generic 'options' table. */
#define DEFINE_QSV_DECODER(x, X, bsf_name) DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, options)
1246 
#if CONFIG_HEVC_QSV_DECODER
/* HEVC-specific option table: extends the generic options with the
 * load_plugin/load_plugins controls used by qsv_decode_init(). */
static const AVOption hevc_options[] = {
    { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },

    { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VD, .unit = "load_plugin" },
    { "none",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE },    0, 0, VD, .unit = "load_plugin" },
    { "hevc_sw",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VD, .unit = "load_plugin" },
    { "hevc_hw",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VD, .unit = "load_plugin" },

    /* typo fix: ":-separate" -> ":-separated" in the user-visible help text */
    { "load_plugins", "A :-separated list of hexadecimal plugin UIDs to load in an internal session",
        OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD },

    { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, .unit = "gpu_copy"},
    { "default",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, .unit = "gpu_copy"},
    { "on",       NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON },      0, 0, VD, .unit = "gpu_copy"},
    { "off",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF },     0, 0, VD, .unit = "gpu_copy"},
    { NULL },
};
DEFINE_QSV_DECODER_WITH_OPTION(hevc, HEVC, "hevc_mp4toannexb", hevc_options)
#endif
1267 
/* Generic option table shared by all non-HEVC QSV decoders. */
static const AVOption options[] = {
    { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },

    { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, .unit = "gpu_copy"},
    { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, .unit = "gpu_copy"},
    { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, .unit = "gpu_copy"},
    { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, .unit = "gpu_copy"},
    { NULL },
};
1277 
/* Per-codec decoder instantiations. The second argument is the AVCodecID
 * suffix; the third is an optional bitstream filter applied before decoding
 * (annex-b conversion for H.264/HEVC/VVC elementary streams). */
#if CONFIG_H264_QSV_DECODER
DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb")
#endif

#if CONFIG_MPEG2_QSV_DECODER
DEFINE_QSV_DECODER(mpeg2, MPEG2VIDEO, NULL)
#endif

#if CONFIG_VC1_QSV_DECODER
DEFINE_QSV_DECODER(vc1, VC1, NULL)
#endif

#if CONFIG_MJPEG_QSV_DECODER
DEFINE_QSV_DECODER(mjpeg, MJPEG, NULL)
#endif

#if CONFIG_VP8_QSV_DECODER
DEFINE_QSV_DECODER(vp8, VP8, NULL)
#endif

#if CONFIG_VP9_QSV_DECODER
DEFINE_QSV_DECODER(vp9, VP9, NULL)
#endif

#if CONFIG_AV1_QSV_DECODER
DEFINE_QSV_DECODER(av1, AV1, NULL)
#endif

#if CONFIG_VVC_QSV_DECODER
DEFINE_QSV_DECODER(vvc, VVC, "vvc_mp4toannexb")
#endif
hwconfig.h
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
AVQSVFramesContext::frame_type
int frame_type
A combination of MFX_MEMTYPE_* describing the frame pool.
Definition: hwcontext_qsv.h:75
AVCodecContext::hwaccel_context
void * hwaccel_context
Legacy hardware accelerator context.
Definition: avcodec.h:1437
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:283
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AVMasteringDisplayMetadata::max_luminance
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:57
LOAD_PLUGIN_HEVC_HW
@ LOAD_PLUGIN_HEVC_HW
Definition: qsvdec.c:1071
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
uid
UID uid
Definition: mxfenc.c:2486
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1208
QSVFramesContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: qsv_internal.h:115
AVBufferPool
The buffer pool.
Definition: buffer_internal.h:88
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: defs.h:213
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:200
AVMasteringDisplayMetadata::has_luminance
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
Definition: mastering_display_metadata.h:67
LOAD_PLUGIN_NONE
@ LOAD_PLUGIN_NONE
Definition: qsvdec.c:1069
AVFilmGrainAOMParams::uv_points
uint8_t uv_points[2][10][2]
Definition: film_grain_params.h:63
OFFSET
#define OFFSET(x)
Definition: qsvdec.c:1217
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
AVContentLightMetadata::MaxCLL
unsigned MaxCLL
Max content light level (cd/m^2).
Definition: mastering_display_metadata.h:111
AVFilmGrainParams::aom
AVFilmGrainAOMParams aom
Definition: film_grain_params.h:247
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:337
ff_qsv_close_internal_session
int ff_qsv_close_internal_session(QSVSession *qs)
Definition: qsv.c:1159
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
ff_qsv_map_pictype
enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type)
Definition: qsv.c:379
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:660
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:263
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:588
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:690
AVOption
AVOption.
Definition: opt.h:429
qsv_decode_close_qsvcontext
static void qsv_decode_close_qsvcontext(QSVContext *q)
Definition: qsvdec.c:964
ff_qsv_find_surface_idx
int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
Definition: qsv.c:348
AV_PIX_FMT_XV30
#define AV_PIX_FMT_XV30
Definition: pixfmt.h:609
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
QSVContext::work_frames
QSVFrame * work_frames
a linked list of frames currently being used by QSV
Definition: qsvdec.c:94
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:220
LOAD_PLUGIN_HEVC_SW
@ LOAD_PLUGIN_HEVC_SW
Definition: qsvdec.c:1070
qsv_decode_init_context
static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
Definition: qsvdec.c:396
QSVFrame::frame
AVFrame * frame
Definition: qsv_internal.h:80
AVQSVContext::iopattern
int iopattern
The IO pattern to use.
Definition: qsv.h:46
QSVFrame::used
int used
Definition: qsv_internal.h:100
AVFilmGrainParams::seed
uint64_t seed
Seed to use for the synthesis process, if the codec allows for it.
Definition: film_grain_params.h:213
ff_qsv_init_session_device
int ff_qsv_init_session_device(AVCodecContext *avctx, mfxSession *psession, AVBufferRef *device_ref, const char *load_plugins, int gpu_copy)
Definition: qsv.c:1032
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:107
ff_qsv_map_frame_to_surface
int ff_qsv_map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
Definition: qsv.c:287
fifo.h
qsv_decode_flush
static void qsv_decode_flush(AVCodecContext *avctx)
Definition: qsvdec.c:1207
QSVContext::suggest_pool_size
int suggest_pool_size
Definition: qsvdec.c:104
fail
#define fail()
Definition: checkasm.h:214
qsv_decode
static int qsv_decode(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, const AVPacket *avpkt)
Definition: qsvdec.c:788
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
AVFilmGrainAOMParams::grain_scale_shift
int grain_scale_shift
Signals the down shift applied to the generated gaussian numbers during synthesis.
Definition: film_grain_params.h:99
AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
The codec supports this format via the hw_frames_ctx interface.
Definition: codec.h:311
QSVDecContext::qsv
QSVContext qsv
Definition: qsvdec.c:1076
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
AVFilmGrainAOMParams::limit_output_range
int limit_output_range
Signals to clip to limited color levels after film grain application.
Definition: film_grain_params.h:122
AVFilmGrainAOMParams::num_y_points
int num_y_points
Number of points, and the scale and value for each point of the piecewise linear scaling function for...
Definition: film_grain_params.h:49
AVRational::num
int num
Numerator.
Definition: rational.h:59
QSVDecContext::packet_fifo
AVFifo * packet_fifo
Definition: qsvdec.c:1080
QSVContext::async_fifo
AVFifo * async_fifo
Definition: qsvdec.c:96
refstruct.h
QSVContext
Definition: qsvdec.c:79
qsv_internal.h
AVFilmGrainAOMParams
This structure describes how to handle film grain synthesis for AOM codecs.
Definition: film_grain_params.h:44
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
get_surface
static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
Definition: qsvdec.c:575
AV_PIX_FMT_Y210
#define AV_PIX_FMT_Y210
Definition: pixfmt.h:606
avassert.h
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:653
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
ff_qsv_print_warning
int ff_qsv_print_warning(void *log_ctx, mfxStatus err, const char *warning_string)
Definition: qsv.c:198
ASYNC_DEPTH_DEFAULT
#define ASYNC_DEPTH_DEFAULT
Definition: qsv_internal.h:50
film_grain_params.h
av_cold
#define av_cold
Definition: attributes.h:106
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
AVHWFramesContext::height
int height
Definition: hwcontext.h:220
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
QSVDecContext
Definition: qsvdec.c:1074
QSVContext::iopattern
int iopattern
Definition: qsvdec.c:109
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:390
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
s
#define s(width, name)
Definition: cbs_vp9.c:198
hevc_options
static const AVOption hevc_options[]
Definition: videotoolboxenc.c:2866
QSVContext::reinit_flag
int reinit_flag
Definition: qsvdec.c:98
QSVContext::frames_ctx
QSVFramesContext frames_ctx
Definition: qsvdec.c:89
QSVContext::internal_qs
QSVSession internal_qs
Definition: qsvdec.c:87
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
mfx_tb
static const AVRational mfx_tb
Definition: qsvdec.c:62
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:222
QSVContext::ver
mfxVersion ver
Definition: qsvdec.c:82
qsv_process_data
static int qsv_process_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: qsvdec.c:992
QSV_RUNTIME_VERSION_ATLEAST
#define QSV_RUNTIME_VERSION_ATLEAST(MFX_VERSION, MAJOR, MINOR)
Definition: qsv_internal.h:63
av_film_grain_params_create_side_data
AVFilmGrainParams * av_film_grain_params_create_side_data(AVFrame *frame)
Allocate a complete AVFilmGrainParams and add it to the frame.
Definition: film_grain_params.c:33
MFX_PTS_TO_PTS
#define MFX_PTS_TO_PTS(mfx_pts, pts_tb)
Definition: qsvdec.c:68
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
decode.h
AVCodecHWConfig::pix_fmt
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:339
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AVQSVContext::nb_ext_buffers
int nb_ext_buffers
Definition: qsv.h:52
ff_decode_mastering_display_new
int ff_decode_mastering_display_new(const AVCodecContext *avctx, AVFrame *frame, AVMasteringDisplayMetadata **mdm)
Wrapper around av_mastering_display_metadata_create_side_data(), which rejects side data overridden b...
Definition: decode.c:2206
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
if
if(ret)
Definition: filter_design.txt:179
ff_qsv_init_session_frames
int ff_qsv_init_session_frames(AVCodecContext *avctx, mfxSession *psession, QSVFramesContext *qsv_frames_ctx, const char *load_plugins, int opaque, int gpu_copy)
Definition: qsv.c:1109
QSVFrame
Definition: qsv_internal.h:79
AVFilmGrainAOMParams::uv_mult_luma
int uv_mult_luma[2]
Definition: film_grain_params.h:106
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:213
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:677
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_CODEC_ID_AV1
@ AV_CODEC_ID_AV1
Definition: codec_id.h:284
qsv.h
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
QSV_HAVE_OPAQUE
#define QSV_HAVE_OPAQUE
Definition: qsv_internal.h:68
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
qsv_decode_preinit
static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
Definition: qsvdec.c:297
ff_qsv_print_iopattern
int ff_qsv_print_iopattern(void *log_ctx, int mfx_iopattern, const char *extra_string)
Definition: qsv.c:104
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:328
av_fifo_can_read
size_t av_fifo_can_read(const AVFifo *f)
Definition: fifo.c:87
qsv_get_continuous_buffer
static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferPool *pool)
Definition: qsvdec.c:131
QSVContext::nb_ext_buffers
int nb_ext_buffers
Definition: qsvdec.c:115
options
Definition: swscale.c:43
QSVFrame::surface
mfxFrameSurface1 surface
Definition: qsv_internal.h:81
time.h
alloc_frame
static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
Definition: qsvdec.c:494
AV_PIX_FMT_QSV
@ AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
Definition: pixfmt.h:247
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: packet.c:440
QSVContext::load_plugins
char * load_plugins
Definition: qsvdec.c:112
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1636
QSVContext::initialized
int initialized
Definition: qsvdec.c:105
QSVContext::fourcc
uint32_t fourcc
Definition: qsvdec.c:101
QSVContext::ext_buffers
mfxExtBuffer ** ext_buffers
Definition: qsvdec.c:114
AVFilmGrainAOMParams::num_uv_points
int num_uv_points[2]
If chroma_scaling_from_luma is set to 0, signals the chroma scaling function parameters.
Definition: film_grain_params.h:62
QSVContext::frame_info
mfxFrameInfo frame_info
Definition: qsvdec.c:102
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1729
AVPacket::size
int size
Definition: packet.h:589
AVFifo
Definition: fifo.c:35
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
AVCodecContext::extra_hw_frames
int extra_hw_frames
Video decoding only.
Definition: avcodec.h:1506
DEFINE_QSV_DECODER_WITH_OPTION
#define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt)
Definition: qsvdec.c:1220
codec_internal.h
AV_PIX_FMT_P012
#define AV_PIX_FMT_P012
Definition: pixfmt.h:603
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
QSVContext::handle_type
mfxHandleType handle_type
Definition: qsvdec.c:83
AVQSVContext::session
mfxSession session
If non-NULL, the session to use for encoding or decoding.
Definition: qsv.h:41
qsv_init_session
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session, AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
Definition: qsvdec.c:186
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:550
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
qsv_decode_close
static av_cold int qsv_decode_close(AVCodecContext *avctx)
Definition: qsvdec.c:1094
AVFilmGrainParams
This structure describes how to handle film grain synthesis in video for specific codecs.
Definition: film_grain_params.h:201
qsv_clear_unused_frames
static void qsv_clear_unused_frames(QSVContext *q)
Definition: qsvdec.c:563
AVCodecHWConfigInternal
Definition: hwconfig.h:25
frame.h
AV_PIX_FMT_Y212
#define AV_PIX_FMT_Y212
Definition: pixfmt.h:607
AVQSVContext::ext_buffers
mfxExtBuffer ** ext_buffers
Extra buffers to pass to encoder or decoder initialization.
Definition: qsv.h:51
av_image_get_buffer_size
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
Definition: imgutils.c:466
AV_CODEC_ID_VVC
@ AV_CODEC_ID_VVC
Definition: codec_id.h:252
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
av_content_light_metadata_create_side_data
AVContentLightMetadata * av_content_light_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVContentLightMetadata and add it to the frame.
Definition: mastering_display_metadata.c:82
AVFilmGrainAOMParams::ar_coeffs_y
int8_t ar_coeffs_y[24]
Luma auto-regression coefficients.
Definition: film_grain_params.h:80
QSVFramesContext::mids
QSVMid * mids
The memory ids for the external frames.
Definition: qsv_internal.h:124
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
QSVAsyncFrame::frame
QSVFrame * frame
Definition: qsvdec.c:76
hwcontext_qsv.h
MFXUnload
#define MFXUnload(a)
Definition: qsvdec.c:59
qsv_decode_header
static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q, const AVPacket *avpkt, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
Definition: qsvdec.c:425
QSVContext::pool
AVBufferPool * pool
Definition: qsvdec.c:103
log.h
ff_qsv_map_picstruct
enum AVFieldOrder ff_qsv_map_picstruct(int mfx_pic_struct)
Definition: qsv.c:361
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:581
qsv_hw_configs
static const AVCodecHWConfigInternal *const qsv_hw_configs[]
Definition: qsvdec.c:118
QSVDecContext::buffer_pkt
AVPacket buffer_pkt
Definition: qsvdec.c:1082
common.h
QSVContext::session
mfxSession session
Definition: qsvdec.c:81
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:228
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVFilmGrainAOMParams::scaling_shift
int scaling_shift
Specifies the shift applied to the chroma components.
Definition: film_grain_params.h:69
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1483
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
QSVDecContext::load_plugin
int load_plugin
Definition: qsvdec.c:1078
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1461
avcodec.h
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:118
find_frame
static QSVFrame * find_frame(QSVContext *q, mfxFrameSurface1 *surf)
Definition: qsvdec.c:616
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ret
ret
Definition: filter_design.txt:187
pixfmt.h
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:96
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
QSVFrame::queued
int queued
Definition: qsv_internal.h:99
QSVContext::async_depth
int async_depth
Definition: qsvdec.c:108
QSVSession
Definition: qsv_internal.h:105
AVHWFramesContext::hwctx
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
Definition: hwcontext.h:153
ff_qsv_codec_id_to_mfx
int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id)
Definition: qsv.c:54
LoadPlugin
LoadPlugin
Definition: qsvdec.c:1068
ff_decode_content_light_new
int ff_decode_content_light_new(const AVCodecContext *avctx, AVFrame *frame, AVContentLightMetadata **clm)
Wrapper around av_content_light_metadata_create_side_data(), which rejects side data overridden by th...
Definition: decode.c:2251
QSVContext::zero_consume_run
int zero_consume_run
Definition: qsvdec.c:97
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AV_HWDEVICE_TYPE_QSV
@ AV_HWDEVICE_TYPE_QSV
Definition: hwcontext.h:33
ff_decode_frame_props
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1571
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVFilmGrainAOMParams::ar_coeff_lag
int ar_coeff_lag
Specifies the auto-regression lag.
Definition: film_grain_params.h:74
QSVContext::orig_pix_fmt
enum AVPixelFormat orig_pix_fmt
Definition: qsvdec.c:100
av_mastering_display_metadata_create_side_data
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
Definition: mastering_display_metadata.c:58
AVFilmGrainAOMParams::y_points
uint8_t y_points[14][2]
Definition: film_grain_params.h:50
AVFilmGrainAOMParams::uv_offset
int uv_offset[2]
Offset used for component scaling function.
Definition: film_grain_params.h:112
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1626
AVQSVContext
This struct is used for communicating QSV parameters between libavcodec and the caller.
Definition: qsv.h:36
AVFilmGrainParams::codec
union AVFilmGrainParams::@500 codec
Additional fields may be added both here and in any structure included.
QSVSession::session
mfxSession session
Definition: qsv_internal.h:106
ff_qsv_map_fourcc
enum AVPixelFormat ff_qsv_map_fourcc(uint32_t fourcc)
Definition: qsv.c:207
AVFilmGrainAOMParams::uv_mult
int uv_mult[2]
Specifies the luma/chroma multipliers for the index to the component scaling function.
Definition: film_grain_params.h:105
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:1782
qsv_decode_frame
static int qsv_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: qsvdec.c:1157
AVMasteringDisplayMetadata::min_luminance
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:52
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:602
AVFilmGrainAOMParams::overlap_flag
int overlap_flag
Signals whether to overlap film grain blocks.
Definition: film_grain_params.h:117
AVQSVFramesContext
This struct is allocated as AVHWFramesContext.hwctx.
Definition: hwcontext_qsv.h:53
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
AVHWFramesContext::initial_pool_size
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:190
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
qsv_decode_init
static av_cold int qsv_decode_init(AVCodecContext *avctx)
Definition: qsvdec.c:1107
QSVFrame::dec_info
mfxExtDecodedFrameInfo dec_info
Definition: qsv_internal.h:83
mastering_display_metadata.h
ff_attach_decode_data
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1648
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:298
qsv_clear_buffers
static void qsv_clear_buffers(QSVDecContext *s)
Definition: qsvdec.c:1085
DEFINE_QSV_DECODER
#define DEFINE_QSV_DECODER(x, X, bsf_name)
Definition: qsvdec.c:1245
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
QSVAsyncFrame::sync
mfxSyncPoint * sync
Definition: qsvdec.c:75
QSVFramesContext
Definition: qsv_internal.h:114
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AVContentLightMetadata::MaxFALL
unsigned MaxFALL
Max average light level per frame (cd/m^2).
Definition: mastering_display_metadata.h:116
AVPacket
This structure stores compressed data.
Definition: packet.h:565
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
imgutils.h
PTS_TO_MFX_PTS
#define PTS_TO_MFX_PTS(pts, pts_tb)
Definition: qsvdec.c:64
AV_PIX_FMT_XV36
#define AV_PIX_FMT_XV36
Definition: pixfmt.h:610
AV_CODEC_ID_VP8
@ AV_CODEC_ID_VP8
Definition: codec_id.h:192
hwcontext.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
VD
#define VD
Definition: qsvdec.c:1218
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
QSVContext::gpu_copy
int gpu_copy
Definition: qsvdec.c:110
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:646
QSVAsyncFrame
Definition: qsvdec.c:74
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_mal...
Definition: opt.h:276
AVFilmGrainAOMParams::chroma_scaling_from_luma
int chroma_scaling_from_luma
Signals whether to derive the chroma scaling function from the luma.
Definition: film_grain_params.h:56
AV_PIX_FMT_VUYX
@ AV_PIX_FMT_VUYX
packed VUYX 4:4:4:4, 32bpp, Variant of VUYA where alpha channel is left undefined
Definition: pixfmt.h:406
QSVSession::loader
void * loader
Definition: qsv_internal.h:111
ff_qsv_frame_add_ext_param
void ff_qsv_frame_add_ext_param(AVCodecContext *avctx, QSVFrame *frame, mfxExtBuffer *param)
Definition: qsv.c:1177
AVCodecHWConfigInternal::public
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwconfig.h:30
AV_FILM_GRAIN_PARAMS_AV1
@ AV_FILM_GRAIN_PARAMS_AV1
The union is valid when interpreted as AVFilmGrainAOMParams (codec.aom)
Definition: film_grain_params.h:30
QSVFrame::next
struct QSVFrame * next
Definition: qsv_internal.h:102
ff_qsv_print_error
int ff_qsv_print_error(void *log_ctx, mfxStatus err, const char *error_string)
Definition: qsv.c:189
AVFilmGrainParams::type
enum AVFilmGrainParamsType type
Specifies the codec for which this structure is valid.
Definition: film_grain_params.h:205
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
ff_qsv_init_internal_session
int ff_qsv_init_internal_session(AVCodecContext *avctx, QSVSession *qs, const char *load_plugins, int gpu_copy)
Definition: qsv.c:681
MFX_IMPL_VIA_MASK
#define MFX_IMPL_VIA_MASK(impl)
Definition: qsvdec.c:72
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:400
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
AVFilmGrainAOMParams::ar_coeff_shift
int ar_coeff_shift
Specifies the range of the auto-regressive coefficients.
Definition: film_grain_params.h:93
AVFilmGrainAOMParams::ar_coeffs_uv
int8_t ar_coeffs_uv[2][25]
Chroma auto-regression coefficients.
Definition: film_grain_params.h:86