qsvdec.c
1 /*
2  * Intel MediaSDK QSV codec-independent code
3  *
4  * copyright (c) 2013 Luca Barbato
5  * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include <stdint.h>
27 #include <string.h>
28 #include <sys/types.h>
29 
30 #include <mfxvideo.h>
31 
32 #include "libavutil/common.h"
33 #include "libavutil/fifo.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/hwcontext_qsv.h"
37 #include "libavutil/mem.h"
38 #include "libavutil/log.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/pixfmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/imgutils.h"
43 #include "libavutil/film_grain_params.h"
44 #include "libavutil/mastering_display_metadata.h"
45 
46 #include "avcodec.h"
47 #include "codec_internal.h"
48 #include "internal.h"
49 #include "decode.h"
50 #include "hwconfig.h"
51 #include "qsv.h"
52 #include "qsv_internal.h"
53 
54 #if QSV_ONEVPL
55 #include <mfxdispatcher.h>
56 #else
57 #define MFXUnload(a) do { } while(0)
58 #endif
59 
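/* MediaSDK timestamps run on a fixed 90 kHz clock. The two macros below
 * rescale between the caller's pkt_timebase and that clock, map
 * AV_NOPTS_VALUE to MFX_TIMESTAMP_UNKNOWN (and back), and pass values
 * through unchanged when no timebase is set. */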
60 static const AVRational mfx_tb = { 1, 90000 };
61 
62 #define PTS_TO_MFX_PTS(pts, pts_tb) ((pts) == AV_NOPTS_VALUE ? \
63  MFX_TIMESTAMP_UNKNOWN : pts_tb.num ? \
64  av_rescale_q(pts, pts_tb, mfx_tb) : pts)
65 
66 #define MFX_PTS_TO_PTS(mfx_pts, pts_tb) ((mfx_pts) == MFX_TIMESTAMP_UNKNOWN ? \
67  AV_NOPTS_VALUE : pts_tb.num ? \
68  av_rescale_q(mfx_pts, mfx_tb, pts_tb) : mfx_pts)
69 
70 typedef struct QSVAsyncFrame {
71  mfxSyncPoint *sync;
72  QSVFrame *frame;
73 } QSVAsyncFrame;
74 
75 typedef struct QSVContext {
76  // the session used for decoding
77  mfxSession session;
78  mfxVersion ver;
79 
80  // the session we allocated internally, in case the caller did not provide
81  * one
82  QSVSession internal_qs;
83 
84  QSVFramesContext frames_ctx;
85 
86  /**
87  * a linked list of frames currently being used by QSV
88  */
89  QSVFrame *work_frames;
90 
91  AVFifo *async_fifo;
92  int zero_consume_run;
93  int reinit_flag;
94 
95  enum AVPixelFormat orig_pix_fmt;
96  uint32_t fourcc;
97  mfxFrameInfo frame_info;
98  AVBufferPool *pool;
99  int suggest_pool_size;
100  int initialized;
101 
102  // options set by the caller
103  int async_depth;
104  int iopattern;
105  int gpu_copy;
106 
107  char *load_plugins;
108 
109  mfxExtBuffer **ext_buffers;
110  int nb_ext_buffers;
111 } QSVContext;
112 
113 static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
114  &(const AVCodecHWConfigInternal) {
115  .public = {
116  .pix_fmt = AV_PIX_FMT_QSV,
117  .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
118  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX,
119  .device_type = AV_HWDEVICE_TYPE_QSV,
120  },
121  .hwaccel = NULL,
122  },
123  NULL
124 };
125 
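/* System-memory output helper: instead of ff_get_buffer(), the picture is
 * placed in one contiguous buffer taken from q->pool (set up in
 * qsv_decode_init_context) with linesizes aligned to 128 pixels. */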
126 static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame,
127  AVBufferPool *pool)
128 {
129  int ret = 0;
130 
131  ret = ff_decode_frame_props(avctx, frame);
132  if (ret < 0)
133  return ret;
134 
135  frame->width = avctx->width;
136  frame->height = avctx->height;
137 
138  switch (avctx->pix_fmt) {
139  case AV_PIX_FMT_NV12:
140  frame->linesize[0] = FFALIGN(avctx->width, 128);
141  break;
142  case AV_PIX_FMT_P010:
143  case AV_PIX_FMT_P012:
144  case AV_PIX_FMT_YUYV422:
145  frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
146  break;
147  case AV_PIX_FMT_Y210:
148  case AV_PIX_FMT_VUYX:
149  case AV_PIX_FMT_XV30:
150  case AV_PIX_FMT_Y212:
151  frame->linesize[0] = 4 * FFALIGN(avctx->width, 128);
152  break;
153  case AV_PIX_FMT_XV36:
154  frame->linesize[0] = 8 * FFALIGN(avctx->width, 128);
155  break;
156  default:
157  av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
158  return AVERROR(EINVAL);
159  }
160 
161  frame->buf[0] = av_buffer_pool_get(pool);
162  if (!frame->buf[0])
163  return AVERROR(ENOMEM);
164 
165  frame->data[0] = frame->buf[0]->data;
166  if (avctx->pix_fmt == AV_PIX_FMT_NV12 ||
167  avctx->pix_fmt == AV_PIX_FMT_P010 ||
168  avctx->pix_fmt == AV_PIX_FMT_P012) {
169  frame->linesize[1] = frame->linesize[0];
170  frame->data[1] = frame->data[0] +
171  frame->linesize[0] * FFALIGN(avctx->height, 64);
172  }
173 
174  ret = ff_attach_decode_data(frame);
175  if (ret < 0)
176  return ret;
177 
178  return 0;
179 }
180 
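/* Pick the MFX session to decode with: a session supplied by the caller via
 * AVQSVContext, one derived from hw_frames_ctx or hw_device_ctx, or a purely
 * internal session as a last resort. The decoder is closed on the session so
 * that a later MFXVideoDECODE_Init starts from a clean state. */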
181 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
182  AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
183 {
184  int ret;
185 
186  if (q->gpu_copy == MFX_GPUCOPY_ON &&
187  !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
188  av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
189  "only works in system memory mode.\n");
190  q->gpu_copy = MFX_GPUCOPY_OFF;
191  }
192  if (session) {
193  q->session = session;
194  } else if (hw_frames_ref) {
195  if (q->internal_qs.session) {
196  MFXClose(q->internal_qs.session);
197  q->internal_qs.session = NULL;
198  }
199  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
200 
201  q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
202  if (!q->frames_ctx.hw_frames_ctx)
203  return AVERROR(ENOMEM);
204 
205  ret = ff_qsv_init_session_frames(avctx, &q->internal_qs.session,
206  &q->frames_ctx, q->load_plugins,
207 #if QSV_HAVE_OPAQUE
208  q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY,
209 #else
210  0,
211 #endif
212  q->gpu_copy);
213  if (ret < 0) {
214  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
215  return ret;
216  }
217 
218  q->session = q->internal_qs.session;
219  } else if (hw_device_ref) {
220  if (q->internal_qs.session) {
221  MFXClose(q->internal_qs.session);
222  q->internal_qs.session = NULL;
223  }
224 
225  ret = ff_qsv_init_session_device(avctx, &q->internal_qs.session,
226  hw_device_ref, q->load_plugins, q->gpu_copy);
227  if (ret < 0)
228  return ret;
229 
230  q->session = q->internal_qs.session;
231  } else {
232  if (!q->internal_qs.session) {
233  ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
234  q->load_plugins, q->gpu_copy);
235  if (ret < 0)
236  return ret;
237  }
238 
239  q->session = q->internal_qs.session;
240  }
241 
242  if (MFXQueryVersion(q->session, &q->ver) != MFX_ERR_NONE) {
243  av_log(avctx, AV_LOG_ERROR, "Error querying the session version. \n");
244  q->session = NULL;
245 
246  if (q->internal_qs.session) {
247  MFXClose(q->internal_qs.session);
248  q->internal_qs.session = NULL;
249  }
250 
251  if (q->internal_qs.loader) {
252  MFXUnload(q->internal_qs.loader);
253  q->internal_qs.loader = NULL;
254  }
255 
256  return AVERROR_EXTERNAL;
257  }
258 
259  /* make sure the decoder is uninitialized */
260  MFXVideoDECODE_Close(q->session);
261 
262  return 0;
263 }
264 
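/* One-time setup before the first DecodeHeader/Init: negotiate the output
 * pixel format with ff_get_format(), create the async fifo, derive an
 * AVHWFramesContext from hw_device_ctx when only a device was given, choose
 * the IOPattern and create the MFX session. */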
265 static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
266 {
267  mfxSession session = NULL;
268  int iopattern = 0;
269  int ret;
270  enum AVPixelFormat pix_fmts[3] = {
271  AV_PIX_FMT_QSV, /* opaque format in case of video memory output */
272  pix_fmt, /* system memory format obtained from bitstream parser */
273  AV_PIX_FMT_NONE };
274 
275  ret = ff_get_format(avctx, pix_fmts);
276  if (ret < 0) {
277  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
278  return ret;
279  }
280 
281  if (!q->async_fifo) {
282  q->async_fifo = av_fifo_alloc2(q->async_depth, sizeof(QSVAsyncFrame), 0);
283  if (!q->async_fifo)
284  return AVERROR(ENOMEM);
285  }
286 
287  if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
288  AVQSVContext *user_ctx = avctx->hwaccel_context;
289  session = user_ctx->session;
290  iopattern = user_ctx->iopattern;
291  q->ext_buffers = user_ctx->ext_buffers;
292  q->nb_ext_buffers = user_ctx->nb_ext_buffers;
293  }
294 
295  if (avctx->hw_device_ctx && !avctx->hw_frames_ctx && ret == AV_PIX_FMT_QSV) {
296  AVHWFramesContext *hwframes_ctx;
297  AVQSVFramesContext *frames_hwctx;
298 
299  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
300 
301  if (!avctx->hw_frames_ctx) {
302  av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
303  return AVERROR(ENOMEM);
304  }
305 
306  hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
307  frames_hwctx = hwframes_ctx->hwctx;
308  hwframes_ctx->width = FFALIGN(avctx->coded_width, 32);
309  hwframes_ctx->height = FFALIGN(avctx->coded_height, 32);
310  hwframes_ctx->format = AV_PIX_FMT_QSV;
311  hwframes_ctx->sw_format = avctx->sw_pix_fmt;
312  hwframes_ctx->initial_pool_size = q->suggest_pool_size + 16 + avctx->extra_hw_frames;
313  frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
314 
315  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
316 
317  if (ret < 0) {
318  av_log(NULL, AV_LOG_ERROR, "Error initializing a QSV frame pool\n");
319  av_buffer_unref(&avctx->hw_frames_ctx);
320  return ret;
321  }
322  }
323 
324  if (avctx->hw_frames_ctx) {
325  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
326  AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
327 
328  if (!iopattern) {
329 #if QSV_HAVE_OPAQUE
330  if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
331  iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
332  else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
333  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
334 #else
335  if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
336  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
337 #endif
338  }
339  }
340 
341  if (!iopattern)
342  iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
343  q->iopattern = iopattern;
344 
345  ff_qsv_print_iopattern(avctx, q->iopattern, "Decoder");
346 
347  ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
348  if (ret < 0) {
349  av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
350  return ret;
351  }
352 
353  param->IOPattern = q->iopattern;
354  param->AsyncDepth = q->async_depth;
355  param->ExtParam = q->ext_buffers;
356  param->NumExtParam = q->nb_ext_buffers;
357 
358  return 0;
359 }
360 
361 static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
362 {
363  int ret;
364 
365  avctx->width = param->mfx.FrameInfo.CropW;
366  avctx->height = param->mfx.FrameInfo.CropH;
367  avctx->coded_width = param->mfx.FrameInfo.Width;
368  avctx->coded_height = param->mfx.FrameInfo.Height;
369  avctx->level = param->mfx.CodecLevel;
370  avctx->profile = param->mfx.CodecProfile;
371  avctx->field_order = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct);
372  avctx->pix_fmt = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC);
373 
374  ret = MFXVideoDECODE_Init(q->session, param);
375  if (ret < 0)
376  return ff_qsv_print_error(avctx, ret,
377  "Error initializing the MFX video decoder");
378 
379  q->frame_info = param->mfx.FrameInfo;
380 
381  if (!avctx->hw_frames_ctx) {
382  ret = av_image_get_buffer_size(avctx->pix_fmt, FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1);
383  if (ret < 0)
384  return ret;
385  q->pool = av_buffer_pool_init(ret, av_buffer_allocz);
386  }
387  return 0;
388 }
389 
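/* Parse the stream header with MFXVideoDECODE_DecodeHeader() to fill *param;
 * returns AVERROR(EAGAIN) when more bitstream is needed. Color range and, if
 * present, color description are taken from the VideoSignalInfo ext buffer. */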
390 static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q,
391  const AVPacket *avpkt, enum AVPixelFormat pix_fmt,
392  mfxVideoParam *param)
393 {
394  int ret;
395  mfxExtVideoSignalInfo video_signal_info = { 0 };
396  mfxExtBuffer *header_ext_params[1] = { (mfxExtBuffer *)&video_signal_info };
397  mfxBitstream bs = { 0 };
398 
399  if (avpkt->size) {
400  bs.Data = avpkt->data;
401  bs.DataLength = avpkt->size;
402  bs.MaxLength = bs.DataLength;
403  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
404  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
405  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
406  } else
407  return AVERROR_INVALIDDATA;
408 
409 
410  if(!q->session) {
411  ret = qsv_decode_preinit(avctx, q, pix_fmt, param);
412  if (ret < 0)
413  return ret;
414  }
415 
416  ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
417  if (ret < 0)
418  return ret;
419 
420  param->mfx.CodecId = ret;
421  video_signal_info.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
422  video_signal_info.Header.BufferSz = sizeof(video_signal_info);
423  // The SDK doesn't support other ext buffers when calling MFXVideoDECODE_DecodeHeader,
424  // so do not append this buffer to the existent buffer array
425  param->ExtParam = header_ext_params;
426  param->NumExtParam = 1;
427  ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
428  if (MFX_ERR_MORE_DATA == ret) {
429  return AVERROR(EAGAIN);
430  }
431  if (ret < 0)
432  return ff_qsv_print_error(avctx, ret,
433  "Error decoding stream header");
434 
435  avctx->color_range = video_signal_info.VideoFullRange ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
436 
437  if (video_signal_info.ColourDescriptionPresent) {
438  avctx->color_primaries = video_signal_info.ColourPrimaries;
439  avctx->color_trc = video_signal_info.TransferCharacteristics;
440  avctx->colorspace = video_signal_info.MatrixCoefficients;
441  }
442 
443  param->ExtParam = q->ext_buffers;
444  param->NumExtParam = q->nb_ext_buffers;
445 
446 #if QSV_VERSION_ATLEAST(1, 34)
447  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1)
448  param->mfx.FilmGrain = (avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) ? 0 : param->mfx.FilmGrain;
449 #endif
450 
451  return 0;
452 }
453 
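/* Prepare one QSVFrame for DecodeFrameAsync: get an AVFrame buffer (pool or
 * ff_get_buffer), wrap or map it as an mfxFrameSurface1 and attach the
 * per-frame ext buffers (decoded frame info and, where supported, AV1 film
 * grain and HDR metadata). */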
454 static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
455 {
456  int ret;
457 
458  if (q->pool)
459  ret = qsv_get_continuous_buffer(avctx, frame->frame, q->pool);
460  else
461  ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
462 
463  if (ret < 0)
464  return ret;
465 
466  if (frame->frame->format == AV_PIX_FMT_QSV) {
467  frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
468  } else {
469  ret = ff_qsv_map_frame_to_surface(frame->frame, &frame->surface);
470  if (ret < 0) {
471  av_log(avctx, AV_LOG_ERROR, "map frame to surface failed.\n");
472  return ret;
473  }
474  }
475 
476  frame->surface.Info = q->frame_info;
477 
478  if (q->frames_ctx.mids) {
479  ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
480  if (ret < 0)
481  return ret;
482 
483  frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
484  }
485 
486  frame->surface.Data.ExtParam = frame->ext_param;
487  frame->surface.Data.NumExtParam = 0;
488  frame->num_ext_params = 0;
489  frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
490  frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
491  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->dec_info);
492 #if QSV_VERSION_ATLEAST(1, 34)
493  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1) {
494  frame->av1_film_grain_param.Header.BufferId = MFX_EXTBUFF_AV1_FILM_GRAIN_PARAM;
495  frame->av1_film_grain_param.Header.BufferSz = sizeof(frame->av1_film_grain_param);
496  frame->av1_film_grain_param.FilmGrainFlags = 0;
497  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->av1_film_grain_param);
498  }
499 #endif
500 
501 #if QSV_VERSION_ATLEAST(1, 35)
502  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 35) && avctx->codec_id == AV_CODEC_ID_HEVC) {
503  frame->mdcv.Header.BufferId = MFX_EXTBUFF_MASTERING_DISPLAY_COLOUR_VOLUME;
504  frame->mdcv.Header.BufferSz = sizeof(frame->mdcv);
505  // The data in mdcv is valid when this flag is 1
506  frame->mdcv.InsertPayloadToggle = 0;
507  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->mdcv);
508 
509  frame->clli.Header.BufferId = MFX_EXTBUFF_CONTENT_LIGHT_LEVEL_INFO;
510  frame->clli.Header.BufferSz = sizeof(frame->clli);
511  // The data in clli is valid when this flag is 1
512  frame->clli.InsertPayloadToggle = 0;
513  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->clli);
514  }
515 #endif
516 
517  frame->used = 1;
518 
519  return 0;
520 }
521 
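/* Surfaces stay in the work_frames list while the SDK still holds a lock on
 * them or they sit in the async fifo; unused entries are recycled here before
 * get_surface() allocates a new one. */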
522 static void qsv_clear_unused_frames(QSVContext *q)
523 {
524  QSVFrame *cur = q->work_frames;
525  while (cur) {
526  if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
527  cur->used = 0;
528  av_frame_unref(cur->frame);
529  }
530  cur = cur->next;
531  }
532 }
533 
534 static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
535 {
536  QSVFrame *frame, **last;
537  int ret;
538 
539  qsv_clear_unused_frames(q);
540 
541  frame = q->work_frames;
542  last = &q->work_frames;
543  while (frame) {
544  if (!frame->used) {
545  ret = alloc_frame(avctx, q, frame);
546  if (ret < 0)
547  return ret;
548  *surf = &frame->surface;
549  return 0;
550  }
551 
552  last = &frame->next;
553  frame = frame->next;
554  }
555 
556  frame = av_mallocz(sizeof(*frame));
557  if (!frame)
558  return AVERROR(ENOMEM);
559  frame->frame = av_frame_alloc();
560  if (!frame->frame) {
561  av_freep(&frame);
562  return AVERROR(ENOMEM);
563  }
564  *last = frame;
565 
566  ret = alloc_frame(avctx, q, frame);
567  if (ret < 0)
568  return ret;
569 
570  *surf = &frame->surface;
571 
572  return 0;
573 }
574 
575 static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
576 {
577  QSVFrame *cur = q->work_frames;
578  while (cur) {
579  if (surf == &cur->surface)
580  return cur;
581  cur = cur->next;
582  }
583  return NULL;
584 }
585 
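/* Translate the AV1 film grain parameters reported by the runtime
 * (mfxExtAV1FilmGrainParam) into AVFilmGrainParams frame side data; stored
 * values are re-biased, e.g. the AR coefficients are transmitted as value+128. */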
586 #if QSV_VERSION_ATLEAST(1, 34)
587 static int qsv_export_film_grain(AVCodecContext *avctx, mfxExtAV1FilmGrainParam *ext_param, AVFrame *frame)
588 {
589  AVFilmGrainParams *fgp;
590  AVFilmGrainAOMParams *aom;
591  int i;
592 
593  if (!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_APPLY))
594  return 0;
595 
596  fgp = av_film_grain_params_create_side_data(frame);
597 
598  if (!fgp)
599  return AVERROR(ENOMEM);
600 
601  fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
602  fgp->seed = ext_param->GrainSeed;
603  aom = &fgp->codec.aom;
604 
605  aom->chroma_scaling_from_luma = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CHROMA_SCALING_FROM_LUMA);
606  aom->scaling_shift = ext_param->GrainScalingMinus8 + 8;
607  aom->ar_coeff_lag = ext_param->ArCoeffLag;
608  aom->ar_coeff_shift = ext_param->ArCoeffShiftMinus6 + 6;
609  aom->grain_scale_shift = ext_param->GrainScaleShift;
610  aom->overlap_flag = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_OVERLAP);
611  aom->limit_output_range = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CLIP_TO_RESTRICTED_RANGE);
612 
613  aom->num_y_points = ext_param->NumYPoints;
614 
615  for (i = 0; i < aom->num_y_points; i++) {
616  aom->y_points[i][0] = ext_param->PointY[i].Value;
617  aom->y_points[i][1] = ext_param->PointY[i].Scaling;
618  }
619 
620  aom->num_uv_points[0] = ext_param->NumCbPoints;
621 
622  for (i = 0; i < aom->num_uv_points[0]; i++) {
623  aom->uv_points[0][i][0] = ext_param->PointCb[i].Value;
624  aom->uv_points[0][i][1] = ext_param->PointCb[i].Scaling;
625  }
626 
627  aom->num_uv_points[1] = ext_param->NumCrPoints;
628 
629  for (i = 0; i < aom->num_uv_points[1]; i++) {
630  aom->uv_points[1][i][0] = ext_param->PointCr[i].Value;
631  aom->uv_points[1][i][1] = ext_param->PointCr[i].Scaling;
632  }
633 
634  for (i = 0; i < 24; i++)
635  aom->ar_coeffs_y[i] = ext_param->ArCoeffsYPlus128[i] - 128;
636 
637  for (i = 0; i < 25; i++) {
638  aom->ar_coeffs_uv[0][i] = ext_param->ArCoeffsCbPlus128[i] - 128;
639  aom->ar_coeffs_uv[1][i] = ext_param->ArCoeffsCrPlus128[i] - 128;
640  }
641 
642  aom->uv_mult[0] = ext_param->CbMult;
643  aom->uv_mult[1] = ext_param->CrMult;
644  aom->uv_mult_luma[0] = ext_param->CbLumaMult;
645  aom->uv_mult_luma[1] = ext_param->CrLumaMult;
646  aom->uv_offset[0] = ext_param->CbOffset;
647  aom->uv_offset[1] = ext_param->CrOffset;
648 
649  return 0;
650 }
651 #endif
652 
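/* Export HEVC mastering display colour volume and content light level SEI as
 * AVFrame side data; on the decode path the SDK re-uses InsertPayloadToggle
 * as the "payload present" flag. */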
653 #if QSV_VERSION_ATLEAST(1, 35)
654 static int qsv_export_hdr_side_data(AVCodecContext *avctx, mfxExtMasteringDisplayColourVolume *mdcv,
655  mfxExtContentLightLevelInfo *clli, AVFrame *frame)
656 {
657  int ret;
658 
659  // The SDK re-uses this flag for HDR SEI parsing
660  if (mdcv->InsertPayloadToggle) {
661  AVMasteringDisplayMetadata *mastering;
662  const int mapping[3] = {2, 0, 1};
663  const int chroma_den = 50000;
664  const int luma_den = 10000;
665  int i;
666 
667  ret = ff_decode_mastering_display_new(avctx, frame, &mastering);
668  if (ret < 0)
669  return ret;
670 
671  if (mastering) {
672  for (i = 0; i < 3; i++) {
673  const int j = mapping[i];
674  mastering->display_primaries[i][0] = av_make_q(mdcv->DisplayPrimariesX[j], chroma_den);
675  mastering->display_primaries[i][1] = av_make_q(mdcv->DisplayPrimariesY[j], chroma_den);
676  }
677 
678  mastering->white_point[0] = av_make_q(mdcv->WhitePointX, chroma_den);
679  mastering->white_point[1] = av_make_q(mdcv->WhitePointY, chroma_den);
680 
681  mastering->max_luminance = av_make_q(mdcv->MaxDisplayMasteringLuminance, luma_den);
682  mastering->min_luminance = av_make_q(mdcv->MinDisplayMasteringLuminance, luma_den);
683 
684  mastering->has_luminance = 1;
685  mastering->has_primaries = 1;
686  }
687  }
688 
689  // The SDK re-uses this flag for HDR SEI parsing
690  if (clli->InsertPayloadToggle) {
691  AVContentLightMetadata *light;
692 
693  ret = ff_decode_content_light_new(avctx, frame, &light);
694  if (ret < 0)
695  return ret;
696 
697  if (light) {
698  light->MaxCLL = clli->MaxContentLightLevel;
699  light->MaxFALL = clli->MaxPicAverageLightLevel;
700  }
701  }
702 
703  return 0;
704 }
705 
706 #endif
707 
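/* Core decode step: feed the packet to MFXVideoDECODE_DecodeFrameAsync()
 * (retrying on DEVICE_BUSY / MORE_SURFACE), queue the returned sync point in
 * async_fifo, and once async_depth frames are pending (or when draining) sync
 * and output the oldest one. The return value is the number of consumed
 * bytes (bs.DataOffset). */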
708 static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
709  AVFrame *frame, int *got_frame,
710  const AVPacket *avpkt)
711 {
712  mfxFrameSurface1 *insurf;
713  mfxFrameSurface1 *outsurf;
714  mfxSyncPoint *sync;
715  mfxBitstream bs = { { { 0 } } };
716  int ret;
717 
718  if (avpkt->size) {
719  bs.Data = avpkt->data;
720  bs.DataLength = avpkt->size;
721  bs.MaxLength = bs.DataLength;
722  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
723  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
724  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
725  }
726 
727  sync = av_mallocz(sizeof(*sync));
728  if (!sync) {
729  av_freep(&sync);
730  return AVERROR(ENOMEM);
731  }
732 
733  do {
734  ret = get_surface(avctx, q, &insurf);
735  if (ret < 0) {
736  av_freep(&sync);
737  return ret;
738  }
739 
740  ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
741  insurf, &outsurf, sync);
742  if (ret == MFX_WRN_DEVICE_BUSY)
743  av_usleep(500);
744 
745  } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
746 
747  if (ret == MFX_ERR_INCOMPATIBLE_VIDEO_PARAM) {
748  q->reinit_flag = 1;
749  av_log(avctx, AV_LOG_DEBUG, "Video parameter change\n");
750  av_freep(&sync);
751  return 0;
752  }
753 
754  if (ret != MFX_ERR_NONE &&
755  ret != MFX_ERR_MORE_DATA &&
756  ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
757  ret != MFX_ERR_MORE_SURFACE) {
758  av_freep(&sync);
759  return ff_qsv_print_error(avctx, ret,
760  "Error during QSV decoding.");
761  }
762 
763  /* make sure we do not enter an infinite loop if the SDK
764  * did not consume any data and did not return anything */
765  if (!*sync && !bs.DataOffset) {
766  bs.DataOffset = avpkt->size;
767  ++q->zero_consume_run;
768  if (q->zero_consume_run > 1 &&
769  (avpkt->size ||
770  ret != MFX_ERR_MORE_DATA))
771  ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
772  } else {
773  q->zero_consume_run = 0;
774  }
775 
776  if (*sync) {
777  QSVAsyncFrame aframe;
778  QSVFrame *out_frame = find_frame(q, outsurf);
779 
780  if (!out_frame) {
781  av_log(avctx, AV_LOG_ERROR,
782  "The returned surface does not correspond to any frame\n");
783  av_freep(&sync);
784  return AVERROR_BUG;
785  }
786 
787  out_frame->queued += 1;
788 
789  aframe = (QSVAsyncFrame){ sync, out_frame };
790  av_fifo_write(q->async_fifo, &aframe, 1);
791  } else {
792  av_freep(&sync);
793  }
794 
795  if ((av_fifo_can_read(q->async_fifo) >= q->async_depth) ||
796  (!avpkt->size && av_fifo_can_read(q->async_fifo))) {
797  QSVAsyncFrame aframe;
798  AVFrame *src_frame;
799 
800  av_fifo_read(q->async_fifo, &aframe, 1);
801  aframe.frame->queued -= 1;
802 
803  if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
804  do {
805  ret = MFXVideoCORE_SyncOperation(q->session, *aframe.sync, 1000);
806  } while (ret == MFX_WRN_IN_EXECUTION);
807  }
808 
809  av_freep(&aframe.sync);
810 
811  src_frame = aframe.frame->frame;
812 
813  ret = av_frame_ref(frame, src_frame);
814  if (ret < 0)
815  return ret;
816 
817  outsurf = &aframe.frame->surface;
818 
819  frame->pts = MFX_PTS_TO_PTS(outsurf->Data.TimeStamp, avctx->pkt_timebase);
820 #if QSV_VERSION_ATLEAST(1, 34)
821  if ((avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
822  QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) &&
823  avctx->codec_id == AV_CODEC_ID_AV1) {
824  ret = qsv_export_film_grain(avctx, &aframe.frame->av1_film_grain_param, frame);
825 
826  if (ret < 0)
827  return ret;
828  }
829 #endif
830 
831 #if QSV_VERSION_ATLEAST(1, 35)
832  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 35) && avctx->codec_id == AV_CODEC_ID_HEVC) {
833  ret = qsv_export_hdr_side_data(avctx, &aframe.frame->mdcv, &aframe.frame->clli, frame);
834 
835  if (ret < 0)
836  return ret;
837  }
838 #endif
839 
840  frame->repeat_pict =
841  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
842  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
843  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
844  frame->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST *
845  !!(outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF);
846  frame->flags |= AV_FRAME_FLAG_INTERLACED *
847  !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
848  frame->pict_type = ff_qsv_map_pictype(aframe.frame->dec_info.FrameType);
849  // An IDR frame marks a key frame only for H.264; for HEVC, all IRAPs are key frames.
850  if (avctx->codec_id == AV_CODEC_ID_H264) {
851  if (aframe.frame->dec_info.FrameType & MFX_FRAMETYPE_IDR)
852  frame->flags |= AV_FRAME_FLAG_KEY;
853  else
854  frame->flags &= ~AV_FRAME_FLAG_KEY;
855  }
856 
857  /* update the surface properties */
858  if (avctx->pix_fmt == AV_PIX_FMT_QSV)
859  ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
860 
861  *got_frame = 1;
862  }
863 
864  return bs.DataOffset;
865 }
866 
867 static void qsv_decode_close_qsvcontext(QSVContext *q)
868 {
869  QSVFrame *cur = q->work_frames;
870 
871  if (q->session)
872  MFXVideoDECODE_Close(q->session);
873 
874  if (q->async_fifo) {
875  QSVAsyncFrame aframe;
876  while (av_fifo_read(q->async_fifo, &aframe, 1) >= 0)
877  av_freep(&aframe.sync);
878  av_fifo_freep2(&q->async_fifo);
879  }
880 
881  while (cur) {
882  q->work_frames = cur->next;
883  av_frame_free(&cur->frame);
884  av_freep(&cur);
885  cur = q->work_frames;
886  }
887 
888  ff_qsv_close_internal_session(&q->internal_qs);
889 
890  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
891  av_buffer_unref(&q->frames_ctx.mids_buf);
892  av_buffer_pool_uninit(&q->pool);
893 }
894 
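/* Wrapper around qsv_decode(): decodes the header and (re)initializes the
 * session/context when needed, flushing buffered output first when a
 * parameter change was signalled, then hands the packet to qsv_decode(). */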
895 static int qsv_process_data(AVCodecContext *avctx, QSVContext *q,
896  AVFrame *frame, int *got_frame, const AVPacket *pkt)
897 {
898  int ret;
899  mfxVideoParam param = { 0 };
900  enum AVPixelFormat pix_fmt = AV_PIX_FMT_NV12;
901 
902  if (!pkt->size)
903  return qsv_decode(avctx, q, frame, got_frame, pkt);
904 
905  /* TODO: flush delayed frames on reinit */
906 
907  // sw_pix_fmt and coded_width/height should be set for ff_get_format();
908  // assume sw_pix_fmt is NV12 and coded_width/height are 1280x720.
909  // The assumption may not be correct, but it is updated after the header is decoded.
910  if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
911  pix_fmt = q->orig_pix_fmt;
912  if (!avctx->coded_width)
913  avctx->coded_width = 1280;
914  if (!avctx->coded_height)
915  avctx->coded_height = 720;
916 
917  /* decode zero-size pkt to flush the buffered pkt before reinit */
918  if (q->reinit_flag) {
919  AVPacket zero_pkt = {0};
920  ret = qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
921  if (ret < 0 || *got_frame)
922  return ret;
923  }
924 
925  if (q->reinit_flag || !q->session || !q->initialized) {
926  mfxFrameAllocRequest request;
927  memset(&request, 0, sizeof(request));
928 
929  q->reinit_flag = 0;
930  ret = qsv_decode_header(avctx, q, pkt, pix_fmt, &param);
931  if (ret < 0) {
932  if (ret == AVERROR(EAGAIN))
933  av_log(avctx, AV_LOG_VERBOSE, "More data is required to decode header\n");
934  else
935  av_log(avctx, AV_LOG_ERROR, "Error decoding header\n");
936  goto reinit_fail;
937  }
938  param.IOPattern = q->iopattern;
939 
940  q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC);
941 
942  avctx->coded_width = param.mfx.FrameInfo.Width;
943  avctx->coded_height = param.mfx.FrameInfo.Height;
944 
945  ret = MFXVideoDECODE_QueryIOSurf(q->session, &param, &request);
946  if (ret < 0)
947  return ff_qsv_print_error(avctx, ret, "Error querying IO surface");
948 
949  q->suggest_pool_size = request.NumFrameSuggested;
950 
951  ret = qsv_decode_preinit(avctx, q, pix_fmt, &param);
952  if (ret < 0)
953  goto reinit_fail;
954  q->initialized = 0;
955  }
956 
957  if (!q->initialized) {
958  ret = qsv_decode_init_context(avctx, q, &param);
959  if (ret < 0)
960  goto reinit_fail;
961  q->initialized = 1;
962  }
963 
964  return qsv_decode(avctx, q, frame, got_frame, pkt);
965 
966 reinit_fail:
967  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
968  return ret;
969 }
970 
971 enum LoadPlugin {
972  LOAD_PLUGIN_NONE,
973  LOAD_PLUGIN_HEVC_SW,
974  LOAD_PLUGIN_HEVC_HW,
975 };
976 
977 typedef struct QSVDecContext {
978  AVClass *class;
979  QSVContext qsv;
980 
981  int load_plugin;
982 
983  AVFifo *packet_fifo;
984 
985  AVPacket buffer_pkt;
986 } QSVDecContext;
987 
988 static void qsv_clear_buffers(QSVDecContext *s)
989 {
990  AVPacket pkt;
991  while (av_fifo_read(s->packet_fifo, &pkt, 1) >= 0)
992  av_packet_unref(&pkt);
993 
994  av_packet_unref(&s->buffer_pkt);
995 }
996 
997 static av_cold int qsv_decode_close(AVCodecContext *avctx)
998 {
999  QSVDecContext *s = avctx->priv_data;
1000 
1001  qsv_decode_close_qsvcontext(&s->qsv);
1002 
1003  qsv_clear_buffers(s);
1004 
1005  av_fifo_freep2(&s->packet_fifo);
1006 
1007  return 0;
1008 }
1009 
1010 static av_cold int qsv_decode_init(AVCodecContext *avctx)
1011 {
1012  QSVDecContext *s = avctx->priv_data;
1013  int ret;
1014  const char *uid = NULL;
1015 
1016  if (avctx->codec_id == AV_CODEC_ID_VP8) {
1017  uid = "f622394d8d87452f878c51f2fc9b4131";
1018  } else if (avctx->codec_id == AV_CODEC_ID_VP9) {
1019  uid = "a922394d8d87452f878c51f2fc9b4131";
1020  }
1021  else if (avctx->codec_id == AV_CODEC_ID_HEVC && s->load_plugin != LOAD_PLUGIN_NONE) {
1022  static const char * const uid_hevcdec_sw = "15dd936825ad475ea34e35f3f54217a6";
1023  static const char * const uid_hevcdec_hw = "33a61c0b4c27454ca8d85dde757c6f8e";
1024 
1025  if (s->qsv.load_plugins[0]) {
1026  av_log(avctx, AV_LOG_WARNING,
1027  "load_plugins is not empty, but load_plugin is not set to 'none'. "
1028  "The load_plugin value will be ignored.\n");
1029  } else {
1030  if (s->load_plugin == LOAD_PLUGIN_HEVC_SW)
1031  uid = uid_hevcdec_sw;
1032  else
1033  uid = uid_hevcdec_hw;
1034  }
1035  }
1036  if (uid) {
1037  av_freep(&s->qsv.load_plugins);
1038  s->qsv.load_plugins = av_strdup(uid);
1039  if (!s->qsv.load_plugins)
1040  return AVERROR(ENOMEM);
1041  }
1042 
1043  s->qsv.orig_pix_fmt = AV_PIX_FMT_NV12;
1044  s->packet_fifo = av_fifo_alloc2(1, sizeof(AVPacket),
1045  AV_FIFO_FLAG_AUTO_GROW);
1046  if (!s->packet_fifo) {
1047  ret = AVERROR(ENOMEM);
1048  goto fail;
1049  }
1050 
1051  if (!avctx->pkt_timebase.num)
1052  av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");
1053 
1054  return 0;
1055 fail:
1056  qsv_decode_close(avctx);
1057  return ret;
1058 }
1059 
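/* Decode entry point: input packets are buffered in packet_fifo so a packet
 * that was only partially consumed can be resumed on the next call; the loop
 * below feeds buffer_pkt to qsv_process_data() until a frame comes out or the
 * buffered data runs out. */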
1060 static int qsv_decode_frame(AVCodecContext *avctx, AVFrame *frame,
1061  int *got_frame, AVPacket *avpkt)
1062 {
1063  QSVDecContext *s = avctx->priv_data;
1064  int ret;
1065 
1066  /* buffer the input packet */
1067  if (avpkt->size) {
1068  AVPacket input_ref;
1069 
1070  ret = av_packet_ref(&input_ref, avpkt);
1071  if (ret < 0)
1072  return ret;
1073  av_fifo_write(s->packet_fifo, &input_ref, 1);
1074  }
1075 
1076  /* process buffered data */
1077  while (!*got_frame) {
1078  /* prepare the input data */
1079  if (s->buffer_pkt.size <= 0) {
1080  /* no more data */
1081  if (!av_fifo_can_read(s->packet_fifo))
1082  return avpkt->size ? avpkt->size : qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt);
1083  /* in progress of reinit, no read from fifo and keep the buffer_pkt */
1084  if (!s->qsv.reinit_flag) {
1085  av_packet_unref(&s->buffer_pkt);
1086  av_fifo_read(s->packet_fifo, &s->buffer_pkt, 1);
1087  }
1088  }
1089 
1090  ret = qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->buffer_pkt);
1091  if (ret < 0){
1092  if (ret == AVERROR(EAGAIN))
1093  ret = 0;
1094 
1095  /* Drop buffer_pkt when failed to decode the packet. Otherwise,
1096  the decoder will keep decoding the failure packet. */
1097  av_packet_unref(&s->buffer_pkt);
1098  return ret;
1099  }
1100  if (s->qsv.reinit_flag)
1101  continue;
1102 
1103  s->buffer_pkt.size -= ret;
1104  s->buffer_pkt.data += ret;
1105  }
1106 
1107  return avpkt->size;
1108 }
1109 
1110 static void qsv_decode_flush(AVCodecContext *avctx)
1111 {
1112  QSVDecContext *s = avctx->priv_data;
1113 
1114  qsv_clear_buffers(s);
1115 
1116  s->qsv.orig_pix_fmt = AV_PIX_FMT_NONE;
1117  s->qsv.initialized = 0;
1118 }
1119 
1120 #define OFFSET(x) offsetof(QSVDecContext, x)
1121 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1122 
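/* Each wrapper decoder is generated by this macro: an AVClass with the given
 * options plus an FFCodec entry named <codec>_qsv, optionally chained with a
 * bitstream filter (e.g. h264_mp4toannexb) to get Annex B input. */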
1123 #define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt) \
1124 static const AVClass x##_qsv_class = { \
1125  .class_name = #x "_qsv", \
1126  .item_name = av_default_item_name, \
1127  .option = opt, \
1128  .version = LIBAVUTIL_VERSION_INT, \
1129 }; \
1130 const FFCodec ff_##x##_qsv_decoder = { \
1131  .p.name = #x "_qsv", \
1132  CODEC_LONG_NAME(#X " video (Intel Quick Sync Video acceleration)"), \
1133  .priv_data_size = sizeof(QSVDecContext), \
1134  .p.type = AVMEDIA_TYPE_VIDEO, \
1135  .p.id = AV_CODEC_ID_##X, \
1136  .init = qsv_decode_init, \
1137  FF_CODEC_DECODE_CB(qsv_decode_frame), \
1138  .flush = qsv_decode_flush, \
1139  .close = qsv_decode_close, \
1140  .bsfs = bsf_name, \
1141  .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HYBRID, \
1142  .p.priv_class = &x##_qsv_class, \
1143  .hw_configs = qsv_hw_configs, \
1144  .p.wrapper_name = "qsv", \
1145  .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, \
1146 }; \
1147 
1148 #define DEFINE_QSV_DECODER(x, X, bsf_name) DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, options)
1149 
1150 #if CONFIG_HEVC_QSV_DECODER
1151 static const AVOption hevc_options[] = {
1152  { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
1153 
1154  { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VD, .unit = "load_plugin" },
1155  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE }, 0, 0, VD, .unit = "load_plugin" },
1156  { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VD, .unit = "load_plugin" },
1157  { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VD, .unit = "load_plugin" },
1158 
1159  { "load_plugins", "A :-separated list of hexadecimal plugin UIDs to load in an internal session",
1160  OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD },
1161 
1162  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, .unit = "gpu_copy"},
1163  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, .unit = "gpu_copy"},
1164  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, .unit = "gpu_copy"},
1165  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, .unit = "gpu_copy"},
1166  { NULL },
1167 };
1168 DEFINE_QSV_DECODER_WITH_OPTION(hevc, HEVC, "hevc_mp4toannexb", hevc_options)
1169 #endif
1170 
1171 static const AVOption options[] = {
1172  { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
1173 
1174  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, .unit = "gpu_copy"},
1175  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, .unit = "gpu_copy"},
1176  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, .unit = "gpu_copy"},
1177  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, .unit = "gpu_copy"},
1178  { NULL },
1179 };
1180 
1181 #if CONFIG_H264_QSV_DECODER
1182 DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb")
1183 #endif
1184 
1185 #if CONFIG_MPEG2_QSV_DECODER
1186 DEFINE_QSV_DECODER(mpeg2, MPEG2VIDEO, NULL)
1187 #endif
1188 
1189 #if CONFIG_VC1_QSV_DECODER
1190 DEFINE_QSV_DECODER(vc1, VC1, NULL)
1191 #endif
1192 
1193 #if CONFIG_MJPEG_QSV_DECODER
1194 DEFINE_QSV_DECODER(mjpeg, MJPEG, NULL)
1195 #endif
1196 
1197 #if CONFIG_VP8_QSV_DECODER
1198 DEFINE_QSV_DECODER(vp8, VP8, NULL)
1199 #endif
1200 
1201 #if CONFIG_VP9_QSV_DECODER
1202 DEFINE_QSV_DECODER(vp9, VP9, NULL)
1203 #endif
1204 
1205 #if CONFIG_AV1_QSV_DECODER
1206 DEFINE_QSV_DECODER(av1, AV1, NULL)
1207 #endif
hwconfig.h
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:427
AVQSVFramesContext::frame_type
int frame_type
A combination of MFX_MEMTYPE_* describing the frame pool.
Definition: hwcontext_qsv.h:60
AVCodecContext::hwaccel_context
void * hwaccel_context
Legacy hardware accelerator context.
Definition: avcodec.h:1451
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:283
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVMasteringDisplayMetadata::max_luminance
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:57
LOAD_PLUGIN_HEVC_HW
@ LOAD_PLUGIN_HEVC_HW
Definition: qsvdec.c:974
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
uid
UID uid
Definition: mxfenc.c:2421
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:685
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1220
QSVFramesContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: qsv_internal.h:115
AVBufferPool
The buffer pool.
Definition: buffer_internal.h:88
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: defs.h:200
options
static const AVOption options[]
Definition: qsvdec.c:1171
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:197
AVMasteringDisplayMetadata::has_luminance
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
Definition: mastering_display_metadata.h:67
LOAD_PLUGIN_NONE
@ LOAD_PLUGIN_NONE
Definition: qsvdec.c:972
AVFilmGrainAOMParams::uv_points
uint8_t uv_points[2][10][2]
Definition: film_grain_params.h:63
OFFSET
#define OFFSET(x)
Definition: qsvdec.c:1120
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:130
AVContentLightMetadata::MaxCLL
unsigned MaxCLL
Max content light level (cd/m^2).
Definition: mastering_display_metadata.h:102
AVFilmGrainParams::aom
AVFilmGrainAOMParams aom
Definition: film_grain_params.h:260
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:322
ff_qsv_close_internal_session
int ff_qsv_close_internal_session(QSVSession *qs)
Definition: qsv.c:1125
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:456
ff_qsv_map_pictype
enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type)
Definition: qsv.c:375
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:678
AVFrame::width
int width
Definition: frame.h:416
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:683
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:248
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:522
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:708
AVOption
AVOption.
Definition: opt.h:346
qsv_decode_close_qsvcontext
static void qsv_decode_close_qsvcontext(QSVContext *q)
Definition: qsvdec.c:867
ff_qsv_find_surface_idx
int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
Definition: qsv.c:344
AV_PIX_FMT_XV30
#define AV_PIX_FMT_XV30
Definition: pixfmt.h:534
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
QSVContext::work_frames
QSVFrame * work_frames
a linked list of frames currently being used by QSV
Definition: qsvdec.c:89
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:616
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:217
LOAD_PLUGIN_HEVC_SW
@ LOAD_PLUGIN_HEVC_SW
Definition: qsvdec.c:973
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:557
qsv_decode_init_context
static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
Definition: qsvdec.c:361
QSVFrame::frame
AVFrame * frame
Definition: qsv_internal.h:80
AVQSVContext::iopattern
int iopattern
The IO pattern to use.
Definition: qsv.h:46
QSVFrame::used
int used
Definition: qsv_internal.h:100
AVFilmGrainParams::seed
uint64_t seed
Seed to use for the synthesis process, if the codec allows for it.
Definition: film_grain_params.h:250
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:365
ff_qsv_init_session_device
int ff_qsv_init_session_device(AVCodecContext *avctx, mfxSession *psession, AVBufferRef *device_ref, const char *load_plugins, int gpu_copy)
Definition: qsv.c:1001
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:608
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:98
ff_qsv_map_frame_to_surface
int ff_qsv_map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
Definition: qsv.c:283
fifo.h
qsv_decode_flush
static void qsv_decode_flush(AVCodecContext *avctx)
Definition: qsvdec.c:1110
QSVContext::suggest_pool_size
int suggest_pool_size
Definition: qsvdec.c:99
fail
#define fail()
Definition: checkasm.h:179
qsv_decode
static int qsv_decode(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, const AVPacket *avpkt)
Definition: qsvdec.c:708
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
AVFilmGrainAOMParams::grain_scale_shift
int grain_scale_shift
Signals the down shift applied to the generated gaussian numbers during synthesis.
Definition: film_grain_params.h:99
QSVDecContext::qsv
QSVContext qsv
Definition: qsvdec.c:979
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:633
AVFilmGrainAOMParams::limit_output_range
int limit_output_range
Signals to clip to limited color levels after film grain application.
Definition: film_grain_params.h:122
AVFilmGrainAOMParams::num_y_points
int num_y_points
Number of points, and the scale and value for each point of the piecewise linear scaling function for...
Definition: film_grain_params.h:49
AVRational::num
int num
Numerator.
Definition: rational.h:59
QSVDecContext::packet_fifo
AVFifo * packet_fifo
Definition: qsvdec.c:983
QSVContext::async_fifo
AVFifo * async_fifo
Definition: qsvdec.c:91
QSVContext
Definition: qsvdec.c:75
qsv_internal.h
AVFilmGrainAOMParams
This structure describes how to handle film grain synthesis for AOM codecs.
Definition: film_grain_params.h:44
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:302
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:118
get_surface
static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
Definition: qsvdec.c:534
AV_PIX_FMT_Y210
#define AV_PIX_FMT_Y210
Definition: pixfmt.h:532
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:671
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
ff_qsv_print_warning
int ff_qsv_print_warning(void *log_ctx, mfxStatus err, const char *warning_string)
Definition: qsv.c:194
ASYNC_DEPTH_DEFAULT
#define ASYNC_DEPTH_DEFAULT
Definition: qsv_internal.h:50
film_grain_params.h
av_cold
#define av_cold
Definition: attributes.h:90
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
AVHWFramesContext::height
int height
Definition: hwcontext.h:217
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:595
QSVDecContext
Definition: qsvdec.c:977
QSVContext::iopattern
int iopattern
Definition: qsvdec.c:104
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:390
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
s
#define s(width, name)
Definition: cbs_vp9.c:198
hevc_options
static const AVOption hevc_options[]
Definition: videotoolboxenc.c:2938
QSVContext::reinit_flag
int reinit_flag
Definition: qsvdec.c:93
QSVContext::frames_ctx
QSVFramesContext frames_ctx
Definition: qsvdec.c:84
QSVContext::internal_qs
QSVSession internal_qs
Definition: qsvdec.c:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:425
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
mfx_tb
static const AVRational mfx_tb
Definition: qsvdec.c:60
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:220
QSVContext::ver
mfxVersion ver
Definition: qsvdec.c:78
qsv_process_data
static int qsv_process_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: qsvdec.c:895
QSV_RUNTIME_VERSION_ATLEAST
#define QSV_RUNTIME_VERSION_ATLEAST(MFX_VERSION, MAJOR, MINOR)
Definition: qsv_internal.h:63
av_film_grain_params_create_side_data
AVFilmGrainParams * av_film_grain_params_create_side_data(AVFrame *frame)
Allocate a complete AVFilmGrainParams and add it to the frame.
Definition: film_grain_params.c:32
MFX_PTS_TO_PTS
#define MFX_PTS_TO_PTS(mfx_pts, pts_tb)
Definition: qsvdec.c:66
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
decode.h
AVCodecHWConfig::pix_fmt
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:343
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AVQSVContext::nb_ext_buffers
int nb_ext_buffers
Definition: qsv.h:52
frame
static AVFrame * frame
Definition: demux_decode.c:54
ff_decode_mastering_display_new
int ff_decode_mastering_display_new(const AVCodecContext *avctx, AVFrame *frame, AVMasteringDisplayMetadata **mdm)
Wrapper around av_mastering_display_metadata_create_side_data(), which rejects side data overridden b...
Definition: decode.c:1862
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
if
if(ret)
Definition: filter_design.txt:179
ff_qsv_init_session_frames
int ff_qsv_init_session_frames(AVCodecContext *avctx, mfxSession *psession, QSVFramesContext *qsv_frames_ctx, const char *load_plugins, int opaque, int gpu_copy)
Definition: qsv.c:1078
QSVFrame
Definition: qsv_internal.h:79
AVFilmGrainAOMParams::uv_mult_luma
int uv_mult_luma[2]
Definition: film_grain_params.h:106
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:210
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:695
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_CODEC_ID_AV1
@ AV_CODEC_ID_AV1
Definition: codec_id.h:280
qsv.h
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
QSV_HAVE_OPAQUE
#define QSV_HAVE_OPAQUE
Definition: qsv_internal.h:68
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
qsv_decode_preinit
static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
Definition: qsvdec.c:265
ff_qsv_print_iopattern
int ff_qsv_print_iopattern(void *log_ctx, int mfx_iopattern, const char *extra_string)
Definition: qsv.c:100
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:328
av_fifo_can_read
size_t av_fifo_can_read(const AVFifo *f)
Definition: fifo.c:87
qsv_get_continuous_buffer
static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferPool *pool)
Definition: qsvdec.c:126
QSVContext::nb_ext_buffers
int nb_ext_buffers
Definition: qsvdec.c:110
QSVFrame::surface
mfxFrameSurface1 surface
Definition: qsv_internal.h:81
time.h
QSVFramesContext::mids_buf
AVBufferRef * mids_buf
Definition: qsv_internal.h:122
alloc_frame
static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
Definition: qsvdec.c:454
AV_PIX_FMT_QSV
@ AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
Definition: pixfmt.h:247
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:435
QSVContext::load_plugins
char * load_plugins
Definition: qsvdec.c:107
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1783
QSVContext::initialized
int initialized
Definition: qsvdec.c:100
QSVContext::fourcc
uint32_t fourcc
Definition: qsvdec.c:96
QSVContext::ext_buffers
mfxExtBuffer ** ext_buffers
Definition: qsvdec.c:109
AVFilmGrainAOMParams::num_uv_points
int num_uv_points[2]
If chroma_scaling_from_luma is set to 0, signals the chroma scaling function parameters.
Definition: film_grain_params.h:62
QSVContext::frame_info
mfxFrameInfo frame_info
Definition: qsvdec.c:97
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:446
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1569
AVPacket::size
int size
Definition: packet.h:523
AVFifo
Definition: fifo.c:35
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:354
AVCodecContext::extra_hw_frames
int extra_hw_frames
Video decoding only.
Definition: avcodec.h:1520
DEFINE_QSV_DECODER_WITH_OPTION
#define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt)
Definition: qsvdec.c:1123
codec_internal.h
AV_PIX_FMT_P012
#define AV_PIX_FMT_P012
Definition: pixfmt.h:529
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
AVQSVContext::session
mfxSession session
If non-NULL, the session to use for encoding or decoding.
Definition: qsv.h:41
qsv_init_session
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session, AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
Definition: qsvdec.c:181
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:551
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
qsv_decode_close
static av_cold int qsv_decode_close(AVCodecContext *avctx)
Definition: qsvdec.c:997
AVFilmGrainParams
This structure describes how to handle film grain synthesis in video for specific codecs.
Definition: film_grain_params.h:238
qsv_clear_unused_frames
static void qsv_clear_unused_frames(QSVContext *q)
Definition: qsvdec.c:522
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:431
AVCodecHWConfigInternal
Definition: hwconfig.h:25
AVFilmGrainParams::codec
union AVFilmGrainParams::@357 codec
Additional fields may be added both here and in any structure included.
frame.h
AV_PIX_FMT_Y212
#define AV_PIX_FMT_Y212
Definition: pixfmt.h:533
AVQSVContext::ext_buffers
mfxExtBuffer ** ext_buffers
Extra buffers to pass to encoder or decoder initialization.
Definition: qsv.h:51
av_image_get_buffer_size
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
Definition: imgutils.c:466
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
AVFilmGrainAOMParams::ar_coeffs_y
int8_t ar_coeffs_y[24]
Luma auto-regression coefficients.
Definition: film_grain_params.h:80
QSVFramesContext::mids
QSVMid * mids
Definition: qsv_internal.h:123
QSVAsyncFrame::frame
QSVFrame * frame
Definition: qsvdec.c:72
hwcontext_qsv.h
MFXUnload
#define MFXUnload(a)
Definition: qsvdec.c:57
qsv_decode_header
static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q, const AVPacket *avpkt, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
Definition: qsvdec.c:390
QSVContext::pool
AVBufferPool * pool
Definition: qsvdec.c:98
log.h
ff_qsv_map_picstruct
enum AVFieldOrder ff_qsv_map_picstruct(int mfx_pic_struct)
Definition: qsv.c:357
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:515
qsv_hw_configs
static const AVCodecHWConfigInternal *const qsv_hw_configs[]
Definition: qsvdec.c:113
QSVDecContext::buffer_pkt
AVPacket buffer_pkt
Definition: qsvdec.c:985
common.h
QSVContext::session
mfxSession session
Definition: qsvdec.c:77
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:226
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:576
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
AVFilmGrainAOMParams::scaling_shift
int scaling_shift
Specifies the shift applied to the chroma components.
Definition: film_grain_params.h:69
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1497
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:603
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:666
QSVDecContext::load_plugin
int load_plugin
Definition: qsvdec.c:981
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1475
avcodec.h
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:115
find_frame
static QSVFrame * find_frame(QSVContext *q, mfxFrameSurface1 *surf)
Definition: qsvdec.c:575
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
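A trivial sketch of the reference-counted buffer API; the 4096-byte size is arbitrary.

    #include <libavutil/buffer.h>

    static int buffer_demo(void)
    {
        AVBufferRef *buf = av_buffer_allocz(4096);   /* 4 KiB, zero-filled */
        if (!buf)
            return -1;
        /* buf->data points at the zeroed bytes, buf->size is 4096 */
        av_buffer_unref(&buf);                       /* drop the last reference */
        return 0;
    }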
ret
ret
Definition: filter_design.txt:187
pixfmt.h
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:96
QSVFrame::queued
int queued
Definition: qsv_internal.h:99
QSVContext::async_depth
int async_depth
Definition: qsvdec.c:103
QSVSession
Definition: qsv_internal.h:105
AVHWFramesContext::hwctx
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
Definition: hwcontext.h:150
ff_qsv_codec_id_to_mfx
int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id)
Definition: qsv.c:54
LoadPlugin
LoadPlugin
Definition: qsvdec.c:971
ff_decode_content_light_new
int ff_decode_content_light_new(const AVCodecContext *avctx, AVFrame *frame, AVContentLightMetadata **clm)
Wrapper around av_content_light_metadata_create_side_data(), which rejects side data overridden by th...
Definition: decode.c:1874
QSVContext::zero_consume_run
int zero_consume_run
Definition: qsvdec.c:92
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
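A small sketch of this FIFO API together with the auto-grow flag, using an illustrative two-pointer payload struct rather than the decoder's actual QSVAsyncFrame bookkeeping.

    #include <errno.h>
    #include <libavutil/error.h>
    #include <libavutil/fifo.h>

    typedef struct Pair { void *sync; void *frame; } Pair;  /* illustrative */

    static int fifo_demo(void)
    {
        AVFifo *fifo = av_fifo_alloc2(2, sizeof(Pair), AV_FIFO_FLAG_AUTO_GROW);
        Pair in = { 0 }, out;
        int ret;

        if (!fifo)
            return AVERROR(ENOMEM);
        ret = av_fifo_write(fifo, &in, 1);       /* enqueue one element */
        if (ret >= 0)
            ret = av_fifo_read(fifo, &out, 1);   /* dequeue it again */
        av_fifo_freep2(&fifo);
        return ret;
    }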
AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
The codec supports this format via the hw_frames_ctx interface.
Definition: codec.h:315
AV_HWDEVICE_TYPE_QSV
@ AV_HWDEVICE_TYPE_QSV
Definition: hwcontext.h:33
ff_decode_frame_props
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1461
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:416
AVFilmGrainAOMParams::ar_coeff_lag
int ar_coeff_lag
Specifies the auto-regression lag.
Definition: film_grain_params.h:74
QSVContext::orig_pix_fmt
enum AVPixelFormat orig_pix_fmt
Definition: qsvdec.c:95
AVFilmGrainAOMParams::y_points
uint8_t y_points[14][2]
Definition: film_grain_params.h:50
AVFilmGrainAOMParams::uv_offset
int uv_offset[2]
Offset used for component scaling function.
Definition: film_grain_params.h:112
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1639
AVQSVContext
This struct is used for communicating QSV parameters between libavcodec and the caller.
Definition: qsv.h:36
QSVSession::session
mfxSession session
Definition: qsv_internal.h:106
ff_qsv_map_fourcc
enum AVPixelFormat ff_qsv_map_fourcc(uint32_t fourcc)
Definition: qsv.c:203
AVFilmGrainAOMParams::uv_mult
int uv_mult[2]
Specifies the luma/chroma multipliers for the index to the component scaling function.
Definition: film_grain_params.h:105
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:1926
qsv_decode_frame
static int qsv_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: qsvdec.c:1060
AVMasteringDisplayMetadata::min_luminance
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:52
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:528
AVFilmGrainAOMParams::overlap_flag
int overlap_flag
Signals whether to overlap film grain blocks.
Definition: film_grain_params.h:117
AVQSVFramesContext
This struct is allocated as AVHWFramesContext.hwctx.
Definition: hwcontext_qsv.h:53
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:633
AVHWFramesContext::initial_pool_size
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:187
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
qsv_decode_init
static av_cold int qsv_decode_init(AVCodecContext *avctx)
Definition: qsvdec.c:1010
QSVFrame::dec_info
mfxExtDecodedFrameInfo dec_info
Definition: qsv_internal.h:83
mastering_display_metadata.h
ff_attach_decode_data
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1545
qsv_clear_buffers
static void qsv_clear_buffers(QSVDecContext *s)
Definition: qsvdec.c:988
DEFINE_QSV_DECODER
#define DEFINE_QSV_DECODER(x, X, bsf_name)
Definition: qsvdec.c:1148
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
QSVAsyncFrame::sync
mfxSyncPoint * sync
Definition: qsvdec.c:71
QSVFramesContext
Definition: qsv_internal.h:114
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AVContentLightMetadata::MaxFALL
unsigned MaxFALL
Max average light level per frame (cd/m^2).
Definition: mastering_display_metadata.h:107
AVPacket
This structure stores compressed data.
Definition: packet.h:499
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
imgutils.h
PTS_TO_MFX_PTS
#define PTS_TO_MFX_PTS(pts, pts_tb)
Definition: qsvdec.c:62
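The macro relies on av_rescale_q() to move timestamps onto the fixed 90 kHz MediaSDK clock, with AV_NOPTS_VALUE mapped to MFX_TIMESTAMP_UNKNOWN. Below is a worked sketch of that rescale, assuming a 1/25 s packet time base that is not taken from qsvdec.c.

    #include <stdint.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    static int64_t to_mfx_clock(int64_t pts)
    {
        const AVRational pkt_tb  = { 1, 25 };      /* assumed input time base */
        const AVRational mfx_90k = { 1, 90000 };   /* MediaSDK timestamp clock */
        return av_rescale_q(pts, pkt_tb, mfx_90k); /* e.g. 50 -> 180000 */
    }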
AV_PIX_FMT_XV36
#define AV_PIX_FMT_XV36
Definition: pixfmt.h:535
AV_CODEC_ID_VP8
@ AV_CODEC_ID_VP8
Definition: codec_id.h:192
hwcontext.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:389
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
VD
#define VD
Definition: qsvdec.c:1121
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
QSVContext::gpu_copy
int gpu_copy
Definition: qsvdec.c:105
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:664
QSVAsyncFrame
Definition: qsvdec.c:70
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
AVFilmGrainAOMParams::chroma_scaling_from_luma
int chroma_scaling_from_luma
Signals whether to derive the chroma scaling function from the luma.
Definition: film_grain_params.h:56
AV_PIX_FMT_VUYX
@ AV_PIX_FMT_VUYX
packed VUYX 4:4:4, 32bpp, Variant of VUYA where alpha channel is left undefined
Definition: pixfmt.h:406
QSVSession::loader
void * loader
Definition: qsv_internal.h:111
ff_qsv_frame_add_ext_param
void ff_qsv_frame_add_ext_param(AVCodecContext *avctx, QSVFrame *frame, mfxExtBuffer *param)
Definition: qsv.c:1143
AVCodecHWConfigInternal::public
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwconfig.h:30
AV_FILM_GRAIN_PARAMS_AV1
@ AV_FILM_GRAIN_PARAMS_AV1
The union is valid when interpreted as AVFilmGrainAOMParams (codec.aom)
Definition: film_grain_params.h:30
QSVFrame::next
struct QSVFrame * next
Definition: qsv_internal.h:102
ff_qsv_print_error
int ff_qsv_print_error(void *log_ctx, mfxStatus err, const char *error_string)
Definition: qsv.c:185
AVFilmGrainParams::type
enum AVFilmGrainParamsType type
Specifies the codec for which this structure is valid.
Definition: film_grain_params.h:242
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
ff_qsv_init_internal_session
int ff_qsv_init_internal_session(AVCodecContext *avctx, QSVSession *qs, const char *load_plugins, int gpu_copy)
Definition: qsv.c:677
AVFrame::repeat_pict
int repeat_pict
Number of fields in this frame which should be repeated, i.e.
Definition: frame.h:512
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:420
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:67
AVFilmGrainAOMParams::ar_coeff_shift
int ar_coeff_shift
Specifies the range of the auto-regressive coefficients.
Definition: film_grain_params.h:93
AVFilmGrainAOMParams::ar_coeffs_uv
int8_t ar_coeffs_uv[2][25]
Chroma auto-regression coefficients.
Definition: film_grain_params.h:86