FFmpeg
qsvdec.c
1 /*
2  * Intel MediaSDK QSV codec-independent code
3  *
4  * copyright (c) 2013 Luca Barbato
5  * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include <stdint.h>
27 #include <string.h>
28 #include <sys/types.h>
29 
30 #include <mfxvideo.h>
31 
32 #include "libavutil/common.h"
33 #include "libavutil/fifo.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/hwcontext_qsv.h"
37 #include "libavutil/mem.h"
38 #include "libavutil/log.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/pixfmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/imgutils.h"
43 #include "libavutil/film_grain_params.h"
44 #include "libavutil/mastering_display_metadata.h"
45 
46 #include "avcodec.h"
47 #include "codec_internal.h"
48 #include "internal.h"
49 #include "decode.h"
50 #include "hwconfig.h"
51 #include "qsv.h"
52 #include "qsv_internal.h"
53 
54 #if QSV_ONEVPL
55 #include <mfxdispatcher.h>
56 #else
57 #define MFXUnload(a) do { } while(0)
58 #endif
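/* With oneVPL, internal sessions are created through a dispatcher loader that
 * must be released with MFXUnload(); the legacy MediaSDK dispatcher has no
 * loader object, so MFXUnload() is stubbed out to a no-op there. */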
59 
60 static const AVRational mfx_tb = { 1, 90000 };
61 
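/* libmfx expresses timestamps in a fixed 90 kHz clock (mfx_tb above). The
 * macros below rescale between the caller's packet timebase and that clock,
 * map AV_NOPTS_VALUE <-> MFX_TIMESTAMP_UNKNOWN, and pass values through
 * unchanged when no packet timebase is set. */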
62 #define PTS_TO_MFX_PTS(pts, pts_tb) ((pts) == AV_NOPTS_VALUE ? \
63  MFX_TIMESTAMP_UNKNOWN : pts_tb.num ? \
64  av_rescale_q(pts, pts_tb, mfx_tb) : pts)
65 
66 #define MFX_PTS_TO_PTS(mfx_pts, pts_tb) ((mfx_pts) == MFX_TIMESTAMP_UNKNOWN ? \
67  AV_NOPTS_VALUE : pts_tb.num ? \
68  av_rescale_q(mfx_pts, mfx_tb, pts_tb) : mfx_pts)
69 
70 typedef struct QSVAsyncFrame {
71  mfxSyncPoint *sync;
72  QSVFrame *frame;
73 } QSVAsyncFrame;
74 
75 typedef struct QSVContext {
76  // the session used for decoding
77  mfxSession session;
78  mfxVersion ver;
79 
80  // the session we allocated internally, in case the caller did not provide
81  // one
82  QSVSession internal_qs;
83 
84  QSVFramesContext frames_ctx;
85 
86  /**
87  * a linked list of frames currently being used by QSV
88  */
89  QSVFrame *work_frames;
90 
91  AVFifo *async_fifo;
92  int zero_consume_run;
93  int reinit_flag;
94 
95  enum AVPixelFormat orig_pix_fmt;
96  uint32_t fourcc;
97  mfxFrameInfo frame_info;
98  AVBufferPool *pool;
99  int suggest_pool_size;
100  int initialized;
101 
102  // options set by the caller
103  int async_depth;
104  int iopattern;
105  int gpu_copy;
106 
107  char *load_plugins;
108 
109  mfxExtBuffer **ext_buffers;
110  int nb_ext_buffers;
111 } QSVContext;
112 
113 static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
114  &(const AVCodecHWConfigInternal) {
115  .public = {
116  .pix_fmt = AV_PIX_FMT_QSV,
117  .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
118  AV_CODEC_HW_CONFIG_METHOD_AD_HOC,
119  .device_type = AV_HWDEVICE_TYPE_QSV,
120  },
121  .hwaccel = NULL,
122  },
123  NULL
124 };
125 
126 static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame,
127  AVBufferPool *pool)
128 {
129  int ret = 0;
130 
131  ff_decode_frame_props(avctx, frame);
132 
133  frame->width = avctx->width;
134  frame->height = avctx->height;
135 
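 /* Per-format linesize: the width is padded to a multiple of 128 pixels and
  * (below) the second plane is placed at a height padded to 64 lines,
  * matching the buffer size the pool is created with in
  * qsv_decode_init_context(), so one pool buffer backs a whole frame. */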
136  switch (avctx->pix_fmt) {
137  case AV_PIX_FMT_NV12:
138  frame->linesize[0] = FFALIGN(avctx->width, 128);
139  break;
140  case AV_PIX_FMT_P010:
141  case AV_PIX_FMT_P012:
142  case AV_PIX_FMT_YUYV422:
143  frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
144  break;
145  case AV_PIX_FMT_Y210:
146  case AV_PIX_FMT_VUYX:
147  case AV_PIX_FMT_XV30:
148  case AV_PIX_FMT_Y212:
149  frame->linesize[0] = 4 * FFALIGN(avctx->width, 128);
150  break;
151  case AV_PIX_FMT_XV36:
152  frame->linesize[0] = 8 * FFALIGN(avctx->width, 128);
153  break;
154  default:
155  av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
156  return AVERROR(EINVAL);
157  }
158 
159  frame->buf[0] = av_buffer_pool_get(pool);
160  if (!frame->buf[0])
161  return AVERROR(ENOMEM);
162 
163  frame->data[0] = frame->buf[0]->data;
164  if (avctx->pix_fmt == AV_PIX_FMT_NV12 ||
165  avctx->pix_fmt == AV_PIX_FMT_P010 ||
166  avctx->pix_fmt == AV_PIX_FMT_P012) {
167  frame->linesize[1] = frame->linesize[0];
168  frame->data[1] = frame->data[0] +
169  frame->linesize[0] * FFALIGN(avctx->height, 64);
170  }
171 
172  ret = ff_attach_decode_data(frame);
173  if (ret < 0)
174  return ret;
175 
176  return 0;
177 }
178 
179 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
180  AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
181 {
182  int ret;
183 
184  if (q->gpu_copy == MFX_GPUCOPY_ON &&
185  !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
186  av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
187  "only works in system memory mode.\n");
188  q->gpu_copy = MFX_GPUCOPY_OFF;
189  }
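 /* Pick the MFX session in priority order: one supplied by the caller, one
  * derived from the frames context, one derived from the device context,
  * and finally a self-contained internal session. */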
190  if (session) {
191  q->session = session;
192  } else if (hw_frames_ref) {
193  if (q->internal_qs.session) {
194  MFXClose(q->internal_qs.session);
195  q->internal_qs.session = NULL;
196  }
197  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
198 
199  q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
200  if (!q->frames_ctx.hw_frames_ctx)
201  return AVERROR(ENOMEM);
202 
203  ret = ff_qsv_init_session_frames(avctx, &q->internal_qs.session,
204  &q->frames_ctx, q->load_plugins,
205 #if QSV_HAVE_OPAQUE
206  q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY,
207 #else
208  0,
209 #endif
210  q->gpu_copy);
211  if (ret < 0) {
212  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
213  return ret;
214  }
215 
216  q->session = q->internal_qs.session;
217  } else if (hw_device_ref) {
218  if (q->internal_qs.session) {
219  MFXClose(q->internal_qs.session);
220  q->internal_qs.session = NULL;
221  }
222 
223  ret = ff_qsv_init_session_device(avctx, &q->internal_qs.session,
224  hw_device_ref, q->load_plugins, q->gpu_copy);
225  if (ret < 0)
226  return ret;
227 
228  q->session = q->internal_qs.session;
229  } else {
230  if (!q->internal_qs.session) {
231  ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
232  q->load_plugins, q->gpu_copy);
233  if (ret < 0)
234  return ret;
235  }
236 
237  q->session = q->internal_qs.session;
238  }
239 
240  if (MFXQueryVersion(q->session, &q->ver) != MFX_ERR_NONE) {
241  av_log(avctx, AV_LOG_ERROR, "Error querying the session version. \n");
242  q->session = NULL;
243 
244  if (q->internal_qs.session) {
245  MFXClose(q->internal_qs.session);
246  q->internal_qs.session = NULL;
247  }
248 
249  if (q->internal_qs.loader) {
250  MFXUnload(q->internal_qs.loader);
251  q->internal_qs.loader = NULL;
252  }
253 
254  return AVERROR_EXTERNAL;
255  }
256 
257  /* make sure the decoder is uninitialized */
258  MFXVideoDECODE_Close(q->session);
259 
260  return 0;
261 }
262 
263 static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
264 {
265  mfxSession session = NULL;
266  int iopattern = 0;
267  int ret;
268  enum AVPixelFormat pix_fmts[3] = {
269  AV_PIX_FMT_QSV, /* opaque format in case of video memory output */
270  pix_fmt, /* system memory format obtained from bitstream parser */
271  AV_PIX_FMT_NONE };
272 
273  ret = ff_get_format(avctx, pix_fmts);
274  if (ret < 0) {
275  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
276  return ret;
277  }
278 
279  if (!q->async_fifo) {
280  q->async_fifo = av_fifo_alloc2(q->async_depth, sizeof(QSVAsyncFrame), 0);
281  if (!q->async_fifo)
282  return AVERROR(ENOMEM);
283  }
284 
285  if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
286  AVQSVContext *user_ctx = avctx->hwaccel_context;
287  session = user_ctx->session;
288  iopattern = user_ctx->iopattern;
289  q->ext_buffers = user_ctx->ext_buffers;
290  q->nb_ext_buffers = user_ctx->nb_ext_buffers;
291  }
292 
293  if (avctx->hw_device_ctx && !avctx->hw_frames_ctx && ret == AV_PIX_FMT_QSV) {
294  AVHWFramesContext *hwframes_ctx;
295  AVQSVFramesContext *frames_hwctx;
296 
297  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
298 
299  if (!avctx->hw_frames_ctx) {
300  av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
301  return AVERROR(ENOMEM);
302  }
303 
304  hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
305  frames_hwctx = hwframes_ctx->hwctx;
306  hwframes_ctx->width = FFALIGN(avctx->coded_width, 32);
307  hwframes_ctx->height = FFALIGN(avctx->coded_height, 32);
308  hwframes_ctx->format = AV_PIX_FMT_QSV;
309  hwframes_ctx->sw_format = avctx->sw_pix_fmt;
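 /* suggest_pool_size comes from MFXVideoDECODE_QueryIOSurf() in
  * qsv_process_data(); 16 extra surfaces give headroom for frames the
  * caller keeps referenced, plus any explicitly requested extra_hw_frames. */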
310  hwframes_ctx->initial_pool_size = q->suggest_pool_size + 16 + avctx->extra_hw_frames;
311  frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
312 
313  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
314 
315  if (ret < 0) {
316  av_log(NULL, AV_LOG_ERROR, "Error initializing a QSV frame pool\n");
317  av_buffer_unref(&avctx->hw_frames_ctx);
318  return ret;
319  }
320  }
321 
322  if (avctx->hw_frames_ctx) {
323  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
324  AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
325 
326  if (!iopattern) {
327 #if QSV_HAVE_OPAQUE
328  if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
329  iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
330  else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
331  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
332 #else
333  if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
334  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
335 #endif
336  }
337  }
338 
339  if (!iopattern)
340  iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
341  q->iopattern = iopattern;
342 
343  ff_qsv_print_iopattern(avctx, q->iopattern, "Decoder");
344 
345  ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
346  if (ret < 0) {
347  av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
348  return ret;
349  }
350 
351  param->IOPattern = q->iopattern;
352  param->AsyncDepth = q->async_depth;
353  param->ExtParam = q->ext_buffers;
354  param->NumExtParam = q->nb_ext_buffers;
355 
356  return 0;
357 }
358 
359 static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
360 {
361  int ret;
362 
363  avctx->width = param->mfx.FrameInfo.CropW;
364  avctx->height = param->mfx.FrameInfo.CropH;
365  avctx->coded_width = param->mfx.FrameInfo.Width;
366  avctx->coded_height = param->mfx.FrameInfo.Height;
367  avctx->level = param->mfx.CodecLevel;
368  avctx->profile = param->mfx.CodecProfile;
369  avctx->field_order = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct);
370  avctx->pix_fmt = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC);
371 
372  ret = MFXVideoDECODE_Init(q->session, param);
373  if (ret < 0)
374  return ff_qsv_print_error(avctx, ret,
375  "Error initializing the MFX video decoder");
376 
377  q->frame_info = param->mfx.FrameInfo;
378 
379  if (!avctx->hw_frames_ctx)
380  q->pool = av_buffer_pool_init(av_image_get_buffer_size(avctx->pix_fmt,
381  FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1), av_buffer_allocz);
382  return 0;
383 }
384 
385 static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q,
386  const AVPacket *avpkt, enum AVPixelFormat pix_fmt,
387  mfxVideoParam *param)
388 {
389  int ret;
390  mfxExtVideoSignalInfo video_signal_info = { 0 };
391  mfxExtBuffer *header_ext_params[1] = { (mfxExtBuffer *)&video_signal_info };
392  mfxBitstream bs = { 0 };
393 
394  if (avpkt->size) {
395  bs.Data = avpkt->data;
396  bs.DataLength = avpkt->size;
397  bs.MaxLength = bs.DataLength;
398  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
399  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
400  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
401  } else
402  return AVERROR_INVALIDDATA;
403 
404 
405  if(!q->session) {
406  ret = qsv_decode_preinit(avctx, q, pix_fmt, param);
407  if (ret < 0)
408  return ret;
409  }
410 
411  ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
412  if (ret < 0)
413  return ret;
414 
415  param->mfx.CodecId = ret;
416  video_signal_info.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
417  video_signal_info.Header.BufferSz = sizeof(video_signal_info);
418  // The SDK doesn't support other ext buffers when calling MFXVideoDECODE_DecodeHeader,
419  // so do not append this buffer to the existing buffer array
420  param->ExtParam = header_ext_params;
421  param->NumExtParam = 1;
422  ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
423  if (MFX_ERR_MORE_DATA == ret) {
424  return AVERROR(EAGAIN);
425  }
426  if (ret < 0)
427  return ff_qsv_print_error(avctx, ret,
428  "Error decoding stream header");
429 
430  avctx->color_range = video_signal_info.VideoFullRange ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
431 
432  if (video_signal_info.ColourDescriptionPresent) {
433  avctx->color_primaries = video_signal_info.ColourPrimaries;
434  avctx->color_trc = video_signal_info.TransferCharacteristics;
435  avctx->colorspace = video_signal_info.MatrixCoefficients;
436  }
437 
438  param->ExtParam = q->ext_buffers;
439  param->NumExtParam = q->nb_ext_buffers;
440 
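 /* If the caller asked for film grain to be exported as side data, stop the
  * runtime from applying the grain itself (FilmGrain = 0); the parameters
  * are then exported per frame by qsv_export_film_grain(). */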
441 #if QSV_VERSION_ATLEAST(1, 34)
442  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1)
443  param->mfx.FilmGrain = (avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) ? 0 : param->mfx.FilmGrain;
444 #endif
445 
446  return 0;
447 }
448 
449 static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
450 {
451  int ret;
452 
453  if (q->pool)
454  ret = qsv_get_continuous_buffer(avctx, frame->frame, q->pool);
455  else
456  ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
457 
458  if (ret < 0)
459  return ret;
460 
461  if (frame->frame->format == AV_PIX_FMT_QSV) {
462  frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
463  } else {
464  ret = ff_qsv_map_frame_to_surface(frame->frame, &frame->surface);
465  if (ret < 0) {
466  av_log(avctx, AV_LOG_ERROR, "map frame to surface failed.\n");
467  return ret;
468  }
469  }
470 
471  frame->surface.Info = q->frame_info;
472 
473  if (q->frames_ctx.mids) {
474  ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
475  if (ret < 0)
476  return ret;
477 
478  frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
479  }
480 
481  frame->surface.Data.ExtParam = frame->ext_param;
482  frame->surface.Data.NumExtParam = 0;
483  frame->num_ext_params = 0;
484  frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
485  frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
486  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->dec_info);
487 #if QSV_VERSION_ATLEAST(1, 34)
488  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) && avctx->codec_id == AV_CODEC_ID_AV1) {
489  frame->av1_film_grain_param.Header.BufferId = MFX_EXTBUFF_AV1_FILM_GRAIN_PARAM;
490  frame->av1_film_grain_param.Header.BufferSz = sizeof(frame->av1_film_grain_param);
491  frame->av1_film_grain_param.FilmGrainFlags = 0;
492  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->av1_film_grain_param);
493  }
494 #endif
495 
496 #if QSV_VERSION_ATLEAST(1, 35)
497  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 35) && avctx->codec_id == AV_CODEC_ID_HEVC) {
498  frame->mdcv.Header.BufferId = MFX_EXTBUFF_MASTERING_DISPLAY_COLOUR_VOLUME;
499  frame->mdcv.Header.BufferSz = sizeof(frame->mdcv);
500  // The data in mdcv is valid when this flag is 1
501  frame->mdcv.InsertPayloadToggle = 0;
502  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->mdcv);
503 
504  frame->clli.Header.BufferId = MFX_EXTBUFF_CONTENT_LIGHT_LEVEL_INFO;
505  frame->clli.Header.BufferSz = sizeof(frame->clli);
506  // The data in clli is valid when this flag is 1
507  frame->clli.InsertPayloadToggle = 0;
508  ff_qsv_frame_add_ext_param(avctx, frame, (mfxExtBuffer *)&frame->clli);
509  }
510 #endif
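 /* The ext buffers attached above (decoded-frame info plus, on new enough
  * runtimes, AV1 film grain and HEVC HDR metadata) are filled by the MFX
  * runtime during decoding and read back when the frame is output. */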
511 
512  frame->used = 1;
513 
514  return 0;
515 }
516 
517 static void qsv_clear_unused_frames(QSVContext *q)
518 {
519  QSVFrame *cur = q->work_frames;
520  while (cur) {
521  if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
522  cur->used = 0;
523  av_frame_unref(cur->frame);
524  }
525  cur = cur->next;
526  }
527 }
528 
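/* Hand out a free work surface: reuse an unused entry from the work_frames
 * list if possible, otherwise append a newly allocated one. */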
529 static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
530 {
531  QSVFrame *frame, **last;
532  int ret;
533 
534  qsv_clear_unused_frames(q);
535 
536  frame = q->work_frames;
537  last = &q->work_frames;
538  while (frame) {
539  if (!frame->used) {
540  ret = alloc_frame(avctx, q, frame);
541  if (ret < 0)
542  return ret;
543  *surf = &frame->surface;
544  return 0;
545  }
546 
547  last = &frame->next;
548  frame = frame->next;
549  }
550 
551  frame = av_mallocz(sizeof(*frame));
552  if (!frame)
553  return AVERROR(ENOMEM);
554  frame->frame = av_frame_alloc();
555  if (!frame->frame) {
556  av_freep(&frame);
557  return AVERROR(ENOMEM);
558  }
559  *last = frame;
560 
561  ret = alloc_frame(avctx, q, frame);
562  if (ret < 0)
563  return ret;
564 
565  *surf = &frame->surface;
566 
567  return 0;
568 }
569 
570 static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
571 {
572  QSVFrame *cur = q->work_frames;
573  while (cur) {
574  if (surf == &cur->surface)
575  return cur;
576  cur = cur->next;
577  }
578  return NULL;
579 }
580 
581 #if QSV_VERSION_ATLEAST(1, 34)
582 static int qsv_export_film_grain(AVCodecContext *avctx, mfxExtAV1FilmGrainParam *ext_param, AVFrame *frame)
583 {
584  AVFilmGrainParams *fgp;
585  AVFilmGrainAOMParams *aom;
586  int i;
587 
588  if (!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_APPLY))
589  return 0;
590 
591  fgp = av_film_grain_params_create_side_data(frame);
592 
593  if (!fgp)
594  return AVERROR(ENOMEM);
595 
596  fgp->type = AV_FILM_GRAIN_PARAMS_AV1;
597  fgp->seed = ext_param->GrainSeed;
598  aom = &fgp->codec.aom;
599 
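 /* Translate the MFX film grain fields into their AV1 (AOM) equivalents; the
  * *Minus8/*Minus6/*Plus128 field names mirror how the AV1 bitstream codes
  * these values, so the offsets below undo that bias. */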
600  aom->chroma_scaling_from_luma = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CHROMA_SCALING_FROM_LUMA);
601  aom->scaling_shift = ext_param->GrainScalingMinus8 + 8;
602  aom->ar_coeff_lag = ext_param->ArCoeffLag;
603  aom->ar_coeff_shift = ext_param->ArCoeffShiftMinus6 + 6;
604  aom->grain_scale_shift = ext_param->GrainScaleShift;
605  aom->overlap_flag = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_OVERLAP);
606  aom->limit_output_range = !!(ext_param->FilmGrainFlags & MFX_FILM_GRAIN_CLIP_TO_RESTRICTED_RANGE);
607 
608  aom->num_y_points = ext_param->NumYPoints;
609 
610  for (i = 0; i < aom->num_y_points; i++) {
611  aom->y_points[i][0] = ext_param->PointY[i].Value;
612  aom->y_points[i][1] = ext_param->PointY[i].Scaling;
613  }
614 
615  aom->num_uv_points[0] = ext_param->NumCbPoints;
616 
617  for (i = 0; i < aom->num_uv_points[0]; i++) {
618  aom->uv_points[0][i][0] = ext_param->PointCb[i].Value;
619  aom->uv_points[0][i][1] = ext_param->PointCb[i].Scaling;
620  }
621 
622  aom->num_uv_points[1] = ext_param->NumCrPoints;
623 
624  for (i = 0; i < aom->num_uv_points[1]; i++) {
625  aom->uv_points[1][i][0] = ext_param->PointCr[i].Value;
626  aom->uv_points[1][i][1] = ext_param->PointCr[i].Scaling;
627  }
628 
629  for (i = 0; i < 24; i++)
630  aom->ar_coeffs_y[i] = ext_param->ArCoeffsYPlus128[i] - 128;
631 
632  for (i = 0; i < 25; i++) {
633  aom->ar_coeffs_uv[0][i] = ext_param->ArCoeffsCbPlus128[i] - 128;
634  aom->ar_coeffs_uv[1][i] = ext_param->ArCoeffsCrPlus128[i] - 128;
635  }
636 
637  aom->uv_mult[0] = ext_param->CbMult;
638  aom->uv_mult[1] = ext_param->CrMult;
639  aom->uv_mult_luma[0] = ext_param->CbLumaMult;
640  aom->uv_mult_luma[1] = ext_param->CrLumaMult;
641  aom->uv_offset[0] = ext_param->CbOffset;
642  aom->uv_offset[1] = ext_param->CrOffset;
643 
644  return 0;
645 }
646 #endif
647 
648 #if QSV_VERSION_ATLEAST(1, 35)
649 static int qsv_export_hdr_side_data(AVCodecContext *avctx, mfxExtMasteringDisplayColourVolume *mdcv,
650  mfxExtContentLightLevelInfo *clli, AVFrame *frame)
651 {
652  // The SDK re-uses this flag for HDR SEI parsing
653  if (mdcv->InsertPayloadToggle) {
654  AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
655  const int mapping[3] = {2, 0, 1};
656  const int chroma_den = 50000;
657  const int luma_den = 10000;
658  int i;
659 
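 /* The MFX struct follows the mastering-display SEI: primaries in G, B, R
  * order in units of 0.00002 and luminance in units of 0.0001 cd/m^2;
  * mapping[] reorders to the R, G, B layout of AVMasteringDisplayMetadata
  * and the denominators convert the units. */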
660  if (!mastering)
661  return AVERROR(ENOMEM);
662 
663  for (i = 0; i < 3; i++) {
664  const int j = mapping[i];
665  mastering->display_primaries[i][0] = av_make_q(mdcv->DisplayPrimariesX[j], chroma_den);
666  mastering->display_primaries[i][1] = av_make_q(mdcv->DisplayPrimariesY[j], chroma_den);
667  }
668 
669  mastering->white_point[0] = av_make_q(mdcv->WhitePointX, chroma_den);
670  mastering->white_point[1] = av_make_q(mdcv->WhitePointY, chroma_den);
671 
672  mastering->max_luminance = av_make_q(mdcv->MaxDisplayMasteringLuminance, luma_den);
673  mastering->min_luminance = av_make_q(mdcv->MinDisplayMasteringLuminance, luma_den);
674 
675  mastering->has_luminance = 1;
676  mastering->has_primaries = 1;
677  }
678 
679  // The SDK re-uses this flag for HDR SEI parsing
680  if (clli->InsertPayloadToggle) {
681  AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
682 
683  if (!light)
684  return AVERROR(ENOMEM);
685 
686  light->MaxCLL = clli->MaxContentLightLevel;
687  light->MaxFALL = clli->MaxPicAverageLightLevel;
688  }
689 
690  return 0;
691 }
692 
693 #endif
694 
695 static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
696  AVFrame *frame, int *got_frame,
697  const AVPacket *avpkt)
698 {
699  mfxFrameSurface1 *insurf;
700  mfxFrameSurface1 *outsurf;
701  mfxSyncPoint *sync;
702  mfxBitstream bs = { { { 0 } } };
703  int ret;
704 
705  if (avpkt->size) {
706  bs.Data = avpkt->data;
707  bs.DataLength = avpkt->size;
708  bs.MaxLength = bs.DataLength;
709  bs.TimeStamp = PTS_TO_MFX_PTS(avpkt->pts, avctx->pkt_timebase);
710  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
711  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
712  }
713 
714  sync = av_mallocz(sizeof(*sync));
715  if (!sync) {
716  av_freep(&sync);
717  return AVERROR(ENOMEM);
718  }
719 
720  do {
721  ret = get_surface(avctx, q, &insurf);
722  if (ret < 0) {
723  av_freep(&sync);
724  return ret;
725  }
726 
727  ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
728  insurf, &outsurf, sync);
729  if (ret == MFX_WRN_DEVICE_BUSY)
730  av_usleep(500);
731 
732  } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
733 
734  if (ret == MFX_ERR_INCOMPATIBLE_VIDEO_PARAM) {
735  q->reinit_flag = 1;
736  av_log(avctx, AV_LOG_DEBUG, "Video parameter change\n");
737  av_freep(&sync);
738  return 0;
739  }
740 
741  if (ret != MFX_ERR_NONE &&
742  ret != MFX_ERR_MORE_DATA &&
743  ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
744  ret != MFX_ERR_MORE_SURFACE) {
745  av_freep(&sync);
746  return ff_qsv_print_error(avctx, ret,
747  "Error during QSV decoding.");
748  }
749 
750  /* make sure we do not enter an infinite loop if the SDK
751  * did not consume any data and did not return anything */
752  if (!*sync && !bs.DataOffset) {
753  bs.DataOffset = avpkt->size;
754  ++q->zero_consume_run;
755  if (q->zero_consume_run > 1)
756  ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
757  } else {
758  q->zero_consume_run = 0;
759  }
760 
761  if (*sync) {
762  QSVAsyncFrame aframe;
763  QSVFrame *out_frame = find_frame(q, outsurf);
764 
765  if (!out_frame) {
766  av_log(avctx, AV_LOG_ERROR,
767  "The returned surface does not correspond to any frame\n");
768  av_freep(&sync);
769  return AVERROR_BUG;
770  }
771 
772  out_frame->queued += 1;
773 
774  aframe = (QSVAsyncFrame){ sync, out_frame };
775  av_fifo_write(q->async_fifo, &aframe, 1);
776  } else {
777  av_freep(&sync);
778  }
779 
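 /* Once async_depth frames are queued (or on flush, when the packet is
  * empty), drain one frame: wait on its sync point for system-memory
  * output, then copy the surface properties and side data into the
  * returned AVFrame. */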
780  if ((av_fifo_can_read(q->async_fifo) >= q->async_depth) ||
781  (!avpkt->size && av_fifo_can_read(q->async_fifo))) {
782  QSVAsyncFrame aframe;
783  AVFrame *src_frame;
784 
785  av_fifo_read(q->async_fifo, &aframe, 1);
786  aframe.frame->queued -= 1;
787 
788  if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
789  do {
790  ret = MFXVideoCORE_SyncOperation(q->session, *aframe.sync, 1000);
791  } while (ret == MFX_WRN_IN_EXECUTION);
792  }
793 
794  av_freep(&aframe.sync);
795 
796  src_frame = aframe.frame->frame;
797 
798  ret = av_frame_ref(frame, src_frame);
799  if (ret < 0)
800  return ret;
801 
802  outsurf = &aframe.frame->surface;
803 
804  frame->pts = MFX_PTS_TO_PTS(outsurf->Data.TimeStamp, avctx->pkt_timebase);
805 #if QSV_VERSION_ATLEAST(1, 34)
806  if ((avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
807  QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 34) &&
808  avctx->codec_id == AV_CODEC_ID_AV1) {
809  ret = qsv_export_film_grain(avctx, &aframe.frame->av1_film_grain_param, frame);
810 
811  if (ret < 0)
812  return ret;
813  }
814 #endif
815 
816 #if QSV_VERSION_ATLEAST(1, 35)
817  if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 1, 35) && avctx->codec_id == AV_CODEC_ID_HEVC) {
818  ret = qsv_export_hdr_side_data(avctx, &aframe.frame->mdcv, &aframe.frame->clli, frame);
819 
820  if (ret < 0)
821  return ret;
822  }
823 #endif
824 
825  frame->repeat_pict =
826  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
827  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
828  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
829  frame->top_field_first =
830  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
831  frame->interlaced_frame =
832  !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
833  frame->pict_type = ff_qsv_map_pictype(aframe.frame->dec_info.FrameType);
834  // Marking only IDR frames as key frames is suitable for H.264 alone; for HEVC, all IRAP frames are key frames.
835  if (avctx->codec_id == AV_CODEC_ID_H264)
836  frame->key_frame = !!(aframe.frame->dec_info.FrameType & MFX_FRAMETYPE_IDR);
837 
838  /* update the surface properties */
839  if (avctx->pix_fmt == AV_PIX_FMT_QSV)
840  ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
841 
842  *got_frame = 1;
843  }
844 
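 /* The return value is the number of bitstream bytes the runtime consumed;
  * the caller advances its buffered packet by this amount. */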
845  return bs.DataOffset;
846 }
847 
848 static void qsv_decode_close_qsvcontext(QSVContext *q)
849 {
850  QSVFrame *cur = q->work_frames;
851 
852  if (q->session)
853  MFXVideoDECODE_Close(q->session);
854 
855  if (q->async_fifo) {
856  QSVAsyncFrame aframe;
857  while (av_fifo_read(q->async_fifo, &aframe, 1) >= 0)
858  av_freep(&aframe.sync);
859  av_fifo_freep2(&q->async_fifo);
860  }
861 
862  while (cur) {
863  q->work_frames = cur->next;
864  av_frame_free(&cur->frame);
865  av_freep(&cur);
866  cur = q->work_frames;
867  }
868 
869  ff_qsv_close_internal_session(&q->internal_qs);
870 
871  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
872  av_buffer_unref(&q->frames_ctx.mids_buf);
873  av_buffer_pool_uninit(&q->pool);
874 }
875 
876 static int qsv_process_data(AVCodecContext *avctx, QSVContext *q,
877  AVFrame *frame, int *got_frame, const AVPacket *pkt)
878 {
879  int ret;
880  mfxVideoParam param = { 0 };
881  enum AVPixelFormat pix_fmt = AV_PIX_FMT_NV12;
882 
883  if (!pkt->size)
884  return qsv_decode(avctx, q, frame, got_frame, pkt);
885 
886  /* TODO: flush delayed frames on reinit */
887 
888  // sw_pix_fmt and coded_width/height should be set for ff_get_format(),
889  // assume sw_pix_fmt is NV12 and coded_width/height to be 1280x720,
890  // the assumption may not be correct, but it will be updated after the header is decoded if needed.
891  if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
892  pix_fmt = q->orig_pix_fmt;
893  if (!avctx->coded_width)
894  avctx->coded_width = 1280;
895  if (!avctx->coded_height)
896  avctx->coded_height = 720;
897 
898  /* decode zero-size pkt to flush the buffered pkt before reinit */
899  if (q->reinit_flag) {
900  AVPacket zero_pkt = {0};
901  ret = qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
902  if (ret < 0 || *got_frame)
903  return ret;
904  }
905 
906  if (q->reinit_flag || !q->session || !q->initialized) {
907  mfxFrameAllocRequest request;
908  memset(&request, 0, sizeof(request));
909 
910  q->reinit_flag = 0;
911  ret = qsv_decode_header(avctx, q, pkt, pix_fmt, &param);
912  if (ret < 0) {
913  if (ret == AVERROR(EAGAIN))
914  av_log(avctx, AV_LOG_INFO, "More data is required to decode header\n");
915  else
916  av_log(avctx, AV_LOG_ERROR, "Error decoding header\n");
917  goto reinit_fail;
918  }
919  param.IOPattern = q->iopattern;
920 
921  q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC);
922 
923  avctx->coded_width = param.mfx.FrameInfo.Width;
924  avctx->coded_height = param.mfx.FrameInfo.Height;
925 
926  ret = MFXVideoDECODE_QueryIOSurf(q->session, &param, &request);
927  if (ret < 0)
928  return ff_qsv_print_error(avctx, ret, "Error querying IO surface");
929 
930  q->suggest_pool_size = request.NumFrameSuggested;
931 
932  ret = qsv_decode_preinit(avctx, q, pix_fmt, &param);
933  if (ret < 0)
934  goto reinit_fail;
935  q->initialized = 0;
936  }
937 
938  if (!q->initialized) {
939  ret = qsv_decode_init_context(avctx, q, &param);
940  if (ret < 0)
941  goto reinit_fail;
942  q->initialized = 1;
943  }
944 
945  return qsv_decode(avctx, q, frame, got_frame, pkt);
946 
947 reinit_fail:
948  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
949  return ret;
950 }
951 
952 enum LoadPlugin {
953  LOAD_PLUGIN_NONE,
954  LOAD_PLUGIN_HEVC_SW,
955  LOAD_PLUGIN_HEVC_HW,
956 };
957 
958 typedef struct QSVDecContext {
959  AVClass *class;
960  QSVContext qsv;
961 
962  int load_plugin;
963 
964  AVFifo *packet_fifo;
965 
966  AVPacket buffer_pkt;
967 } QSVDecContext;
968 
969 static void qsv_clear_buffers(QSVDecContext *s)
970 {
971  AVPacket pkt;
972  while (av_fifo_read(s->packet_fifo, &pkt, 1) >= 0)
973  av_packet_unref(&pkt);
974 
975  av_packet_unref(&s->buffer_pkt);
976 }
977 
978 static av_cold int qsv_decode_close(AVCodecContext *avctx)
979 {
980  QSVDecContext *s = avctx->priv_data;
981 
982  qsv_decode_close_qsvcontext(&s->qsv);
983 
984  qsv_clear_buffers(s);
985 
986  av_fifo_freep2(&s->packet_fifo);
987 
988  return 0;
989 }
990 
991 static av_cold int qsv_decode_init(AVCodecContext *avctx)
992 {
993  QSVDecContext *s = avctx->priv_data;
994  int ret;
995  const char *uid = NULL;
996 
997  if (avctx->codec_id == AV_CODEC_ID_VP8) {
998  uid = "f622394d8d87452f878c51f2fc9b4131";
999  } else if (avctx->codec_id == AV_CODEC_ID_VP9) {
1000  uid = "a922394d8d87452f878c51f2fc9b4131";
1001  }
1002  else if (avctx->codec_id == AV_CODEC_ID_HEVC && s->load_plugin != LOAD_PLUGIN_NONE) {
1003  static const char * const uid_hevcdec_sw = "15dd936825ad475ea34e35f3f54217a6";
1004  static const char * const uid_hevcdec_hw = "33a61c0b4c27454ca8d85dde757c6f8e";
1005 
1006  if (s->qsv.load_plugins[0]) {
1007  av_log(avctx, AV_LOG_WARNING,
1008  "load_plugins is not empty, but load_plugin is not set to 'none'. "
1009  "The load_plugin value will be ignored.\n");
1010  } else {
1011  if (s->load_plugin == LOAD_PLUGIN_HEVC_SW)
1012  uid = uid_hevcdec_sw;
1013  else
1014  uid = uid_hevcdec_hw;
1015  }
1016  }
1017  if (uid) {
1018  av_freep(&s->qsv.load_plugins);
1019  s->qsv.load_plugins = av_strdup(uid);
1020  if (!s->qsv.load_plugins)
1021  return AVERROR(ENOMEM);
1022  }
1023 
1024  s->qsv.orig_pix_fmt = AV_PIX_FMT_NV12;
1025  s->packet_fifo = av_fifo_alloc2(1, sizeof(AVPacket),
1026  AV_FIFO_FLAG_AUTO_GROW);
1027  if (!s->packet_fifo) {
1028  ret = AVERROR(ENOMEM);
1029  goto fail;
1030  }
1031 
1032  if (!avctx->pkt_timebase.num)
1033  av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");
1034 
1035  return 0;
1036 fail:
1037  qsv_decode_close(avctx);
1038  return ret;
1039 }
1040 
1041 static int qsv_decode_frame(AVCodecContext *avctx, AVFrame *frame,
1042  int *got_frame, AVPacket *avpkt)
1043 {
1044  QSVDecContext *s = avctx->priv_data;
1045  int ret;
1046 
1047  /* buffer the input packet */
1048  if (avpkt->size) {
1049  AVPacket input_ref;
1050 
1051  ret = av_packet_ref(&input_ref, avpkt);
1052  if (ret < 0)
1053  return ret;
1054  av_fifo_write(s->packet_fifo, &input_ref, 1);
1055  }
1056 
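 /* Input packets are queued in packet_fifo and drained through buffer_pkt,
  * which may take several qsv_process_data() calls to consume; during a
  * reinit the current buffer_pkt is kept and resubmitted. */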
1057  /* process buffered data */
1058  while (!*got_frame) {
1059  /* prepare the input data */
1060  if (s->buffer_pkt.size <= 0) {
1061  /* no more data */
1062  if (!av_fifo_can_read(s->packet_fifo))
1063  return avpkt->size ? avpkt->size : qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt);
1064  /* in progress of reinit, no read from fifo and keep the buffer_pkt */
1065  if (!s->qsv.reinit_flag) {
1066  av_packet_unref(&s->buffer_pkt);
1067  av_fifo_read(s->packet_fifo, &s->buffer_pkt, 1);
1068  }
1069  }
1070 
1071  ret = qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->buffer_pkt);
1072  if (ret < 0){
1073  /* Drop buffer_pkt when failed to decode the packet. Otherwise,
1074  the decoder will keep decoding the failure packet. */
1075  av_packet_unref(&s->buffer_pkt);
1076  return ret;
1077  }
1078  if (s->qsv.reinit_flag)
1079  continue;
1080 
1081  s->buffer_pkt.size -= ret;
1082  s->buffer_pkt.data += ret;
1083  }
1084 
1085  return avpkt->size;
1086 }
1087 
1088 static void qsv_decode_flush(AVCodecContext *avctx)
1089 {
1090  QSVDecContext *s = avctx->priv_data;
1091 
1092  qsv_clear_buffers(s);
1093 
1094  s->qsv.orig_pix_fmt = AV_PIX_FMT_NONE;
1095  s->qsv.initialized = 0;
1096 }
1097 
1098 #define OFFSET(x) offsetof(QSVDecContext, x)
1099 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1100 
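/* Expands to a per-codec AVClass and FFCodec, e.g. ff_h264_qsv_decoder, which
 * can be selected on the command line with something like
 *     ffmpeg -hwaccel qsv -c:v h264_qsv -i input.mp4 -f null -
 * (illustrative invocation; exact options depend on the build and device). */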
1101 #define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt) \
1102 static const AVClass x##_qsv_class = { \
1103  .class_name = #x "_qsv", \
1104  .item_name = av_default_item_name, \
1105  .option = opt, \
1106  .version = LIBAVUTIL_VERSION_INT, \
1107 }; \
1108 const FFCodec ff_##x##_qsv_decoder = { \
1109  .p.name = #x "_qsv", \
1110  CODEC_LONG_NAME(#X " video (Intel Quick Sync Video acceleration)"), \
1111  .priv_data_size = sizeof(QSVDecContext), \
1112  .p.type = AVMEDIA_TYPE_VIDEO, \
1113  .p.id = AV_CODEC_ID_##X, \
1114  .init = qsv_decode_init, \
1115  FF_CODEC_DECODE_CB(qsv_decode_frame), \
1116  .flush = qsv_decode_flush, \
1117  .close = qsv_decode_close, \
1118  .bsfs = bsf_name, \
1119  .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HYBRID, \
1120  .p.priv_class = &x##_qsv_class, \
1121  .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
1122  AV_PIX_FMT_P010, \
1123  AV_PIX_FMT_P012, \
1124  AV_PIX_FMT_YUYV422, \
1125  AV_PIX_FMT_Y210, \
1126  AV_PIX_FMT_Y212, \
1127  AV_PIX_FMT_VUYX, \
1128  AV_PIX_FMT_XV30, \
1129  AV_PIX_FMT_XV36, \
1130  AV_PIX_FMT_QSV, \
1131  AV_PIX_FMT_NONE }, \
1132  .hw_configs = qsv_hw_configs, \
1133  .p.wrapper_name = "qsv", \
1134  .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, \
1135 }; \
1136 
1137 #define DEFINE_QSV_DECODER(x, X, bsf_name) DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, options)
1138 
1139 #if CONFIG_HEVC_QSV_DECODER
1140 static const AVOption hevc_options[] = {
1141  { "async_depth", "Internal parallelization depth; the higher the value, the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
1142 
1143  { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VD, "load_plugin" },
1144  { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE }, 0, 0, VD, "load_plugin" },
1145  { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VD, "load_plugin" },
1146  { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VD, "load_plugin" },
1147 
1148  { "load_plugins", "A :-separated list of hexadecimal plugin UIDs to load in an internal session",
1149  OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD },
1150 
1151  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"},
1152  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"},
1153  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"},
1154  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"},
1155  { NULL },
1156 };
1157 DEFINE_QSV_DECODER_WITH_OPTION(hevc, HEVC, "hevc_mp4toannexb", hevc_options)
1158 #endif
1159 
1160 static const AVOption options[] = {
1161  { "async_depth", "Internal parallelization depth; the higher the value, the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD },
1162 
1163  { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"},
1164  { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"},
1165  { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"},
1166  { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"},
1167  { NULL },
1168 };
1169 
1170 #if CONFIG_H264_QSV_DECODER
1171 DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb")
1172 #endif
1173 
1174 #if CONFIG_MPEG2_QSV_DECODER
1175 DEFINE_QSV_DECODER(mpeg2, MPEG2VIDEO, NULL)
1176 #endif
1177 
1178 #if CONFIG_VC1_QSV_DECODER
1179 DEFINE_QSV_DECODER(vc1, VC1, NULL)
1180 #endif
1181 
1182 #if CONFIG_MJPEG_QSV_DECODER
1183 DEFINE_QSV_DECODER(mjpeg, MJPEG, NULL)
1184 #endif
1185 
1186 #if CONFIG_VP8_QSV_DECODER
1187 DEFINE_QSV_DECODER(vp8, VP8, NULL)
1188 #endif
1189 
1190 #if CONFIG_VP9_QSV_DECODER
1191 DEFINE_QSV_DECODER(vp9, VP9, NULL)
1192 #endif
1193 
1194 #if CONFIG_AV1_QSV_DECODER
1195 DEFINE_QSV_DECODER(av1, AV1, NULL)
1196 #endif
hwconfig.h
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
AVQSVFramesContext::frame_type
int frame_type
A combination of MFX_MEMTYPE_* describing the frame pool.
Definition: hwcontext_qsv.h:60
AVCodecContext::hwaccel_context
void * hwaccel_context
Legacy hardware accelerator context.
Definition: avcodec.h:1393
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVMasteringDisplayMetadata::max_luminance
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:57
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:40
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
uid
UID uid
Definition: mxfenc.c:2201
opt.h
qsv_process_data
static int qsv_process_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: qsvdec.c:876
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:975
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1147
QSVFramesContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: qsv_internal.h:116
AVBufferPool
The buffer pool.
Definition: buffer_internal.h:88
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:209
AVMasteringDisplayMetadata::has_luminance
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
Definition: mastering_display_metadata.h:67
AVFilmGrainAOMParams::uv_points
uint8_t uv_points[2][10][2]
Definition: film_grain_params.h:63
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
AVContentLightMetadata::MaxCLL
unsigned MaxCLL
Max content light level (cd/m^2).
Definition: mastering_display_metadata.h:102
AVFilmGrainParams::aom
AVFilmGrainAOMParams aom
Definition: film_grain_params.h:236
av_hwframe_ctx_init
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:334
ff_qsv_close_internal_session
int ff_qsv_close_internal_session(QSVSession *qs)
Definition: qsv.c:1116
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
ff_qsv_map_pictype
enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type)
Definition: qsv.c:379
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:968
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:661
av_hwframe_ctx_alloc
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:248
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1004
AVOption
AVOption.
Definition: opt.h:251
ff_qsv_find_surface_idx
int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
Definition: qsv.c:348
LOAD_PLUGIN_NONE
@ LOAD_PLUGIN_NONE
Definition: qsvdec.c:953
AV_PIX_FMT_XV30
#define AV_PIX_FMT_XV30
Definition: pixfmt.h:514
QSVContext::work_frames
QSVFrame * work_frames
a linked list of frames currently being used by QSV
Definition: qsvdec.c:89
LOAD_PLUGIN_HEVC_HW
@ LOAD_PLUGIN_HEVC_HW
Definition: qsvdec.c:955
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
AVHWFramesContext::width
int width
The allocated dimensions of the frames in this pool.
Definition: hwcontext.h:229
QSVFrame::frame
AVFrame * frame
Definition: qsv_internal.h:81
AVQSVContext::iopattern
int iopattern
The IO pattern to use.
Definition: qsv.h:46
QSVFrame::used
int used
Definition: qsv_internal.h:101
AVFilmGrainParams::seed
uint64_t seed
Seed to use for the synthesis process, if the codec allows for it.
Definition: film_grain_params.h:228
ff_qsv_init_session_device
int ff_qsv_init_session_device(AVCodecContext *avctx, mfxSession *psession, AVBufferRef *device_ref, const char *load_plugins, int gpu_copy)
Definition: qsv.c:992
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:98
ff_qsv_map_frame_to_surface
int ff_qsv_map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
Definition: qsv.c:287
fifo.h
QSVContext::suggest_pool_size
int suggest_pool_size
Definition: qsvdec.c:99
DEFINE_QSV_DECODER
#define DEFINE_QSV_DECODER(x, X, bsf_name)
Definition: qsvdec.c:1137
fail
#define fail()
Definition: checkasm.h:134
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
AVFilmGrainAOMParams::grain_scale_shift
int grain_scale_shift
Signals the down shift applied to the generated gaussian numbers during synthesis.
Definition: film_grain_params.h:99
QSVDecContext::qsv
QSVContext qsv
Definition: qsvdec.c:960
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:586
AVFilmGrainAOMParams::limit_output_range
int limit_output_range
Signals to clip to limited color levels after film grain application.
Definition: film_grain_params.h:122
LOAD_PLUGIN_HEVC_SW
@ LOAD_PLUGIN_HEVC_SW
Definition: qsvdec.c:954
options
static const AVOption options[]
Definition: qsvdec.c:1160
DEFINE_QSV_DECODER_WITH_OPTION
#define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt)
Definition: qsvdec.c:1101
AVFilmGrainAOMParams::num_y_points
int num_y_points
Number of points, and the scale and value for each point of the piecewise linear scaling function for...
Definition: film_grain_params.h:49
mfx_tb
static const AVRational mfx_tb
Definition: qsvdec.c:60
AVRational::num
int num
Numerator.
Definition: rational.h:59
QSVDecContext::packet_fifo
AVFifo * packet_fifo
Definition: qsvdec.c:964
QSVContext::async_fifo
AVFifo * async_fifo
Definition: qsvdec.c:91
QSVContext
Definition: qsvdec.c:75
qsv_internal.h
AVFilmGrainAOMParams
This structure describes how to handle film grain synthesis for AOM codecs.
Definition: film_grain_params.h:44
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:104
find_frame
static QSVFrame * find_frame(QSVContext *q, mfxFrameSurface1 *surf)
Definition: qsvdec.c:570
qsv_decode_header
static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q, const AVPacket *avpkt, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
Definition: qsvdec.c:385
AV_PIX_FMT_Y210
#define AV_PIX_FMT_Y210
Definition: pixfmt.h:512
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:961
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
ff_qsv_print_warning
int ff_qsv_print_warning(void *log_ctx, mfxStatus err, const char *warning_string)
Definition: qsv.c:194
ASYNC_DEPTH_DEFAULT
#define ASYNC_DEPTH_DEFAULT
Definition: qsv_internal.h:51
film_grain_params.h
av_cold
#define av_cold
Definition: attributes.h:90
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
qsv_decode_frame
static int qsv_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: qsvdec.c:1041
AVHWFramesContext::height
int height
Definition: hwcontext.h:229
QSVDecContext
Definition: qsvdec.c:958
QSVContext::iopattern
int iopattern
Definition: qsvdec.c:104
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:384
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
qsv_decode_init
static av_cold int qsv_decode_init(AVCodecContext *avctx)
Definition: qsvdec.c:991
AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX
The codec supports this format via the hw_frames_ctx interface.
Definition: codec.h:339
s
#define s(width, name)
Definition: cbs_vp9.c:256
hevc_options
static const AVOption hevc_options[]
Definition: videotoolboxenc.c:2775
QSVContext::reinit_flag
int reinit_flag
Definition: qsvdec.c:93
AVFilmGrainParams::codec
union AVFilmGrainParams::@318 codec
Additional fields may be added both here and in any structure included.
QSVContext::frames_ctx
QSVFramesContext frames_ctx
Definition: qsvdec.c:84
QSVContext::internal_qs
QSVSession internal_qs
Definition: qsvdec.c:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:376
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:220
QSVContext::ver
mfxVersion ver
Definition: qsvdec.c:78
QSV_RUNTIME_VERSION_ATLEAST
#define QSV_RUNTIME_VERSION_ATLEAST(MFX_VERSION, MAJOR, MINOR)
Definition: qsv_internal.h:64
av_film_grain_params_create_side_data
AVFilmGrainParams * av_film_grain_params_create_side_data(AVFrame *frame)
Allocate a complete AVFilmGrainParams and add it to the frame.
Definition: film_grain_params.c:31
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
decode.h
AVCodecHWConfig::pix_fmt
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:367
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:41
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AVQSVContext::nb_ext_buffers
int nb_ext_buffers
Definition: qsv.h:52
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:408
if
if(ret)
Definition: filter_design.txt:179
ff_qsv_init_session_frames
int ff_qsv_init_session_frames(AVCodecContext *avctx, mfxSession *psession, QSVFramesContext *qsv_frames_ctx, const char *load_plugins, int opaque, int gpu_copy)
Definition: qsv.c:1069
QSVFrame
Definition: qsv_internal.h:80
AVFilmGrainAOMParams::uv_mult_luma
int uv_mult_luma[2]
Definition: film_grain_params.h:106
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:222
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:982
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_CODEC_ID_AV1
@ AV_CODEC_ID_AV1
Definition: codec_id.h:283
qsv.h
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
QSV_HAVE_OPAQUE
#define QSV_HAVE_OPAQUE
Definition: qsv_internal.h:69
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_qsv_print_iopattern
int ff_qsv_print_iopattern(void *log_ctx, int mfx_iopattern, const char *extra_string)
Definition: qsv.c:100
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
av_fifo_can_read
size_t av_fifo_can_read(const AVFifo *f)
Definition: fifo.c:87
QSVContext::nb_ext_buffers
int nb_ext_buffers
Definition: qsvdec.c:110
QSVFrame::surface
mfxFrameSurface1 surface
Definition: qsv_internal.h:82
time.h
QSVFramesContext::mids_buf
AVBufferRef * mids_buf
Definition: qsv_internal.h:123
AV_PIX_FMT_QSV
@ AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
Definition: pixfmt.h:240
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:430
QSVContext::load_plugins
char * load_plugins
Definition: qsvdec.c:107
AVCodecContext::level
int level
level
Definition: avcodec.h:1676
QSVContext::initialized
int initialized
Definition: qsvdec.c:100
qsv_clear_buffers
static void qsv_clear_buffers(QSVDecContext *s)
Definition: qsvdec.c:969
QSVContext::fourcc
uint32_t fourcc
Definition: qsvdec.c:96
QSVContext::ext_buffers
mfxExtBuffer ** ext_buffers
Definition: qsvdec.c:109
AVFilmGrainAOMParams::num_uv_points
int num_uv_points[2]
If chroma_scaling_from_luma is set to 0, signals the chroma scaling function parameters.
Definition: film_grain_params.h:62
PTS_TO_MFX_PTS
#define PTS_TO_MFX_PTS(pts, pts_tb)
Definition: qsvdec.c:62
qsv_decode_close_qsvcontext
static void qsv_decode_close_qsvcontext(QSVContext *q)
Definition: qsvdec.c:848
QSVContext::frame_info
mfxFrameInfo frame_info
Definition: qsvdec.c:97
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1450
AVPacket::size
int size
Definition: packet.h:375
AVFifo
Definition: fifo.c:35
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:353
AVCodecContext::extra_hw_frames
int extra_hw_frames
Definition: avcodec.h:1983
codec_internal.h
AV_PIX_FMT_P012
#define AV_PIX_FMT_P012
Definition: pixfmt.h:509
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
AVQSVContext::session
mfxSession session
If non-NULL, the session to use for encoding or decoding.
Definition: qsv.h:41
LoadPlugin
LoadPlugin
Definition: qsvdec.c:952
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:1749
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
alloc_frame
static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
Definition: qsvdec.c:449
AVFilmGrainParams
This structure describes how to handle film grain synthesis in video for specific codecs.
Definition: film_grain_params.h:216
qsv_decode
static int qsv_decode(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, const AVPacket *avpkt)
Definition: qsvdec.c:695
AVCodecHWConfigInternal
Definition: hwconfig.h:29
qsv_decode_close
static av_cold int qsv_decode_close(AVCodecContext *avctx)
Definition: qsvdec.c:978
frame.h
AV_PIX_FMT_Y212
#define AV_PIX_FMT_Y212
Definition: pixfmt.h:513
AVQSVContext::ext_buffers
mfxExtBuffer ** ext_buffers
Extra buffers to pass to encoder or decoder initialization.
Definition: qsv.h:51
av_image_get_buffer_size
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
Definition: imgutils.c:466
get_surface
static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
Definition: qsvdec.c:529
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
av_content_light_metadata_create_side_data
AVContentLightMetadata * av_content_light_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVContentLightMetadata and add it to the frame.
Definition: mastering_display_metadata.c:55
AVFilmGrainAOMParams::ar_coeffs_y
int8_t ar_coeffs_y[24]
Luma auto-regression coefficients.
Definition: film_grain_params.h:80
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
QSVFramesContext::mids
QSVMid * mids
Definition: qsv_internal.h:124
QSVAsyncFrame::frame
QSVFrame * frame
Definition: qsvdec.c:72
hwcontext_qsv.h
QSVContext::pool
AVBufferPool * pool
Definition: qsvdec.c:98
log.h
ff_qsv_map_picstruct
enum AVFieldOrder ff_qsv_map_picstruct(int mfx_pic_struct)
Definition: qsv.c:361
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:367
QSVDecContext::buffer_pkt
AVPacket buffer_pkt
Definition: qsvdec.c:966
common.h
QSVContext::session
mfxSession session
Definition: qsvdec.c:77
qsv_decode_preinit
static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
Definition: qsvdec.c:263
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:226
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:487
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
AVFilmGrainAOMParams::scaling_shift
int scaling_shift
Specifies the shift applied to the chroma components.
Definition: film_grain_params.h:69
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1933
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
AVCodecContext::height
int height
Definition: avcodec.h:571
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:608
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:644
QSVDecContext::load_plugin
int load_plugin
Definition: qsvdec.c:962
OFFSET
#define OFFSET(x)
Definition: qsvdec.c:1098
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1883
qsv_init_session
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session, AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
Definition: qsvdec.c:179
avcodec.h
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:124
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ret
ret
Definition: filter_design.txt:187
pixfmt.h
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
QSVFrame::queued
int queued
Definition: qsv_internal.h:100
QSVContext::async_depth
int async_depth
Definition: qsvdec.c:103
MFX_PTS_TO_PTS
#define MFX_PTS_TO_PTS(mfx_pts, pts_tb)
Definition: qsvdec.c:66
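Since the MFX clock is fixed at 90 kHz (mfx_tb = {1, 90000}), both timestamp macros reduce to av_rescale_q() whenever the stream time base is known. A worked example, assuming a 1/1000 (millisecond) packet time base:
    #include <stdint.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    static void pts_example(void)
    {
        AVRational pkt_tb  = { 1, 1000 };   /* milliseconds */
        AVRational mfx_clk = { 1, 90000 };  /* 90 kHz MFX clock */
        int64_t pts     = 100;                                     /* 100 ms */
        int64_t mfx_pts = av_rescale_q(pts, pkt_tb, mfx_clk);      /* 100 * 90 = 9000 ticks */
        int64_t back    = av_rescale_q(mfx_pts, mfx_clk, pkt_tb);  /* 9000 / 90 = 100 ms */
        (void)back;
    }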
QSVSession
Definition: qsv_internal.h:106
AVHWFramesContext::hwctx
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
Definition: hwcontext.h:162
ff_qsv_codec_id_to_mfx
int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id)
Definition: qsv.c:54
QSVContext::zero_consume_run
int zero_consume_run
Definition: qsvdec.c:92
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
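A sketch of the element-based AVFifo API (the payload struct is only an example, not the decoder's own):
    #include <libavutil/error.h>
    #include <libavutil/fifo.h>

    static int fifo_sketch(void)
    {
        struct { int id; } elem = { 1 }, out;
        AVFifo *fifo = av_fifo_alloc2(1, sizeof(elem), AV_FIFO_FLAG_AUTO_GROW);
        if (!fifo)
            return AVERROR(ENOMEM);
        av_fifo_write(fifo, &elem, 1);  /* grows automatically thanks to the flag */
        av_fifo_read(fifo, &out, 1);    /* pops the oldest element */
        av_fifo_freep2(&fifo);          /* free and reset the pointer to NULL */
        return 0;
    }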
AV_HWDEVICE_TYPE_QSV
@ AV_HWDEVICE_TYPE_QSV
Definition: hwcontext.h:33
ff_decode_frame_props
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1294
AVCodecContext
main external API structure.
Definition: avcodec.h:398
AVFilmGrainAOMParams::ar_coeff_lag
int ar_coeff_lag
Specifies the auto-regression lag.
Definition: film_grain_params.h:74
MFXUnload
#define MFXUnload(a)
Definition: qsvdec.c:57
QSVContext::orig_pix_fmt
enum AVPixelFormat orig_pix_fmt
Definition: qsvdec.c:95
av_mastering_display_metadata_create_side_data
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
Definition: mastering_display_metadata.c:32
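A sketch of attaching mastering-display side data to a frame (the luminance values are illustrative):
    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/mastering_display_metadata.h>

    static int attach_mdm_sketch(AVFrame *frame)
    {
        AVMasteringDisplayMetadata *mdm =
            av_mastering_display_metadata_create_side_data(frame);
        if (!mdm)
            return AVERROR(ENOMEM);
        mdm->max_luminance = av_make_q(1000, 1);   /* 1000 cd/m^2 */
        mdm->min_luminance = av_make_q(1, 10000);  /* 0.0001 cd/m^2 */
        mdm->has_luminance = 1;
        return 0;
    }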
AVFilmGrainAOMParams::y_points
uint8_t y_points[14][2]
Definition: film_grain_params.h:50
AVFilmGrainAOMParams::uv_offset
int uv_offset[2]
Offset used for component scaling function.
Definition: film_grain_params.h:112
qsv_decode_init_context
static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
Definition: qsvdec.c:359
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1550
AVQSVContext
This struct is used for communicating QSV parameters between libavcodec and the caller.
Definition: qsv.h:36
QSVSession::session
mfxSession session
Definition: qsv_internal.h:107
ff_qsv_map_fourcc
enum AVPixelFormat ff_qsv_map_fourcc(uint32_t fourcc)
Definition: qsv.c:203
AVFilmGrainAOMParams::uv_mult
int uv_mult[2]
Specifies the luma/chroma multipliers for the index to the component scaling function.
Definition: film_grain_params.h:105
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame, packet, or coded stream side data by decoders and encoders.
Definition: avcodec.h:2009
AVMasteringDisplayMetadata::min_luminance
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:52
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:508
qsv_get_continuous_buffer
static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferPool *pool)
Definition: qsvdec.c:126
AVFilmGrainAOMParams::overlap_flag
int overlap_flag
Signals whether to overlap film grain blocks.
Definition: film_grain_params.h:117
VD
#define VD
Definition: qsvdec.c:1099
AVQSVFramesContext
This struct is allocated as AVHWFramesContext.hwctx.
Definition: hwcontext_qsv.h:53
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:586
AVHWFramesContext::initial_pool_size
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:199
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:280
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
QSVFrame::dec_info
mfxExtDecodedFrameInfo dec_info
Definition: qsv_internal.h:84
mastering_display_metadata.h
ff_attach_decode_data
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1426
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
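Worked values, assuming the usual round-up definition ((x)+(a)-1) & ~((a)-1):
    #include <libavutil/macros.h>

    int la = FFALIGN(1080, 128);  /* 1152: rounded up to the next multiple of 128 */
    int lb = FFALIGN(1920, 128);  /* 1920: already a multiple of 128, unchanged */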
QSVAsyncFrame::sync
mfxSyncPoint * sync
Definition: qsvdec.c:71
QSVFramesContext
Definition: qsv_internal.h:115
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
AVContentLightMetadata::MaxFALL
unsigned MaxFALL
Max average light level per frame (cd/m^2).
Definition: mastering_display_metadata.h:107
AVPacket
This structure stores compressed data.
Definition: packet.h:351
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
qsv_clear_unused_frames
static void qsv_clear_unused_frames(QSVContext *q)
Definition: qsvdec.c:517
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:571
imgutils.h
AV_PIX_FMT_XV36
#define AV_PIX_FMT_XV36
Definition: pixfmt.h:515
AV_CODEC_ID_VP8
@ AV_CODEC_ID_VP8
Definition: codec_id.h:192
hwcontext.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:326
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
QSVContext::gpu_copy
int gpu_copy
Definition: qsvdec.c:105
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1742
QSVAsyncFrame
Definition: qsvdec.c:70
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
AVFilmGrainAOMParams::chroma_scaling_from_luma
int chroma_scaling_from_luma
Signals whether to derive the chroma scaling function from the luma.
Definition: film_grain_params.h:56
AV_PIX_FMT_VUYX
@ AV_PIX_FMT_VUYX
packed VUYX 4:4:4, 32bpp, Variant of VUYA where alpha channel is left undefined
Definition: pixfmt.h:403
QSVSession::loader
void * loader
Definition: qsv_internal.h:112
ff_qsv_frame_add_ext_param
void ff_qsv_frame_add_ext_param(AVCodecContext *avctx, QSVFrame *frame, mfxExtBuffer *param)
Definition: qsv.c:1134
AVCodecHWConfigInternal::public
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwconfig.h:34
AV_FILM_GRAIN_PARAMS_AV1
@ AV_FILM_GRAIN_PARAMS_AV1
The union is valid when interpreted as AVFilmGrainAOMParams (codec.aom)
Definition: film_grain_params.h:30
QSVFrame::next
struct QSVFrame * next
Definition: qsv_internal.h:103
ff_qsv_print_error
int ff_qsv_print_error(void *log_ctx, mfxStatus err, const char *error_string)
Definition: qsv.c:185
AVFilmGrainParams::type
enum AVFilmGrainParamsType type
Specifies the codec for which this structure is valid.
Definition: film_grain_params.h:220
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
ff_qsv_init_internal_session
int ff_qsv_init_internal_session(AVCodecContext *avctx, QSVSession *qs, const char *load_plugins, int gpu_copy)
Definition: qsv.c:681
qsv_decode_flush
static void qsv_decode_flush(AVCodecContext *avctx)
Definition: qsvdec.c:1088
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:371
qsv_hw_configs
static const AVCodecHWConfigInternal *const qsv_hw_configs[]
Definition: qsvdec.c:113
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:67
AVFilmGrainAOMParams::ar_coeff_shift
int ar_coeff_shift
Specifies the range of the auto-regressive coefficients.
Definition: film_grain_params.h:93
AVFilmGrainAOMParams::ar_coeffs_uv
int8_t ar_coeffs_uv[2][25]
Chroma auto-regression coefficients.
Definition: film_grain_params.h:86