FFmpeg
qsvdec.c
1 /*
2  * Intel MediaSDK QSV codec-independent code
3  *
4  * copyright (c) 2013 Luca Barbato
5  * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <string.h>
25 #include <sys/types.h>
26 
27 #include <mfx/mfxvideo.h>
28 
29 #include "libavutil/common.h"
30 #include "libavutil/hwcontext.h"
31 #include "libavutil/hwcontext_qsv.h"
32 #include "libavutil/mem.h"
33 #include "libavutil/log.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/pixfmt.h"
36 #include "libavutil/time.h"
37 
38 #include "avcodec.h"
39 #include "internal.h"
40 #include "qsv.h"
41 #include "qsv_internal.h"
42 #include "qsvdec.h"
43 
44 const AVCodecHWConfigInternal *ff_qsv_hw_configs[] = {
45  &(const AVCodecHWConfigInternal) {
46  .public = {
47  .pix_fmt = AV_PIX_FMT_QSV,
48  .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
49  AV_CODEC_HW_CONFIG_METHOD_AD_HOC,
50  .device_type = AV_HWDEVICE_TYPE_QSV,
51  },
52  .hwaccel = NULL,
53  },
54  NULL
55 };
56 
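/* Descriptive note: the decoding session is taken, in order of preference, from a
 * user-supplied mfxSession, from avctx->hw_frames_ctx, from avctx->hw_device_ctx,
 * or, as a fallback, from an internally created session. */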
57 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
58  AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
59 {
60  int ret;
61 
62  if (session) {
63  q->session = session;
64  } else if (hw_frames_ref) {
65  if (q->internal_session) {
66  MFXClose(q->internal_session);
67  q->internal_session = NULL;
68  }
69  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
70 
71  q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
72  if (!q->frames_ctx.hw_frames_ctx)
73  return AVERROR(ENOMEM);
74 
75  ret = ff_qsv_init_session_frames(avctx, &q->internal_session,
76  &q->frames_ctx, q->load_plugins,
77  q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY);
78  if (ret < 0) {
79  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
80  return ret;
81  }
82 
83  q->session = q->internal_session;
84  } else if (hw_device_ref) {
85  if (q->internal_session) {
86  MFXClose(q->internal_session);
87  q->internal_session = NULL;
88  }
89 
90  ret = ff_qsv_init_session_device(avctx, &q->internal_session,
91  hw_device_ref, q->load_plugins);
92  if (ret < 0)
93  return ret;
94 
95  q->session = q->internal_session;
96  } else {
97  if (!q->internal_session) {
98  ret = ff_qsv_init_internal_session(avctx, &q->internal_session,
99  q->load_plugins);
100  if (ret < 0)
101  return ret;
102  }
103 
104  q->session = q->internal_session;
105  }
106 
107  /* make sure the decoder is uninitialized */
108  MFXVideoDECODE_Close(q->session);
109 
110  return 0;
111 }
112 
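/* Descriptive note: each entry of the async fifo is a QSVFrame pointer followed by
 * a pointer to the mfxSyncPoint of the pending decode operation. */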
113 static inline unsigned int qsv_fifo_item_size(void)
114 {
115  return sizeof(mfxSyncPoint*) + sizeof(QSVFrame*);
116 }
117 
118 static inline unsigned int qsv_fifo_size(const AVFifoBuffer* fifo)
119 {
120  return av_fifo_size(fifo) / qsv_fifo_item_size();
121 }
122 
123 static int check_dec_param(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param_in)
124 {
125  mfxVideoParam param_out = { .mfx.CodecId = param_in->mfx.CodecId };
126  mfxStatus ret;
127 
128 #define CHECK_MATCH(x) \
129  do { \
130  if (param_out.mfx.x != param_in->mfx.x) { \
131  av_log(avctx, AV_LOG_WARNING, "Required "#x" %d is unsupported\n", \
132  param_in->mfx.x); \
133  } \
134  } while (0)
135 
136  ret = MFXVideoDECODE_Query(q->session, param_in, &param_out);
137 
138  if (ret < 0) {
139  CHECK_MATCH(CodecId);
140  CHECK_MATCH(CodecProfile);
141  CHECK_MATCH(CodecLevel);
142  CHECK_MATCH(FrameInfo.Width);
143  CHECK_MATCH(FrameInfo.Height);
144 #undef CHECK_MATCH
145  return 0;
146  }
147  return 1;
148 }
149 
150 static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q)
151 {
152  const AVPixFmtDescriptor *desc;
153  mfxSession session = NULL;
154  int iopattern = 0;
155  mfxVideoParam param = { 0 };
156  int frame_width = avctx->coded_width;
157  int frame_height = avctx->coded_height;
158  int ret;
159 
160  desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
161  if (!desc)
162  return AVERROR_BUG;
163 
164  if (!q->async_fifo) {
165  q->async_fifo = av_fifo_alloc(q->async_depth * qsv_fifo_item_size());
166  if (!q->async_fifo)
167  return AVERROR(ENOMEM);
168  }
169 
170  if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
171  AVQSVContext *user_ctx = avctx->hwaccel_context;
172  session = user_ctx->session;
173  iopattern = user_ctx->iopattern;
174  q->ext_buffers = user_ctx->ext_buffers;
175  q->nb_ext_buffers = user_ctx->nb_ext_buffers;
176  }
177 
178  if (avctx->hw_frames_ctx) {
179  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
180  AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
181 
182  if (!iopattern) {
183  if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
184  iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
185  else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
186  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
187  }
188  }
189 
190  if (!iopattern)
191  iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
192  q->iopattern = iopattern;
193 
194  ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
195  if (ret < 0) {
196  av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
197  return ret;
198  }
199 
200  ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
201  if (ret < 0)
202  return ret;
203 
204  param.mfx.CodecId = ret;
205  param.mfx.CodecProfile = ff_qsv_profile_to_mfx(avctx->codec_id, avctx->profile);
206  param.mfx.CodecLevel = ff_qsv_level_to_mfx(avctx->codec_id, avctx->level);
207 
208  param.mfx.FrameInfo.BitDepthLuma = desc->comp[0].depth;
209  param.mfx.FrameInfo.BitDepthChroma = desc->comp[0].depth;
210  param.mfx.FrameInfo.Shift = desc->comp[0].depth > 8;
211  param.mfx.FrameInfo.FourCC = q->fourcc;
212  param.mfx.FrameInfo.Width = frame_width;
213  param.mfx.FrameInfo.Height = frame_height;
214  param.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
215 
216  switch (avctx->field_order) {
217  case AV_FIELD_PROGRESSIVE:
218  param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
219  break;
220  case AV_FIELD_TT:
221  param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_TFF;
222  break;
223  case AV_FIELD_BB:
224  param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_BFF;
225  break;
226  default:
227  param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_UNKNOWN;
228  break;
229  }
230 
231  param.IOPattern = q->iopattern;
232  param.AsyncDepth = q->async_depth;
233  param.ExtParam = q->ext_buffers;
234  param.NumExtParam = q->nb_ext_buffers;
235 
236  if (!check_dec_param(avctx, q, &param)) {
237  // Only warn instead of erroring out, since the stream may still be decodable.
238  av_log(avctx, AV_LOG_WARNING,
239  "Current input bitstream is not supported by QSV decoder.\n");
240  }
241 
242  ret = MFXVideoDECODE_Init(q->session, &param);
243  if (ret < 0)
244  return ff_qsv_print_error(avctx, ret,
245  "Error initializing the MFX video decoder");
246 
247  q->frame_info = param.mfx.FrameInfo;
248 
249  return 0;
250 }
251 
252 static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
253 {
254  int ret;
255 
256  ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
257  if (ret < 0)
258  return ret;
259 
260  if (frame->frame->format == AV_PIX_FMT_QSV) {
261  frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
262  } else {
263  frame->surface.Info = q->frame_info;
264 
265  frame->surface.Data.PitchLow = frame->frame->linesize[0];
266  frame->surface.Data.Y = frame->frame->data[0];
267  frame->surface.Data.UV = frame->frame->data[1];
268  }
269 
270  if (q->frames_ctx.mids) {
271  ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
272  if (ret < 0)
273  return ret;
274 
275  frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
276  }
277  frame->surface.Data.ExtParam = &frame->ext_param;
278  frame->surface.Data.NumExtParam = 1;
279  frame->ext_param = (mfxExtBuffer*)&frame->dec_info;
280  frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
281  frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
282 
283  frame->used = 1;
284 
285  return 0;
286 }
287 
288 static void qsv_clear_unused_frames(QSVContext *q)
289 {
290  QSVFrame *cur = q->work_frames;
291  while (cur) {
292  if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
293  cur->used = 0;
294  av_frame_unref(cur->frame);
295  }
296  cur = cur->next;
297  }
298 }
299 
300 static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
301 {
302  QSVFrame *frame, **last;
303  int ret;
304 
305  qsv_clear_unused_frames(q);
306 
307  frame = q->work_frames;
308  last = &q->work_frames;
309  while (frame) {
310  if (!frame->used) {
311  ret = alloc_frame(avctx, q, frame);
312  if (ret < 0)
313  return ret;
314  *surf = &frame->surface;
315  return 0;
316  }
317 
318  last = &frame->next;
319  frame = frame->next;
320  }
321 
322  frame = av_mallocz(sizeof(*frame));
323  if (!frame)
324  return AVERROR(ENOMEM);
325  frame->frame = av_frame_alloc();
326  if (!frame->frame) {
327  av_freep(&frame);
328  return AVERROR(ENOMEM);
329  }
330  *last = frame;
331 
332  ret = alloc_frame(avctx, q, frame);
333  if (ret < 0)
334  return ret;
335 
336  *surf = &frame->surface;
337 
338  return 0;
339 }
340 
341 static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
342 {
343  QSVFrame *cur = q->work_frames;
344  while (cur) {
345  if (surf == &cur->surface)
346  return cur;
347  cur = cur->next;
348  }
349  return NULL;
350 }
351 
352 static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
353  AVFrame *frame, int *got_frame,
354  AVPacket *avpkt)
355 {
356  QSVFrame *out_frame;
357  mfxFrameSurface1 *insurf;
358  mfxFrameSurface1 *outsurf;
359  mfxSyncPoint *sync;
360  mfxBitstream bs = { { { 0 } } };
361  int ret;
362 
363  if (avpkt->size) {
364  bs.Data = avpkt->data;
365  bs.DataLength = avpkt->size;
366  bs.MaxLength = bs.DataLength;
367  bs.TimeStamp = avpkt->pts;
368  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
369  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
370  }
371 
372  sync = av_mallocz(sizeof(*sync));
373  if (!sync) {
374  av_freep(&sync);
375  return AVERROR(ENOMEM);
376  }
377 
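 /* Descriptive note: the bitstream is submitted to the SDK below, retrying while the
  * hardware is busy (MFX_WRN_DEVICE_BUSY) or while the SDK requests another
  * working surface (MFX_ERR_MORE_SURFACE). */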
378  do {
379  ret = get_surface(avctx, q, &insurf);
380  if (ret < 0) {
381  av_freep(&sync);
382  return ret;
383  }
384 
385  ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
386  insurf, &outsurf, sync);
387  if (ret == MFX_WRN_DEVICE_BUSY)
388  av_usleep(500);
389 
390  } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
391 
392  if (ret != MFX_ERR_NONE &&
393  ret != MFX_ERR_MORE_DATA &&
394  ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
395  ret != MFX_ERR_MORE_SURFACE) {
396  av_freep(&sync);
397  return ff_qsv_print_error(avctx, ret,
398  "Error during QSV decoding.");
399  }
400 
401  /* make sure we do not enter an infinite loop if the SDK
402  * did not consume any data and did not return anything */
403  if (!*sync && !bs.DataOffset) {
404  bs.DataOffset = avpkt->size;
405  ++q->zero_consume_run;
406  if (q->zero_consume_run > 1)
407  ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
408  } else if (!*sync && bs.DataOffset) {
409  ++q->buffered_count;
410  } else {
411  q->zero_consume_run = 0;
412  }
413 
414  if (*sync) {
415  QSVFrame *out_frame = find_frame(q, outsurf);
416 
417  if (!out_frame) {
418  av_log(avctx, AV_LOG_ERROR,
419  "The returned surface does not correspond to any frame\n");
420  av_freep(&sync);
421  return AVERROR_BUG;
422  }
423 
424  out_frame->queued = 1;
425  av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
426  av_fifo_generic_write(q->async_fifo, &sync, sizeof(sync), NULL);
427  } else {
428  av_freep(&sync);
429  }
430 
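 /* Descriptive note: once async_depth decode calls are in flight, or when draining
  * with an empty packet, the oldest frame is popped from the fifo (synchronizing
  * first unless the output is a hardware surface). */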
431  if ((qsv_fifo_size(q->async_fifo) >= q->async_depth) ||
432  (!avpkt->size && av_fifo_size(q->async_fifo))) {
433  AVFrame *src_frame;
434 
435  av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
436  av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
437  out_frame->queued = 0;
438 
439  if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
440  do {
441  ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
442  } while (ret == MFX_WRN_IN_EXECUTION);
443  }
444 
445  av_freep(&sync);
446 
447  src_frame = out_frame->frame;
448 
449  ret = av_frame_ref(frame, src_frame);
450  if (ret < 0)
451  return ret;
452 
453  outsurf = &out_frame->surface;
454 
455 #if FF_API_PKT_PTS
456 FF_DISABLE_DEPRECATION_WARNINGS
457  frame->pkt_pts = outsurf->Data.TimeStamp;
458 FF_ENABLE_DEPRECATION_WARNINGS
459 #endif
460  frame->pts = outsurf->Data.TimeStamp;
461 
462  frame->repeat_pict =
463  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
464  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
465  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
466  frame->top_field_first =
467  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
468  frame->interlaced_frame =
469  !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
470  frame->pict_type = ff_qsv_map_pictype(out_frame->dec_info.FrameType);
471  // Treating IDR frames as the only key frames is suitable for H.264 only; for HEVC, IRAP frames are key frames.
472  if (avctx->codec_id == AV_CODEC_ID_H264)
473  frame->key_frame = !!(out_frame->dec_info.FrameType & MFX_FRAMETYPE_IDR);
474 
475  /* update the surface properties */
476  if (avctx->pix_fmt == AV_PIX_FMT_QSV)
477  ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
478 
479  *got_frame = 1;
480  }
481 
482  return bs.DataOffset;
483 }
484 
485 int ff_qsv_decode_close(QSVContext *q)
486 {
487  QSVFrame *cur = q->work_frames;
488 
489  if (q->session)
490  MFXVideoDECODE_Close(q->session);
491 
492  while (q->async_fifo && av_fifo_size(q->async_fifo)) {
493  QSVFrame *out_frame;
494  mfxSyncPoint *sync;
495 
496  av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
497  av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
498 
499  av_freep(&sync);
500  }
501 
502  while (cur) {
503  q->work_frames = cur->next;
504  av_frame_free(&cur->frame);
505  av_freep(&cur);
506  cur = q->work_frames;
507  }
508 
509  av_fifo_free(q->async_fifo);
510  q->async_fifo = NULL;
511 
512  av_parser_close(q->parser);
513  avcodec_free_context(&q->avctx_internal);
514 
515  if (q->internal_session)
516  MFXClose(q->internal_session);
517 
518  av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
519  av_buffer_unref(&q->frames_ctx.mids_buf);
520 
521  return 0;
522 }
523 
524 int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q,
525  AVFrame *frame, int *got_frame, AVPacket *pkt)
526 {
527  uint8_t *dummy_data;
528  int dummy_size;
529  int ret;
530  const AVPixFmtDescriptor *desc;
531 
532  if (!q->avctx_internal) {
533  q->avctx_internal = avcodec_alloc_context3(NULL);
534  if (!q->avctx_internal)
535  return AVERROR(ENOMEM);
536 
537  q->avctx_internal->codec_id = avctx->codec_id;
538 
539  q->parser = av_parser_init(avctx->codec_id);
540  if (!q->parser)
541  return AVERROR(ENOMEM);
542 
543  q->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
544  q->orig_pix_fmt = AV_PIX_FMT_NONE;
545  }
546 
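 /* Descriptive note: an empty packet is a flush request and is passed straight to
  * qsv_decode() to drain frames still buffered inside the SDK. */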
547  if (!pkt->size)
548  return qsv_decode(avctx, q, frame, got_frame, pkt);
549 
550  /* we assume the packets are already split properly and want
551  * just the codec parameters here */
552  av_parser_parse2(q->parser, q->avctx_internal,
553  &dummy_data, &dummy_size,
554  pkt->data, pkt->size, pkt->pts, pkt->dts,
555  pkt->pos);
556 
557  avctx->field_order = q->parser->field_order;
558  /* TODO: flush delayed frames on reinit */
559  if (q->parser->format != q->orig_pix_fmt ||
560  FFALIGN(q->parser->coded_width, 16) != FFALIGN(avctx->coded_width, 16) ||
561  FFALIGN(q->parser->coded_height, 16) != FFALIGN(avctx->coded_height, 16)) {
562  enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
563  AV_PIX_FMT_NONE,
564  AV_PIX_FMT_NONE };
565  enum AVPixelFormat qsv_format;
566  AVPacket zero_pkt = {0};
567 
568  if (q->buffered_count) {
569  q->reinit_flag = 1;
570  /* decode zero-size pkt to flush the buffered pkt before reinit */
571  q->buffered_count--;
572  return qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
573  }
574 
575  q->reinit_flag = 0;
576 
577  qsv_format = ff_qsv_map_pixfmt(q->parser->format, &q->fourcc);
578  if (qsv_format < 0) {
579  av_log(avctx, AV_LOG_ERROR,
580  "Decoding pixel format '%s' is not supported\n",
581  av_get_pix_fmt_name(q->parser->format));
582  ret = AVERROR(ENOSYS);
583  goto reinit_fail;
584  }
585 
586  q->orig_pix_fmt = q->parser->format;
587  avctx->pix_fmt = pix_fmts[1] = qsv_format;
588  avctx->width = q->parser->width;
589  avctx->height = q->parser->height;
590  avctx->coded_width = FFALIGN(q->parser->coded_width, 16);
591  avctx->coded_height = FFALIGN(q->parser->coded_height, 16);
592  avctx->level = q->avctx_internal->level;
593  avctx->profile = q->avctx_internal->profile;
594 
595  ret = ff_get_format(avctx, pix_fmts);
596  if (ret < 0)
597  goto reinit_fail;
598 
599  avctx->pix_fmt = ret;
600 
601  desc = av_pix_fmt_desc_get(avctx->pix_fmt);
602  if (!desc)
603  goto reinit_fail;
604 
605  if (desc->comp[0].depth > 8) {
606  avctx->coded_width = FFALIGN(q->parser->coded_width, 32);
607  avctx->coded_height = FFALIGN(q->parser->coded_height, 32);
608  }
609 
610  ret = qsv_decode_init(avctx, q);
611  if (ret < 0)
612  goto reinit_fail;
613  }
614 
615  return qsv_decode(avctx, q, frame, got_frame, pkt);
616 
617 reinit_fail:
618  q->orig_pix_fmt = q->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
619  return ret;
620 }
621 
622 void ff_qsv_decode_flush(AVCodecContext *avctx, QSVContext *q)
623 {
624  q->orig_pix_fmt = AV_PIX_FMT_NONE;
625 }
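As a rough illustration of how an application reaches the code in this file, the sketch below opens a QSV decoder through the public libavcodec API, so that qsv_init_session() derives its mfxSession from avctx->hw_device_ctx. The decoder name "h264_qsv", the helper open_qsv_decoder() and the error handling are illustrative assumptions, not part of qsvdec.c.

/* Illustrative sketch only (not part of qsvdec.c). */
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

static int open_qsv_decoder(AVCodecContext **pctx)
{
    const AVCodec *codec = avcodec_find_decoder_by_name("h264_qsv");
    AVBufferRef *device_ref = NULL;
    AVCodecContext *avctx = NULL;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;

    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return AVERROR(ENOMEM);

    /* Create a QSV hardware device; the decoder builds its MFX session
     * from this device context. */
    ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV, NULL, NULL, 0);
    if (ret < 0)
        goto fail;

    /* The codec context keeps its own reference to the device. */
    avctx->hw_device_ctx = av_buffer_ref(device_ref);
    av_buffer_unref(&device_ref);
    if (!avctx->hw_device_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = avcodec_open2(avctx, codec, NULL);
    if (ret < 0)
        goto fail;

    *pctx = avctx;
    return 0;

fail:
    av_buffer_unref(&device_ref);
    avcodec_free_context(&avctx);
    return ret;
}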