FFmpeg
qsvdec.c
Go to the documentation of this file.
1 /*
2  * Intel MediaSDK QSV codec-independent code
3  *
4  * copyright (c) 2013 Luca Barbato
5  * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
#include <string.h>
#include <sys/types.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/mem.h"
#include "libavutil/log.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/time.h"

#include "avcodec.h"
#include "internal.h"
#include "qsv.h"
#include "qsv_internal.h"
#include "qsvdec.h"
43 
45  &(const AVCodecHWConfigInternal) {
46  .public = {
50  .device_type = AV_HWDEVICE_TYPE_QSV,
51  },
52  .hwaccel = NULL,
53  },
54  NULL
55 };
56 
57 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
58  AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
59 {
60  int ret;
61 
62  if (session) {
63  q->session = session;
64  } else if (hw_frames_ref) {
65  if (q->internal_session) {
66  MFXClose(q->internal_session);
68  }
70 
71  q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
72  if (!q->frames_ctx.hw_frames_ctx)
73  return AVERROR(ENOMEM);
74 
76  &q->frames_ctx, q->load_plugins,
77  q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY);
78  if (ret < 0) {
80  return ret;
81  }
82 
83  q->session = q->internal_session;
84  } else if (hw_device_ref) {
85  if (q->internal_session) {
86  MFXClose(q->internal_session);
88  }
89 
91  hw_device_ref, q->load_plugins);
92  if (ret < 0)
93  return ret;
94 
95  q->session = q->internal_session;
96  } else {
97  if (!q->internal_session) {
99  q->load_plugins);
100  if (ret < 0)
101  return ret;
102  }
103 
104  q->session = q->internal_session;
105  }
106 
107  /* make sure the decoder is uninitialized */
108  MFXVideoDECODE_Close(q->session);
109 
110  return 0;
111 }
112 
113 static inline unsigned int qsv_fifo_item_size(void)
114 {
115  return sizeof(mfxSyncPoint*) + sizeof(QSVFrame*);
116 }
117 
118 static inline unsigned int qsv_fifo_size(const AVFifoBuffer* fifo)
119 {
120  return av_fifo_size(fifo) / qsv_fifo_item_size();
121 }
122 
123 static int check_dec_param(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param_in)
124 {
125  mfxVideoParam param_out = { .mfx.CodecId = param_in->mfx.CodecId };
126  mfxStatus ret;
127 
128 #define CHECK_MATCH(x) \
129  do { \
130  if (param_out.mfx.x != param_in->mfx.x) { \
131  av_log(avctx, AV_LOG_WARNING, "Required "#x" %d is unsupported\n", \
132  param_in->mfx.x); \
133  } \
134  } while (0)
135 
136  ret = MFXVideoDECODE_Query(q->session, param_in, &param_out);
137 
138  if (ret < 0) {
139  CHECK_MATCH(CodecId);
140  CHECK_MATCH(CodecProfile);
141  CHECK_MATCH(CodecLevel);
142  CHECK_MATCH(FrameInfo.Width);
143  CHECK_MATCH(FrameInfo.Height);
144 #undef CHECK_MATCH
145  return 0;
146  }
147  return 1;
148 }
149 
150 static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
151 {
152  mfxSession session = NULL;
153  int iopattern = 0;
154  int ret;
155  enum AVPixelFormat pix_fmts[3] = {
156  AV_PIX_FMT_QSV, /* opaque format in case of video memory output */
157  pix_fmt, /* system memory format obtained from bitstream parser */
158  AV_PIX_FMT_NONE };
159 
160  ret = ff_get_format(avctx, pix_fmts);
161  if (ret < 0) {
162  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
163  return ret;
164  }
165 
166  if (!q->async_fifo) {
168  if (!q->async_fifo)
169  return AVERROR(ENOMEM);
170  }
171 
172  if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
173  AVQSVContext *user_ctx = avctx->hwaccel_context;
174  session = user_ctx->session;
175  iopattern = user_ctx->iopattern;
176  q->ext_buffers = user_ctx->ext_buffers;
177  q->nb_ext_buffers = user_ctx->nb_ext_buffers;
178  }
179 
180  if (avctx->hw_frames_ctx) {
181  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
182  AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
183 
184  if (!iopattern) {
185  if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
186  iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
187  else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
188  iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
189  }
190  }
191 
192  if (!iopattern)
193  iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
194  q->iopattern = iopattern;
195 
196  ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
197  if (ret < 0) {
198  av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
199  return ret;
200  }
201 
202  param->IOPattern = q->iopattern;
203  param->AsyncDepth = q->async_depth;
204  param->ExtParam = q->ext_buffers;
205  param->NumExtParam = q->nb_ext_buffers;
206 
207  return 0;
208  }
209 
210 static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
211 {
212  int ret;
213 
214  avctx->width = param->mfx.FrameInfo.CropW;
215  avctx->height = param->mfx.FrameInfo.CropH;
216  avctx->coded_width = param->mfx.FrameInfo.Width;
217  avctx->coded_height = param->mfx.FrameInfo.Height;
218  avctx->level = param->mfx.CodecLevel;
219  avctx->profile = param->mfx.CodecProfile;
220  avctx->field_order = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct);
221  avctx->pix_fmt = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC);
222 
223  ret = MFXVideoDECODE_Init(q->session, param);
224  if (ret < 0)
225  return ff_qsv_print_error(avctx, ret,
226  "Error initializing the MFX video decoder");
227 
228  q->frame_info = param->mfx.FrameInfo;
229 
230  return 0;
231 }
232 
233 static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
234 {
235  int ret;
236 
237  mfxBitstream bs = { 0 };
238 
239  if (avpkt->size) {
240  bs.Data = avpkt->data;
241  bs.DataLength = avpkt->size;
242  bs.MaxLength = bs.DataLength;
243  bs.TimeStamp = avpkt->pts;
244  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
245  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
246  } else
247  return AVERROR_INVALIDDATA;
248 
249 
250  if(!q->session) {
251  ret = qsv_decode_preinit(avctx, q, pix_fmt, param);
252  if (ret < 0)
253  return ret;
254  }
255 
256  ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
257  if (ret < 0)
258  return ret;
259 
260  param->mfx.CodecId = ret;
261  ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param);
262  if (MFX_ERR_MORE_DATA == ret) {
263  return AVERROR(EAGAIN);
264  }
265  if (ret < 0)
266  return ff_qsv_print_error(avctx, ret,
267  "Error decoding stream header");
268 
269  return 0;
270 }
271 
273 {
274  int ret;
275 
276  ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
277  if (ret < 0)
278  return ret;
279 
280  if (frame->frame->format == AV_PIX_FMT_QSV) {
281  frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
282  } else {
283  frame->surface.Info = q->frame_info;
284 
285  frame->surface.Data.PitchLow = frame->frame->linesize[0];
286  frame->surface.Data.Y = frame->frame->data[0];
287  frame->surface.Data.UV = frame->frame->data[1];
288  }
289 
290  if (q->frames_ctx.mids) {
291  ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
292  if (ret < 0)
293  return ret;
294 
295  frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
296  }
297  frame->surface.Data.ExtParam = &frame->ext_param;
298  frame->surface.Data.NumExtParam = 1;
299  frame->ext_param = (mfxExtBuffer*)&frame->dec_info;
300  frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
301  frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
302 
303  frame->used = 1;
304 
305  return 0;
306 }
307 
309 {
310  QSVFrame *cur = q->work_frames;
311  while (cur) {
312  if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
313  cur->used = 0;
314  av_frame_unref(cur->frame);
315  }
316  cur = cur->next;
317  }
318 }
319 
320 static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
321 {
322  QSVFrame *frame, **last;
323  int ret;
324 
326 
327  frame = q->work_frames;
328  last = &q->work_frames;
329  while (frame) {
330  if (!frame->used) {
331  ret = alloc_frame(avctx, q, frame);
332  if (ret < 0)
333  return ret;
334  *surf = &frame->surface;
335  return 0;
336  }
337 
338  last = &frame->next;
339  frame = frame->next;
340  }
341 
342  frame = av_mallocz(sizeof(*frame));
343  if (!frame)
344  return AVERROR(ENOMEM);
345  frame->frame = av_frame_alloc();
346  if (!frame->frame) {
347  av_freep(&frame);
348  return AVERROR(ENOMEM);
349  }
350  *last = frame;
351 
352  ret = alloc_frame(avctx, q, frame);
353  if (ret < 0)
354  return ret;
355 
356  *surf = &frame->surface;
357 
358  return 0;
359 }
360 
361 static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
362 {
363  QSVFrame *cur = q->work_frames;
364  while (cur) {
365  if (surf == &cur->surface)
366  return cur;
367  cur = cur->next;
368  }
369  return NULL;
370 }
371 
372 static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
373  AVFrame *frame, int *got_frame,
374  AVPacket *avpkt)
375 {
376  QSVFrame *out_frame;
377  mfxFrameSurface1 *insurf;
378  mfxFrameSurface1 *outsurf;
379  mfxSyncPoint *sync;
380  mfxBitstream bs = { { { 0 } } };
381  int ret;
382 
383  if (avpkt->size) {
384  bs.Data = avpkt->data;
385  bs.DataLength = avpkt->size;
386  bs.MaxLength = bs.DataLength;
387  bs.TimeStamp = avpkt->pts;
388  if (avctx->field_order == AV_FIELD_PROGRESSIVE)
389  bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
390  }
391 
392  sync = av_mallocz(sizeof(*sync));
393  if (!sync) {
394  av_freep(&sync);
395  return AVERROR(ENOMEM);
396  }
397 
398  do {
399  ret = get_surface(avctx, q, &insurf);
400  if (ret < 0) {
401  av_freep(&sync);
402  return ret;
403  }
404 
405  ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
406  insurf, &outsurf, sync);
407  if (ret == MFX_WRN_DEVICE_BUSY)
408  av_usleep(500);
409 
410  } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
411 
412  if (ret != MFX_ERR_NONE &&
413  ret != MFX_ERR_MORE_DATA &&
414  ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
415  ret != MFX_ERR_MORE_SURFACE) {
416  av_freep(&sync);
417  return ff_qsv_print_error(avctx, ret,
418  "Error during QSV decoding.");
419  }
420 
421  /* make sure we do not enter an infinite loop if the SDK
422  * did not consume any data and did not return anything */
423  if (!*sync && !bs.DataOffset) {
424  bs.DataOffset = avpkt->size;
425  ++q->zero_consume_run;
426  if (q->zero_consume_run > 1)
427  ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
428  } else if (!*sync && bs.DataOffset) {
429  ++q->buffered_count;
430  } else {
431  q->zero_consume_run = 0;
432  }
433 
434  if (*sync) {
435  QSVFrame *out_frame = find_frame(q, outsurf);
436 
437  if (!out_frame) {
438  av_log(avctx, AV_LOG_ERROR,
439  "The returned surface does not correspond to any frame\n");
440  av_freep(&sync);
441  return AVERROR_BUG;
442  }
443 
444  out_frame->queued = 1;
445  av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
446  av_fifo_generic_write(q->async_fifo, &sync, sizeof(sync), NULL);
447  } else {
448  av_freep(&sync);
449  }
450 
451  if ((qsv_fifo_size(q->async_fifo) >= q->async_depth) ||
452  (!avpkt->size && av_fifo_size(q->async_fifo))) {
453  AVFrame *src_frame;
454 
455  av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
456  av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
457  out_frame->queued = 0;
458 
459  if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
460  do {
461  ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
462  } while (ret == MFX_WRN_IN_EXECUTION);
463  }
464 
465  av_freep(&sync);
466 
467  src_frame = out_frame->frame;
468 
469  ret = av_frame_ref(frame, src_frame);
470  if (ret < 0)
471  return ret;
472 
473  outsurf = &out_frame->surface;
474 
475 #if FF_API_PKT_PTS
477  frame->pkt_pts = outsurf->Data.TimeStamp;
479 #endif
480  frame->pts = outsurf->Data.TimeStamp;
481 
482  frame->repeat_pict =
483  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
484  outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
485  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
486  frame->top_field_first =
487  outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
488  frame->interlaced_frame =
489  !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
490  frame->pict_type = ff_qsv_map_pictype(out_frame->dec_info.FrameType);
491  //Key frame is IDR frame is only suitable for H264. For HEVC, IRAPs are key frames.
492  if (avctx->codec_id == AV_CODEC_ID_H264)
493  frame->key_frame = !!(out_frame->dec_info.FrameType & MFX_FRAMETYPE_IDR);
494 
495  /* update the surface properties */
496  if (avctx->pix_fmt == AV_PIX_FMT_QSV)
497  ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;
498 
499  *got_frame = 1;
500  }
501 
502  return bs.DataOffset;
503 }
504 
506 {
507  QSVFrame *cur = q->work_frames;
508 
509  if (q->session)
510  MFXVideoDECODE_Close(q->session);
511 
512  while (q->async_fifo && av_fifo_size(q->async_fifo)) {
513  QSVFrame *out_frame;
514  mfxSyncPoint *sync;
515 
516  av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
517  av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
518 
519  av_freep(&sync);
520  }
521 
522  while (cur) {
523  q->work_frames = cur->next;
524  av_frame_free(&cur->frame);
525  av_freep(&cur);
526  cur = q->work_frames;
527  }
528 
530  q->async_fifo = NULL;
531 
532  if (q->internal_session)
533  MFXClose(q->internal_session);
534 
537 
538  return 0;
539 }
540 
542  AVFrame *frame, int *got_frame, AVPacket *pkt)
543 {
544  int ret;
545  mfxVideoParam param = { 0 };
547 
548  if (!pkt->size)
549  return qsv_decode(avctx, q, frame, got_frame, pkt);
550 
551  /* TODO: flush delayed frames on reinit */
552 
553  // sw_pix_fmt, coded_width/height should be set for ff_get_format(),
554  // assume sw_pix_fmt is NV12 and coded_width/height to be 1280x720,
555  // the assumption may be not corret but will be updated after header decoded if not true.
556  if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
557  pix_fmt = q->orig_pix_fmt;
558  if (!avctx->coded_width)
559  avctx->coded_width = 1280;
560  if (!avctx->coded_height)
561  avctx->coded_height = 720;
562 
563  ret = qsv_decode_header(avctx, q, pkt, pix_fmt, &param);
564 
565  if (ret >= 0 && (q->orig_pix_fmt != ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC) ||
566  avctx->coded_width != param.mfx.FrameInfo.Width ||
567  avctx->coded_height != param.mfx.FrameInfo.Height)) {
568  AVPacket zero_pkt = {0};
569 
570  if (q->buffered_count) {
571  q->reinit_flag = 1;
572  /* decode zero-size pkt to flush the buffered pkt before reinit */
573  q->buffered_count--;
574  return qsv_decode(avctx, q, frame, got_frame, &zero_pkt);
575  }
576  q->reinit_flag = 0;
577 
578  q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC);
579 
580  avctx->coded_width = param.mfx.FrameInfo.Width;
581  avctx->coded_height = param.mfx.FrameInfo.Height;
582 
583  ret = qsv_decode_preinit(avctx, q, pix_fmt, &param);
584  if (ret < 0)
585  goto reinit_fail;
586  q->initialized = 0;
587  }
588 
589  if (!q->initialized) {
590  ret = qsv_decode_init(avctx, q, &param);
591  if (ret < 0)
592  goto reinit_fail;
593  q->initialized = 1;
594  }
595 
596  return qsv_decode(avctx, q, frame, got_frame, pkt);
597 
598 reinit_fail:
599  q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE;
600  return ret;
601 }
602 
604 {
606  q->initialized = 0;
607 }
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwaccel.h:34
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1371
int iopattern
Definition: qsvdec.h:67
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
static unsigned int qsv_fifo_size(const AVFifoBuffer *fifo)
Definition: qsvdec.c:118
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1753
Memory handling functions.
This struct is allocated as AVHWFramesContext.hwctx.
Definition: hwcontext_qsv.h:42
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:437
int size
Definition: avcodec.h:1478
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
mfxExtBuffer ** ext_buffers
Definition: qsvdec.h:71
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
static AVPacket pkt
static int qsv_decode(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: qsvdec.c:372
int profile
profile
Definition: avcodec.h:2898
static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param)
Definition: qsvdec.c:210
AVBufferRef * hw_frames_ctx
Definition: qsv_internal.h:68
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
int ff_qsv_print_error(void *log_ctx, mfxStatus err, const char *error_string)
Definition: qsv.c:195
mfxExtDecodedFrameInfo dec_info
Definition: qsv_internal.h:58
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
void * hwaccel_context
Hardware accelerator context.
Definition: avcodec.h:2741
AVBufferRef * mids_buf
Definition: qsv_internal.h:75
The codec supports this format by some ad-hoc method.
Definition: avcodec.h:3452
int ff_qsv_decode_close(QSVContext *q)
Definition: qsvdec.c:505
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
enum AVPixelFormat pix_fmt
A hardware pixel format which the codec can use.
Definition: avcodec.h:3459
int queued
Definition: qsv_internal.h:61
uint8_t * data
Definition: avcodec.h:1477
void av_fifo_free(AVFifoBuffer *f)
Free an AVFifoBuffer.
Definition: fifo.c:55
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
int ff_qsv_init_session_frames(AVCodecContext *avctx, mfxSession *psession, QSVFramesContext *qsv_frames_ctx, const char *load_plugins, int opaque)
Definition: qsv.c:715
#define av_log(a,...)
static QSVFrame * find_frame(QSVContext *q, mfxFrameSurface1 *surf)
Definition: qsvdec.c:361
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
Definition: qsv.c:242
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
int reinit_flag
Definition: qsvdec.h:57
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
int iopattern
The IO pattern to use.
Definition: qsv.h:46
enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type)
Definition: qsv.c:271
int nb_ext_buffers
Definition: qsv.h:52
#define CHECK_MATCH(x)
void ff_qsv_decode_flush(AVCodecContext *avctx, QSVContext *q)
Definition: qsvdec.c:603
int buffered_count
Definition: qsvdec.h:56
int ff_qsv_print_warning(void *log_ctx, mfxStatus err, const char *warning_string)
Definition: qsv.c:205
mfxExtBuffer * ext_param
Definition: qsv_internal.h:59
int zero_consume_run
Definition: qsvdec.h:55
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
static unsigned int qsv_fifo_item_size(void)
Definition: qsvdec.c:113
int width
picture width / height.
Definition: avcodec.h:1738
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3262
int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id)
Definition: qsv.c:43
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
int level
level
Definition: avcodec.h:3018
mfxFrameSurface1 surface
Definition: qsv_internal.h:56
if(ret)
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
enum AVPixelFormat orig_pix_fmt
Definition: qsvdec.h:59
Libavcodec external API header.
enum AVCodecID codec_id
Definition: avcodec.h:1575
mfxSession internal_session
Definition: qsvdec.h:45
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
mfxExtBuffer ** ext_buffers
Extra buffers to pass to encoder or decoder initialization.
Definition: qsv.h:51
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
static int check_dec_param(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param_in)
Definition: qsvdec.c:123
static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
Definition: qsvdec.c:272
main external API structure.
Definition: avcodec.h:1565
uint8_t * data
The data buffer.
Definition: buffer.h:89
struct QSVFrame * next
Definition: qsv_internal.h:64
void * hwctx
The format-specific data, allocated and freed automatically along with this context.
Definition: hwcontext.h:161
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1964
static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
Definition: qsvdec.c:320
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:1753
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:123
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
Definition: pixfmt.h:222
char * load_plugins
Definition: qsvdec.h:69
This struct is used for communicating QSV parameters between libavcodec and the caller.
Definition: qsv.h:36
static void qsv_clear_unused_frames(QSVContext *q)
Definition: qsvdec.c:308
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
The codec supports this format via the hw_frames_ctx interface.
Definition: avcodec.h:3436
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:396
A reference to a data buffer.
Definition: buffer.h:81
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
common internal api header.
common internal and external API header
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session, AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
Definition: qsvdec.c:57
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
int ff_qsv_init_internal_session(AVCodecContext *avctx, mfxSession *session, const char *load_plugins)
Definition: qsv.c:351
mfxFrameInfo frame_info
Definition: qsvdec.h:61
pixel format definitions
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:447
QSVFramesContext frames_ctx
Definition: qsvdec.h:47
static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
Definition: qsvdec.c:150
mfxSession session
Definition: qsvdec.h:41
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
AVFifoBuffer * async_fifo
Definition: qsvdec.h:54
int initialized
Definition: qsvdec.h:63
enum AVFieldOrder ff_qsv_map_picstruct(int mfx_pic_struct)
Definition: qsv.c:253
#define av_freep(p)
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2222
An API-specific header for AV_HWDEVICE_TYPE_QSV.
AVFrame * frame
Definition: qsv_internal.h:55
const AVCodecHWConfigInternal * ff_qsv_hw_configs[]
Definition: qsvdec.c:44
int async_depth
Definition: qsvdec.h:66
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:3314
int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: qsvdec.c:541
QSVFrame * work_frames
a linked list of frames currently being used by QSV
Definition: qsvdec.h:52
static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt, enum AVPixelFormat pix_fmt, mfxVideoParam *param)
Definition: qsvdec.c:233
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1454
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1176
mfxSession session
If non-NULL, the session to use for encoding or decoding.
Definition: qsv.h:41
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1470
int ff_qsv_init_session_device(AVCodecContext *avctx, mfxSession *psession, AVBufferRef *device_ref, const char *load_plugins)
Definition: qsv.c:645
enum AVPixelFormat ff_qsv_map_fourcc(uint32_t fourcc)
Definition: qsv.c:215
int nb_ext_buffers
Definition: qsvdec.h:72