FFmpeg
qsv_h264.c
/*
 * Intel MediaSDK QSV based H.264 decoder
 *
 * copyright (c) 2013 Luca Barbato
 * copyright (c) 2015 Anton Khirnov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <string.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "internal.h"
#include "qsv_internal.h"
#include "qsv.h"

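/*
 * Overall flow: incoming AVPackets are referenced into packet_fifo, converted
 * from length-prefixed MP4/AVCC framing to Annex B start codes with the
 * h264_mp4toannexb bitstream filter, and handed to the MediaSDK session
 * wrapped by QSVContext. A separate H.264 parser is run over the filtered
 * data only to detect the stream parameters needed to set up that session.
 */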
typedef struct QSVH264Context {
    AVClass *class;
    QSVContext qsv;

    // the internal parser and codec context for parsing the data
    AVCodecParserContext *parser;
    AVCodecContext *avctx_internal;
    enum AVPixelFormat orig_pix_fmt;

    // the filter for converting to Annex B
    AVBitStreamFilterContext *bsf;

    AVFifoBuffer *packet_fifo;

    AVPacket input_ref;
    AVPacket pkt_filtered;
    uint8_t *filtered_data;
} QSVH264Context;

static void qsv_clear_buffers(QSVH264Context *s)
{
    AVPacket pkt;
    while (av_fifo_size(s->packet_fifo) >= sizeof(pkt)) {
        av_fifo_generic_read(s->packet_fifo, &pkt, sizeof(pkt), NULL);
        av_packet_unref(&pkt);
    }

    if (s->filtered_data != s->input_ref.data)
        av_freep(&s->filtered_data);
    s->filtered_data = NULL;
    av_packet_unref(&s->input_ref);
}

static av_cold int qsv_decode_close(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;

    ff_qsv_close(&s->qsv);

    qsv_clear_buffers(s);

    av_fifo_free(s->packet_fifo);

    av_bitstream_filter_close(s->bsf);
    av_parser_close(s->parser);
    avcodec_free_context(&s->avctx_internal);

    return 0;
}

static av_cold int qsv_decode_init(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;
    int ret;

    s->orig_pix_fmt = AV_PIX_FMT_NONE;

    s->packet_fifo = av_fifo_alloc(sizeof(AVPacket));
    if (!s->packet_fifo) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->bsf = av_bitstream_filter_init("h264_mp4toannexb");
    if (!s->bsf) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->avctx_internal = avcodec_alloc_context3(NULL);
    if (!s->avctx_internal) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (avctx->extradata) {
        s->avctx_internal->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!s->avctx_internal->extradata) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(s->avctx_internal->extradata, avctx->extradata,
               avctx->extradata_size);
        s->avctx_internal->extradata_size = avctx->extradata_size;
    }

    s->parser = av_parser_init(AV_CODEC_ID_H264);
    if (!s->parser) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    s->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;

    s->qsv.iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;

    return 0;
fail:
    qsv_decode_close(avctx);
    return ret;
}
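/*
 * Note that no MediaSDK session is created in qsv_decode_init(): only the
 * CPU-side helpers (packet FIFO, Annex B bitstream filter, parser and internal
 * codec context) are set up here. The session itself is opened lazily in
 * qsv_process_data() below, once the stream parameters are known.
 */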

static int qsv_process_data(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *pkt)
{
    QSVH264Context *s = avctx->priv_data;
    uint8_t *dummy_data;
    int dummy_size;
    int ret;

    /* we assume the packets are already split properly and want
     * just the codec parameters here */
    av_parser_parse2(s->parser, s->avctx_internal,
                     &dummy_data, &dummy_size,
                     pkt->data, pkt->size, pkt->pts, pkt->dts,
                     pkt->pos);

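    /*
     * A change in the pixel format or the coded dimensions reported by the
     * parser means the MediaSDK session has to be (re)created: the output
     * pixel format is renegotiated through ff_get_format() and the session is
     * set up with ff_qsv_init(), optionally reusing one provided by the caller
     * through avctx->hwaccel_context.
     */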
    /* TODO: flush delayed frames on reinit */
    if (s->parser->format       != s->orig_pix_fmt    ||
        s->parser->coded_width  != avctx->coded_width ||
        s->parser->coded_height != avctx->coded_height) {
        mfxSession session = NULL;

        enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                           AV_PIX_FMT_NONE,
                                           AV_PIX_FMT_NONE };
        enum AVPixelFormat qsv_format;

        qsv_format = ff_qsv_map_pixfmt(s->parser->format);
        if (qsv_format < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Only 8-bit YUV420 streams are supported.\n");
            ret = AVERROR(ENOSYS);
            goto reinit_fail;
        }

        s->orig_pix_fmt     = s->parser->format;
        avctx->pix_fmt      = pix_fmts[1] = qsv_format;
        avctx->width        = s->parser->width;
        avctx->height       = s->parser->height;
        avctx->coded_width  = s->parser->coded_width;
        avctx->coded_height = s->parser->coded_height;
        avctx->level        = s->avctx_internal->level;
        avctx->profile      = s->avctx_internal->profile;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            goto reinit_fail;

        avctx->pix_fmt = ret;

        if (avctx->hwaccel_context) {
            AVQSVContext *user_ctx = avctx->hwaccel_context;
            session               = user_ctx->session;
            s->qsv.iopattern      = user_ctx->iopattern;
            s->qsv.ext_buffers    = user_ctx->ext_buffers;
            s->qsv.nb_ext_buffers = user_ctx->nb_ext_buffers;
        }

        ret = ff_qsv_init(avctx, &s->qsv, session);
        if (ret < 0)
            goto reinit_fail;
    }

    return ff_qsv_decode(avctx, &s->qsv, frame, got_frame, &s->pkt_filtered);

reinit_fail:
    s->orig_pix_fmt = s->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
    return ret;
}

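/*
 * Decode callback: reference the incoming packet into the FIFO, then keep
 * pulling buffered packets, converting each one to Annex B and feeding it to
 * qsv_process_data() until a frame comes out or the buffered data runs dry.
 * qsv_process_data() returns the number of bytes consumed, which is used to
 * advance pkt_filtered across calls.
 */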
static int qsv_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    QSVH264Context *s = avctx->priv_data;
    AVFrame *frame = data;
    int ret;

    /* buffer the input packet */
    if (avpkt->size) {
        AVPacket input_ref = { 0 };

        if (av_fifo_space(s->packet_fifo) < sizeof(input_ref)) {
            ret = av_fifo_realloc2(s->packet_fifo,
                                   av_fifo_size(s->packet_fifo) + sizeof(input_ref));
            if (ret < 0)
                return ret;
        }

        ret = av_packet_ref(&input_ref, avpkt);
        if (ret < 0)
            return ret;
        av_fifo_generic_write(s->packet_fifo, &input_ref, sizeof(input_ref), NULL);
    }

    /* process buffered data */
    while (!*got_frame) {
        /* prepare the input data -- convert to Annex B if needed */
        if (s->pkt_filtered.size <= 0) {
            int size;

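            /*
             * The FIFO is empty. If this call delivered a new packet it has
             * already been buffered above, so just report it as consumed;
             * otherwise this is a drain call with an empty packet, which is
             * passed on to ff_qsv_decode() so it can return delayed frames.
             */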
            /* no more data */
            if (av_fifo_size(s->packet_fifo) < sizeof(AVPacket))
                return avpkt->size ? avpkt->size : ff_qsv_decode(avctx, &s->qsv, frame, got_frame, avpkt);

            if (s->filtered_data != s->input_ref.data)
                av_freep(&s->filtered_data);
            s->filtered_data = NULL;
            av_packet_unref(&s->input_ref);

            av_fifo_generic_read(s->packet_fifo, &s->input_ref, sizeof(s->input_ref), NULL);
            ret = av_bitstream_filter_filter(s->bsf, avctx, NULL,
                                             &s->filtered_data, &size,
                                             s->input_ref.data, s->input_ref.size, 0);
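            /*
             * If the bitstream filter fails, fall back to feeding the packet
             * data through unchanged; filtered_data is compared with
             * input_ref.data so that only buffers actually allocated by the
             * filter are freed later.
             */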
            if (ret < 0) {
                s->filtered_data = s->input_ref.data;
                size             = s->input_ref.size;
            }
            s->pkt_filtered      = s->input_ref;
            s->pkt_filtered.data = s->filtered_data;
            s->pkt_filtered.size = size;
        }

        ret = qsv_process_data(avctx, frame, got_frame, &s->pkt_filtered);
        if (ret < 0)
            return ret;

        s->pkt_filtered.size -= ret;
        s->pkt_filtered.data += ret;
    }

    return avpkt->size;
}

static void qsv_decode_flush(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;

    qsv_clear_buffers(s);
    s->orig_pix_fmt = AV_PIX_FMT_NONE;
}

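/*
 * Stub hwaccel descriptor: it installs no callbacks and only advertises
 * AV_PIX_FMT_QSV as a hardware pixel format available for H.264.
 */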
AVHWAccel ff_h264_qsv_hwaccel = {
    .name           = "h264_qsv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_QSV,
};

#define OFFSET(x) offsetof(QSVH264Context, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 0, INT_MAX, VD },
    { NULL },
};

static const AVClass class = {
    .class_name = "h264_qsv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_h264_qsv_decoder = {
    .name           = "h264_qsv",
    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (Intel Quick Sync Video acceleration)"),
    .priv_data_size = sizeof(QSVH264Context),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .init           = qsv_decode_init,
    .decode         = qsv_decode_frame,
    .flush          = qsv_decode_flush,
    .close          = qsv_decode_close,
    .capabilities   = CODEC_CAP_DELAY,
    .priv_class     = &class,
};
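
A minimal usage sketch, not part of this file: one way an application of the same libavcodec era might open this decoder by name and pass the async_depth option declared above. The helper name open_h264_qsv and the depth value of 4 are illustrative, and error handling is reduced to the essentials.

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* Assumes avcodec_register_all() has already been called. */
static AVCodecContext *open_h264_qsv(void)
{
    AVCodec *codec = avcodec_find_decoder_by_name("h264_qsv");
    AVCodecContext *ctx;
    AVDictionary *opts = NULL;

    if (!codec)
        return NULL;

    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;

    /* "async_depth" is the private AVOption declared in this file. */
    av_dict_set(&opts, "async_depth", "4", 0);

    if (avcodec_open2(ctx, codec, &opts) < 0)
        avcodec_free_context(&ctx);

    av_dict_free(&opts);
    return ctx;
}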