FFmpeg
vf_stack_qsv.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 /**
20  * @file
21  * Hardware accelerated hstack, vstack and xstack filters based on Intel Quick Sync Video VPP
22  */
23 
24 #include "config_components.h"
25 
26 #include "libavutil/mem.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/common.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/eval.h"
31 #include "libavutil/hwcontext.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/avassert.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/parseutils.h"
37 
38 #include "filters.h"
39 #include "formats.h"
40 #include "video.h"
41 
42 #include "framesync.h"
43 #include "qsvvpp.h"
44 
45 #define HSTACK_NAME "hstack_qsv"
46 #define VSTACK_NAME "vstack_qsv"
47 #define XSTACK_NAME "xstack_qsv"
48 #define HWContext QSVVPPContext
49 #define StackHWContext StackQSVContext
50 #include "stack_internal.h"
51 
52 typedef struct StackQSVContext {
54 
56  mfxExtVPPComposite comp_conf;
58 
/*
 * Convert a normalized [0, 1] RGB triple to limited-range YUV
 * (BT.709 coefficients), scaled to the code range of the given bit
 * depth. The float-to-int conversion truncates rather than rounds.
 */
static void rgb2yuv(float r, float g, float b, int *y, int *u, int *v, int depth)
{
    const int max_val = (1 << depth) - 1;

    *y = ((0.21260*219.0/255.0) * r + (0.71520*219.0/255.0) * g +
          (0.07220*219.0/255.0) * b) * max_val;
    *u = (-(0.11457*224.0/255.0) * r - (0.38543*224.0/255.0) * g +
          (0.50000*224.0/255.0) * b + 0.5) * max_val;
    *v = ((0.50000*224.0/255.0) * r - (0.45415*224.0/255.0) * g -
          (0.04585*224.0/255.0) * b + 0.5) * max_val;
}
68 
70 {
71  AVFilterContext *ctx = fs->parent;
72  QSVVPPContext *qsv = fs->opaque;
73  AVFrame *frame = NULL, *propref = NULL;
74  int ret = 0;
75 
76  for (int i = 0; i < ctx->nb_inputs; i++) {
78  if (ret == 0) {
79  if (i == 0)
80  propref = frame;
81  ret = ff_qsvvpp_filter_frame(qsv, ctx->inputs[i], frame, propref);
82  }
83  if (ret < 0 && ret != AVERROR(EAGAIN))
84  break;
85  }
86 
87  if (ret == 0 && qsv->got_frame == 0) {
88  for (int i = 0; i < ctx->nb_inputs; i++)
89  FF_FILTER_FORWARD_WANTED(ctx->outputs[0], ctx->inputs[i]);
90 
92  }
93 
94  return ret;
95 }
96 
97 static int config_output(AVFilterLink *outlink)
98 {
99  AVFilterContext *ctx = outlink->src;
100  StackQSVContext *sctx = ctx->priv;
101  AVFilterLink *inlink0 = ctx->inputs[0];
102  FilterLink *inl0 = ff_filter_link(inlink0);
103  enum AVPixelFormat in_format;
104  int depth = 8, ret;
105  mfxVPPCompInputStream *is = sctx->comp_conf.InputStream;
106 
107  if (inlink0->format == AV_PIX_FMT_QSV) {
108  if (!inl0->hw_frames_ctx || !inl0->hw_frames_ctx->data)
109  return AVERROR(EINVAL);
110 
111  in_format = ((AVHWFramesContext*)inl0->hw_frames_ctx->data)->sw_format;
112  } else
113  in_format = inlink0->format;
114 
115  sctx->qsv_param.out_sw_format = in_format;
116 
117  for (int i = 1; i < sctx->base.nb_inputs; i++) {
118  AVFilterLink *inlink = ctx->inputs[i];
120 
121  if (inlink0->format == AV_PIX_FMT_QSV) {
124 
125  if (inlink0->format != inlink->format) {
126  av_log(ctx, AV_LOG_ERROR, "Mixing hardware and software pixel formats is not supported.\n");
127 
128  return AVERROR(EINVAL);
129  } else if (hwfc0->device_ctx != hwfc->device_ctx) {
130  av_log(ctx, AV_LOG_ERROR, "Inputs with different underlying QSV devices are forbidden.\n");
131 
132  return AVERROR(EINVAL);
133  }
134  }
135  }
136 
137  if (in_format == AV_PIX_FMT_P010)
138  depth = 10;
139 
140  if (sctx->base.fillcolor_enable) {
141  int Y, U, V;
142 
143  rgb2yuv(sctx->base.fillcolor[0] / 255.0, sctx->base.fillcolor[1] / 255.0,
144  sctx->base.fillcolor[2] / 255.0, &Y, &U, &V, depth);
145  sctx->comp_conf.Y = Y;
146  sctx->comp_conf.U = U;
147  sctx->comp_conf.V = V;
148  }
149 
150  ret = config_comm_output(outlink);
151  if (ret < 0)
152  return ret;
153 
154  for (int i = 0; i < sctx->base.nb_inputs; i++) {
155  is[i].DstX = sctx->base.regions[i].x;
156  is[i].DstY = sctx->base.regions[i].y;
157  is[i].DstW = sctx->base.regions[i].width;
158  is[i].DstH = sctx->base.regions[i].height;
159  is[i].GlobalAlpha = 255;
160  is[i].GlobalAlphaEnable = 0;
161  is[i].PixelAlphaEnable = 0;
162  }
163 
164  return ff_qsvvpp_init(ctx, &sctx->qsv_param);
165 }
166 
167 /*
168  * Callback for qsvvpp
169  * @Note: qsvvpp composition does not generate PTS for result frame.
170  * so we assign the PTS from framesync to the output frame.
171  */
172 
174 {
175  StackQSVContext *sctx = outlink->src->priv;
176 
177  frame->pts = av_rescale_q(sctx->base.fs.pts,
178  sctx->base.fs.time_base, outlink->time_base);
179  return ff_filter_frame(outlink, frame);
180 }
181 
182 
184 {
185  StackQSVContext *sctx = ctx->priv;
186  int ret;
187 
188  ret = stack_init(ctx);
189  if (ret)
190  return ret;
191 
192  /* fill composite config */
193  sctx->comp_conf.Header.BufferId = MFX_EXTBUFF_VPP_COMPOSITE;
194  sctx->comp_conf.Header.BufferSz = sizeof(sctx->comp_conf);
195  sctx->comp_conf.NumInputStream = sctx->base.nb_inputs;
196  sctx->comp_conf.InputStream = av_calloc(sctx->base.nb_inputs,
197  sizeof(*sctx->comp_conf.InputStream));
198  if (!sctx->comp_conf.InputStream)
199  return AVERROR(ENOMEM);
200 
201  /* initialize QSVVPP params */
203  sctx->qsv_param.ext_buf = av_mallocz(sizeof(*sctx->qsv_param.ext_buf));
204 
205  if (!sctx->qsv_param.ext_buf)
206  return AVERROR(ENOMEM);
207 
208  sctx->qsv_param.ext_buf[0] = (mfxExtBuffer *)&sctx->comp_conf;
209  sctx->qsv_param.num_ext_buf = 1;
210  sctx->qsv_param.num_crop = 0;
211 
212  return 0;
213 }
214 
216 {
217  StackQSVContext *sctx = ctx->priv;
218 
219  stack_uninit(ctx);
220 
222  av_freep(&sctx->comp_conf.InputStream);
223  av_freep(&sctx->qsv_param.ext_buf);
224 }
225 
226 static const enum AVPixelFormat qsv_stack_pix_fmts[] = {
231 };
232 
233 #include "stack_internal.c"
234 
#if CONFIG_HSTACK_QSV_FILTER

/* Instantiate the hstack_qsv filter (options + AVFilter definition). */
DEFINE_HSTACK_OPTIONS(qsv);
DEFINE_STACK_FILTER(hstack, qsv, "Quick Sync Video", AVFILTER_FLAG_HWDEVICE);

#endif
241 
#if CONFIG_VSTACK_QSV_FILTER

/* Instantiate the vstack_qsv filter (options + AVFilter definition). */
DEFINE_VSTACK_OPTIONS(qsv);
DEFINE_STACK_FILTER(vstack, qsv, "Quick Sync Video", AVFILTER_FLAG_HWDEVICE);

#endif
248 
#if CONFIG_XSTACK_QSV_FILTER

/* Instantiate the xstack_qsv filter (options + AVFilter definition). */
DEFINE_XSTACK_OPTIONS(qsv);
DEFINE_STACK_FILTER(xstack, qsv, "Quick Sync Video", AVFILTER_FLAG_HWDEVICE);

#endif
qsv_stack_uninit
static av_cold void qsv_stack_uninit(AVFilterContext *ctx)
Definition: vf_stack_qsv.c:215
StackItemRegion::x
int x
Definition: stack_internal.h:29
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
StackItemRegion::y
int y
Definition: stack_internal.h:30
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:251
QSVVPPParam::out_sw_format
enum AVPixelFormat out_sw_format
Definition: qsvvpp.h:120
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1061
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
ff_framesync_get_frame
int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe, unsigned get)
Get the current frame in an input.
Definition: framesync.c:269
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
FFFrameSync::time_base
AVRational time_base
Time base for the output events.
Definition: framesync.h:184
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
pixdesc.h
b
#define b
Definition: input.c:41
mathematics.h
process_frame
static int process_frame(FFFrameSync *fs)
Definition: vf_stack_qsv.c:69
FFFrameSync
Frame sync structure.
Definition: framesync.h:168
qsv_stack_pix_fmts
static enum AVPixelFormat qsv_stack_pix_fmts[]
Definition: vf_stack_qsv.c:226
video.h
StackItemRegion::height
int height
Definition: stack_internal.h:32
config_output
static int config_output(AVFilterLink *outlink)
Definition: vf_stack_qsv.c:97
formats.h
StackBaseContext::fs
FFFrameSync fs
Definition: stack_internal.h:38
AVFilterContext::priv
void * priv
private data for use by the filter
Definition: avfilter.h:472
qsvvpp.h
StackItemRegion::width
int width
Definition: stack_internal.h:31
DEFINE_STACK_FILTER
#define DEFINE_STACK_FILTER(category, api, capi, filter_flags)
Definition: stack_internal.c:338
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
av_cold
#define av_cold
Definition: attributes.h:90
stack_uninit
static av_cold void stack_uninit(AVFilterContext *avctx)
Definition: stack_internal.c:288
g
const char * g
Definition: vf_curves.c:128
filters.h
DEFINE_HSTACK_OPTIONS
#define DEFINE_HSTACK_OPTIONS(api)
Definition: stack_internal.c:314
ctx
AVFormatContext * ctx
Definition: movenc.c:49
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
StackBaseContext::fillcolor
uint8_t fillcolor[4]
Definition: stack_internal.h:40
QSVVPPContext
Definition: qsvvpp.h:63
ff_qsvvpp_close
int ff_qsvvpp_close(AVFilterContext *avctx)
Definition: qsvvpp.c:940
NULL
#define NULL
Definition: coverity.c:32
fs
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:200
QSVVPPParam::num_crop
int num_crop
Definition: qsvvpp.h:123
QSVVPPParam
Definition: qsvvpp.h:110
V
#define V
Definition: avdct.c:31
parseutils.h
DEFINE_XSTACK_OPTIONS
#define DEFINE_XSTACK_OPTIONS(api)
Definition: stack_internal.c:328
AV_PIX_FMT_QSV
@ AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
Definition: pixfmt.h:247
QSVVPPContext::got_frame
int got_frame
Definition: qsvvpp.h:95
ff_filter_link
static FilterLink * ff_filter_link(AVFilterLink *link)
Definition: filters.h:197
StackQSVContext
Definition: vf_stack_qsv.c:52
FFFrameSync::pts
int64_t pts
Timestamp of the current event.
Definition: framesync.h:189
stack_internal.h
eval.h
ff_qsvvpp_filter_frame
int ff_qsvvpp_filter_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref, AVFrame *propref)
Definition: qsvvpp.c:964
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:173
DEFINE_VSTACK_OPTIONS
#define DEFINE_VSTACK_OPTIONS(api)
Definition: stack_internal.c:321
FF_FILTER_FORWARD_WANTED
FF_FILTER_FORWARD_WANTED(outlink, inlink)
Y
#define Y
Definition: boxblur.h:37
StackBaseContext::fillcolor_enable
int fillcolor_enable
Definition: stack_internal.h:41
StackBaseContext
Definition: stack_internal.h:35
StackBaseContext::nb_inputs
int nb_inputs
Definition: stack_internal.h:45
StackBaseContext::regions
StackItemRegion * regions
Definition: stack_internal.h:42
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
common.h
QSVVPPParam::num_ext_buf
int num_ext_buf
Definition: qsvvpp.h:116
config_comm_output
static int config_comm_output(AVFilterLink *outlink)
Definition: stack_internal.c:53
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
QSVVPPParam::filter_frame
int(* filter_frame)(AVFilterLink *outlink, AVFrame *frame)
Definition: qsvvpp.h:112
ff_qsvvpp_init
int ff_qsvvpp_init(AVFilterContext *avctx, QSVVPPParam *param)
Definition: qsvvpp.c:748
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:115
filter_callback
static int filter_callback(AVFilterLink *outlink, AVFrame *frame)
Definition: vf_stack_qsv.c:173
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:96
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVHWFramesContext::device_ctx
AVHWDeviceContext * device_ctx
The parent AVHWDeviceContext.
Definition: hwcontext.h:134
U
#define U(x)
Definition: vpx_arith.h:37
rgb2yuv
static void rgb2yuv(float r, float g, float b, int *y, int *u, int *v, int depth)
Definition: vf_stack_qsv.c:59
framesync.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
stack_init
static int stack_init(AVFilterContext *avctx)
Definition: stack_internal.c:225
AVFilterContext
An instance of a filter.
Definition: avfilter.h:457
AV_PIX_FMT_P010
#define AV_PIX_FMT_P010
Definition: pixfmt.h:552
qsv_stack_init
static int qsv_stack_init(AVFilterContext *ctx)
Definition: vf_stack_qsv.c:183
mem.h
StackQSVContext::base
StackBaseContext base
Definition: vf_stack_qsv.c:53
stack_internal.c
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
imgutils.h
hwcontext.h
StackQSVContext::comp_conf
mfxExtVPPComposite comp_conf
Definition: vf_stack_qsv.c:56
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
avstring.h
StackQSVContext::qsv_param
QSVVPPParam qsv_param
Definition: vf_stack_qsv.c:55
QSVVPPParam::ext_buf
mfxExtBuffer ** ext_buf
Definition: qsvvpp.h:117