FFmpeg
vf_overlay_qsv.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 /**
20  * @file
21  * A hardware accelerated overlay filter based on Intel Quick Sync Video VPP
22  */
23 
24 #include "libavutil/opt.h"
25 #include "libavutil/common.h"
26 #include "libavutil/pixdesc.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/mathematics.h"
33 
34 #include "internal.h"
35 #include "avfilter.h"
36 #include "formats.h"
37 #include "video.h"
38 
39 #include "framesync.h"
40 #include "qsvvpp.h"
41 
42 #define MAIN 0
43 #define OVERLAY 1
44 
45 #define OFFSET(x) offsetof(QSVOverlayContext, x)
46 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)
47 
/* Indices into QSVOverlayContext.var_values[].  The order MUST stay in
 * sync with var_names[] below: av_expr_parse() resolves a name to its
 * index in var_names[], and av_expr_eval() reads var_values[] by that
 * same index. */
enum var_name {
    VAR_MAIN_iW,     VAR_MW,    /* input width of the main layer        */
    VAR_MAIN_iH,     VAR_MH,    /* input height of the main layer       */
    VAR_OVERLAY_iW,             /* input width of the overlay layer     */
    VAR_OVERLAY_iH,             /* input height of the overlay layer    */
    VAR_OVERLAY_X,   VAR_OX,    /* x position of the overlay in main    */
    VAR_OVERLAY_Y,   VAR_OY,    /* y position of the overlay in main    */
    VAR_OVERLAY_W,   VAR_OW,    /* output width of the overlay layer    */
    VAR_OVERLAY_H,   VAR_OH,    /* output height of the overlay layer   */
    VAR_VARS_NB                 /* number of variables                  */
};
59 
60 typedef struct QSVOverlayContext {
61  const AVClass *class;
62 
66  mfxExtVPPComposite comp_conf;
68 
71 
73 
/* Variable names usable in the x/y/w/h option expressions.
 * The table order must stay in sync with enum var_name above. */
static const char *const var_names[] = {
    "main_w",     "W",   /* input width of the main layer */
    "main_h",     "H",   /* input height of the main layer */
    "overlay_iw",        /* input width of the overlay layer */
    "overlay_ih",        /* input height of the overlay layer */
    "overlay_x",  "x",   /* x position of the overlay layer inside of main */
    "overlay_y",  "y",   /* y position of the overlay layer inside of main */
    "overlay_w",  "w",   /* output width of overlay layer */
    "overlay_h",  "h",   /* output height of overlay layer */
    NULL
};
85 
86 static const AVOption overlay_qsv_options[] = {
87  { "x", "Overlay x position", OFFSET(overlay_ox), AV_OPT_TYPE_STRING, { .str="0"}, 0, 255, .flags = FLAGS},
88  { "y", "Overlay y position", OFFSET(overlay_oy), AV_OPT_TYPE_STRING, { .str="0"}, 0, 255, .flags = FLAGS},
89  { "w", "Overlay width", OFFSET(overlay_ow), AV_OPT_TYPE_STRING, { .str="overlay_iw"}, 0, 255, .flags = FLAGS},
90  { "h", "Overlay height", OFFSET(overlay_oh), AV_OPT_TYPE_STRING, { .str="overlay_ih*w/overlay_iw"}, 0, 255, .flags = FLAGS},
91  { "alpha", "Overlay global alpha", OFFSET(overlay_alpha), AV_OPT_TYPE_INT, { .i64 = 255}, 0, 255, .flags = FLAGS},
92  { "eof_action", "Action to take when encountering EOF from secondary input ",
94  EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" },
95  { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
96  { "endall", "End both streams.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
97  { "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, "eof_action" },
98  { "shortest", "force termination when the shortest input terminates", OFFSET(fs.opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
99  { "repeatlast", "repeat overlay of the last overlay frame", OFFSET(fs.opt_repeatlast), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
100  { NULL }
101 };
102 
104 
106 {
107  QSVOverlayContext *vpp = ctx->priv;
108  double *var_values = vpp->var_values;
109  int ret = 0;
110  AVExpr *ox_expr = NULL, *oy_expr = NULL;
111  AVExpr *ow_expr = NULL, *oh_expr = NULL;
112 
113 #define PASS_EXPR(e, s) {\
114  ret = av_expr_parse(&e, s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \
115  if (ret < 0) {\
116  av_log(ctx, AV_LOG_ERROR, "Error when passing '%s'.\n", s);\
117  goto release;\
118  }\
119 }
120  PASS_EXPR(ox_expr, vpp->overlay_ox);
121  PASS_EXPR(oy_expr, vpp->overlay_oy);
122  PASS_EXPR(ow_expr, vpp->overlay_ow);
123  PASS_EXPR(oh_expr, vpp->overlay_oh);
124 #undef PASS_EXPR
125 
126  var_values[VAR_OVERLAY_W] =
127  var_values[VAR_OW] = av_expr_eval(ow_expr, var_values, NULL);
128  var_values[VAR_OVERLAY_H] =
129  var_values[VAR_OH] = av_expr_eval(oh_expr, var_values, NULL);
130 
131  /* calc again in case ow is relative to oh */
132  var_values[VAR_OVERLAY_W] =
133  var_values[VAR_OW] = av_expr_eval(ow_expr, var_values, NULL);
134 
135  var_values[VAR_OVERLAY_X] =
136  var_values[VAR_OX] = av_expr_eval(ox_expr, var_values, NULL);
137  var_values[VAR_OVERLAY_Y] =
138  var_values[VAR_OY] = av_expr_eval(oy_expr, var_values, NULL);
139 
140  /* calc again in case ox is relative to oy */
141  var_values[VAR_OVERLAY_X] =
142  var_values[VAR_OX] = av_expr_eval(ox_expr, var_values, NULL);
143 
144  /* calc overlay_w and overlay_h again incase relative to ox,oy */
145  var_values[VAR_OVERLAY_W] =
146  var_values[VAR_OW] = av_expr_eval(ow_expr, var_values, NULL);
147  var_values[VAR_OVERLAY_H] =
148  var_values[VAR_OH] = av_expr_eval(oh_expr, var_values, NULL);
149  var_values[VAR_OVERLAY_W] =
150  var_values[VAR_OW] = av_expr_eval(ow_expr, var_values, NULL);
151 
152 release:
153  av_expr_free(ox_expr);
154  av_expr_free(oy_expr);
155  av_expr_free(ow_expr);
156  av_expr_free(oh_expr);
157 
158  return ret;
159 }
160 
162 {
163  enum AVPixelFormat pix_fmt = link->format;
164  const AVPixFmtDescriptor *desc;
165  AVHWFramesContext *fctx;
166 
167  if (link->format == AV_PIX_FMT_QSV) {
168  fctx = (AVHWFramesContext *)link->hw_frames_ctx->data;
169  pix_fmt = fctx->sw_format;
170  }
171 
172  desc = av_pix_fmt_desc_get(pix_fmt);
173  if (!desc)
174  return 0;
175 
176  return !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
177 }
178 
180 {
181  AVFilterContext *ctx = inlink->dst;
182  QSVOverlayContext *vpp = ctx->priv;
183  mfxVPPCompInputStream *st = &vpp->comp_conf.InputStream[0];
184 
185  av_log(ctx, AV_LOG_DEBUG, "Input[%d] is of %s.\n", FF_INLINK_IDX(inlink),
186  av_get_pix_fmt_name(inlink->format));
187 
188  vpp->var_values[VAR_MAIN_iW] =
189  vpp->var_values[VAR_MW] = inlink->w;
190  vpp->var_values[VAR_MAIN_iH] =
191  vpp->var_values[VAR_MH] = inlink->h;
192 
193  st->DstX = 0;
194  st->DstY = 0;
195  st->DstW = inlink->w;
196  st->DstH = inlink->h;
197  st->GlobalAlphaEnable = 0;
198  st->PixelAlphaEnable = 0;
199 
200  return 0;
201 }
202 
204 {
205  AVFilterContext *ctx = inlink->dst;
206  QSVOverlayContext *vpp = ctx->priv;
207  mfxVPPCompInputStream *st = &vpp->comp_conf.InputStream[1];
208  int ret = 0;
209 
210  av_log(ctx, AV_LOG_DEBUG, "Input[%d] is of %s.\n", FF_INLINK_IDX(inlink),
211  av_get_pix_fmt_name(inlink->format));
212 
213  vpp->var_values[VAR_OVERLAY_iW] = inlink->w;
214  vpp->var_values[VAR_OVERLAY_iH] = inlink->h;
215 
216  ret = eval_expr(ctx);
217  if (ret < 0)
218  return ret;
219 
220  st->DstX = vpp->var_values[VAR_OX];
221  st->DstY = vpp->var_values[VAR_OY];
222  st->DstW = vpp->var_values[VAR_OW];
223  st->DstH = vpp->var_values[VAR_OH];
224  st->GlobalAlpha = vpp->overlay_alpha;
225  st->GlobalAlphaEnable = (st->GlobalAlpha < 255);
226  st->PixelAlphaEnable = have_alpha_planar(inlink);
227 
228  return 0;
229 }
230 
232 {
233  AVFilterContext *ctx = fs->parent;
234  QSVOverlayContext *s = fs->opaque;
235  AVFrame *frame = NULL;
236  int ret = 0, i;
237 
238  for (i = 0; i < ctx->nb_inputs; i++) {
239  ret = ff_framesync_get_frame(fs, i, &frame, 0);
240  if (ret == 0)
241  ret = ff_qsvvpp_filter_frame(s->qsv, ctx->inputs[i], frame);
242  if (ret < 0 && ret != AVERROR(EAGAIN))
243  break;
244  }
245 
246  return ret;
247 }
248 
250 {
251  QSVOverlayContext *s = ctx->priv;
252  int ret, i;
253 
255  s->fs.opaque = s;
256  ret = ff_framesync_init(&s->fs, ctx, ctx->nb_inputs);
257  if (ret < 0)
258  return ret;
259 
260  for (i = 0; i < ctx->nb_inputs; i++) {
261  FFFrameSyncIn *in = &s->fs.in[i];
262  in->before = EXT_STOP;
263  in->after = EXT_INFINITY;
264  in->sync = i ? 1 : 2;
265  in->time_base = ctx->inputs[i]->time_base;
266  }
267 
268  return ff_framesync_configure(&s->fs);
269 }
270 
271 static int config_output(AVFilterLink *outlink)
272 {
273  AVFilterContext *ctx = outlink->src;
274  QSVOverlayContext *vpp = ctx->priv;
275  AVFilterLink *in0 = ctx->inputs[0];
276  AVFilterLink *in1 = ctx->inputs[1];
277  int ret;
278 
279  av_log(ctx, AV_LOG_DEBUG, "Output is of %s.\n", av_get_pix_fmt_name(outlink->format));
280  if ((in0->format == AV_PIX_FMT_QSV && in1->format != AV_PIX_FMT_QSV) ||
281  (in0->format != AV_PIX_FMT_QSV && in1->format == AV_PIX_FMT_QSV)) {
282  av_log(ctx, AV_LOG_ERROR, "Mixing hardware and software pixel formats is not supported.\n");
283  return AVERROR(EINVAL);
284  } else if (in0->format == AV_PIX_FMT_QSV) {
287 
288  if (hw_frame0->device_ctx != hw_frame1->device_ctx) {
289  av_log(ctx, AV_LOG_ERROR, "Inputs with different underlying QSV devices are forbidden.\n");
290  return AVERROR(EINVAL);
291  }
292  }
293 
294  outlink->w = vpp->var_values[VAR_MW];
295  outlink->h = vpp->var_values[VAR_MH];
296  outlink->frame_rate = in0->frame_rate;
297  outlink->time_base = av_inv_q(outlink->frame_rate);
298 
299  ret = init_framesync(ctx);
300  if (ret < 0)
301  return ret;
302 
303  return ff_qsvvpp_create(ctx, &vpp->qsv, &vpp->qsv_param);
304 }
305 
306 /*
307  * Callback for qsvvpp
308  * @Note: qsvvpp composition does not generate PTS for result frame.
309  * so we assign the PTS from framesync to the output frame.
310  */
311 
313 {
314  QSVOverlayContext *s = outlink->src->priv;
315  frame->pts = av_rescale_q(s->fs.pts,
316  s->fs.time_base, outlink->time_base);
317  return ff_filter_frame(outlink, frame);
318 }
319 
320 
322 {
323  QSVOverlayContext *vpp = ctx->priv;
324 
325  /* fill composite config */
326  vpp->comp_conf.Header.BufferId = MFX_EXTBUFF_VPP_COMPOSITE;
327  vpp->comp_conf.Header.BufferSz = sizeof(vpp->comp_conf);
328  vpp->comp_conf.NumInputStream = ctx->nb_inputs;
329  vpp->comp_conf.InputStream = av_mallocz_array(ctx->nb_inputs,
330  sizeof(*vpp->comp_conf.InputStream));
331  if (!vpp->comp_conf.InputStream)
332  return AVERROR(ENOMEM);
333 
334  /* initialize QSVVPP params */
336  vpp->qsv_param.ext_buf = av_mallocz(sizeof(*vpp->qsv_param.ext_buf));
337  if (!vpp->qsv_param.ext_buf)
338  return AVERROR(ENOMEM);
339 
340  vpp->qsv_param.ext_buf[0] = (mfxExtBuffer *)&vpp->comp_conf;
341  vpp->qsv_param.num_ext_buf = 1;
343  vpp->qsv_param.num_crop = 0;
344 
345  return 0;
346 }
347 
349 {
350  QSVOverlayContext *vpp = ctx->priv;
351 
352  ff_qsvvpp_free(&vpp->qsv);
353  ff_framesync_uninit(&vpp->fs);
354  av_freep(&vpp->comp_conf.InputStream);
355  av_freep(&vpp->qsv_param.ext_buf);
356 }
357 
359 {
360  QSVOverlayContext *s = ctx->priv;
361  return ff_framesync_activate(&s->fs);
362 }
363 
365 {
366  int i;
367  int ret;
368 
369  static const enum AVPixelFormat main_in_fmts[] = {
376  };
377  static const enum AVPixelFormat out_pix_fmts[] = {
380  AV_PIX_FMT_NONE
381  };
382 
383  for (i = 0; i < ctx->nb_inputs; i++) {
384  ret = ff_formats_ref(ff_make_format_list(main_in_fmts), &ctx->inputs[i]->out_formats);
385  if (ret < 0)
386  return ret;
387  }
388 
389  ret = ff_formats_ref(ff_make_format_list(out_pix_fmts), &ctx->outputs[0]->in_formats);
390  if (ret < 0)
391  return ret;
392 
393  return 0;
394 }
395 
/* Two inputs: "main" (background, stream 0) and "overlay" (stream 1);
 * geometry for each is set up in the respective config_props callback. */
static const AVFilterPad overlay_qsv_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_main_input,
        .needs_fifo   = 1, /* presumably buffers frames for sync pairing — confirm */
    },
    {
        .name         = "overlay",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_overlay_input,
        .needs_fifo   = 1,
    },
    { NULL }
};
411 
413  {
414  .name = "default",
415  .type = AVMEDIA_TYPE_VIDEO,
416  .config_props = config_output,
417  },
418  { NULL }
419 };
420 
422  .name = "overlay_qsv",
423  .description = NULL_IF_CONFIG_SMALL("Quick Sync Video overlay."),
424  .priv_size = sizeof(QSVOverlayContext),
426  .preinit = overlay_qsv_framesync_preinit,
429  .activate = activate,
430  .inputs = overlay_qsv_inputs,
431  .outputs = overlay_qsv_outputs,
432  .priv_class = &overlay_qsv_class,
433  .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
434 };
#define NULL
Definition: coverity.c:32
#define PASS_EXPR(e, s)
#define FF_FILTER_FLAG_HWFRAME_AWARE
The filter is aware of hardware frames, and any hardware frame context should not be automatically pr...
Definition: internal.h:385
static enum AVPixelFormat pix_fmt
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
static int activate(AVFilterContext *ctx)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:268
AVOption.
Definition: opt.h:246
static int overlay_qsv_init(AVFilterContext *ctx)
misc image utilities
Main libavfilter public API header.
const char * desc
Definition: nvenc.c:68
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
static int eval_expr(AVFilterContext *ctx)
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
Definition: framesync.c:117
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
static int config_output(AVFilterLink *outlink)
int64_t pts
Timestamp of the current event.
Definition: framesync.h:167
enum FFFrameSyncExtMode before
Extrapolation mode for timestamps before the first frame.
Definition: framesync.h:86
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
const char * name
Pad name.
Definition: internal.h:60
AVFilterContext * parent
Parent filter context.
Definition: framesync.h:152
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
int ff_qsvvpp_create(AVFilterContext *avctx, QSVVPPContext **vpp, QSVVPPParam *param)
Definition: qsvvpp.c:561
#define av_cold
Definition: attributes.h:82
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:177
static av_cold int uninit(AVCodecContext *avctx)
Definition: crystalhd.c:279
AVOptions.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:361
Definition: eval.c:157
FFFrameSyncIn * in
Pointer to array of inputs.
Definition: framesync.h:203
static int overlay_qsv_query_formats(AVFilterContext *ctx)
enum FFFrameSyncExtMode after
Extrapolation mode for timestamps after the last frame.
Definition: framesync.h:91
Input stream structure.
Definition: framesync.h:81
static const AVOption overlay_qsv_options[]
#define av_log(a,...)
int num_ext_buf
Definition: qsvvpp.h:54
A filter pad used for either input or output.
Definition: internal.h:54
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
FRAMESYNC_DEFINE_CLASS(overlay_qsv, QSVOverlayContext, fs)
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:260
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
Definition: framesync.c:293
Frame sync structure.
Definition: framesync.h:146
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
simple assert() macros that are a bit more flexible than ISO C assert().
int opt_shortest
Definition: framesync.h:206
AVRational time_base
Time base for the incoming frames.
Definition: framesync.h:96
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter&#39;s input and try to produce output.
Definition: framesync.c:344
int(* on_event)(struct FFFrameSync *fs)
Callback called when a frame event is ready.
Definition: framesync.h:172
int opt_repeatlast
Definition: framesync.h:205
static av_cold void overlay_qsv_uninit(AVFilterContext *ctx)
static int filter_callback(AVFilterLink *outlink, AVFrame *frame)
var_name
Definition: aeval.c:46
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
mfxExtVPPComposite comp_conf
unsigned nb_inputs
number of input pads
Definition: avfilter.h:347
static const AVFilterPad overlay_qsv_outputs[]
AVHWDeviceContext * device_ctx
The parent AVHWDeviceContext.
Definition: hwcontext.h:148
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:440
#define OFFSET(x)
AVFormatContext * ctx
Definition: movenc.c:48
AVRational time_base
Time base for the output events.
Definition: framesync.h:162
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
static int init_framesync(AVFilterContext *ctx)
int num_crop
Definition: qsvvpp.h:61
uint16_t overlay_alpha
void * opaque
Opaque pointer, not used by the API.
Definition: framesync.h:177
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
Extend the frame to infinity.
Definition: framesync.h:75
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:334
#define FLAGS
int ff_framesync_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in)
Initialize a frame sync structure.
Definition: framesync.c:77
int ff_qsvvpp_filter_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref)
Definition: qsvvpp.c:688
QSVVPPParam qsv_param
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:360
uint8_t * data
The data buffer.
Definition: buffer.h:89
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
unsigned sync
Synchronization level: frames on input at the highest sync level will generate output frame events...
Definition: framesync.h:139
Describe the class of an AVClass context structure.
Definition: log.h:67
int ff_qsvvpp_free(QSVVPPContext **vpp)
Definition: qsvvpp.c:664
Filter definition.
Definition: avfilter.h:144
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:123
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
Definition: pixfmt.h:222
static int config_overlay_input(AVFilterLink *inlink)
const char * name
Filter name.
Definition: avfilter.h:148
static int process_frame(FFFrameSync *fs)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
AVFilter ff_vf_overlay_qsv
static const AVFilterPad overlay_qsv_inputs[]
mfxExtBuffer ** ext_buf
Definition: qsvvpp.h:55
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
#define FF_INLINK_IDX(link)
Find the index of a link.
Definition: internal.h:348
int opt_eof_action
Definition: framesync.h:207
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
static int have_alpha_planar(AVFilterLink *link)
QSVVPPContext * qsv
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
static int query_formats(AVFilterContext *ctx)
Definition: aeval.c:244
common internal and external API header
double var_values[VAR_VARS_NB]
static enum AVPixelFormat out_pix_fmts[]
Definition: vf_ciescope.c:130
uint16_t overlay_pixel_alpha
Completely stop all streams with this one.
Definition: framesync.h:65
static const char *const var_names[]
static int config_main_input(AVFilterLink *inlink)
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:734
An instance of a filter.
Definition: avfilter.h:338
enum AVPixelFormat out_sw_format
Definition: qsvvpp.h:58
Intel Quick Sync Video VPP base function.
#define av_freep(p)
int(* filter_frame)(AVFilterLink *outlink, AVFrame *frame)
Definition: qsvvpp.h:51
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe, unsigned get)
Get the current frame in an input.
Definition: framesync.c:256
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:221
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
simple arithmetic expression evaluator
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191