vf_scale_qsv.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * scale video filter - QSV
 */
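/*
 * Illustrative command-line use (an assumption, not part of this file: it
 * presumes a QSV-capable device and the h264_qsv decoder/encoder, and that
 * decoded frames stay in QSV memory via -hwaccel_output_format qsv):
 *
 *   ffmpeg -hwaccel qsv -hwaccel_output_format qsv -c:v h264_qsv -i input.mp4 \
 *          -vf scale_qsv=w=1280:h=720:format=nv12 -c:v h264_qsv output.mp4
 */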

#include <mfx/mfxvideo.h>

#include <stdio.h>
#include <string.h>

#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "libavfilter/qsvvpp.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

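/* Names usable in the w/h option expressions; the order must match the
 * var_name enum below. */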
static const char *const var_names[] = {
    "PI",
    "PHI",
    "E",
    "in_w",   "iw",
    "in_h",   "ih",
    "out_w",  "ow",
    "out_h",  "oh",
    "a", "dar",
    "sar",
    NULL
};

enum var_name {
    VAR_PI,
    VAR_PHI,
    VAR_E,
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_A, VAR_DAR,
    VAR_SAR,
    VARS_NB
};

#define QSV_HAVE_SCALING_CONFIG QSV_VERSION_ATLEAST(1, 19)

typedef struct QSVScaleContext {
    const AVClass *class;

    /* a clone of the main session, used internally for scaling */
    mfxSession session;

    mfxMemId *mem_ids_in;
    int    nb_mem_ids_in;

    mfxMemId *mem_ids_out;
    int    nb_mem_ids_out;

    mfxFrameSurface1 **surface_ptrs_in;
    int             nb_surface_ptrs_in;

    mfxFrameSurface1 **surface_ptrs_out;
    int             nb_surface_ptrs_out;

    mfxExtOpaqueSurfaceAlloc opaque_alloc;

#if QSV_HAVE_SCALING_CONFIG
    mfxExtVPPScaling scale_conf;
#endif
    int mode;

    mfxExtBuffer *ext_buffers[1 + QSV_HAVE_SCALING_CONFIG];
    int           num_ext_buf;

    int shift_width, shift_height;

    /**
     * New dimensions. Special values are:
     *   0 = original width/height
     *  -1 = keep original aspect
     */
    int w, h;

    /**
     * Output sw format. AV_PIX_FMT_NONE for no conversion.
     */
    enum AVPixelFormat format;

    char *w_expr;               ///< width  expression string
    char *h_expr;               ///< height expression string
    char *format_str;
} QSVScaleContext;

static av_cold int qsvscale_init(AVFilterContext *ctx)
{
    QSVScaleContext *s = ctx->priv;

    if (!strcmp(s->format_str, "same")) {
        s->format = AV_PIX_FMT_NONE;
    } else {
        s->format = av_get_pix_fmt(s->format_str);
        if (s->format == AV_PIX_FMT_NONE) {
            av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", s->format_str);
            return AVERROR(EINVAL);
        }
    }

    return 0;
}

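/* Close the private MFX session and free the surface/MemId bookkeeping arrays. */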
static av_cold void qsvscale_uninit(AVFilterContext *ctx)
{
    QSVScaleContext *s = ctx->priv;

    if (s->session) {
        MFXClose(s->session);
        s->session = NULL;
    }

    av_freep(&s->mem_ids_in);
    av_freep(&s->mem_ids_out);
    s->nb_mem_ids_in  = 0;
    s->nb_mem_ids_out = 0;

    av_freep(&s->surface_ptrs_in);
    av_freep(&s->surface_ptrs_out);
    s->nb_surface_ptrs_in  = 0;
    s->nb_surface_ptrs_out = 0;
}

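/* The filter only operates on AV_PIX_FMT_QSV hardware frames, on both links. */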
static int qsvscale_query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_formats[] = {
        AV_PIX_FMT_QSV, AV_PIX_FMT_NONE,
    };
    AVFilterFormats *pix_fmts = ff_make_format_list(pixel_formats);
    int ret;

    if ((ret = ff_set_common_formats(ctx, pix_fmts)) < 0)
        return ret;

    return 0;
}

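/*
 * Allocate the output frame pool on the same device and with the same frame
 * type as the input; pool dimensions are aligned to 32, with CropW/CropH
 * carrying the actual output size.
 */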
static int init_out_pool(AVFilterContext *ctx,
                         int out_width, int out_height)
{
    QSVScaleContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    AVHWFramesContext *in_frames_ctx;
    AVHWFramesContext *out_frames_ctx;
    AVQSVFramesContext *in_frames_hwctx;
    AVQSVFramesContext *out_frames_hwctx;
    enum AVPixelFormat in_format;
    enum AVPixelFormat out_format;
    int i, ret;

    /* check that we have a hw context */
    if (!ctx->inputs[0]->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n");
        return AVERROR(EINVAL);
    }
    in_frames_ctx   = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data;
    in_frames_hwctx = in_frames_ctx->hwctx;

    in_format  = in_frames_ctx->sw_format;
    out_format = (s->format == AV_PIX_FMT_NONE) ? in_format : s->format;

    outlink->hw_frames_ctx = av_hwframe_ctx_alloc(in_frames_ctx->device_ref);
    if (!outlink->hw_frames_ctx)
        return AVERROR(ENOMEM);
    out_frames_ctx   = (AVHWFramesContext*)outlink->hw_frames_ctx->data;
    out_frames_hwctx = out_frames_ctx->hwctx;

    out_frames_ctx->format            = AV_PIX_FMT_QSV;
    out_frames_ctx->width             = FFALIGN(out_width,  32);
    out_frames_ctx->height            = FFALIGN(out_height, 32);
    out_frames_ctx->sw_format         = out_format;
    out_frames_ctx->initial_pool_size = 4;

    out_frames_hwctx->frame_type = in_frames_hwctx->frame_type;

    ret = ff_filter_init_hw_frames(ctx, outlink, 32);
    if (ret < 0)
        return ret;

    ret = av_hwframe_ctx_init(outlink->hw_frames_ctx);
    if (ret < 0)
        return ret;

    for (i = 0; i < out_frames_hwctx->nb_surfaces; i++) {
        mfxFrameInfo *info = &out_frames_hwctx->surfaces[i].Info;
        info->CropW = out_width;
        info->CropH = out_height;
    }

    return 0;
}

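/*
 * Minimal mfxFrameAllocator callbacks: the surfaces are pre-allocated by the
 * AVHWFramesContext pools, so Alloc() only hands back the existing memory IDs
 * and Lock()/Unlock() are not supported.
 */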
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
                             mfxFrameAllocResponse *resp)
{
    AVFilterContext *ctx = pthis;
    QSVScaleContext   *s = ctx->priv;

    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
        !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
        !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
        return MFX_ERR_UNSUPPORTED;

    if (req->Type & MFX_MEMTYPE_FROM_VPPIN) {
        resp->mids           = s->mem_ids_in;
        resp->NumFrameActual = s->nb_mem_ids_in;
    } else {
        resp->mids           = s->mem_ids_out;
        resp->NumFrameActual = s->nb_mem_ids_out;
    }

    return MFX_ERR_NONE;
}

static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
{
    return MFX_ERR_NONE;
}

static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
{
    *hdl = mid;
    return MFX_ERR_NONE;
}

static const mfxHandleType handle_types[] = {
    MFX_HANDLE_VA_DISPLAY,
    MFX_HANDLE_D3D9_DEVICE_MANAGER,
    MFX_HANDLE_D3D11_DEVICE,
};

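/*
 * Create and configure the internal VPP session: clone the implementation,
 * version and device handle of the "master" session, set up either opaque
 * surfaces or an external frame allocator, and initialize the VPP with the
 * input/output surface descriptions.
 */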
static int init_out_session(AVFilterContext *ctx)
{
    QSVScaleContext                   *s = ctx->priv;
    AVHWFramesContext     *in_frames_ctx = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data;
    AVHWFramesContext    *out_frames_ctx = (AVHWFramesContext*)ctx->outputs[0]->hw_frames_ctx->data;
    AVQSVFramesContext  *in_frames_hwctx = in_frames_ctx->hwctx;
    AVQSVFramesContext *out_frames_hwctx = out_frames_ctx->hwctx;
    AVQSVDeviceContext     *device_hwctx = in_frames_ctx->device_ctx->hwctx;

    int opaque = !!(in_frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);

    mfxHDL handle = NULL;
    mfxHandleType handle_type;
    mfxVersion ver;
    mfxIMPL impl;
    mfxVideoParam par;
    mfxStatus err;
    int i;

    s->num_ext_buf = 0;

    /* extract the properties of the "master" session given to us */
    err = MFXQueryIMPL(device_hwctx->session, &impl);
    if (err == MFX_ERR_NONE)
        err = MFXQueryVersion(device_hwctx->session, &ver);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error querying the session attributes\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < FF_ARRAY_ELEMS(handle_types); i++) {
        err = MFXVideoCORE_GetHandle(device_hwctx->session, handle_types[i], &handle);
        if (err == MFX_ERR_NONE) {
            handle_type = handle_types[i];
            break;
        }
    }

    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error getting the session handle\n");
        return AVERROR_UNKNOWN;
    }

    /* create a "slave" session with those same properties, to be used for
     * the actual scaling */
    err = MFXInit(impl, &ver, &s->session);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error initializing a session for scaling\n");
        return AVERROR_UNKNOWN;
    }

    if (handle) {
        err = MFXVideoCORE_SetHandle(s->session, handle_type, handle);
        if (err != MFX_ERR_NONE)
            return AVERROR_UNKNOWN;
    }

    if (QSV_RUNTIME_VERSION_ATLEAST(ver, 1, 25)) {
        err = MFXJoinSession(device_hwctx->session, s->session);
        if (err != MFX_ERR_NONE)
            return AVERROR_UNKNOWN;
    }

    memset(&par, 0, sizeof(par));

    if (opaque) {
        s->surface_ptrs_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
                                              sizeof(*s->surface_ptrs_in));
        if (!s->surface_ptrs_in)
            return AVERROR(ENOMEM);
        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
            s->surface_ptrs_in[i] = in_frames_hwctx->surfaces + i;
        s->nb_surface_ptrs_in = in_frames_hwctx->nb_surfaces;

        s->surface_ptrs_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
                                               sizeof(*s->surface_ptrs_out));
        if (!s->surface_ptrs_out)
            return AVERROR(ENOMEM);
        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
            s->surface_ptrs_out[i] = out_frames_hwctx->surfaces + i;
        s->nb_surface_ptrs_out = out_frames_hwctx->nb_surfaces;

        s->opaque_alloc.In.Surfaces   = s->surface_ptrs_in;
        s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs_in;
        s->opaque_alloc.In.Type       = in_frames_hwctx->frame_type;

        s->opaque_alloc.Out.Surfaces   = s->surface_ptrs_out;
        s->opaque_alloc.Out.NumSurface = s->nb_surface_ptrs_out;
        s->opaque_alloc.Out.Type       = out_frames_hwctx->frame_type;

        s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
        s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);

        s->ext_buffers[s->num_ext_buf++] = (mfxExtBuffer*)&s->opaque_alloc;

        par.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
    } else {
        mfxFrameAllocator frame_allocator = {
            .pthis  = ctx,
            .Alloc  = frame_alloc,
            .Lock   = frame_lock,
            .Unlock = frame_unlock,
            .GetHDL = frame_get_hdl,
            .Free   = frame_free,
        };

        s->mem_ids_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
                                         sizeof(*s->mem_ids_in));
        if (!s->mem_ids_in)
            return AVERROR(ENOMEM);
        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
            s->mem_ids_in[i] = in_frames_hwctx->surfaces[i].Data.MemId;
        s->nb_mem_ids_in = in_frames_hwctx->nb_surfaces;

        s->mem_ids_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
                                          sizeof(*s->mem_ids_out));
        if (!s->mem_ids_out)
            return AVERROR(ENOMEM);
        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
            s->mem_ids_out[i] = out_frames_hwctx->surfaces[i].Data.MemId;
        s->nb_mem_ids_out = out_frames_hwctx->nb_surfaces;

        err = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator);
        if (err != MFX_ERR_NONE)
            return AVERROR_UNKNOWN;

        par.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;
    }

#if QSV_HAVE_SCALING_CONFIG
    memset(&s->scale_conf, 0, sizeof(mfxExtVPPScaling));
    s->scale_conf.Header.BufferId = MFX_EXTBUFF_VPP_SCALING;
    s->scale_conf.Header.BufferSz = sizeof(mfxExtVPPScaling);
    s->scale_conf.ScalingMode     = s->mode;
    s->ext_buffers[s->num_ext_buf++] = (mfxExtBuffer*)&s->scale_conf;
    av_log(ctx, AV_LOG_VERBOSE, "Scaling mode: %"PRIu16"\n", s->mode);
#endif

    par.ExtParam    = s->ext_buffers;
    par.NumExtParam = s->num_ext_buf;

    par.AsyncDepth = 1;    // TODO async

    par.vpp.In  = in_frames_hwctx->surfaces[0].Info;
    par.vpp.Out = out_frames_hwctx->surfaces[0].Info;

    /* VPP apparently requires the frame rate to be set to some value, otherwise
     * init fails (probably because of the frame rate conversion filter). Since
     * we are only doing scaling here, just use an arbitrary value. */
    par.vpp.In.FrameRateExtN  = 25;
    par.vpp.In.FrameRateExtD  = 1;
    par.vpp.Out.FrameRateExtN = 25;
    par.vpp.Out.FrameRateExtD = 1;

    err = MFXVideoVPP_Init(s->session, &par);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error opening the VPP for scaling\n");
        return AVERROR_UNKNOWN;
    }

    return 0;
}

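/* Tear down any previous session and pool, then rebuild both for the new
 * output size. */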
static int init_scale_session(AVFilterContext *ctx, int in_width, int in_height,
                              int out_width, int out_height)
{
    int ret;

    qsvscale_uninit(ctx);

    ret = init_out_pool(ctx, out_width, out_height);
    if (ret < 0)
        return ret;

    ret = init_out_session(ctx);
    if (ret < 0)
        return ret;

    return 0;
}

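/*
 * Evaluate the width/height expressions against the input link, derive the
 * final output dimensions and sample aspect ratio, then (re)initialize the
 * scaling session.
 */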
static int qsvscale_config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    QSVScaleContext   *s = ctx->priv;
    int64_t w, h;
    double var_values[VARS_NB], res;
    char *expr;
    int ret;

    var_values[VAR_PI]    = M_PI;
    var_values[VAR_PHI]   = M_PHI;
    var_values[VAR_E]     = M_E;
    var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
    var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
    var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
    var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
    var_values[VAR_A]     = (double) inlink->w / inlink->h;
    var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    var_values[VAR_DAR]   = var_values[VAR_A] * var_values[VAR_SAR];

    /* evaluate width and height */
    av_expr_parse_and_eval(&res, (expr = s->w_expr),
                           var_names, var_values,
                           NULL, NULL, NULL, NULL, NULL, 0, ctx);
    s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
    /* evaluate the width again, as it may depend on the output height */
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->w = res;

    w = s->w;
    h = s->h;

    /* sanity check the parameters */
    if (w < -1 || h < -1) {
        av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
        return AVERROR(EINVAL);
    }
    if (w == -1 && h == -1)
        s->w = s->h = 0;

    if (!(w = s->w))
        w = inlink->w;
    if (!(h = s->h))
        h = inlink->h;
    if (w == -1)
        w = av_rescale(h, inlink->w, inlink->h);
    if (h == -1)
        h = av_rescale(w, inlink->h, inlink->w);

    if (w > INT_MAX || h > INT_MAX ||
        (h * inlink->w) > INT_MAX  ||
        (w * inlink->h) > INT_MAX)
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");

    outlink->w = w;
    outlink->h = h;

    ret = init_scale_session(ctx, inlink->w, inlink->h, w, h);
    if (ret < 0)
        return ret;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
           inlink->w, inlink->h, outlink->w, outlink->h);

    if (inlink->sample_aspect_ratio.num)
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w,
                                                             outlink->w * inlink->h},
                                                inlink->sample_aspect_ratio);
    else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    return 0;

fail:
    av_log(ctx, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'\n", expr);
    return ret;
}

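/*
 * Submit one frame to the VPP, retrying while the device is busy, wait for
 * the operation to complete and forward the scaled frame downstream.
 */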
static int qsvscale_filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext  *ctx = link->dst;
    QSVScaleContext    *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    mfxSyncPoint sync = NULL;
    mfxStatus err;

    AVFrame *out = NULL;
    int ret = 0;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    do {
        err = MFXVideoVPP_RunFrameVPPAsync(s->session,
                                           (mfxFrameSurface1*)in->data[3],
                                           (mfxFrameSurface1*)out->data[3],
                                           NULL, &sync);
        if (err == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (err == MFX_WRN_DEVICE_BUSY);

    if (err < 0 || !sync) {
        av_log(ctx, AV_LOG_ERROR, "Error during scaling\n");
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    do {
        err = MFXVideoCORE_SyncOperation(s->session, sync, 1000);
    } while (err == MFX_WRN_IN_EXECUTION);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error synchronizing the operation: %d\n", err);
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    ret = av_frame_copy_props(out, in);
    if (ret < 0)
        goto fail;

    out->width  = outlink->w;
    out->height = outlink->h;

    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
fail:
    av_frame_free(&in);
    av_frame_free(&out);
    return ret;
}

#define OFFSET(x) offsetof(QSVScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
    { "w",      "Output video width",  OFFSET(w_expr),     AV_OPT_TYPE_STRING, { .str = "iw"   }, .flags = FLAGS },
    { "h",      "Output video height", OFFSET(h_expr),     AV_OPT_TYPE_STRING, { .str = "ih"   }, .flags = FLAGS },
    { "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },

#if QSV_HAVE_SCALING_CONFIG
    { "mode",      "set scaling mode",  OFFSET(mode), AV_OPT_TYPE_INT,   { .i64 = MFX_SCALING_MODE_DEFAULT  }, MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, FLAGS, "mode" },
    { "low_power", "low power mode",    0,            AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_LOWPOWER }, INT_MIN, INT_MAX, FLAGS, "mode" },
    { "hq",        "high quality mode", 0,            AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_QUALITY  }, INT_MIN, INT_MAX, FLAGS, "mode" },
#else
    { "mode",      "(not supported)",   OFFSET(mode), AV_OPT_TYPE_INT,   { .i64 = 0 }, 0, INT_MAX, FLAGS, "mode" },
    { "low_power", "",                  0,            AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0,       FLAGS, "mode" },
    { "hq",        "",                  0,            AV_OPT_TYPE_CONST, { .i64 = 2 }, 0, 0,       FLAGS, "mode" },
#endif

    { NULL },
};

static const AVClass qsvscale_class = {
    .class_name = "qsvscale",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad qsvscale_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = qsvscale_filter_frame,
    },
    { NULL }
};

static const AVFilterPad qsvscale_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = qsvscale_config_props,
    },
    { NULL }
};

AVFilter ff_vf_scale_qsv = {
    .name        = "scale_qsv",
    .description = NULL_IF_CONFIG_SMALL("QuickSync video scaling and format conversion"),

    .init          = qsvscale_init,
    .uninit        = qsvscale_uninit,
    .query_formats = qsvscale_query_formats,

    .priv_size  = sizeof(QSVScaleContext),
    .priv_class = &qsvscale_class,

    .inputs  = qsvscale_inputs,
    .outputs = qsvscale_outputs,

    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};