FFmpeg
vf_scale_npp.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 /**
20  * @file
21  * NPP-accelerated scale video filter
22  */
23 
24 #include <nppi.h>
25 #include <stdio.h>
26 #include <string.h>
27 
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/hwcontext_cuda_internal.h"
30 #include "libavutil/cuda_check.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/parseutils.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/pixdesc.h"
36 
37 #include "avfilter.h"
38 #include "formats.h"
39 #include "internal.h"
40 #include "scale_eval.h"
41 #include "video.h"
42 
43 #define CHECK_CU(x) FF_CUDA_CHECK_DL(ctx, device_hwctx->internal->cuda_dl, x)
44 
45 static const enum AVPixelFormat supported_formats[] = {
46  AV_PIX_FMT_YUV420P,
47  AV_PIX_FMT_NV12,
48  AV_PIX_FMT_YUV444P,
49  AV_PIX_FMT_YUVA420P,
50 };
51 
52 static const enum AVPixelFormat deinterleaved_formats[][2] = {
53  { AV_PIX_FMT_NV12, AV_PIX_FMT_YUV420P },
54 };
55 
56 enum ScaleStage {
57  STAGE_DEINTERLEAVE,
58  STAGE_RESIZE,
59  STAGE_INTERLEAVE,
60  STAGE_NB,
61 };
62 
63 typedef struct NPPScaleStageContext {
64  int stage_needed;
65  enum AVPixelFormat in_fmt;
66  enum AVPixelFormat out_fmt;
67 
68  struct {
69  int width;
70  int height;
71  } planes_in[4], planes_out[4];
72 
73  AVBufferRef *frames_ctx;
74  AVFrame *frame;
75 } NPPScaleStageContext;
76 
77 static const char *const var_names[] = {
78  "in_w", "iw",
79  "in_h", "ih",
80  "out_w", "ow",
81  "out_h", "oh",
82  "a",
83  "sar",
84  "dar",
85  "n",
86  "t",
87  "pos",
88  "main_w",
89  "main_h",
90  "main_a",
91  "main_sar",
92  "main_dar", "mdar",
93  "main_n",
94  "main_t",
95  "main_pos",
96  NULL
97 };
98 
99 enum var_name {
100  VAR_IN_W, VAR_IW,
101  VAR_IN_H, VAR_IH,
102  VAR_OUT_W, VAR_OW,
103  VAR_OUT_H, VAR_OH,
104  VAR_A,
105  VAR_SAR,
106  VAR_DAR,
107  VAR_N,
108  VAR_T,
109  VAR_POS,
110  VAR_S2R_MAIN_W,
111  VAR_S2R_MAIN_H,
112  VAR_S2R_MAIN_A,
113  VAR_S2R_MAIN_SAR,
114  VAR_S2R_MAIN_DAR, VAR_S2R_MDAR,
115  VAR_S2R_MAIN_N,
116  VAR_S2R_MAIN_T,
117  VAR_S2R_MAIN_POS,
118  VARS_NB
119 };
120 
121 enum EvalMode {
122  EVAL_MODE_INIT,
123  EVAL_MODE_FRAME,
124  EVAL_MODE_NB
125 };
126 
127 typedef struct NPPScaleContext {
128  const AVClass *class;
129 
130  NPPScaleStageContext stages[STAGE_NB];
131  AVFrame *tmp_frame;
132  int passthrough;
133 
134  int shift_width, shift_height;
135 
136  /**
137  * New dimensions. Special values are:
138  * 0 = original width/height
139  * -1 = keep original aspect
140  */
141  int w, h;
142 
143  /**
144  * Output sw format. AV_PIX_FMT_NONE for no conversion.
145  */
146  enum AVPixelFormat format;
147 
148  char *w_expr; ///< width expression string
149  char *h_expr; ///< height expression string
150  char *format_str;
151 
152  int force_original_aspect_ratio;
153  int force_divisible_by;
154 
155  int interp_algo;
156 
157  char* size_str;
158 
159  AVExpr* w_pexpr;
160  AVExpr* h_pexpr;
161 
162  double var_values[VARS_NB];
163 
164  int eval_mode;
165 } NPPScaleContext;
166 
167 const AVFilter ff_vf_scale2ref_npp;
168 
169 static int config_props(AVFilterLink *outlink);
170 
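/* Validate the parsed width/height expressions: reject self-references,
 * warn about circular width/height references, and reject scale2ref-only
 * or per-frame variables where they cannot be evaluated. */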
171 static int check_exprs(AVFilterContext* ctx)
172 {
173  NPPScaleContext* scale = ctx->priv;
174  unsigned vars_w[VARS_NB] = {0}, vars_h[VARS_NB] = {0};
175 
176  if (!scale->w_pexpr && !scale->h_pexpr)
177  return AVERROR(EINVAL);
178 
179  if (scale->w_pexpr)
180  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
181  if (scale->h_pexpr)
182  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
183 
184  if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
185  av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
186  return AVERROR(EINVAL);
187  }
188 
189  if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
190  av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
191  return AVERROR(EINVAL);
192  }
193 
194  if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
195  (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
196  av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr);
197  }
198 
199  if (ctx->filter != &ff_vf_scale2ref_npp &&
200  (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] ||
201  vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] ||
202  vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] ||
203  vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] ||
204  vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] ||
205  vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] ||
206  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
207  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
208  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS])) {
209  av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref_npp variables are not valid in scale_npp filter.\n");
210  return AVERROR(EINVAL);
211  }
212 
213  if (scale->eval_mode == EVAL_MODE_INIT &&
214  (vars_w[VAR_N] || vars_h[VAR_N] ||
215  vars_w[VAR_T] || vars_h[VAR_T] ||
216  vars_w[VAR_POS] || vars_h[VAR_POS] ||
217  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
218  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
219  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
220  av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', 'pos' are not valid in init eval_mode.\n");
221  return AVERROR(EINVAL);
222  }
223 
224  return 0;
225 }
226 
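/* (Re)parse a width or height expression; on parse or validation failure
 * the previous expression and option value are restored. */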
227 static int nppscale_parse_expr(AVFilterContext* ctx, char* str_expr,
228  AVExpr** pexpr_ptr, const char* var,
229  const char* args)
230 {
231  NPPScaleContext* scale = ctx->priv;
232  int ret, is_inited = 0;
233  char* old_str_expr = NULL;
234  AVExpr* old_pexpr = NULL;
235 
236  if (str_expr) {
237  old_str_expr = av_strdup(str_expr);
238  if (!old_str_expr)
239  return AVERROR(ENOMEM);
240  av_opt_set(scale, var, args, 0);
241  }
242 
243  if (*pexpr_ptr) {
244  old_pexpr = *pexpr_ptr;
245  *pexpr_ptr = NULL;
246  is_inited = 1;
247  }
248 
249  ret = av_expr_parse(pexpr_ptr, args, var_names, NULL, NULL, NULL, NULL, 0,
250  ctx);
251  if (ret < 0) {
252  av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var,
253  args);
254  goto revert;
255  }
256 
257  ret = check_exprs(ctx);
258  if (ret < 0)
259  goto revert;
260 
261  if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
262  goto revert;
263 
264  av_expr_free(old_pexpr);
265  old_pexpr = NULL;
266  av_freep(&old_str_expr);
267 
268  return 0;
269 
270 revert:
271  av_expr_free(*pexpr_ptr);
272  *pexpr_ptr = NULL;
273  if (old_str_expr) {
274  av_opt_set(scale, var, old_str_expr, 0);
275  av_free(old_str_expr);
276  }
277  if (old_pexpr)
278  *pexpr_ptr = old_pexpr;
279 
280  return ret;
281 }
282 
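/* Filter init: resolve the format/size options into width and height
 * expressions and allocate the per-stage frames. */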
283 static av_cold int nppscale_init(AVFilterContext* ctx)
284 {
285  NPPScaleContext* scale = ctx->priv;
286  int i, ret;
287 
288  if (!strcmp(scale->format_str, "same")) {
289  scale->format = AV_PIX_FMT_NONE;
290  } else {
291  scale->format = av_get_pix_fmt(scale->format_str);
292  if (scale->format == AV_PIX_FMT_NONE) {
293  av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", scale->format_str);
294  return AVERROR(EINVAL);
295  }
296  }
297 
298  if (scale->size_str && (scale->w_expr || scale->h_expr)) {
299  av_log(ctx, AV_LOG_ERROR,
300  "Size and width/height exprs cannot be set at the same time.\n");
301  return AVERROR(EINVAL);
302  }
303 
304  if (scale->w_expr && !scale->h_expr)
305  FFSWAP(char*, scale->w_expr, scale->size_str);
306 
307  if (scale->size_str) {
308  char buf[32];
309  ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str);
310  if (0 > ret) {
311  av_log(ctx, AV_LOG_ERROR, "Invalid size '%s'\n", scale->size_str);
312  return ret;
313  }
314 
315  snprintf(buf, sizeof(buf) - 1, "%d", scale->w);
316  ret = av_opt_set(scale, "w", buf, 0);
317  if (ret < 0)
318  return ret;
319 
320  snprintf(buf, sizeof(buf) - 1, "%d", scale->h);
321  ret = av_opt_set(scale, "h", buf, 0);
322  if (ret < 0)
323  return ret;
324  }
325 
326  if (!scale->w_expr) {
327  ret = av_opt_set(scale, "w", "iw", 0);
328  if (ret < 0)
329  return ret;
330  }
331 
332  if (!scale->h_expr) {
333  ret = av_opt_set(scale, "h", "ih", 0);
334  if (ret < 0)
335  return ret;
336  }
337 
338  ret = nppscale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
339  if (ret < 0)
340  return ret;
341 
342  ret = nppscale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
343  if (ret < 0)
344  return ret;
345 
346  for (i = 0; i < FF_ARRAY_ELEMS(scale->stages); i++) {
347  scale->stages[i].frame = av_frame_alloc();
348  if (!scale->stages[i].frame)
349  return AVERROR(ENOMEM);
350  }
351  scale->tmp_frame = av_frame_alloc();
352  if (!scale->tmp_frame)
353  return AVERROR(ENOMEM);
354 
355  return 0;
356 }
357 
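/* Evaluate the width/height expressions against the current input link
 * (and, for scale2ref_npp, the main input link) properties. */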
358 static int nppscale_eval_dimensions(AVFilterContext* ctx)
359 {
360  NPPScaleContext* scale = ctx->priv;
361  const char scale2ref = ctx->filter == &ff_vf_scale2ref_npp;
362  const AVFilterLink* inlink = ctx->inputs[scale2ref ? 1 : 0];
363  char* expr;
364  int eval_w, eval_h;
365  int ret;
366  double res;
367 
368  scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w;
369  scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h;
370  scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
371  scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
372  scale->var_values[VAR_A] = (double)inlink->w / inlink->h;
373  scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
374  (double)inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
375  scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
376 
377  if (scale2ref) {
378  const AVFilterLink* main_link = ctx->inputs[0];
379 
380  scale->var_values[VAR_S2R_MAIN_W] = main_link->w;
381  scale->var_values[VAR_S2R_MAIN_H] = main_link->h;
382  scale->var_values[VAR_S2R_MAIN_A] = (double)main_link->w / main_link->h;
383  scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
384  (double)main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
385  scale->var_values[VAR_S2R_MAIN_DAR] = scale->var_values[VAR_S2R_MDAR] =
386  scale->var_values[VAR_S2R_MAIN_A] * scale->var_values[VAR_S2R_MAIN_SAR];
387  }
388 
389  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
390  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int)res == 0 ? inlink->w : (int)res;
391 
392  res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
393  if (isnan(res)) {
394  expr = scale->h_expr;
395  ret = AVERROR(EINVAL);
396  goto fail;
397  }
398  eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int)res == 0 ? inlink->h : (int)res;
399 
400  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
401  if (isnan(res)) {
402  expr = scale->w_expr;
403  ret = AVERROR(EINVAL);
404  goto fail;
405  }
406  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int)res == 0 ? inlink->w : (int)res;
407 
408  scale->w = eval_w;
409  scale->h = eval_h;
410 
411  return 0;
412 
413 fail:
414  av_log(ctx, AV_LOG_ERROR, "Error when evaluating the expression '%s'.\n",
415  expr);
416  return ret;
417 }
418 
419 static void nppscale_uninit(AVFilterContext *ctx)
420 {
421  NPPScaleContext *s = ctx->priv;
422  int i;
423 
424  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
425  av_frame_free(&s->stages[i].frame);
426  av_buffer_unref(&s->stages[i].frames_ctx);
427  }
428  av_frame_free(&s->tmp_frame);
429 
430  av_expr_free(s->w_pexpr);
431  av_expr_free(s->h_pexpr);
432  s->w_pexpr = s->h_pexpr = NULL;
433 }
434 
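/* Allocate a CUDA frames context and an output frame for one processing
 * stage; per-plane sizes are derived from the chroma subsampling of the
 * stage's input and output formats. */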
435 static int init_stage(NPPScaleStageContext *stage, AVBufferRef *device_ctx)
436 {
437  AVBufferRef *out_ref = NULL;
438  AVHWFramesContext *out_ctx;
439  int in_sw, in_sh, out_sw, out_sh;
440  int ret, i;
441 
442  av_pix_fmt_get_chroma_sub_sample(stage->in_fmt, &in_sw, &in_sh);
443  av_pix_fmt_get_chroma_sub_sample(stage->out_fmt, &out_sw, &out_sh);
444  if (!stage->planes_out[0].width) {
445  stage->planes_out[0].width = stage->planes_in[0].width;
446  stage->planes_out[0].height = stage->planes_in[0].height;
447  }
448 
449  for (i = 1; i < FF_ARRAY_ELEMS(stage->planes_in); i++) {
450  stage->planes_in[i].width = stage->planes_in[0].width >> in_sw;
451  stage->planes_in[i].height = stage->planes_in[0].height >> in_sh;
452  stage->planes_out[i].width = stage->planes_out[0].width >> out_sw;
453  stage->planes_out[i].height = stage->planes_out[0].height >> out_sh;
454  }
455 
456  if (AV_PIX_FMT_YUVA420P == stage->in_fmt) {
457  stage->planes_in[3].width = stage->planes_in[0].width;
458  stage->planes_in[3].height = stage->planes_in[0].height;
459  stage->planes_out[3].width = stage->planes_out[0].width;
460  stage->planes_out[3].height = stage->planes_out[0].height;
461  }
462 
463  out_ref = av_hwframe_ctx_alloc(device_ctx);
464  if (!out_ref)
465  return AVERROR(ENOMEM);
466  out_ctx = (AVHWFramesContext*)out_ref->data;
467 
468  out_ctx->format = AV_PIX_FMT_CUDA;
469  out_ctx->sw_format = stage->out_fmt;
470  out_ctx->width = FFALIGN(stage->planes_out[0].width, 32);
471  out_ctx->height = FFALIGN(stage->planes_out[0].height, 32);
472 
473  ret = av_hwframe_ctx_init(out_ref);
474  if (ret < 0)
475  goto fail;
476 
477  av_frame_unref(stage->frame);
478  ret = av_hwframe_get_buffer(out_ref, stage->frame, 0);
479  if (ret < 0)
480  goto fail;
481 
482  stage->frame->width = stage->planes_out[0].width;
483  stage->frame->height = stage->planes_out[0].height;
484 
485  av_buffer_unref(&stage->frames_ctx);
486  stage->frames_ctx = out_ref;
487 
488  return 0;
489 fail:
490  av_buffer_unref(&out_ref);
491  return ret;
492 }
493 
494 static int format_is_supported(enum AVPixelFormat fmt)
495 {
496  int i;
497 
498  for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
499  if (supported_formats[i] == fmt)
500  return 1;
501  return 0;
502 }
503 
504 static enum AVPixelFormat get_deinterleaved_format(enum AVPixelFormat fmt)
505 {
506  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
507  int i, planes;
508 
509  planes = av_pix_fmt_count_planes(fmt);
510  if (planes == desc->nb_components)
511  return fmt;
512  for (i = 0; i < FF_ARRAY_ELEMS(deinterleaved_formats); i++)
513  if (deinterleaved_formats[i][0] == fmt)
514  return deinterleaved_formats[i][1];
515  return AV_PIX_FMT_NONE;
516 }
517 
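/* Decide which of the deinterleave/resize/interleave stages are needed
 * for the given formats and dimensions, initialize them, and pick the
 * output hardware frames context (or pass frames through untouched). */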
518 static int init_processing_chain(AVFilterContext *ctx, int in_width, int in_height,
519  int out_width, int out_height)
520 {
521  NPPScaleContext *s = ctx->priv;
522 
523  AVHWFramesContext *in_frames_ctx;
524 
525  enum AVPixelFormat in_format;
526  enum AVPixelFormat out_format;
527  enum AVPixelFormat in_deinterleaved_format;
528  enum AVPixelFormat out_deinterleaved_format;
529 
530  int i, ret, last_stage = -1;
531 
532  /* check that we have a hw context */
533  if (!ctx->inputs[0]->hw_frames_ctx) {
534  av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n");
535  return AVERROR(EINVAL);
536  }
537  in_frames_ctx = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data;
538  in_format = in_frames_ctx->sw_format;
539  out_format = (s->format == AV_PIX_FMT_NONE) ? in_format : s->format;
540 
541  if (!format_is_supported(in_format)) {
542  av_log(ctx, AV_LOG_ERROR, "Unsupported input format: %s\n",
543  av_get_pix_fmt_name(in_format));
544  return AVERROR(ENOSYS);
545  }
546  if (!format_is_supported(out_format)) {
547  av_log(ctx, AV_LOG_ERROR, "Unsupported output format: %s\n",
548  av_get_pix_fmt_name(out_format));
549  return AVERROR(ENOSYS);
550  }
551 
552  in_deinterleaved_format = get_deinterleaved_format(in_format);
553  out_deinterleaved_format = get_deinterleaved_format(out_format);
554  if (in_deinterleaved_format == AV_PIX_FMT_NONE ||
555  out_deinterleaved_format == AV_PIX_FMT_NONE)
556  return AVERROR_BUG;
557 
558  /* figure out which stages need to be done */
559  if (in_width != out_width || in_height != out_height ||
560  in_deinterleaved_format != out_deinterleaved_format) {
561  s->stages[STAGE_RESIZE].stage_needed = 1;
562 
563  if (s->interp_algo == NPPI_INTER_SUPER &&
564  (out_width > in_width && out_height > in_height)) {
565  s->interp_algo = NPPI_INTER_LANCZOS;
566  av_log(ctx, AV_LOG_WARNING, "super-sampling not supported for output dimensions, using lanczos instead.\n");
567  }
568  if (s->interp_algo == NPPI_INTER_SUPER &&
569  !(out_width < in_width && out_height < in_height)) {
570  s->interp_algo = NPPI_INTER_CUBIC;
571  av_log(ctx, AV_LOG_WARNING, "super-sampling not supported for output dimensions, using cubic instead.\n");
572  }
573  }
574 
575  if (!s->stages[STAGE_RESIZE].stage_needed && in_format == out_format)
576  s->passthrough = 1;
577 
578  if (!s->passthrough) {
579  if (in_format != in_deinterleaved_format)
580  s->stages[STAGE_DEINTERLEAVE].stage_needed = 1;
581  if (out_format != out_deinterleaved_format)
582  s->stages[STAGE_INTERLEAVE].stage_needed = 1;
583  }
584 
585  s->stages[STAGE_DEINTERLEAVE].in_fmt = in_format;
586  s->stages[STAGE_DEINTERLEAVE].out_fmt = in_deinterleaved_format;
587  s->stages[STAGE_DEINTERLEAVE].planes_in[0].width = in_width;
588  s->stages[STAGE_DEINTERLEAVE].planes_in[0].height = in_height;
589 
590  s->stages[STAGE_RESIZE].in_fmt = in_deinterleaved_format;
591  s->stages[STAGE_RESIZE].out_fmt = out_deinterleaved_format;
592  s->stages[STAGE_RESIZE].planes_in[0].width = in_width;
593  s->stages[STAGE_RESIZE].planes_in[0].height = in_height;
594  s->stages[STAGE_RESIZE].planes_out[0].width = out_width;
595  s->stages[STAGE_RESIZE].planes_out[0].height = out_height;
596 
597  s->stages[STAGE_INTERLEAVE].in_fmt = out_deinterleaved_format;
598  s->stages[STAGE_INTERLEAVE].out_fmt = out_format;
599  s->stages[STAGE_INTERLEAVE].planes_in[0].width = out_width;
600  s->stages[STAGE_INTERLEAVE].planes_in[0].height = out_height;
601 
602  /* init the hardware contexts */
603  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
604  if (!s->stages[i].stage_needed)
605  continue;
606 
607  ret = init_stage(&s->stages[i], in_frames_ctx->device_ref);
608  if (ret < 0)
609  return ret;
610 
611  last_stage = i;
612  }
613 
614  if (last_stage >= 0)
615  ctx->outputs[0]->hw_frames_ctx = av_buffer_ref(s->stages[last_stage].frames_ctx);
616  else
617  ctx->outputs[0]->hw_frames_ctx = av_buffer_ref(ctx->inputs[0]->hw_frames_ctx);
618 
619  if (!ctx->outputs[0]->hw_frames_ctx)
620  return AVERROR(ENOMEM);
621 
622  return 0;
623 }
624 
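/* Output link configuration: evaluate and adjust the target size, set up
 * the processing chain and the output sample aspect ratio. */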
625 static int config_props(AVFilterLink *outlink)
626 {
627  AVFilterContext *ctx = outlink->src;
628  AVFilterLink *inlink0 = outlink->src->inputs[0];
629  AVFilterLink *inlink = ctx->filter == &ff_vf_scale2ref_npp ?
630  outlink->src->inputs[1] :
631  outlink->src->inputs[0];
632  NPPScaleContext *s = ctx->priv;
633  int ret;
634 
635  if ((ret = nppscale_eval_dimensions(ctx)) < 0)
636  goto fail;
637 
638  ff_scale_adjust_dimensions(inlink, &s->w, &s->h,
639  s->force_original_aspect_ratio,
640  s->force_divisible_by);
641 
642  if (s->w > INT_MAX || s->h > INT_MAX ||
643  (s->h * inlink->w) > INT_MAX ||
644  (s->w * inlink->h) > INT_MAX)
645  av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
646 
647  outlink->w = s->w;
648  outlink->h = s->h;
649 
650  ret = init_processing_chain(ctx, inlink0->w, inlink0->h, outlink->w, outlink->h);
651  if (ret < 0)
652  return ret;
653 
654  av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
655  inlink->w, inlink->h, outlink->w, outlink->h);
656 
657  if (inlink->sample_aspect_ratio.num)
658  outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
659  outlink->w*inlink->h},
660  inlink->sample_aspect_ratio);
661  else
662  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
663 
664  return 0;
665 
666 fail:
667  return ret;
668 }
669 
670 static int config_props_ref(AVFilterLink *outlink)
671 {
672  AVFilterLink *inlink = outlink->src->inputs[1];
673  AVFilterContext *ctx = outlink->src;
674 
675  outlink->w = inlink->w;
676  outlink->h = inlink->h;
677  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
678  outlink->time_base = inlink->time_base;
679  outlink->frame_rate = inlink->frame_rate;
680 
681  ctx->outputs[1]->hw_frames_ctx = av_buffer_ref(ctx->inputs[1]->hw_frames_ctx);
682 
683  return 0;
684 }
685 
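/* Split interleaved chroma (NV12) into planar YUV420P using NPP. */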
686 static int nppscale_deinterleave(AVFilterContext *ctx, NPPScaleStageContext *stage,
687  AVFrame *out, AVFrame *in)
688 {
689  AVHWFramesContext *in_frames_ctx = (AVHWFramesContext*)in->hw_frames_ctx->data;
690  NppStatus err;
691 
692  switch (in_frames_ctx->sw_format) {
693  case AV_PIX_FMT_NV12:
694  err = nppiYCbCr420_8u_P2P3R(in->data[0], in->linesize[0],
695  in->data[1], in->linesize[1],
696  out->data, out->linesize,
697  (NppiSize){ in->width, in->height });
698  break;
699  default:
700  return AVERROR_BUG;
701  }
702  if (err != NPP_SUCCESS) {
703  av_log(ctx, AV_LOG_ERROR, "NPP deinterleave error: %d\n", err);
704  return AVERROR_UNKNOWN;
705  }
706 
707  return 0;
708 }
709 
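/* Resize every plane with nppiResizeSqrPixel_8u_C1R using the selected
 * interpolation algorithm. */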
710 static int nppscale_resize(AVFilterContext *ctx, NPPScaleStageContext *stage,
711  AVFrame *out, AVFrame *in)
712 {
713  NPPScaleContext *s = ctx->priv;
714  NppStatus err;
715  int i;
716 
717  for (i = 0; i < FF_ARRAY_ELEMS(stage->planes_in) && i < FF_ARRAY_ELEMS(in->data) && in->data[i]; i++) {
718  int iw = stage->planes_in[i].width;
719  int ih = stage->planes_in[i].height;
720  int ow = stage->planes_out[i].width;
721  int oh = stage->planes_out[i].height;
722 
723  err = nppiResizeSqrPixel_8u_C1R(in->data[i], (NppiSize){ iw, ih },
724  in->linesize[i], (NppiRect){ 0, 0, iw, ih },
725  out->data[i], out->linesize[i],
726  (NppiRect){ 0, 0, ow, oh },
727  (double)ow / iw, (double)oh / ih,
728  0.0, 0.0, s->interp_algo);
729  if (err != NPP_SUCCESS) {
730  av_log(ctx, AV_LOG_ERROR, "NPP resize error: %d\n", err);
731  return AVERROR_UNKNOWN;
732  }
733  }
734 
735  return 0;
736 }
737 
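/* Merge planar YUV420P back into interleaved-chroma NV12 using NPP. */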
738 static int nppscale_interleave(AVFilterContext *ctx, NPPScaleStageContext *stage,
739  AVFrame *out, AVFrame *in)
740 {
741  AVHWFramesContext *out_frames_ctx = (AVHWFramesContext*)out->hw_frames_ctx->data;
742  NppStatus err;
743 
744  switch (out_frames_ctx->sw_format) {
745  case AV_PIX_FMT_NV12:
746  err = nppiYCbCr420_8u_P3P2R((const uint8_t**)in->data,
747  in->linesize,
748  out->data[0], out->linesize[0],
749  out->data[1], out->linesize[1],
750  (NppiSize){ in->width, in->height });
751  break;
752  default:
753  return AVERROR_BUG;
754  }
755  if (err != NPP_SUCCESS) {
756  av_log(ctx, AV_LOG_ERROR, "NPP interleave error: %d\n", err);
757  return AVERROR_UNKNOWN;
758  }
759 
760  return 0;
761 }
762 
763 static int (*const nppscale_process[])(AVFilterContext *ctx, NPPScaleStageContext *stage,
764  AVFrame *out, AVFrame *in) = {
765  [STAGE_DEINTERLEAVE] = nppscale_deinterleave,
766  [STAGE_RESIZE] = nppscale_resize,
767  [STAGE_INTERLEAVE] = nppscale_interleave,
768 };
769 
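/* Run the needed stages on the input frame; re-evaluates the size
 * expressions when eval=frame or when the input properties change. */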
770 static int nppscale_scale(AVFilterLink *link, AVFrame *out, AVFrame *in)
771 {
772  AVFilterContext *ctx = link->dst;
773  NPPScaleContext *s = ctx->priv;
774  AVFilterLink *outlink = ctx->outputs[0];
775  AVFrame *src = in;
776  char buf[32];
777  int i, ret, last_stage = -1;
778  int frame_changed;
779 
780  frame_changed = in->width != link->w ||
781  in->height != link->h ||
782  in->format != link->format ||
783  in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
784  in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;
785 
786  if (s->eval_mode == EVAL_MODE_FRAME || frame_changed) {
787  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
788 
789  av_expr_count_vars(s->w_pexpr, vars_w, VARS_NB);
790  av_expr_count_vars(s->h_pexpr, vars_h, VARS_NB);
791 
792  if (s->eval_mode == EVAL_MODE_FRAME && !frame_changed && ctx->filter != &ff_vf_scale2ref_npp &&
793  !(vars_w[VAR_N] || vars_w[VAR_T] || vars_w[VAR_POS]) &&
794  !(vars_h[VAR_N] || vars_h[VAR_T] || vars_h[VAR_POS]) &&
795  s->w && s->h)
796  goto scale;
797 
798  if (s->eval_mode == EVAL_MODE_INIT) {
799  snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
800  av_opt_set(s, "w", buf, 0);
801  snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
802  av_opt_set(s, "h", buf, 0);
803 
804  ret = nppscale_parse_expr(ctx, NULL, &s->w_pexpr, "width", s->w_expr);
805  if (ret < 0)
806  return ret;
807 
808  ret = nppscale_parse_expr(ctx, NULL, &s->h_pexpr, "height", s->h_expr);
809  if (ret < 0)
810  return ret;
811  }
812 
813  if (ctx->filter == &ff_vf_scale2ref_npp) {
814  s->var_values[VAR_S2R_MAIN_N] = link->frame_count_out;
815  s->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
816  s->var_values[VAR_S2R_MAIN_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
817  } else {
818  s->var_values[VAR_N] = link->frame_count_out;
819  s->var_values[VAR_T] = TS2T(in->pts, link->time_base);
820  s->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
821  }
822 
823  link->format = in->format;
824  link->w = in->width;
825  link->h = in->height;
826 
827  link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
828  link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
829 
830  if ((ret = config_props(outlink)) < 0)
831  return ret;
832  }
833 
834 scale:
835  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
836  if (!s->stages[i].stage_needed)
837  continue;
838 
839  ret = nppscale_process[i](ctx, &s->stages[i], s->stages[i].frame, src);
840  if (ret < 0)
841  return ret;
842 
843  src = s->stages[i].frame;
844  last_stage = i;
845  }
846  if (last_stage < 0)
847  return AVERROR_BUG;
848 
849  ret = av_hwframe_get_buffer(src->hw_frames_ctx, s->tmp_frame, 0);
850  if (ret < 0)
851  return ret;
852 
853  s->tmp_frame->width = src->width;
854  s->tmp_frame->height = src->height;
855 
856  av_frame_move_ref(out, src);
857  av_frame_move_ref(src, s->tmp_frame);
858 
859  ret = av_frame_copy_props(out, in);
860  if (ret < 0)
861  return ret;
862 
863  return 0;
864 }
865 
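/* filter_frame callback: pass the frame through when no scaling or format
 * conversion is needed, otherwise scale it inside the input's CUDA
 * context and forward the result. */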
866 static int nppscale_filter_frame(AVFilterLink *link, AVFrame *in)
867 {
868  AVFilterContext *ctx = link->dst;
869  NPPScaleContext *s = ctx->priv;
870  AVFilterLink *outlink = ctx->outputs[0];
871  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)outlink->hw_frames_ctx->data;
872  AVCUDADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx;
873 
874  AVFrame *out = NULL;
875  CUcontext dummy;
876  int ret = 0;
877 
878  if (s->passthrough)
879  return ff_filter_frame(outlink, in);
880 
881  out = av_frame_alloc();
882  if (!out) {
883  ret = AVERROR(ENOMEM);
884  goto fail;
885  }
886 
887  ret = CHECK_CU(device_hwctx->internal->cuda_dl->cuCtxPushCurrent(device_hwctx->cuda_ctx));
888  if (ret < 0)
889  goto fail;
890 
891  ret = nppscale_scale(link, out, in);
892 
893  CHECK_CU(device_hwctx->internal->cuda_dl->cuCtxPopCurrent(&dummy));
894  if (ret < 0)
895  goto fail;
896 
897  av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
898  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
899  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
900  INT_MAX);
901 
902  av_frame_free(&in);
903  return ff_filter_frame(outlink, out);
904 fail:
905  av_frame_free(&in);
906  av_frame_free(&out);
907  return ret;
908 }
909 
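/* filter_frame callback for the scale2ref_npp reference input: the frame
 * is forwarded unchanged, only expression variables are updated. */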
910 static int nppscale_filter_frame_ref(AVFilterLink *link, AVFrame *in)
911 {
912  NPPScaleContext *scale = link->dst->priv;
913  AVFilterLink *outlink = link->dst->outputs[1];
914  int frame_changed;
915 
916  frame_changed = in->width != link->w ||
917  in->height != link->h ||
918  in->format != link->format ||
919  in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
920  in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;
921 
922  if (frame_changed) {
923  link->format = in->format;
924  link->w = in->width;
925  link->h = in->height;
926  link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
927  link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
928 
929  config_props_ref(outlink);
930  }
931 
932  if (scale->eval_mode == EVAL_MODE_FRAME) {
933  scale->var_values[VAR_N] = link->frame_count_out;
934  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
935  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
936  }
937 
938  return ff_filter_frame(outlink, in);
939 }
940 
941 static int request_frame(AVFilterLink *outlink)
942 {
943  return ff_request_frame(outlink->src->inputs[0]);
944 }
945 
946 static int request_frame_ref(AVFilterLink *outlink)
947 {
948  return ff_request_frame(outlink->src->inputs[1]);
949 }
950 
951 #define OFFSET(x) offsetof(NPPScaleContext, x)
952 #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
953 static const AVOption options[] = {
954  { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
955  { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
956  { "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
957  { "s", "Output video size", OFFSET(size_str), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
958 
959  { "interp_algo", "Interpolation algorithm used for resizing", OFFSET(interp_algo), AV_OPT_TYPE_INT, { .i64 = NPPI_INTER_CUBIC }, 0, INT_MAX, FLAGS, "interp_algo" },
960  { "nn", "nearest neighbour", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_NN }, 0, 0, FLAGS, "interp_algo" },
961  { "linear", "linear", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_LINEAR }, 0, 0, FLAGS, "interp_algo" },
962  { "cubic", "cubic", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC }, 0, 0, FLAGS, "interp_algo" },
963  { "cubic2p_bspline", "2-parameter cubic (B=1, C=0)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_BSPLINE }, 0, 0, FLAGS, "interp_algo" },
964  { "cubic2p_catmullrom", "2-parameter cubic (B=0, C=1/2)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_CATMULLROM }, 0, 0, FLAGS, "interp_algo" },
965  { "cubic2p_b05c03", "2-parameter cubic (B=1/2, C=3/10)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_B05C03 }, 0, 0, FLAGS, "interp_algo" },
966  { "super", "supersampling", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_SUPER }, 0, 0, FLAGS, "interp_algo" },
967  { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_LANCZOS }, 0, 0, FLAGS, "interp_algo" },
968  { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, FLAGS, "force_oar" },
969  { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
970  { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
971  { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
972  { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 256, FLAGS },
973  { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, { .i64 = EVAL_MODE_INIT }, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
974  { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, { .i64 = EVAL_MODE_INIT }, 0, 0, FLAGS, "eval" },
975  { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, { .i64 = EVAL_MODE_FRAME }, 0, 0, FLAGS, "eval" },
976  { NULL },
977 };
978 
979 static const AVClass nppscale_class = {
980  .class_name = "nppscale",
981  .item_name = av_default_item_name,
982  .option = options,
983  .version = LIBAVUTIL_VERSION_INT,
984  .category = AV_CLASS_CATEGORY_FILTER,
985 };
986 
987 static const AVFilterPad nppscale_inputs[] = {
988  {
989  .name = "default",
990  .type = AVMEDIA_TYPE_VIDEO,
991  .filter_frame = nppscale_filter_frame,
992  }
993 };
994 
995 static const AVFilterPad nppscale_outputs[] = {
996  {
997  .name = "default",
998  .type = AVMEDIA_TYPE_VIDEO,
999  .config_props = config_props,
1000  }
1001 };
1002 
1003 const AVFilter ff_vf_scale_npp = {
1004  .name = "scale_npp",
1005  .description = NULL_IF_CONFIG_SMALL("NVIDIA Performance Primitives video "
1006  "scaling and format conversion"),
1007 
1008  .init = nppscale_init,
1009  .uninit = nppscale_uninit,
1010 
1011  .priv_size = sizeof(NPPScaleContext),
1012  .priv_class = &nppscale_class,
1013 
1014  FILTER_INPUTS(nppscale_inputs),
1015  FILTER_OUTPUTS(nppscale_outputs),
1016 
1017  FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
1018 
1019  .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
1020 };
1021 
1022 static const AVFilterPad nppscale2ref_inputs[] = {
1023  {
1024  .name = "default",
1025  .type = AVMEDIA_TYPE_VIDEO,
1026  .filter_frame = nppscale_filter_frame,
1027  },
1028  {
1029  .name = "ref",
1030  .type = AVMEDIA_TYPE_VIDEO,
1031  .filter_frame = nppscale_filter_frame_ref,
1032  }
1033 };
1034 
1035 static const AVFilterPad nppscale2ref_outputs[] = {
1036  {
1037  .name = "default",
1038  .type = AVMEDIA_TYPE_VIDEO,
1039  .config_props = config_props,
1040  .request_frame= request_frame,
1041  },
1042  {
1043  .name = "ref",
1044  .type = AVMEDIA_TYPE_VIDEO,
1045  .config_props = config_props_ref,
1046  .request_frame= request_frame_ref,
1047  }
1048 };
1049 
1050 const AVFilter ff_vf_scale2ref_npp = {
1051  .name = "scale2ref_npp",
1052  .description = NULL_IF_CONFIG_SMALL("NVIDIA Performance Primitives video "
1053  "scaling and format conversion to the "
1054  "given reference."),
1055 
1056  .init = nppscale_init,
1057  .uninit = nppscale_uninit,
1058 
1059  .priv_size = sizeof(NPPScaleContext),
1060  .priv_class = &nppscale_class,
1061 
1062  FILTER_INPUTS(nppscale2ref_inputs),
1063  FILTER_OUTPUTS(nppscale2ref_outputs),
1064 
1065  FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
1066 
1067  .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
1068 };
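An illustrative invocation of the scale_npp filter defined above, assuming an FFmpeg build with CUDA and libnpp support; the file names and the h264_nvenc encoder choice are placeholders, not taken from this file:

  ffmpeg -hwaccel cuda -hwaccel_output_format cuda -i input.mp4 \
         -vf "scale_npp=w=1280:h=-2:format=nv12:interp_algo=lanczos" \
         -c:v h264_nvenc output.mp4

w, h, format and interp_algo correspond to the options table above; as with the software scale filter, a negative height such as -2 should keep the input aspect ratio while rounding the result to a multiple of 2.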