FFmpeg
vf_scale.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2007 Bobby Bingham
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * scale video filter
24  */
25 
26 #include <stdio.h>
27 #include <string.h>
28 
29 #include "avfilter.h"
30 #include "formats.h"
31 #include "internal.h"
32 #include "scale_eval.h"
33 #include "video.h"
34 #include "libavutil/avstring.h"
35 #include "libavutil/eval.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/opt.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/imgutils.h"
42 #include "libavutil/avassert.h"
43 #include "libswscale/swscale.h"
44 
/* Names of the variables available in the width/height expressions.
 * NOTE(review): the order must stay in sync with the var_name enum used to
 * index the av_expr_count_vars() result arrays — confirm against that enum.
 * The "main_*"/"mdar" entries refer to the main input and are only
 * meaningful for the scale2ref filter. */
static const char *const var_names[] = {
    "in_w",   "iw",     /* input width */
    "in_h",   "ih",     /* input height */
    "out_w",  "ow",     /* output width */
    "out_h",  "oh",     /* output height */
    "a",                /* input aspect ratio: iw/ih */
    "sar",              /* input sample aspect ratio */
    "dar",              /* input display aspect ratio: a*sar */
    "hsub",             /* input horizontal chroma subsample factor */
    "vsub",             /* input vertical chroma subsample factor */
    "ohsub",            /* output horizontal chroma subsample factor */
    "ovsub",            /* output vertical chroma subsample factor */
    "n",                /* frame count */
    "t",                /* frame timestamp in seconds */
    "pos",              /* frame position in the input file */
    "main_w",
    "main_h",
    "main_a",
    "main_sar",
    "main_dar", "mdar",
    "main_hsub",
    "main_vsub",
    "main_n",
    "main_t",
    "main_pos",
    NULL
};
72 
73 enum var_name {
99 };
100 
101 enum EvalMode {
105 };
106 
107 typedef struct ScaleContext {
108  const AVClass *class;
109  struct SwsContext *sws; ///< software scaler context
110  struct SwsContext *isws[2]; ///< software scaler context for interlaced material
112 
113  /**
114  * New dimensions. Special values are:
115  * 0 = original width/height
116  * -1 = keep original aspect
117  * -N = try to keep aspect but make sure it is divisible by N
118  */
119  int w, h;
120  char *size_str;
121  unsigned int flags; ///sws flags
122  double param[2]; // sws params
123 
124  int hsub, vsub; ///< chroma subsampling
125  int slice_y; ///< top of current output slice
126  int input_is_pal; ///< set to 1 if the input format is paletted
127  int output_is_pal; ///< set to 1 if the output format is paletted
129 
130  char *w_expr; ///< width expression string
131  char *h_expr; ///< height expression string
135 
136  char *flags_str;
137 
140 
141  int in_range;
143 
148 
151 
153 
154  int eval_mode; ///< expression evaluation mode
155 
156 } ScaleContext;
157 
159 
160 static int config_props(AVFilterLink *outlink);
161 
163 {
164  ScaleContext *scale = ctx->priv;
165  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
166 
167  if (!scale->w_pexpr && !scale->h_pexpr)
168  return AVERROR(EINVAL);
169 
170  if (scale->w_pexpr)
171  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
172  if (scale->h_pexpr)
173  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
174 
175  if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
176  av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
177  return AVERROR(EINVAL);
178  }
179 
180  if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
181  av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
182  return AVERROR(EINVAL);
183  }
184 
185  if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
186  (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
187  av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr);
188  }
189 
190  if (ctx->filter != &ff_vf_scale2ref &&
191  (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] ||
192  vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] ||
193  vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] ||
194  vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] ||
195  vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] ||
196  vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] ||
197  vars_w[VAR_S2R_MAIN_HSUB] || vars_h[VAR_S2R_MAIN_HSUB] ||
198  vars_w[VAR_S2R_MAIN_VSUB] || vars_h[VAR_S2R_MAIN_VSUB] ||
199  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
200  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
201  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
202  av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref variables are not valid in scale filter.\n");
203  return AVERROR(EINVAL);
204  }
205 
206  if (scale->eval_mode == EVAL_MODE_INIT &&
207  (vars_w[VAR_N] || vars_h[VAR_N] ||
208  vars_w[VAR_T] || vars_h[VAR_T] ||
209  vars_w[VAR_POS] || vars_h[VAR_POS] ||
210  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
211  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
212  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
213  av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', 'pos' are not valid in init eval_mode.\n");
214  return AVERROR(EINVAL);
215  }
216 
217  return 0;
218 }
219 
220 static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
221 {
222  ScaleContext *scale = ctx->priv;
223  int ret, is_inited = 0;
224  char *old_str_expr = NULL;
225  AVExpr *old_pexpr = NULL;
226 
227  if (str_expr) {
228  old_str_expr = av_strdup(str_expr);
229  if (!old_str_expr)
230  return AVERROR(ENOMEM);
231  av_opt_set(scale, var, args, 0);
232  }
233 
234  if (*pexpr_ptr) {
235  old_pexpr = *pexpr_ptr;
236  *pexpr_ptr = NULL;
237  is_inited = 1;
238  }
239 
240  ret = av_expr_parse(pexpr_ptr, args, var_names,
241  NULL, NULL, NULL, NULL, 0, ctx);
242  if (ret < 0) {
243  av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var, args);
244  goto revert;
245  }
246 
247  ret = check_exprs(ctx);
248  if (ret < 0)
249  goto revert;
250 
251  if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
252  goto revert;
253 
254  av_expr_free(old_pexpr);
255  old_pexpr = NULL;
256  av_freep(&old_str_expr);
257 
258  return 0;
259 
260 revert:
261  av_expr_free(*pexpr_ptr);
262  *pexpr_ptr = NULL;
263  if (old_str_expr) {
264  av_opt_set(scale, var, old_str_expr, 0);
265  av_free(old_str_expr);
266  }
267  if (old_pexpr)
268  *pexpr_ptr = old_pexpr;
269 
270  return ret;
271 }
272 
274 {
275  ScaleContext *scale = ctx->priv;
276  int ret;
277 
278  if (scale->size_str && (scale->w_expr || scale->h_expr)) {
280  "Size and width/height expressions cannot be set at the same time.\n");
281  return AVERROR(EINVAL);
282  }
283 
284  if (scale->w_expr && !scale->h_expr)
285  FFSWAP(char *, scale->w_expr, scale->size_str);
286 
287  if (scale->size_str) {
288  char buf[32];
289  if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
291  "Invalid size '%s'\n", scale->size_str);
292  return ret;
293  }
294  snprintf(buf, sizeof(buf)-1, "%d", scale->w);
295  av_opt_set(scale, "w", buf, 0);
296  snprintf(buf, sizeof(buf)-1, "%d", scale->h);
297  av_opt_set(scale, "h", buf, 0);
298  }
299  if (!scale->w_expr)
300  av_opt_set(scale, "w", "iw", 0);
301  if (!scale->h_expr)
302  av_opt_set(scale, "h", "ih", 0);
303 
304  ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
305  if (ret < 0)
306  return ret;
307 
308  ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
309  if (ret < 0)
310  return ret;
311 
312  av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
313  scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);
314 
315  scale->flags = 0;
316 
317  if (scale->flags_str) {
318  const AVClass *class = sws_get_class();
319  const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
321  int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
322  if (ret < 0)
323  return ret;
324  }
325  scale->opts = *opts;
326  *opts = NULL;
327 
328  return 0;
329 }
330 
332 {
333  ScaleContext *scale = ctx->priv;
334  av_expr_free(scale->w_pexpr);
335  av_expr_free(scale->h_pexpr);
336  scale->w_pexpr = scale->h_pexpr = NULL;
337  sws_freeContext(scale->sws);
338  sws_freeContext(scale->isws[0]);
339  sws_freeContext(scale->isws[1]);
340  scale->sws = NULL;
341  av_dict_free(&scale->opts);
342 }
343 
345 {
347  enum AVPixelFormat pix_fmt;
348  int ret;
349 
350  if (ctx->inputs[0]) {
351  const AVPixFmtDescriptor *desc = NULL;
352  formats = NULL;
353  while ((desc = av_pix_fmt_desc_next(desc))) {
357  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
358  return ret;
359  }
360  }
361  if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->outcfg.formats)) < 0)
362  return ret;
363  }
364  if (ctx->outputs[0]) {
365  const AVPixFmtDescriptor *desc = NULL;
366  formats = NULL;
367  while ((desc = av_pix_fmt_desc_next(desc))) {
371  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
372  return ret;
373  }
374  }
375  if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->incfg.formats)) < 0)
376  return ret;
377  }
378 
379  return 0;
380 }
381 
382 static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
383 {
384  if (!s)
385  s = "bt601";
386 
387  if (s && strstr(s, "bt709")) {
388  colorspace = AVCOL_SPC_BT709;
389  } else if (s && strstr(s, "fcc")) {
390  colorspace = AVCOL_SPC_FCC;
391  } else if (s && strstr(s, "smpte240m")) {
392  colorspace = AVCOL_SPC_SMPTE240M;
393  } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
394  colorspace = AVCOL_SPC_BT470BG;
395  } else if (s && strstr(s, "bt2020")) {
396  colorspace = AVCOL_SPC_BT2020_NCL;
397  }
398 
399  if (colorspace < 1 || colorspace > 10 || colorspace == 8) {
400  colorspace = AVCOL_SPC_BT470BG;
401  }
402 
403  return sws_getCoefficients(colorspace);
404 }
405 
407 {
408  ScaleContext *scale = ctx->priv;
409  const char scale2ref = ctx->filter == &ff_vf_scale2ref;
410  const AVFilterLink *inlink = scale2ref ? ctx->inputs[1] : ctx->inputs[0];
411  const AVFilterLink *outlink = ctx->outputs[0];
413  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
414  char *expr;
415  int eval_w, eval_h;
416  int ret;
417  double res;
418  const AVPixFmtDescriptor *main_desc;
419  const AVFilterLink *main_link;
420 
421  if (scale2ref) {
422  main_link = ctx->inputs[0];
423  main_desc = av_pix_fmt_desc_get(main_link->format);
424  }
425 
426  scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w;
427  scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h;
428  scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
429  scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
430  scale->var_values[VAR_A] = (double) inlink->w / inlink->h;
431  scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
432  (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
433  scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
434  scale->var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
435  scale->var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
436  scale->var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
437  scale->var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;
438 
439  if (scale2ref) {
440  scale->var_values[VAR_S2R_MAIN_W] = main_link->w;
441  scale->var_values[VAR_S2R_MAIN_H] = main_link->h;
442  scale->var_values[VAR_S2R_MAIN_A] = (double) main_link->w / main_link->h;
443  scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
444  (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
447  scale->var_values[VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w;
448  scale->var_values[VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h;
449  }
450 
451  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
452  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;
453 
454  res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
455  if (isnan(res)) {
456  expr = scale->h_expr;
457  ret = AVERROR(EINVAL);
458  goto fail;
459  }
460  eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int) res == 0 ? inlink->h : (int) res;
461 
462  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
463  if (isnan(res)) {
464  expr = scale->w_expr;
465  ret = AVERROR(EINVAL);
466  goto fail;
467  }
468  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;
469 
470  scale->w = eval_w;
471  scale->h = eval_h;
472 
473  return 0;
474 
475 fail:
477  "Error when evaluating the expression '%s'.\n", expr);
478  return ret;
479 }
480 
481 static int config_props(AVFilterLink *outlink)
482 {
483  AVFilterContext *ctx = outlink->src;
484  AVFilterLink *inlink0 = outlink->src->inputs[0];
485  AVFilterLink *inlink = ctx->filter == &ff_vf_scale2ref ?
486  outlink->src->inputs[1] :
487  outlink->src->inputs[0];
488  enum AVPixelFormat outfmt = outlink->format;
490  ScaleContext *scale = ctx->priv;
491  int ret;
492 
493  if ((ret = scale_eval_dimensions(ctx)) < 0)
494  goto fail;
495 
496  ff_scale_adjust_dimensions(inlink, &scale->w, &scale->h,
498  scale->force_divisible_by);
499 
500  if (scale->w > INT_MAX ||
501  scale->h > INT_MAX ||
502  (scale->h * inlink->w) > INT_MAX ||
503  (scale->w * inlink->h) > INT_MAX)
504  av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
505 
506  outlink->w = scale->w;
507  outlink->h = scale->h;
508 
509  /* TODO: make algorithm configurable */
510 
511  scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL;
512  if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
514 
515  if (scale->sws)
516  sws_freeContext(scale->sws);
517  if (scale->isws[0])
518  sws_freeContext(scale->isws[0]);
519  if (scale->isws[1])
520  sws_freeContext(scale->isws[1]);
521  scale->isws[0] = scale->isws[1] = scale->sws = NULL;
522  if (inlink0->w == outlink->w &&
523  inlink0->h == outlink->h &&
524  !scale->out_color_matrix &&
525  scale->in_range == scale->out_range &&
526  inlink0->format == outlink->format)
527  ;
528  else {
529  struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
530  int i;
531 
532  for (i = 0; i < 3; i++) {
533  int in_v_chr_pos = scale->in_v_chr_pos, out_v_chr_pos = scale->out_v_chr_pos;
534  struct SwsContext **s = swscs[i];
535  *s = sws_alloc_context();
536  if (!*s)
537  return AVERROR(ENOMEM);
538 
539  av_opt_set_int(*s, "srcw", inlink0 ->w, 0);
540  av_opt_set_int(*s, "srch", inlink0 ->h >> !!i, 0);
541  av_opt_set_int(*s, "src_format", inlink0->format, 0);
542  av_opt_set_int(*s, "dstw", outlink->w, 0);
543  av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
544  av_opt_set_int(*s, "dst_format", outfmt, 0);
545  av_opt_set_int(*s, "sws_flags", scale->flags, 0);
546  av_opt_set_int(*s, "param0", scale->param[0], 0);
547  av_opt_set_int(*s, "param1", scale->param[1], 0);
548  if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
549  av_opt_set_int(*s, "src_range",
550  scale->in_range == AVCOL_RANGE_JPEG, 0);
551  if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
552  av_opt_set_int(*s, "dst_range",
553  scale->out_range == AVCOL_RANGE_JPEG, 0);
554 
555  if (scale->opts) {
556  AVDictionaryEntry *e = NULL;
557  while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
558  if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
559  return ret;
560  }
561  }
562  /* Override YUV420P default settings to have the correct (MPEG-2) chroma positions
563  * MPEG-2 chroma positions are used by convention
564  * XXX: support other 4:2:0 pixel formats */
565  if (inlink0->format == AV_PIX_FMT_YUV420P && scale->in_v_chr_pos == -513) {
566  in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
567  }
568 
569  if (outlink->format == AV_PIX_FMT_YUV420P && scale->out_v_chr_pos == -513) {
570  out_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
571  }
572 
573  av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
574  av_opt_set_int(*s, "src_v_chr_pos", in_v_chr_pos, 0);
575  av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
576  av_opt_set_int(*s, "dst_v_chr_pos", out_v_chr_pos, 0);
577 
578  if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
579  return ret;
580  if (!scale->interlaced)
581  break;
582  }
583  }
584 
585  if (inlink0->sample_aspect_ratio.num){
586  outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink0->w, outlink->w * inlink0->h}, inlink0->sample_aspect_ratio);
587  } else
588  outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
589 
590  av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
591  inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
592  inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
593  outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
594  outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
595  scale->flags);
596  return 0;
597 
598 fail:
599  return ret;
600 }
601 
602 static int config_props_ref(AVFilterLink *outlink)
603 {
604  AVFilterLink *inlink = outlink->src->inputs[1];
605 
606  outlink->w = inlink->w;
607  outlink->h = inlink->h;
608  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
609  outlink->time_base = inlink->time_base;
610  outlink->frame_rate = inlink->frame_rate;
611 
612  return 0;
613 }
614 
/* Forward a frame request on the scaled output to the main input. */
static int request_frame(AVFilterLink *outlink)
{
    return ff_request_frame(outlink->src->inputs[0]);
}
619 
/* Forward a frame request on the "ref" output to the reference input. */
static int request_frame_ref(AVFilterLink *outlink)
{
    return ff_request_frame(outlink->src->inputs[1]);
}
624 
625 static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
626 {
627  ScaleContext *scale = link->dst->priv;
628  const uint8_t *in[4];
629  uint8_t *out[4];
630  int in_stride[4],out_stride[4];
631  int i;
632 
633  for (i=0; i<4; i++) {
634  int vsub= ((i+1)&2) ? scale->vsub : 0;
635  ptrdiff_t in_offset = ((y>>vsub)+field) * cur_pic->linesize[i];
636  ptrdiff_t out_offset = field * out_buf->linesize[i];
637  in_stride[i] = cur_pic->linesize[i] * mul;
638  out_stride[i] = out_buf->linesize[i] * mul;
639  in[i] = FF_PTR_ADD(cur_pic->data[i], in_offset);
640  out[i] = FF_PTR_ADD(out_buf->data[i], out_offset);
641  }
642  if (scale->input_is_pal)
643  in[1] = cur_pic->data[1];
644  if (scale->output_is_pal)
645  out[1] = out_buf->data[1];
646 
647  return sws_scale(sws, in, in_stride, y/mul, h,
648  out,out_stride);
649 }
650 
651 static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
652 {
653  AVFilterContext *ctx = link->dst;
654  ScaleContext *scale = ctx->priv;
655  AVFilterLink *outlink = ctx->outputs[0];
656  AVFrame *out;
658  char buf[32];
659  int in_range;
660  int frame_changed;
661 
662  *frame_out = NULL;
663  if (in->colorspace == AVCOL_SPC_YCGCO)
664  av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
665 
666  frame_changed = in->width != link->w ||
667  in->height != link->h ||
668  in->format != link->format ||
671 
672  if (scale->eval_mode == EVAL_MODE_FRAME || frame_changed) {
673  int ret;
674  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
675 
676  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
677  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
678 
679  if (scale->eval_mode == EVAL_MODE_FRAME &&
680  !frame_changed &&
681  ctx->filter != &ff_vf_scale2ref &&
682  !(vars_w[VAR_N] || vars_w[VAR_T] || vars_w[VAR_POS]) &&
683  !(vars_h[VAR_N] || vars_h[VAR_T] || vars_h[VAR_POS]) &&
684  scale->w && scale->h)
685  goto scale;
686 
687  if (scale->eval_mode == EVAL_MODE_INIT) {
688  snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
689  av_opt_set(scale, "w", buf, 0);
690  snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
691  av_opt_set(scale, "h", buf, 0);
692 
693  ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
694  if (ret < 0)
695  return ret;
696 
697  ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
698  if (ret < 0)
699  return ret;
700  }
701 
702  if (ctx->filter == &ff_vf_scale2ref) {
703  scale->var_values[VAR_S2R_MAIN_N] = link->frame_count_out;
704  scale->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
705  scale->var_values[VAR_S2R_MAIN_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
706  } else {
707  scale->var_values[VAR_N] = link->frame_count_out;
708  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
709  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
710  }
711 
712  link->dst->inputs[0]->format = in->format;
713  link->dst->inputs[0]->w = in->width;
714  link->dst->inputs[0]->h = in->height;
715 
716  link->dst->inputs[0]->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
717  link->dst->inputs[0]->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
718 
719  if ((ret = config_props(outlink)) < 0)
720  return ret;
721  }
722 
723 scale:
724  if (!scale->sws) {
725  *frame_out = in;
726  return 0;
727  }
728 
729  scale->hsub = desc->log2_chroma_w;
730  scale->vsub = desc->log2_chroma_h;
731 
732  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
733  if (!out) {
734  av_frame_free(&in);
735  return AVERROR(ENOMEM);
736  }
737  *frame_out = out;
738 
740  out->width = outlink->w;
741  out->height = outlink->h;
742 
743  if (scale->output_is_pal)
744  avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
745 
746  in_range = in->color_range;
747 
748  if ( scale->in_color_matrix
749  || scale->out_color_matrix
750  || scale-> in_range != AVCOL_RANGE_UNSPECIFIED
751  || in_range != AVCOL_RANGE_UNSPECIFIED
752  || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
753  int in_full, out_full, brightness, contrast, saturation;
754  const int *inv_table, *table;
755 
756  sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
757  (int **)&table, &out_full,
759 
760  if (scale->in_color_matrix)
761  inv_table = parse_yuv_type(scale->in_color_matrix, in->colorspace);
762  if (scale->out_color_matrix)
764  else if (scale->in_color_matrix)
765  table = inv_table;
766 
767  if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
768  in_full = (scale-> in_range == AVCOL_RANGE_JPEG);
769  else if (in_range != AVCOL_RANGE_UNSPECIFIED)
770  in_full = (in_range == AVCOL_RANGE_JPEG);
771  if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
772  out_full = (scale->out_range == AVCOL_RANGE_JPEG);
773 
774  sws_setColorspaceDetails(scale->sws, inv_table, in_full,
775  table, out_full,
777  if (scale->isws[0])
778  sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
779  table, out_full,
781  if (scale->isws[1])
782  sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
783  table, out_full,
785 
786  out->color_range = out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
787  }
788 
789  av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
790  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
791  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
792  INT_MAX);
793 
794  if (scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)) {
795  scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
796  scale_slice(link, out, in, scale->isws[1], 0, link->h /2, 2, 1);
797  } else if (scale->nb_slices) {
798  int i, slice_h, slice_start, slice_end = 0;
799  const int nb_slices = FFMIN(scale->nb_slices, link->h);
800  for (i = 0; i < nb_slices; i++) {
801  slice_start = slice_end;
802  slice_end = (link->h * (i+1)) / nb_slices;
803  slice_h = slice_end - slice_start;
804  scale_slice(link, out, in, scale->sws, slice_start, slice_h, 1, 0);
805  }
806  } else {
807  scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
808  }
809 
810  av_frame_free(&in);
811  return 0;
812 }
813 
815 {
816  AVFilterContext *ctx = link->dst;
817  AVFilterLink *outlink = ctx->outputs[0];
818  AVFrame *out;
819  int ret;
820 
821  ret = scale_frame(link, in, &out);
822  if (out)
823  return ff_filter_frame(outlink, out);
824 
825  return ret;
826 }
827 
829 {
830  ScaleContext *scale = link->dst->priv;
831  AVFilterLink *outlink = link->dst->outputs[1];
832  int frame_changed;
833 
834  frame_changed = in->width != link->w ||
835  in->height != link->h ||
836  in->format != link->format ||
839 
840  if (frame_changed) {
841  link->format = in->format;
842  link->w = in->width;
843  link->h = in->height;
846 
847  config_props_ref(outlink);
848  }
849 
850  if (scale->eval_mode == EVAL_MODE_FRAME) {
851  scale->var_values[VAR_N] = link->frame_count_out;
852  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
853  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
854  }
855 
856  return ff_filter_frame(outlink, in);
857 }
858 
859 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
860  char *res, int res_len, int flags)
861 {
862  ScaleContext *scale = ctx->priv;
863  char *str_expr;
864  AVExpr **pexpr_ptr;
865  int ret, w, h;
866 
867  w = !strcmp(cmd, "width") || !strcmp(cmd, "w");
868  h = !strcmp(cmd, "height") || !strcmp(cmd, "h");
869 
870  if (w || h) {
871  str_expr = w ? scale->w_expr : scale->h_expr;
872  pexpr_ptr = w ? &scale->w_pexpr : &scale->h_pexpr;
873 
874  ret = scale_parse_expr(ctx, str_expr, pexpr_ptr, cmd, args);
875  } else
876  ret = AVERROR(ENOSYS);
877 
878  if (ret < 0)
879  av_log(ctx, AV_LOG_ERROR, "Failed to process command. Continuing with existing parameters.\n");
880 
881  return ret;
882 }
883 
884 static const AVClass *child_class_iterate(void **iter)
885 {
886  const AVClass *c = *iter ? NULL : sws_get_class();
887  *iter = (void*)(uintptr_t)c;
888  return c;
889 }
890 
891 #define OFFSET(x) offsetof(ScaleContext, x)
892 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
893 #define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
894 
/* Options shared by the scale and scale2ref filters. */
static const AVOption scale_options[] = {
    /* output size expressions (runtime-settable via process_command) */
    { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    /* scaler behaviour */
    { "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
    { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_BOOL, {.i64 = 0 }, -1, 1, FLAGS },
    { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    /* YCbCr matrix selection (string options + named constants, unit "color") */
    { "in_color_matrix", "set input YCbCr type", OFFSET(in_color_matrix), AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS, "color" },
    { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS, "color"},
    { "auto", NULL, 0, AV_OPT_TYPE_CONST, { .str = "auto" }, 0, 0, FLAGS, "color" },
    { "bt601", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt601" }, 0, 0, FLAGS, "color" },
    { "bt470", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt470" }, 0, 0, FLAGS, "color" },
    { "smpte170m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte170m" }, 0, 0, FLAGS, "color" },
    { "bt709", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt709" }, 0, 0, FLAGS, "color" },
    { "fcc", NULL, 0, AV_OPT_TYPE_CONST, { .str = "fcc" }, 0, 0, FLAGS, "color" },
    { "smpte240m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte240m" }, 0, 0, FLAGS, "color" },
    { "bt2020", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt2020" }, 0, 0, FLAGS, "color" },
    /* color range selection (unit "range") */
    { "in_range", "set input color range", OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
    { "unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
    { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "limited",NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    /* chroma sample position overrides; -513 means "not set" */
    { "in_v_chr_pos", "input vertical chroma position in luma grid/256" , OFFSET(in_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "in_h_chr_pos", "input horizontal chroma position in luma grid/256", OFFSET(in_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "out_v_chr_pos", "output vertical chroma position in luma grid/256" , OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    /* aspect-ratio preservation */
    { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
    { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
    { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
    { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
    { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1}, 1, 256, FLAGS },
    /* scaler tuning */
    { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
    { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
    { "nb_slices", "set the number of slices (debug purpose only)", OFFSET(nb_slices), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    /* expression evaluation mode */
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
    { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
    { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { NULL }
};
941 
/* AVClass for the scale filter; the libswscale options class is exposed
 * as a child via child_class_iterate(). */
static const AVClass scale_class = {
    .class_name       = "scale",
    .item_name        = av_default_item_name,
    .option           = scale_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .category         = AV_CLASS_CATEGORY_FILTER,
    .child_class_iterate = child_class_iterate,
};
950 
952  {
953  .name = "default",
954  .type = AVMEDIA_TYPE_VIDEO,
955  .filter_frame = filter_frame,
956  },
957  { NULL }
958 };
959 
961  {
962  .name = "default",
963  .type = AVMEDIA_TYPE_VIDEO,
964  .config_props = config_props,
965  },
966  { NULL }
967 };
968 
970  .name = "scale",
971  .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
972  .init_dict = init_dict,
973  .uninit = uninit,
974  .query_formats = query_formats,
975  .priv_size = sizeof(ScaleContext),
976  .priv_class = &scale_class,
980 };
981 
/* AVClass for the scale2ref filter; shares scale_options with the scale
 * filter and exposes the libswscale options class as a child. */
static const AVClass scale2ref_class = {
    .class_name       = "scale2ref",
    .item_name        = av_default_item_name,
    .option           = scale_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .category         = AV_CLASS_CATEGORY_FILTER,
    .child_class_iterate = child_class_iterate,
};
990 
992  {
993  .name = "default",
994  .type = AVMEDIA_TYPE_VIDEO,
995  .filter_frame = filter_frame,
996  },
997  {
998  .name = "ref",
999  .type = AVMEDIA_TYPE_VIDEO,
1000  .filter_frame = filter_frame_ref,
1001  },
1002  { NULL }
1003 };
1004 
1006  {
1007  .name = "default",
1008  .type = AVMEDIA_TYPE_VIDEO,
1009  .config_props = config_props,
1010  .request_frame= request_frame,
1011  },
1012  {
1013  .name = "ref",
1014  .type = AVMEDIA_TYPE_VIDEO,
1015  .config_props = config_props_ref,
1016  .request_frame= request_frame_ref,
1017  },
1018  { NULL }
1019 };
1020 
1022  .name = "scale2ref",
1023  .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
1024  .init_dict = init_dict,
1025  .uninit = uninit,
1026  .query_formats = query_formats,
1027  .priv_size = sizeof(ScaleContext),
1028  .priv_class = &scale2ref_class,
1032 };
filter_frame_ref
static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
Definition: vf_scale.c:828
ScaleContext::param
double param[2]
sws flags
Definition: vf_scale.c:122
VAR_S2R_MAIN_SAR
@ VAR_S2R_MAIN_SAR
Definition: vf_scale.c:91
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
VAR_S2R_MAIN_A
@ VAR_S2R_MAIN_A
Definition: vf_scale.c:90
VAR_HSUB
@ VAR_HSUB
Definition: vf_scale.c:81
config_props_ref
static int config_props_ref(AVFilterLink *outlink)
Definition: vf_scale.c:602
SwsContext::saturation
int saturation
Definition: swscale_internal.h:421
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:187
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:530
sws_isSupportedOutput
#define sws_isSupportedOutput(x)
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
TFLAGS
#define TFLAGS
Definition: vf_scale.c:893
sws_isSupportedInput
#define sws_isSupportedInput(x)
check_exprs
static int check_exprs(AVFilterContext *ctx)
Definition: vf_scale.c:162
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ScaleContext::input_is_pal
int input_is_pal
set to 1 if the input format is paletted
Definition: vf_scale.c:126
out
FILE * out
Definition: movenc.c:54
ScaleContext
Definition: vf_scale.c:107
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:108
ScaleContext::flags
unsigned int flags
Definition: vf_scale.c:121
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:978
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2541
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: vf_scale.c:344
ScaleContext::force_divisible_by
int force_divisible_by
Definition: vf_scale.c:150
avfilter_vf_scale2ref_outputs
static const AVFilterPad avfilter_vf_scale2ref_outputs[]
Definition: vf_scale.c:1005
FLAGS
#define FLAGS
Definition: vf_scale.c:892
ScaleContext::flags_str
char * flags_str
Definition: vf_scale.c:136
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:541
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:396
AVFrame::width
int width
Definition: frame.h:361
w
uint8_t w
Definition: llviddspenc.c:39
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:576
VAR_A
@ VAR_A
Definition: vf_scale.c:78
request_frame_ref
static int request_frame_ref(AVFilterLink *outlink)
Definition: vf_scale.c:620
AVOption
AVOption.
Definition: opt.h:248
scale_parse_expr
static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
Definition: vf_scale.c:220
scale2ref_class
static const AVClass scale2ref_class
Definition: vf_scale.c:982
table
static const uint16_t table[]
Definition: prosumer.c:206
request_frame
static int request_frame(AVFilterLink *outlink)
Definition: vf_scale.c:615
av_pix_fmt_desc_next
const AVPixFmtDescriptor * av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev)
Iterate over all pixel format descriptors known to libavutil.
Definition: pixdesc.c:2548
ff_request_frame
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:395
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:70
VAR_S2R_MAIN_HSUB
@ VAR_S2R_MAIN_HSUB
Definition: vf_scale.c:93
ScaleContext::var_values
double var_values[VARS_NB]
Definition: vf_scale.c:134
ScaleContext::out_range
int out_range
Definition: vf_scale.c:142
VAR_S2R_MDAR
@ VAR_S2R_MDAR
Definition: vf_scale.c:92
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:197
EVAL_MODE_FRAME
@ EVAL_MODE_FRAME
Definition: vf_scale.c:103
VAR_S2R_MAIN_H
@ VAR_S2R_MAIN_H
Definition: vf_scale.c:89
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:745
AVDictionary
Definition: dict.c:30
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:149
ScaleContext::in_h_chr_pos
int in_h_chr_pos
Definition: vf_scale.c:146
VAR_OUT_H
@ VAR_OUT_H
Definition: vf_scale.c:77
ScaleContext::opts
AVDictionary * opts
Definition: vf_scale.c:111
video.h
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:65
formats.h
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
ScaleContext::nb_slices
int nb_slices
Definition: vf_scale.c:152
VAR_S2R_MAIN_POS
@ VAR_S2R_MAIN_POS
Definition: vf_scale.c:97
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:508
VAR_DAR
@ VAR_DAR
Definition: vf_scale.c:80
avfilter_vf_scale_inputs
static const AVFilterPad avfilter_vf_scale_inputs[]
Definition: vf_scale.c:951
fail
#define fail()
Definition: checkasm.h:134
VARS_NB
@ VARS_NB
Definition: vf_scale.c:98
ScaleContext::isws
struct SwsContext * isws[2]
software scaler context for interlaced material
Definition: vf_scale.c:110
ScaleContext::eval_mode
int eval_mode
expression evaluation mode
Definition: vf_scale.c:154
VAR_IN_H
@ VAR_IN_H
Definition: vf_scale.c:75
EVAL_MODE_NB
@ EVAL_MODE_NB
Definition: vf_scale.c:104
sws_get_class
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:95
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:465
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
AVRational::num
int num
Numerator.
Definition: rational.h:59
OFFSET
#define OFFSET(x)
Definition: vf_scale.c:891
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
AV_PIX_FMT_BGR8
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:83
avassert.h
TS2T
#define TS2T(ts, tb)
Definition: internal.h:209
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:181
av_cold
#define av_cold
Definition: attributes.h:90
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
var_name
var_name
Definition: setts_bsf.c:50
ScaleContext::sws
struct SwsContext * sws
software scaler context
Definition: vf_scale.c:109
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
s
#define s(width, name)
Definition: cbs_vp9.c:257
VAR_OH
@ VAR_OH
Definition: vf_scale.c:77
VAR_S2R_MAIN_W
@ VAR_S2R_MAIN_W
Definition: vf_scale.c:88
SwsContext::brightness
int brightness
Definition: swscale_internal.h:421
scale_frame
static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
Definition: vf_scale.c:651
ScaleContext::slice_y
int slice_y
top of current output slice
Definition: vf_scale.c:125
AVFrame::pkt_pos
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:557
AVDictionaryEntry::key
char * key
Definition: dict.h:82
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:227
av_expr_count_vars
int av_expr_count_vars(AVExpr *e, unsigned *counter, int size)
Track the presence of variables and their number of occurrences in a parsed expression.
Definition: eval.c:756
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:459
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2031
init_dict
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
Definition: vf_scale.c:273
VAR_OVSUB
@ VAR_OVSUB
Definition: vf_scale.c:84
ScaleContext::in_color_matrix
char * in_color_matrix
Definition: vf_scale.c:138
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
ctx
AVFormatContext * ctx
Definition: movenc.c:48
process_command
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: vf_scale.c:859
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:40
AVExpr
Definition: eval.c:157
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:81
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
SwsContext::contrast
int contrast
Definition: swscale_internal.h:421
ScaleContext::w_pexpr
AVExpr * w_pexpr
Definition: vf_scale.c:132
avpriv_set_systematic_pal2
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:175
NAN
#define NAN
Definition: mathematics.h:64
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
mul
static float mul(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:40
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1661
ScaleContext::out_h_chr_pos
int out_h_chr_pos
Definition: vf_scale.c:144
opts
AVDictionary * opts
Definition: movenc.c:50
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
NULL
#define NULL
Definition: coverity.c:32
ScaleContext::out_v_chr_pos
int out_v_chr_pos
Definition: vf_scale.c:145
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:536
VAR_T
@ VAR_T
Definition: vf_scale.c:86
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
ScaleContext::in_range
int in_range
Definition: vf_scale.c:141
AVFilterContext::inputs
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:341
VAR_IN_W
@ VAR_IN_W
Definition: vf_scale.c:74
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
ff_add_format
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:325
parseutils.h
sws_alloc_context
struct SwsContext * sws_alloc_context(void)
Allocate an empty SwsContext.
Definition: utils.c:1086
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:105
ScaleContext::h_pexpr
AVExpr * h_pexpr
Definition: vf_scale.c:133
AVCOL_SPC_YCGCO
@ AVCOL_SPC_YCGCO
Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16.
Definition: pixfmt.h:511
sws_setColorspaceDetails
int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation)
Definition: utils.c:865
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:95
ff_vf_scale2ref
const AVFilter ff_vf_scale2ref
Definition: vf_scale.c:158
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:542
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:586
AV_OPT_SEARCH_FAKE_OBJ
#define AV_OPT_SEARCH_FAKE_OBJ
The obj passed to av_opt_find() is fake – only a double pointer to AVClass instead of a required poin...
Definition: opt.h:568
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:37
VAR_IW
@ VAR_IW
Definition: vf_scale.c:74
SWS_PARAM_DEFAULT
#define SWS_PARAM_DEFAULT
Definition: swscale.h:73
eval.h
VAR_IH
@ VAR_IH
Definition: vf_scale.c:75
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
child_class_iterate
static const AVClass * child_class_iterate(void **iter)
Definition: vf_scale.c:884
ScaleContext::w
int w
New dimensions.
Definition: vf_scale.c:119
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:376
scale_eval.h
ScaleContext::hsub
int hsub
Definition: vf_scale.c:124
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
VAR_OUT_W
@ VAR_OUT_W
Definition: vf_scale.c:76
av_pix_fmt_desc_get_id
enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc)
Definition: pixdesc.c:2560
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_scale.c:814
av_parse_video_size
int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str)
Parse str and put in width_ptr and height_ptr the detected values.
Definition: parseutils.c:148
internal.h
VAR_POS
@ VAR_POS
Definition: vf_scale.c:87
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
functionally identical to above
Definition: pixfmt.h:510
AVFrame::interlaced_frame
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:433
ScaleContext::vsub
int vsub
chroma subsampling
Definition: vf_scale.c:124
config_props
static int config_props(AVFilterLink *outlink)
Definition: vf_scale.c:481
interlaced
uint8_t interlaced
Definition: mxfenc.c:2208
ScaleContext::output_is_pal
int output_is_pal
set to 1 if the output format is paletted
Definition: vf_scale.c:127
VAR_SAR
@ VAR_SAR
Definition: vf_scale.c:79
sws_isSupportedEndiannessConversion
int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt)
Definition: utils.c:282
i
int i
Definition: input.c:407
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:513
VAR_S2R_MAIN_N
@ VAR_S2R_MAIN_N
Definition: vf_scale.c:95
internal.h
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:502
EvalMode
EvalMode
Definition: af_volume.h:39
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:505
ScaleContext::h_expr
char * h_expr
height expression string
Definition: vf_scale.c:131
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:559
avfilter_vf_scale_outputs
static const AVFilterPad avfilter_vf_scale_outputs[]
Definition: vf_scale.c:960
AVFilter
Filter definition.
Definition: avfilter.h:145
av_opt_eval_flags
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
parse_yuv_type
static const int * parse_yuv_type(const char *s, enum AVColorSpace colorspace)
Definition: vf_scale.c:382
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:391
sws_getColorspaceDetails
int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation)
Definition: utils.c:1000
ff_scale_adjust_dimensions
int ff_scale_adjust_dimensions(AVFilterLink *inlink, int *ret_w, int *ret_h, int force_original_aspect_ratio, int force_divisible_by)
Transform evaluated width and height obtained from ff_scale_eval_dimensions into actual target width ...
Definition: scale_eval.c:113
sws_init_context
av_warn_unused_result int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter)
Initialize the swscaler context sws_context.
Definition: utils.c:1172
VAR_S2R_MAIN_T
@ VAR_S2R_MAIN_T
Definition: vf_scale.c:96
ScaleContext::out_color_matrix
char * out_color_matrix
Definition: vf_scale.c:139
scale_eval_dimensions
static int scale_eval_dimensions(AVFilterContext *ctx)
Definition: vf_scale.c:406
var_names
static const char *const var_names[]
Definition: vf_scale.c:45
AVFrame::height
int height
Definition: frame.h:361
VAR_S2R_MAIN_DAR
@ VAR_S2R_MAIN_DAR
Definition: vf_scale.c:92
scale_options
static const AVOption scale_options[]
Definition: vf_scale.c:895
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2241
AVRational::den
int den
Denominator.
Definition: rational.h:60
AVCOL_SPC_FCC
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:507
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
avfilter.h
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_scale.c:331
ScaleContext::force_original_aspect_ratio
int force_original_aspect_ratio
Definition: vf_scale.c:149
avfilter_vf_scale2ref_inputs
static const AVFilterPad avfilter_vf_scale2ref_inputs[]
Definition: vf_scale.c:991
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVFilterContext
An instance of a filter.
Definition: avfilter.h:333
VAR_OW
@ VAR_OW
Definition: vf_scale.c:76
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:259
desc
const char * desc
Definition: libsvtav1.c:79
VAR_VSUB
@ VAR_VSUB
Definition: vf_scale.c:82
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
sws_getCoefficients
const int * sws_getCoefficients(int colorspace)
Return a pointer to yuv<->rgb coefficients for the given colorspace suitable for sws_setColorspaceDet...
Definition: yuv2rgb.c:63
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:70
ScaleContext::interlaced
int interlaced
Definition: vf_scale.c:128
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:81
VAR_N
@ VAR_N
Definition: vf_scale.c:85
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
scale_class
static const AVClass scale_class
Definition: vf_scale.c:942
ScaleContext::w_expr
char * w_expr
width expression string
Definition: vf_scale.c:130
EVAL_MODE_INIT
@ EVAL_MODE_INIT
Definition: vf_scale.c:102
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:334
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
h
h
Definition: vp9dsp_template.c:2038
AVDictionaryEntry::value
char * value
Definition: dict.h:83
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
VAR_OHSUB
@ VAR_OHSUB
Definition: vf_scale.c:83
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:504
int
int
Definition: ffmpeg_filter.c:156
SwsContext
Definition: swscale_internal.h:283
AV_PIX_FMT_FLAG_PAL
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:121
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
ff_vf_scale
const AVFilter ff_vf_scale
Definition: vf_scale.c:969
snprintf
#define snprintf
Definition: snprintf.h:34
ScaleContext::size_str
char * size_str
Definition: vf_scale.c:120
VAR_S2R_MAIN_VSUB
@ VAR_S2R_MAIN_VSUB
Definition: vf_scale.c:94
scale_slice
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
Definition: vf_scale.c:625
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:90
swscale.h
ScaleContext::h
int h
Definition: vf_scale.c:119
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:308
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2461
ScaleContext::in_v_chr_pos
int in_v_chr_pos
Definition: vf_scale.c:147
SwsContext::param
double param[2]
Input parameters for scaling algorithms that need them.
Definition: swscale_internal.h:314