FFmpeg
vf_scale.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2007 Bobby Bingham
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * scale video filter
24  */
25 
26 #include <float.h>
27 #include <stdio.h>
28 #include <string.h>
29 
30 #include "avfilter.h"
31 #include "formats.h"
32 #include "internal.h"
33 #include "scale_eval.h"
34 #include "video.h"
35 #include "libavutil/avstring.h"
36 #include "libavutil/eval.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/parseutils.h"
41 #include "libavutil/pixdesc.h"
42 #include "libavutil/imgutils.h"
43 #include "libswscale/swscale.h"
44 
/* Variable names accepted inside the w/h expressions.  This array is passed
 * to av_expr_parse(); its order must stay in sync with the var_name enum,
 * whose values index scale->var_values[] when the expressions are evaluated. */
static const char *const var_names[] = {
    "in_w",   "iw",   ///< input width (two aliases)
    "in_h",   "ih",   ///< input height
    "out_w",  "ow",   ///< output width (self-reference rejected by check_exprs)
    "out_h",  "oh",   ///< output height (self-reference rejected by check_exprs)
    "a",              ///< input aspect ratio: in_w / in_h
    "sar",            ///< input sample aspect ratio
    "dar",            ///< input display aspect ratio: a * sar
    "hsub",           ///< input horizontal chroma subsampling factor
    "vsub",           ///< input vertical chroma subsampling factor
    "ohsub",          ///< output horizontal chroma subsampling factor
    "ovsub",          ///< output vertical chroma subsampling factor
    "n",              ///< input frame count (only valid in eval=frame mode)
    "t",              ///< input frame time in seconds (only valid in eval=frame mode)
#if FF_API_FRAME_PKT
    "pos",            ///< byte position of the frame's packet (guarded by FF_API_FRAME_PKT)
#endif
    /* the "main_*" names below describe the main input of scale2ref and are
     * rejected by check_exprs() when used with the plain scale filter */
    "main_w",
    "main_h",
    "main_a",
    "main_sar",
    "main_dar", "mdar",
    "main_hsub",
    "main_vsub",
    "main_n",
    "main_t",
    "main_pos",
    NULL
};
74 
75 enum var_name {
89 #if FF_API_FRAME_PKT
90  VAR_POS,
91 #endif
103 };
104 
105 enum EvalMode {
109 };
110 
111 typedef struct ScaleContext {
112  const AVClass *class;
113  struct SwsContext *sws; ///< software scaler context
114  struct SwsContext *isws[2]; ///< software scaler context for interlaced material
115  // context used for forwarding options to sws
117 
118  /**
119  * New dimensions. Special values are:
120  * 0 = original width/height
121  * -1 = keep original aspect
122  * -N = try to keep aspect but make sure it is divisible by N
123  */
124  int w, h;
125  char *size_str;
126  double param[2]; // sws params
127 
128  int hsub, vsub; ///< chroma subsampling
129  int slice_y; ///< top of current output slice
130  int input_is_pal; ///< set to 1 if the input format is paletted
131  int output_is_pal; ///< set to 1 if the output format is paletted
133 
134  char *w_expr; ///< width expression string
135  char *h_expr; ///< height expression string
139 
140  char *flags_str;
141 
144 
145  int in_range;
148 
153 
156 
157  int eval_mode; ///< expression evaluation mode
158 
159 } ScaleContext;
160 
162 
163 static int config_props(AVFilterLink *outlink);
164 
166 {
167  ScaleContext *scale = ctx->priv;
168  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
169 
170  if (!scale->w_pexpr && !scale->h_pexpr)
171  return AVERROR(EINVAL);
172 
173  if (scale->w_pexpr)
174  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
175  if (scale->h_pexpr)
176  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
177 
178  if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
179  av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
180  return AVERROR(EINVAL);
181  }
182 
183  if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
184  av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
185  return AVERROR(EINVAL);
186  }
187 
188  if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
189  (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
190  av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr);
191  }
192 
193  if (ctx->filter != &ff_vf_scale2ref &&
194  (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] ||
195  vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] ||
196  vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] ||
197  vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] ||
198  vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] ||
199  vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] ||
200  vars_w[VAR_S2R_MAIN_HSUB] || vars_h[VAR_S2R_MAIN_HSUB] ||
201  vars_w[VAR_S2R_MAIN_VSUB] || vars_h[VAR_S2R_MAIN_VSUB] ||
202  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
203  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
204  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
205  av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref variables are not valid in scale filter.\n");
206  return AVERROR(EINVAL);
207  }
208 
209  if (scale->eval_mode == EVAL_MODE_INIT &&
210  (vars_w[VAR_N] || vars_h[VAR_N] ||
211  vars_w[VAR_T] || vars_h[VAR_T] ||
213  vars_w[VAR_POS] || vars_h[VAR_POS] ||
214 #endif
215  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
216  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
217  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
218  av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', 'pos' are not valid in init eval_mode.\n");
219  return AVERROR(EINVAL);
220  }
221 
222  return 0;
223 }
224 
/**
 * Parse a width/height expression and install it into *pexpr_ptr.
 *
 * Used both for the initial parse (str_expr == NULL) and for runtime
 * reconfiguration via process_command().  On any failure the previous
 * expression and the matching AVOption string are restored, so the filter
 * keeps working with its old parameters.
 *
 * @param ctx       filter context; ctx->priv is a ScaleContext
 * @param str_expr  previous expression string to restore on failure,
 *                  or NULL if there is nothing to restore
 * @param pexpr_ptr points at the AVExpr slot to replace (w_pexpr/h_pexpr)
 * @param var       option name ("width"/"height"), used for av_opt_set()
 *                  and in error messages
 * @param args      the new expression string to parse
 * @return 0 on success, a negative AVERROR code on failure
 */
static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
{
    ScaleContext *scale = ctx->priv;
    int ret, is_inited = 0;
    char *old_str_expr = NULL;
    AVExpr *old_pexpr = NULL;

    if (str_expr) {
        /* keep a copy of the old option string so it can be re-set on revert */
        old_str_expr = av_strdup(str_expr);
        if (!old_str_expr)
            return AVERROR(ENOMEM);
        av_opt_set(scale, var, args, 0);
    }

    if (*pexpr_ptr) {
        /* stash the old parsed expression; its presence also tells us the
         * filter was already configured, so config_props() must be re-run */
        old_pexpr = *pexpr_ptr;
        *pexpr_ptr = NULL;
        is_inited = 1;
    }

    ret = av_expr_parse(pexpr_ptr, args, var_names,
                        NULL, NULL, NULL, NULL, 0, ctx);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var, args);
        goto revert;
    }

    /* reject self-references, scale2ref-only variables, etc. */
    ret = check_exprs(ctx);
    if (ret < 0)
        goto revert;

    if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
        goto revert;

    /* success: the saved state is no longer needed */
    av_expr_free(old_pexpr);
    old_pexpr = NULL;
    av_freep(&old_str_expr);

    return 0;

revert:
    /* drop the half-installed expression and restore the previous state */
    av_expr_free(*pexpr_ptr);
    *pexpr_ptr = NULL;
    if (old_str_expr) {
        av_opt_set(scale, var, old_str_expr, 0);
        av_free(old_str_expr);
    }
    if (old_pexpr)
        *pexpr_ptr = old_pexpr;

    return ret;
}
277 
279 {
280  ScaleContext *scale = ctx->priv;
281  int ret;
282 
283  scale->sws_opts = sws_alloc_context();
284  if (!scale->sws_opts)
285  return AVERROR(ENOMEM);
286 
287  // set threads=0, so we can later check whether the user modified it
288  ret = av_opt_set_int(scale->sws_opts, "threads", 0, 0);
289  if (ret < 0)
290  return ret;
291 
292  return 0;
293 }
294 
296 {
297  ScaleContext *scale = ctx->priv;
298  int64_t threads;
299  int ret;
300 
301  if (scale->size_str && (scale->w_expr || scale->h_expr)) {
303  "Size and width/height expressions cannot be set at the same time.\n");
304  return AVERROR(EINVAL);
305  }
306 
307  if (scale->w_expr && !scale->h_expr)
308  FFSWAP(char *, scale->w_expr, scale->size_str);
309 
310  if (scale->size_str) {
311  char buf[32];
312  if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
314  "Invalid size '%s'\n", scale->size_str);
315  return ret;
316  }
317  snprintf(buf, sizeof(buf)-1, "%d", scale->w);
318  av_opt_set(scale, "w", buf, 0);
319  snprintf(buf, sizeof(buf)-1, "%d", scale->h);
320  av_opt_set(scale, "h", buf, 0);
321  }
322  if (!scale->w_expr)
323  av_opt_set(scale, "w", "iw", 0);
324  if (!scale->h_expr)
325  av_opt_set(scale, "h", "ih", 0);
326 
327  ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
328  if (ret < 0)
329  return ret;
330 
331  ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
332  if (ret < 0)
333  return ret;
334 
335  av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
336  scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);
337 
338  if (scale->flags_str && *scale->flags_str) {
339  ret = av_opt_set(scale->sws_opts, "sws_flags", scale->flags_str, 0);
340  if (ret < 0)
341  return ret;
342  }
343 
344  for (int i = 0; i < FF_ARRAY_ELEMS(scale->param); i++)
345  if (scale->param[i] != DBL_MAX) {
346  ret = av_opt_set_double(scale->sws_opts, i ? "param1" : "param0",
347  scale->param[i], 0);
348  if (ret < 0)
349  return ret;
350  }
351 
352  // use generic thread-count if the user did not set it explicitly
353  ret = av_opt_get_int(scale->sws_opts, "threads", 0, &threads);
354  if (ret < 0)
355  return ret;
356  if (!threads)
357  av_opt_set_int(scale->sws_opts, "threads", ff_filter_get_nb_threads(ctx), 0);
358 
359  scale->in_frame_range = AVCOL_RANGE_UNSPECIFIED;
360 
361  return 0;
362 }
363 
365 {
366  ScaleContext *scale = ctx->priv;
367  av_expr_free(scale->w_pexpr);
368  av_expr_free(scale->h_pexpr);
369  scale->w_pexpr = scale->h_pexpr = NULL;
370  sws_freeContext(scale->sws_opts);
371  sws_freeContext(scale->sws);
372  sws_freeContext(scale->isws[0]);
373  sws_freeContext(scale->isws[1]);
374  scale->sws = NULL;
375 }
376 
378 {
380  const AVPixFmtDescriptor *desc;
381  enum AVPixelFormat pix_fmt;
382  int ret;
383 
384  desc = NULL;
385  formats = NULL;
386  while ((desc = av_pix_fmt_desc_next(desc))) {
390  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
391  return ret;
392  }
393  }
394  if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->outcfg.formats)) < 0)
395  return ret;
396 
397  desc = NULL;
398  formats = NULL;
399  while ((desc = av_pix_fmt_desc_next(desc))) {
403  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
404  return ret;
405  }
406  }
407  if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->incfg.formats)) < 0)
408  return ret;
409 
410  return 0;
411 }
412 
413 static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
414 {
415  if (!s)
416  s = "bt601";
417 
418  if (s && strstr(s, "bt709")) {
419  colorspace = AVCOL_SPC_BT709;
420  } else if (s && strstr(s, "fcc")) {
421  colorspace = AVCOL_SPC_FCC;
422  } else if (s && strstr(s, "smpte240m")) {
423  colorspace = AVCOL_SPC_SMPTE240M;
424  } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
425  colorspace = AVCOL_SPC_BT470BG;
426  } else if (s && strstr(s, "bt2020")) {
427  colorspace = AVCOL_SPC_BT2020_NCL;
428  }
429 
430  if (colorspace < 1 || colorspace > 10 || colorspace == 8) {
431  colorspace = AVCOL_SPC_BT470BG;
432  }
433 
434  return sws_getCoefficients(colorspace);
435 }
436 
438 {
439  ScaleContext *scale = ctx->priv;
440  const char scale2ref = ctx->filter == &ff_vf_scale2ref;
441  const AVFilterLink *inlink = scale2ref ? ctx->inputs[1] : ctx->inputs[0];
442  const AVFilterLink *outlink = ctx->outputs[0];
444  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
445  char *expr;
446  int eval_w, eval_h;
447  int ret;
448  double res;
449  const AVPixFmtDescriptor *main_desc;
450  const AVFilterLink *main_link;
451 
452  if (scale2ref) {
453  main_link = ctx->inputs[0];
454  main_desc = av_pix_fmt_desc_get(main_link->format);
455  }
456 
457  scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w;
458  scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h;
459  scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
460  scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
461  scale->var_values[VAR_A] = (double) inlink->w / inlink->h;
462  scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
463  (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
464  scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
465  scale->var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
466  scale->var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
467  scale->var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
468  scale->var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;
469 
470  if (scale2ref) {
471  scale->var_values[VAR_S2R_MAIN_W] = main_link->w;
472  scale->var_values[VAR_S2R_MAIN_H] = main_link->h;
473  scale->var_values[VAR_S2R_MAIN_A] = (double) main_link->w / main_link->h;
474  scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
475  (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
476  scale->var_values[VAR_S2R_MAIN_DAR] = scale->var_values[VAR_S2R_MDAR] =
477  scale->var_values[VAR_S2R_MAIN_A] * scale->var_values[VAR_S2R_MAIN_SAR];
478  scale->var_values[VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w;
479  scale->var_values[VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h;
480  }
481 
482  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
483  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;
484 
485  res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
486  if (isnan(res)) {
487  expr = scale->h_expr;
488  ret = AVERROR(EINVAL);
489  goto fail;
490  }
491  eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int) res == 0 ? inlink->h : (int) res;
492 
493  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
494  if (isnan(res)) {
495  expr = scale->w_expr;
496  ret = AVERROR(EINVAL);
497  goto fail;
498  }
499  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;
500 
501  scale->w = eval_w;
502  scale->h = eval_h;
503 
504  return 0;
505 
506 fail:
508  "Error when evaluating the expression '%s'.\n", expr);
509  return ret;
510 }
511 
512 static int config_props(AVFilterLink *outlink)
513 {
514  AVFilterContext *ctx = outlink->src;
515  AVFilterLink *inlink0 = outlink->src->inputs[0];
516  AVFilterLink *inlink = ctx->filter == &ff_vf_scale2ref ?
517  outlink->src->inputs[1] :
518  outlink->src->inputs[0];
519  enum AVPixelFormat outfmt = outlink->format;
521  ScaleContext *scale = ctx->priv;
522  uint8_t *flags_val = NULL;
523  int ret;
524 
525  if ((ret = scale_eval_dimensions(ctx)) < 0)
526  goto fail;
527 
528  outlink->w = scale->w;
529  outlink->h = scale->h;
530 
531  ff_scale_adjust_dimensions(inlink, &outlink->w, &outlink->h,
532  scale->force_original_aspect_ratio,
533  scale->force_divisible_by);
534 
535  if (outlink->w > INT_MAX ||
536  outlink->h > INT_MAX ||
537  (outlink->h * inlink->w) > INT_MAX ||
538  (outlink->w * inlink->h) > INT_MAX)
539  av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
540 
541  /* TODO: make algorithm configurable */
542 
543  scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL;
544  if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
545  scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL;
546 
547  if (scale->sws)
548  sws_freeContext(scale->sws);
549  if (scale->isws[0])
550  sws_freeContext(scale->isws[0]);
551  if (scale->isws[1])
552  sws_freeContext(scale->isws[1]);
553  scale->isws[0] = scale->isws[1] = scale->sws = NULL;
554  if (inlink0->w == outlink->w &&
555  inlink0->h == outlink->h &&
556  !scale->out_color_matrix &&
557  scale->in_range == scale->out_range &&
558  inlink0->format == outlink->format)
559  ;
560  else {
561  struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
562  int i;
563 
564  for (i = 0; i < 3; i++) {
565  int in_v_chr_pos = scale->in_v_chr_pos, out_v_chr_pos = scale->out_v_chr_pos;
566  struct SwsContext *const s = sws_alloc_context();
567  if (!s)
568  return AVERROR(ENOMEM);
569  *swscs[i] = s;
570 
571  ret = av_opt_copy(s, scale->sws_opts);
572  if (ret < 0)
573  return ret;
574 
575  av_opt_set_int(s, "srcw", inlink0 ->w, 0);
576  av_opt_set_int(s, "srch", inlink0 ->h >> !!i, 0);
577  av_opt_set_int(s, "src_format", inlink0->format, 0);
578  av_opt_set_int(s, "dstw", outlink->w, 0);
579  av_opt_set_int(s, "dsth", outlink->h >> !!i, 0);
580  av_opt_set_int(s, "dst_format", outfmt, 0);
581  if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
582  av_opt_set_int(s, "src_range",
583  scale->in_range == AVCOL_RANGE_JPEG, 0);
584  else if (scale->in_frame_range != AVCOL_RANGE_UNSPECIFIED)
585  av_opt_set_int(s, "src_range",
586  scale->in_frame_range == AVCOL_RANGE_JPEG, 0);
587  if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
588  av_opt_set_int(s, "dst_range",
589  scale->out_range == AVCOL_RANGE_JPEG, 0);
590 
591  /* Override YUV420P default settings to have the correct (MPEG-2) chroma positions
592  * MPEG-2 chroma positions are used by convention
593  * XXX: support other 4:2:0 pixel formats */
594  if (inlink0->format == AV_PIX_FMT_YUV420P && scale->in_v_chr_pos == -513) {
595  in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
596  }
597 
598  if (outlink->format == AV_PIX_FMT_YUV420P && scale->out_v_chr_pos == -513) {
599  out_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
600  }
601 
602  av_opt_set_int(s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
603  av_opt_set_int(s, "src_v_chr_pos", in_v_chr_pos, 0);
604  av_opt_set_int(s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
605  av_opt_set_int(s, "dst_v_chr_pos", out_v_chr_pos, 0);
606 
607  if ((ret = sws_init_context(s, NULL, NULL)) < 0)
608  return ret;
609  if (!scale->interlaced)
610  break;
611  }
612  }
613 
614  if (inlink0->sample_aspect_ratio.num){
615  outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink0->w, outlink->w * inlink0->h}, inlink0->sample_aspect_ratio);
616  } else
617  outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
618 
619  if (scale->sws)
620  av_opt_get(scale->sws, "sws_flags", 0, &flags_val);
621 
622  av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:%s\n",
623  inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
624  inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
625  outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
626  outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
627  flags_val);
628  av_freep(&flags_val);
629 
630  return 0;
631 
632 fail:
633  return ret;
634 }
635 
636 static int config_props_ref(AVFilterLink *outlink)
637 {
638  AVFilterLink *inlink = outlink->src->inputs[1];
639 
640  outlink->w = inlink->w;
641  outlink->h = inlink->h;
642  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
643  outlink->time_base = inlink->time_base;
644  outlink->frame_rate = inlink->frame_rate;
645 
646  return 0;
647 }
648 
649 static int request_frame(AVFilterLink *outlink)
650 {
651  return ff_request_frame(outlink->src->inputs[0]);
652 }
653 
654 static int request_frame_ref(AVFilterLink *outlink)
655 {
656  return ff_request_frame(outlink->src->inputs[1]);
657 }
658 
659 static void frame_offset(AVFrame *frame, int dir, int is_pal)
660 {
661  for (int i = 0; i < 4 && frame->data[i]; i++) {
662  if (i == 1 && is_pal)
663  break;
664  frame->data[i] += frame->linesize[i] * dir;
665  }
666 }
667 
669  int field)
670 {
671  int orig_h_src = src->height;
672  int orig_h_dst = dst->height;
673  int ret;
674 
675  // offset the data pointers for the bottom field
676  if (field) {
677  frame_offset(src, 1, scale->input_is_pal);
678  frame_offset(dst, 1, scale->output_is_pal);
679  }
680 
681  // take every second line
682  for (int i = 0; i < 4; i++) {
683  src->linesize[i] *= 2;
684  dst->linesize[i] *= 2;
685  }
686  src->height /= 2;
687  dst->height /= 2;
688 
689  ret = sws_scale_frame(scale->isws[field], dst, src);
690  if (ret < 0)
691  return ret;
692 
693  // undo the changes we made above
694  for (int i = 0; i < 4; i++) {
695  src->linesize[i] /= 2;
696  dst->linesize[i] /= 2;
697  }
698  src->height = orig_h_src;
699  dst->height = orig_h_dst;
700 
701  if (field) {
702  frame_offset(src, -1, scale->input_is_pal);
703  frame_offset(dst, -1, scale->output_is_pal);
704  }
705 
706  return 0;
707 }
708 
709 static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
710 {
711  AVFilterContext *ctx = link->dst;
712  ScaleContext *scale = ctx->priv;
713  AVFilterLink *outlink = ctx->outputs[0];
714  AVFrame *out;
716  char buf[32];
717  int ret;
718  int in_range;
719  int frame_changed;
720 
721  *frame_out = NULL;
722  if (in->colorspace == AVCOL_SPC_YCGCO)
723  av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
724 
725  frame_changed = in->width != link->w ||
726  in->height != link->h ||
727  in->format != link->format ||
730 
732  scale->in_range == AVCOL_RANGE_UNSPECIFIED &&
733  in->color_range != scale->in_frame_range) {
734  scale->in_frame_range = in->color_range;
735  frame_changed = 1;
736  }
737 
738  if (scale->eval_mode == EVAL_MODE_FRAME || frame_changed) {
739  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
740 
741  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
742  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
743 
744  if (scale->eval_mode == EVAL_MODE_FRAME &&
745  !frame_changed &&
746  ctx->filter != &ff_vf_scale2ref &&
747  !(vars_w[VAR_N] || vars_w[VAR_T]
749  || vars_w[VAR_POS]
750 #endif
751  ) &&
752  !(vars_h[VAR_N] || vars_h[VAR_T]
754  || vars_h[VAR_POS]
755 #endif
756  ) &&
757  scale->w && scale->h)
758  goto scale;
759 
760  if (scale->eval_mode == EVAL_MODE_INIT) {
761  snprintf(buf, sizeof(buf) - 1, "%d", scale->w);
762  av_opt_set(scale, "w", buf, 0);
763  snprintf(buf, sizeof(buf) - 1, "%d", scale->h);
764  av_opt_set(scale, "h", buf, 0);
765 
766  ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
767  if (ret < 0)
768  return ret;
769 
770  ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
771  if (ret < 0)
772  return ret;
773  }
774 
775  if (ctx->filter == &ff_vf_scale2ref) {
776  scale->var_values[VAR_S2R_MAIN_N] = link->frame_count_out;
777  scale->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
778 #if FF_API_FRAME_PKT
780  scale->var_values[VAR_S2R_MAIN_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
782 #endif
783  } else {
784  scale->var_values[VAR_N] = link->frame_count_out;
785  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
786 #if FF_API_FRAME_PKT
788  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
790 #endif
791  }
792 
793  link->dst->inputs[0]->format = in->format;
794  link->dst->inputs[0]->w = in->width;
795  link->dst->inputs[0]->h = in->height;
796 
797  link->dst->inputs[0]->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
798  link->dst->inputs[0]->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
799 
800  if ((ret = config_props(outlink)) < 0)
801  return ret;
802  }
803 
804 scale:
805  if (!scale->sws) {
806  *frame_out = in;
807  return 0;
808  }
809 
810  scale->hsub = desc->log2_chroma_w;
811  scale->vsub = desc->log2_chroma_h;
812 
813  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
814  if (!out) {
815  av_frame_free(&in);
816  return AVERROR(ENOMEM);
817  }
818  *frame_out = out;
819 
821  out->width = outlink->w;
822  out->height = outlink->h;
823 
824  // Sanity checks:
825  // 1. If the output is RGB, set the matrix coefficients to RGB.
826  // 2. If the output is not RGB and we've got the RGB/XYZ (identity)
827  // matrix configured, unset the matrix.
828  // In theory these should be in swscale itself as the AVFrame
829  // based API gets in, so that not every swscale API user has
830  // to go through duplicating such sanity checks.
832  out->colorspace = AVCOL_SPC_RGB;
833  else if (out->colorspace == AVCOL_SPC_RGB)
834  out->colorspace = AVCOL_SPC_UNSPECIFIED;
835 
836  if (scale->output_is_pal)
837  avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
838 
839  in_range = in->color_range;
840 
841  if ( scale->in_color_matrix
842  || scale->out_color_matrix
843  || scale-> in_range != AVCOL_RANGE_UNSPECIFIED
844  || in_range != AVCOL_RANGE_UNSPECIFIED
845  || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
846  int in_full, out_full, brightness, contrast, saturation;
847  const int *inv_table, *table;
848 
849  sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
850  (int **)&table, &out_full,
852 
853  if (scale->in_color_matrix)
854  inv_table = parse_yuv_type(scale->in_color_matrix, in->colorspace);
855  if (scale->out_color_matrix)
856  table = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
857  else if (scale->in_color_matrix)
858  table = inv_table;
859 
860  if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
861  in_full = (scale-> in_range == AVCOL_RANGE_JPEG);
862  else if (in_range != AVCOL_RANGE_UNSPECIFIED)
863  in_full = (in_range == AVCOL_RANGE_JPEG);
864  if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
865  out_full = (scale->out_range == AVCOL_RANGE_JPEG);
866 
867  sws_setColorspaceDetails(scale->sws, inv_table, in_full,
868  table, out_full,
870  if (scale->isws[0])
871  sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
872  table, out_full,
874  if (scale->isws[1])
875  sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
876  table, out_full,
878 
879  out->color_range = out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
880  }
881 
882  av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
883  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
884  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
885  INT_MAX);
886 
887  if (scale->interlaced>0 || (scale->interlaced<0 &&
888  (in->flags & AV_FRAME_FLAG_INTERLACED))) {
889  ret = scale_field(scale, out, in, 0);
890  if (ret >= 0)
891  ret = scale_field(scale, out, in, 1);
892  } else {
893  ret = sws_scale_frame(scale->sws, out, in);
894  }
895 
896  av_frame_free(&in);
897  if (ret < 0)
898  av_frame_free(frame_out);
899  return ret;
900 }
901 
903 {
904  AVFilterContext *ctx = link->dst;
905  AVFilterLink *outlink = ctx->outputs[0];
906  AVFrame *out;
907  int ret;
908 
909  ret = scale_frame(link, in, &out);
910  if (out)
911  return ff_filter_frame(outlink, out);
912 
913  return ret;
914 }
915 
917 {
918  ScaleContext *scale = link->dst->priv;
919  AVFilterLink *outlink = link->dst->outputs[1];
920  int frame_changed;
921 
922  frame_changed = in->width != link->w ||
923  in->height != link->h ||
924  in->format != link->format ||
927 
928  if (frame_changed) {
929  link->format = in->format;
930  link->w = in->width;
931  link->h = in->height;
934 
935  config_props_ref(outlink);
936  }
937 
938  if (scale->eval_mode == EVAL_MODE_FRAME) {
939  scale->var_values[VAR_N] = link->frame_count_out;
940  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
941 #if FF_API_FRAME_PKT
943  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
945 #endif
946  }
947 
948  return ff_filter_frame(outlink, in);
949 }
950 
951 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
952  char *res, int res_len, int flags)
953 {
954  ScaleContext *scale = ctx->priv;
955  char *str_expr;
956  AVExpr **pexpr_ptr;
957  int ret, w, h;
958 
959  w = !strcmp(cmd, "width") || !strcmp(cmd, "w");
960  h = !strcmp(cmd, "height") || !strcmp(cmd, "h");
961 
962  if (w || h) {
963  str_expr = w ? scale->w_expr : scale->h_expr;
964  pexpr_ptr = w ? &scale->w_pexpr : &scale->h_pexpr;
965 
966  ret = scale_parse_expr(ctx, str_expr, pexpr_ptr, cmd, args);
967  } else
968  ret = AVERROR(ENOSYS);
969 
970  if (ret < 0)
971  av_log(ctx, AV_LOG_ERROR, "Failed to process command. Continuing with existing parameters.\n");
972 
973  return ret;
974 }
975 
976 static const AVClass *child_class_iterate(void **iter)
977 {
978  const AVClass *c = *iter ? NULL : sws_get_class();
979  *iter = (void*)(uintptr_t)c;
980  return c;
981 }
982 
983 static void *child_next(void *obj, void *prev)
984 {
985  ScaleContext *s = obj;
986  if (!prev)
987  return s->sws_opts;
988  return NULL;
989 }
990 
991 #define OFFSET(x) offsetof(ScaleContext, x)
992 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
993 #define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
994 
/* Options shared by the scale and scale2ref filters (see scale_class). */
static const AVOption scale_options[] = {
    /* output dimensions: expression strings, re-settable at runtime (TFLAGS) */
    { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
    /* forwarded verbatim to the sws context as "sws_flags" (see init) */
    { "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "" }, .flags = FLAGS },
    /* tri-state: -1 = auto (follow frame interlacing flag), 0 = off, 1 = on */
    { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_BOOL, {.i64 = 0 }, -1, 1, FLAGS },
    /* "WxH" shorthand; mutually exclusive with w/h expressions (see init) */
    { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    /* colorspace names, matched by substring in parse_yuv_type() */
    { "in_color_matrix", "set input YCbCr type", OFFSET(in_color_matrix), AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS, "color" },
    { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS, "color"},
    { "auto", NULL, 0, AV_OPT_TYPE_CONST, { .str = "auto" }, 0, 0, FLAGS, "color" },
    { "bt601", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt601" }, 0, 0, FLAGS, "color" },
    { "bt470", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt470" }, 0, 0, FLAGS, "color" },
    { "smpte170m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte170m" }, 0, 0, FLAGS, "color" },
    { "bt709", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt709" }, 0, 0, FLAGS, "color" },
    { "fcc", NULL, 0, AV_OPT_TYPE_CONST, { .str = "fcc" }, 0, 0, FLAGS, "color" },
    { "smpte240m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte240m" }, 0, 0, FLAGS, "color" },
    { "bt2020", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt2020" }, 0, 0, FLAGS, "color" },
    /* MPEG (limited) vs JPEG (full) range; "auto"/"unknown" keep the frame's range */
    { "in_range", "set input color range", OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
    { "unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
    { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "limited",NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    /* chroma sample positions; -513 = unset, lets config_props pick the
     * MPEG-2 convention for YUV420P */
    { "in_v_chr_pos", "input vertical chroma position in luma grid/256" , OFFSET(in_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "in_h_chr_pos", "input horizontal chroma position in luma grid/256", OFFSET(in_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "out_v_chr_pos", "output vertical chroma position in luma grid/256" , OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    /* aspect-ratio preservation, applied via ff_scale_adjust_dimensions() */
    { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
    { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
    { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
    { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
    { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1}, 1, 256, FLAGS },
    /* scaler tuning parameters; DBL_MAX = unset (see init) */
    { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS },
    { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS },
    /* when to evaluate the w/h expressions: once at init, or per frame */
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
    { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
    { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { NULL }
};
1040 
1041 static const AVClass scale_class = {
1042  .class_name = "scale(2ref)",
1043  .item_name = av_default_item_name,
1044  .option = scale_options,
1045  .version = LIBAVUTIL_VERSION_INT,
1046  .category = AV_CLASS_CATEGORY_FILTER,
1047  .child_class_iterate = child_class_iterate,
1049 };
1050 
1052  {
1053  .name = "default",
1054  .type = AVMEDIA_TYPE_VIDEO,
1055  .filter_frame = filter_frame,
1056  },
1057 };
1058 
1060  {
1061  .name = "default",
1062  .type = AVMEDIA_TYPE_VIDEO,
1063  .config_props = config_props,
1064  },
1065 };
1066 
1068  .name = "scale",
1069  .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
1070  .preinit = preinit,
1071  .init = init,
1072  .uninit = uninit,
1073  .priv_size = sizeof(ScaleContext),
1074  .priv_class = &scale_class,
1078  .process_command = process_command,
1079 };
1080 
1082  {
1083  .name = "default",
1084  .type = AVMEDIA_TYPE_VIDEO,
1085  .filter_frame = filter_frame,
1086  },
1087  {
1088  .name = "ref",
1089  .type = AVMEDIA_TYPE_VIDEO,
1090  .filter_frame = filter_frame_ref,
1091  },
1092 };
1093 
1095  {
1096  .name = "default",
1097  .type = AVMEDIA_TYPE_VIDEO,
1098  .config_props = config_props,
1099  .request_frame= request_frame,
1100  },
1101  {
1102  .name = "ref",
1103  .type = AVMEDIA_TYPE_VIDEO,
1104  .config_props = config_props_ref,
1105  .request_frame= request_frame_ref,
1106  },
1107 };
1108 
1110  .name = "scale2ref",
1111  .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
1112  .preinit = preinit,
1113  .init = init,
1114  .uninit = uninit,
1115  .priv_size = sizeof(ScaleContext),
1116  .priv_class = &scale_class,
1120  .process_command = process_command,
1121 };
filter_frame_ref
static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
Definition: vf_scale.c:916
ScaleContext::param
double param[2]
Definition: vf_scale.c:126
VAR_S2R_MAIN_SAR
@ VAR_S2R_MAIN_SAR
Definition: vf_scale.c:95
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:108
VAR_S2R_MAIN_A
@ VAR_S2R_MAIN_A
Definition: vf_scale.c:94
VAR_HSUB
@ VAR_HSUB
Definition: vf_scale.c:83
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
config_props_ref
static int config_props_ref(AVFilterLink *outlink)
Definition: vf_scale.c:636
SwsContext::saturation
int saturation
Definition: swscale_internal.h:454
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:656
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
TFLAGS
#define TFLAGS
Definition: vf_scale.c:993
ScaleContext::sws_opts
struct SwsContext * sws_opts
Definition: vf_scale.c:116
check_exprs
static int check_exprs(AVFilterContext *ctx)
Definition: vf_scale.c:165
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ScaleContext::input_is_pal
int input_is_pal
set to 1 if the input format is paletted
Definition: vf_scale.c:130
out
FILE * out
Definition: movenc.c:54
sws_isSupportedOutput
#define sws_isSupportedOutput(x)
ScaleContext
Definition: vf_scale.c:111
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:978
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2936
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: vf_scale.c:377
ScaleContext::force_divisible_by
int force_divisible_by
Definition: vf_scale.c:155
avfilter_vf_scale2ref_outputs
static const AVFilterPad avfilter_vf_scale2ref_outputs[]
Definition: vf_scale.c:1094
FLAGS
#define FLAGS
Definition: vf_scale.c:992
ScaleContext::flags_str
char * flags_str
Definition: vf_scale.c:140
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:667
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:452
AVFrame::width
int width
Definition: frame.h:412
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:669
VAR_A
@ VAR_A
Definition: vf_scale.c:80
request_frame_ref
static int request_frame_ref(AVFilterLink *outlink)
Definition: vf_scale.c:654
av_opt_set_double
int av_opt_set_double(void *obj, const char *name, double val, int search_flags)
Definition: opt.c:629
AVOption
AVOption.
Definition: opt.h:251
scale_parse_expr
static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
Definition: vf_scale.c:225
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:169
table
static const uint16_t table[]
Definition: prosumer.c:205
request_frame
static int request_frame(AVFilterLink *outlink)
Definition: vf_scale.c:649
av_pix_fmt_desc_next
const AVPixFmtDescriptor * av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev)
Iterate over all pixel format descriptors known to libavutil.
Definition: pixdesc.c:2943
ff_request_frame
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:431
VAR_S2R_MAIN_HSUB
@ VAR_S2R_MAIN_HSUB
Definition: vf_scale.c:97
ScaleContext::var_values
double var_values[VARS_NB]
Definition: vf_scale.c:138
ScaleContext::out_range
int out_range
Definition: vf_scale.c:147
VAR_S2R_MDAR
@ VAR_S2R_MDAR
Definition: vf_scale.c:96
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:596
float.h
EVAL_MODE_FRAME
@ EVAL_MODE_FRAME
Definition: vf_scale.c:107
VAR_S2R_MAIN_H
@ VAR_S2R_MAIN_H
Definition: vf_scale.c:93
mathematics.h
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:649
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
ScaleContext::in_h_chr_pos
int in_h_chr_pos
Definition: vf_scale.c:151
VAR_OUT_H
@ VAR_OUT_H
Definition: vf_scale.c:79
video.h
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
VAR_S2R_MAIN_POS
@ VAR_S2R_MAIN_POS
Definition: vf_scale.c:101
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:601
VAR_DAR
@ VAR_DAR
Definition: vf_scale.c:82
avfilter_vf_scale_inputs
static const AVFilterPad avfilter_vf_scale_inputs[]
Definition: vf_scale.c:1051
fail
#define fail()
Definition: checkasm.h:138
VARS_NB
@ VARS_NB
Definition: vf_scale.c:102
frame_offset
static void frame_offset(AVFrame *frame, int dir, int is_pal)
Definition: vf_scale.c:659
ScaleContext::isws
struct SwsContext * isws[2]
software scaler context for interlaced material
Definition: vf_scale.c:114
ScaleContext::eval_mode
int eval_mode
expression evaluation mode
Definition: vf_scale.c:157
VAR_IN_H
@ VAR_IN_H
Definition: vf_scale.c:77
EVAL_MODE_NB
@ EVAL_MODE_NB
Definition: vf_scale.c:108
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
sws_get_class
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:97
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:487
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
AVRational::num
int num
Numerator.
Definition: rational.h:59
OFFSET
#define OFFSET(x)
Definition: vf_scale.c:991
preinit
static av_cold int preinit(AVFilterContext *ctx)
Definition: vf_scale.c:278
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:47
AV_PIX_FMT_BGR8
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:83
TS2T
#define TS2T(ts, tb)
Definition: internal.h:255
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
ScaleContext::sws
struct SwsContext * sws
software scaler context
Definition: vf_scale.c:113
s
#define s(width, name)
Definition: cbs_vp9.c:254
VAR_OH
@ VAR_OH
Definition: vf_scale.c:79
VAR_S2R_MAIN_W
@ VAR_S2R_MAIN_W
Definition: vf_scale.c:92
SwsContext::brightness
int brightness
Definition: swscale_internal.h:454
scale_frame
static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
Definition: vf_scale.c:709
ScaleContext::slice_y
int slice_y
top of current output slice
Definition: vf_scale.c:129
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:227
av_expr_count_vars
int av_expr_count_vars(AVExpr *e, unsigned *counter, int size)
Track the presence of variables and their number of occurrences in a parsed expression.
Definition: eval.c:756
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:617
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_scale.c:295
VAR_OVSUB
@ VAR_OVSUB
Definition: vf_scale.c:86
ScaleContext::in_color_matrix
char * in_color_matrix
Definition: vf_scale.c:142
var_name
var_name
Definition: noise_bsf.c:46
ctx
AVFormatContext * ctx
Definition: movenc.c:48
process_command
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: vf_scale.c:951
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
AVExpr
Definition: eval.c:157
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
SwsContext::contrast
int contrast
Definition: swscale_internal.h:454
ScaleContext::w_pexpr
AVExpr * w_pexpr
Definition: vf_scale.c:136
avpriv_set_systematic_pal2
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:178
NAN
#define NAN
Definition: mathematics.h:115
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:192
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
ScaleContext::out_h_chr_pos
int out_h_chr_pos
Definition: vf_scale.c:149
scale_field
static int scale_field(ScaleContext *scale, AVFrame *dst, AVFrame *src, int field)
Definition: vf_scale.c:668
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
ScaleContext::out_v_chr_pos
int out_v_chr_pos
Definition: vf_scale.c:150
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:736
VAR_T
@ VAR_T
Definition: vf_scale.c:88
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
ScaleContext::in_range
int in_range
Definition: vf_scale.c:145
AVFilterContext::inputs
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:405
VAR_IN_W
@ VAR_IN_W
Definition: vf_scale.c:76
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
ff_add_format
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:470
parseutils.h
sws_alloc_context
struct SwsContext * sws_alloc_context(void)
Allocate an empty SwsContext.
Definition: utils.c:1180
VAR_POS
@ VAR_POS
Definition: noise_bsf.c:55
ScaleContext::h_pexpr
AVExpr * h_pexpr
Definition: vf_scale.c:137
double
double
Definition: af_crystalizer.c:131
AVCOL_SPC_YCGCO
@ AVCOL_SPC_YCGCO
used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
Definition: pixfmt.h:604
ScaleContext::in_frame_range
int in_frame_range
Definition: vf_scale.c:146
av_opt_get_int
int av_opt_get_int(void *obj, const char *name, int search_flags, int64_t *out_val)
Definition: opt.c:978
sws_setColorspaceDetails
int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation)
Definition: utils.c:999
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:94
ff_vf_scale2ref
const AVFilter ff_vf_scale2ref
Definition: vf_scale.c:161
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:635
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:624
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
VAR_IW
@ VAR_IW
Definition: vf_scale.c:76
eval.h
VAR_IH
@ VAR_IH
Definition: vf_scale.c:77
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
AVClass::child_next
void *(* child_next)(void *obj, void *prev)
Return next AVOptions-enabled child or NULL.
Definition: log.h:131
child_class_iterate
static const AVClass * child_class_iterate(void **iter)
Definition: vf_scale.c:976
ScaleContext::w
int w
New dimensions.
Definition: vf_scale.c:124
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:467
AVFrame::pkt_pos
attribute_deprecated int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:687
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
scale_eval.h
FF_API_FRAME_PKT
#define FF_API_FRAME_PKT
Definition: version.h:117
ScaleContext::hsub
int hsub
Definition: vf_scale.c:128
VAR_OUT_W
@ VAR_OUT_W
Definition: vf_scale.c:78
av_pix_fmt_desc_get_id
enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc)
Definition: pixdesc.c:2955
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_scale.c:902
av_parse_video_size
int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str)
Parse str and put in width_ptr and height_ptr the detected values.
Definition: parseutils.c:150
sws_isSupportedInput
#define sws_isSupportedInput(x)
internal.h
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:603
ScaleContext::vsub
int vsub
chroma subsampling
Definition: vf_scale.c:128
sws_scale_frame
int sws_scale_frame(struct SwsContext *c, AVFrame *dst, const AVFrame *src)
Scale source data from src and write the output to dst.
Definition: swscale.c:1183
config_props
static int config_props(AVFilterLink *outlink)
Definition: vf_scale.c:512
interlaced
uint8_t interlaced
Definition: mxfenc.c:2148
ScaleContext::output_is_pal
int output_is_pal
set to 1 if the output format is paletted
Definition: vf_scale.c:131
VAR_SAR
@ VAR_SAR
Definition: vf_scale.c:81
sws_isSupportedEndiannessConversion
int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt)
Definition: utils.c:339
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:270
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:606
VAR_S2R_MAIN_N
@ VAR_S2R_MAIN_N
Definition: vf_scale.c:99
internal.h
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:595
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:786
EvalMode
EvalMode
Definition: af_volume.h:39
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:53
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:598
ScaleContext::h_expr
char * h_expr
height expression string
Definition: vf_scale.c:135
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:636
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:652
avfilter_vf_scale_outputs
static const AVFilterPad avfilter_vf_scale_outputs[]
Definition: vf_scale.c:1059
AVFilter
Filter definition.
Definition: avfilter.h:166
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
ret
ret
Definition: filter_design.txt:187
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
parse_yuv_type
static const int * parse_yuv_type(const char *s, enum AVColorSpace colorspace)
Definition: vf_scale.c:413
child_next
static void * child_next(void *obj, void *prev)
Definition: vf_scale.c:983
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:447
sws_getColorspaceDetails
int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation)
Definition: utils.c:1156
ff_scale_adjust_dimensions
int ff_scale_adjust_dimensions(AVFilterLink *inlink, int *ret_w, int *ret_h, int force_original_aspect_ratio, int force_divisible_by)
Transform evaluated width and height obtained from ff_scale_eval_dimensions into actual target width ...
Definition: scale_eval.c:113
sws_init_context
av_warn_unused_result int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter)
Initialize the swscaler context sws_context.
Definition: utils.c:2031
VAR_S2R_MAIN_T
@ VAR_S2R_MAIN_T
Definition: vf_scale.c:100
ScaleContext::out_color_matrix
char * out_color_matrix
Definition: vf_scale.c:143
scale_eval_dimensions
static int scale_eval_dimensions(AVFilterContext *ctx)
Definition: vf_scale.c:437
var_names
static const char *const var_names[]
Definition: vf_scale.c:45
AVFrame::height
int height
Definition: frame.h:412
VAR_S2R_MAIN_DAR
@ VAR_S2R_MAIN_DAR
Definition: vf_scale.c:96
scale_options
static const AVOption scale_options[]
Definition: vf_scale.c:995
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2420
AVRational::den
int den
Denominator.
Definition: rational.h:60
AVCOL_SPC_FCC
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:600
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
avfilter.h
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_scale.c:364
ScaleContext::force_original_aspect_ratio
int force_original_aspect_ratio
Definition: vf_scale.c:154
avfilter_vf_scale2ref_inputs
static const AVFilterPad avfilter_vf_scale2ref_inputs[]
Definition: vf_scale.c:1081
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVFilterContext
An instance of a filter.
Definition: avfilter.h:397
VAR_OW
@ VAR_OW
Definition: vf_scale.c:78
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
desc
const char * desc
Definition: libsvtav1.c:83
VAR_VSUB
@ VAR_VSUB
Definition: vf_scale.c:84
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
sws_getCoefficients
const int * sws_getCoefficients(int colorspace)
Return a pointer to yuv<->rgb coefficients for the given colorspace suitable for sws_setColorspaceDet...
Definition: yuv2rgb.c:62
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
ScaleContext::interlaced
int interlaced
Definition: vf_scale.c:132
av_opt_copy
int av_opt_copy(void *dst, const void *src)
Copy options from src object into dest object.
Definition: opt.c:1885
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
VAR_N
@ VAR_N
Definition: vf_scale.c:87
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:193
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
scale_class
static const AVClass scale_class
Definition: vf_scale.c:1041
ScaleContext::w_expr
char * w_expr
width expression string
Definition: vf_scale.c:134
EVAL_MODE_INIT
@ EVAL_MODE_INIT
Definition: vf_scale.c:106
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:557
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
av_opt_get
int av_opt_get(void *obj, const char *name, int search_flags, uint8_t **out_val)
Definition: opt.c:837
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
h
h
Definition: vp9dsp_template.c:2038
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
VAR_OHSUB
@ VAR_OHSUB
Definition: vf_scale.c:85
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:597
int
int
Definition: ffmpeg_filter.c:331
SwsContext
Definition: swscale_internal.h:299
AV_PIX_FMT_FLAG_PAL
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:120
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
ff_vf_scale
const AVFilter ff_vf_scale
Definition: vf_scale.c:1067
snprintf
#define snprintf
Definition: snprintf.h:34
ScaleContext::size_str
char * size_str
Definition: vf_scale.c:125
VAR_S2R_MAIN_VSUB
@ VAR_S2R_MAIN_VSUB
Definition: vf_scale.c:98
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
swscale.h
ScaleContext::h
int h
Definition: vf_scale.c:124
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:312
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2856
ScaleContext::in_v_chr_pos
int in_v_chr_pos
Definition: vf_scale.c:152
SwsContext::param
double param[2]
Input parameters for scaling algorithms that need them.
Definition: swscale_internal.h:342