FFmpeg
vf_scale.c
1 /*
2  * Copyright (c) 2007 Bobby Bingham
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * scale video filter
24  */
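/* Illustrative usage (assumed typical invocations, not taken from this file):
 * the filter is configured through filtergraph strings, for example
 *
 *     scale=640:480          scale to a fixed 640x480
 *     scale=w=iw/2:h=ih/2    halve both input dimensions
 *     scale=1280:-2          width 1280, keep aspect, height divisible by 2
 *
 * e.g. on the command line: ffmpeg -i in.mp4 -vf "scale=1280:-2" out.mp4
 */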
25 
26 #include <stdio.h>
27 #include <string.h>
28 
29 #include "avfilter.h"
30 #include "formats.h"
31 #include "internal.h"
32 #include "scale_eval.h"
33 #include "video.h"
34 #include "libavutil/avstring.h"
35 #include "libavutil/eval.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/opt.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/imgutils.h"
42 #include "libswscale/swscale.h"
43 
44 static const char *const var_names[] = {
45  "in_w", "iw",
46  "in_h", "ih",
47  "out_w", "ow",
48  "out_h", "oh",
49  "a",
50  "sar",
51  "dar",
52  "hsub",
53  "vsub",
54  "ohsub",
55  "ovsub",
56  "n",
57  "t",
58  "pos",
59  "main_w",
60  "main_h",
61  "main_a",
62  "main_sar",
63  "main_dar", "mdar",
64  "main_hsub",
65  "main_vsub",
66  "main_n",
67  "main_t",
68  "main_pos",
69  NULL
70 };
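/* The names above are the constants available inside the w/h expressions.
 * A few illustrative (non-authoritative) examples:
 *
 *     scale=w='min(iw,1280)':h=-1                 cap the width, keep aspect
 *     scale='trunc(iw/2/hsub)*hsub':'trunc(ih/2/vsub)*vsub'
 *                                                 halve while honouring chroma subsampling
 *     [logo][video]scale2ref=w=oh*mdar:h=ih/10    scale2ref: logo height = 1/10 of the reference
 */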
71 
72 enum var_name {
73  VAR_IN_W, VAR_IW,
74  VAR_IN_H, VAR_IH,
75  VAR_OUT_W, VAR_OW,
76  VAR_OUT_H, VAR_OH,
77  VAR_A,
78  VAR_SAR,
79  VAR_DAR,
80  VAR_HSUB,
81  VAR_VSUB,
82  VAR_OHSUB,
83  VAR_OVSUB,
84  VAR_N,
85  VAR_T,
86  VAR_POS,
87  VAR_S2R_MAIN_W,
88  VAR_S2R_MAIN_H,
89  VAR_S2R_MAIN_A,
90  VAR_S2R_MAIN_SAR,
91  VAR_S2R_MAIN_DAR, VAR_S2R_MDAR,
92  VAR_S2R_MAIN_HSUB,
93  VAR_S2R_MAIN_VSUB,
94  VAR_S2R_MAIN_N,
95  VAR_S2R_MAIN_T,
96  VAR_S2R_MAIN_POS,
97  VARS_NB
98 };
99 
100 enum EvalMode {
101  EVAL_MODE_INIT,
102  EVAL_MODE_FRAME,
103  EVAL_MODE_NB
104 };
105 
106 typedef struct ScaleContext {
107  const AVClass *class;
108  struct SwsContext *sws; ///< software scaler context
109  struct SwsContext *isws[2]; ///< software scaler context for interlaced material
110  AVDictionary *opts;
111 
112  /**
113  * New dimensions. Special values are:
114  * 0 = original width/height
115  * -1 = keep original aspect
116  * -N = try to keep aspect but make sure it is divisible by N
117  */
118  int w, h;
119  char *size_str;
120  unsigned int flags; ///< sws flags
121  double param[2]; ///< sws params
122 
123  int hsub, vsub; ///< chroma subsampling
124  int slice_y; ///< top of current output slice
125  int input_is_pal; ///< set to 1 if the input format is paletted
126  int output_is_pal; ///< set to 1 if the output format is paletted
127  int interlaced;
128 
129  char *w_expr; ///< width expression string
130  char *h_expr; ///< height expression string
131  AVExpr *w_pexpr;
132  AVExpr *h_pexpr;
133  double var_values[VARS_NB];
134 
135  char *flags_str;
136 
137  char *in_color_matrix;
138  char *out_color_matrix;
139 
140  int in_range;
141  int in_frame_range;
142  int out_range;
143 
144  int out_h_chr_pos;
145  int out_v_chr_pos;
146  int in_h_chr_pos;
147  int in_v_chr_pos;
148 
149  int force_original_aspect_ratio;
150  int force_divisible_by;
151 
152  int eval_mode; ///< expression evaluation mode
153 
154 } ScaleContext;
155 
156 const AVFilter ff_vf_scale2ref;
157 
158 static int config_props(AVFilterLink *outlink);
159 
160 static int check_exprs(AVFilterContext *ctx)
161 {
162  ScaleContext *scale = ctx->priv;
163  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
164 
165  if (!scale->w_pexpr && !scale->h_pexpr)
166  return AVERROR(EINVAL);
167 
168  if (scale->w_pexpr)
169  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
170  if (scale->h_pexpr)
171  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
172 
173  if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
174  av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
175  return AVERROR(EINVAL);
176  }
177 
178  if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
179  av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
180  return AVERROR(EINVAL);
181  }
182 
183  if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
184  (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
185  av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr);
186  }
187 
188  if (ctx->filter != &ff_vf_scale2ref &&
189  (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] ||
190  vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] ||
191  vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] ||
192  vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] ||
193  vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] ||
194  vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] ||
195  vars_w[VAR_S2R_MAIN_HSUB] || vars_h[VAR_S2R_MAIN_HSUB] ||
196  vars_w[VAR_S2R_MAIN_VSUB] || vars_h[VAR_S2R_MAIN_VSUB] ||
197  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
198  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
199  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
200  av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref variables are not valid in scale filter.\n");
201  return AVERROR(EINVAL);
202  }
203 
204  if (scale->eval_mode == EVAL_MODE_INIT &&
205  (vars_w[VAR_N] || vars_h[VAR_N] ||
206  vars_w[VAR_T] || vars_h[VAR_T] ||
207  vars_w[VAR_POS] || vars_h[VAR_POS] ||
208  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
209  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
210  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
211  av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', 'pos' are not valid in init eval_mode.\n");
212  return AVERROR(EINVAL);
213  }
214 
215  return 0;
216 }
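/* For illustration, check_exprs() rejects self-references such as
 *     scale=w=ow/2:h=ih          ("Width expression cannot be self-referencing")
 * and, when eval=init, per-frame variables such as
 *     scale=eval=init:w='iw/(n+1)':h=-1
 * while mutually referencing expressions (w using oh and h using ow) only
 * produce the "Circular references" warning above.
 */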
217 
218 static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
219 {
220  ScaleContext *scale = ctx->priv;
221  int ret, is_inited = 0;
222  char *old_str_expr = NULL;
223  AVExpr *old_pexpr = NULL;
224 
225  if (str_expr) {
226  old_str_expr = av_strdup(str_expr);
227  if (!old_str_expr)
228  return AVERROR(ENOMEM);
229  av_opt_set(scale, var, args, 0);
230  }
231 
232  if (*pexpr_ptr) {
233  old_pexpr = *pexpr_ptr;
234  *pexpr_ptr = NULL;
235  is_inited = 1;
236  }
237 
238  ret = av_expr_parse(pexpr_ptr, args, var_names,
239  NULL, NULL, NULL, NULL, 0, ctx);
240  if (ret < 0) {
241  av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var, args);
242  goto revert;
243  }
244 
245  ret = check_exprs(ctx);
246  if (ret < 0)
247  goto revert;
248 
249  if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
250  goto revert;
251 
252  av_expr_free(old_pexpr);
253  old_pexpr = NULL;
254  av_freep(&old_str_expr);
255 
256  return 0;
257 
258 revert:
259  av_expr_free(*pexpr_ptr);
260  *pexpr_ptr = NULL;
261  if (old_str_expr) {
262  av_opt_set(scale, var, old_str_expr, 0);
263  av_free(old_str_expr);
264  }
265  if (old_pexpr)
266  *pexpr_ptr = old_pexpr;
267 
268  return ret;
269 }
270 
271 static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
272 {
273  ScaleContext *scale = ctx->priv;
274  int ret;
275 
276  if (scale->size_str && (scale->w_expr || scale->h_expr)) {
277  av_log(ctx, AV_LOG_ERROR,
278  "Size and width/height expressions cannot be set at the same time.\n");
279  return AVERROR(EINVAL);
280  }
281 
282  if (scale->w_expr && !scale->h_expr)
283  FFSWAP(char *, scale->w_expr, scale->size_str);
284 
285  if (scale->size_str) {
286  char buf[32];
287  if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
288  av_log(ctx, AV_LOG_ERROR,
289  "Invalid size '%s'\n", scale->size_str);
290  return ret;
291  }
292  snprintf(buf, sizeof(buf)-1, "%d", scale->w);
293  av_opt_set(scale, "w", buf, 0);
294  snprintf(buf, sizeof(buf)-1, "%d", scale->h);
295  av_opt_set(scale, "h", buf, 0);
296  }
297  if (!scale->w_expr)
298  av_opt_set(scale, "w", "iw", 0);
299  if (!scale->h_expr)
300  av_opt_set(scale, "h", "ih", 0);
301 
302  ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
303  if (ret < 0)
304  return ret;
305 
306  ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
307  if (ret < 0)
308  return ret;
309 
310  av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
311  scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);
312 
313  scale->flags = 0;
314 
315  if (scale->flags_str && *scale->flags_str) {
316  const AVClass *class = sws_get_class();
317  const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
318  AV_OPT_SEARCH_FAKE_OBJ);
319  int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
320  if (ret < 0)
321  return ret;
322  }
323  scale->opts = *opts;
324  *opts = NULL;
325 
326  scale->in_frame_range = AVCOL_RANGE_UNSPECIFIED;
327 
328  return 0;
329 }
330 
331 static av_cold void uninit(AVFilterContext *ctx)
332 {
333  ScaleContext *scale = ctx->priv;
334  av_expr_free(scale->w_pexpr);
335  av_expr_free(scale->h_pexpr);
336  scale->w_pexpr = scale->h_pexpr = NULL;
337  sws_freeContext(scale->sws);
338  sws_freeContext(scale->isws[0]);
339  sws_freeContext(scale->isws[1]);
340  scale->sws = NULL;
341  av_dict_free(&scale->opts);
342 }
343 
344 static int query_formats(AVFilterContext *ctx)
345 {
346  AVFilterFormats *formats;
347  const AVPixFmtDescriptor *desc;
348  enum AVPixelFormat pix_fmt;
349  int ret;
350 
351  desc = NULL;
352  formats = NULL;
353  while ((desc = av_pix_fmt_desc_next(desc))) {
354  pix_fmt = av_pix_fmt_desc_get_id(desc);
355  if ((sws_isSupportedInput(pix_fmt) ||
356  sws_isSupportedEndiannessConversion(pix_fmt))
357  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
358  return ret;
359  }
360  }
361  if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->outcfg.formats)) < 0)
362  return ret;
363 
364  desc = NULL;
365  formats = NULL;
366  while ((desc = av_pix_fmt_desc_next(desc))) {
367  pix_fmt = av_pix_fmt_desc_get_id(desc);
368  if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
369  sws_isSupportedEndiannessConversion(pix_fmt))
370  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
371  return ret;
372  }
373  }
374  if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->incfg.formats)) < 0)
375  return ret;
376 
377  return 0;
378 }
379 
380 static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
381 {
382  if (!s)
383  s = "bt601";
384 
385  if (s && strstr(s, "bt709")) {
386  colorspace = AVCOL_SPC_BT709;
387  } else if (s && strstr(s, "fcc")) {
388  colorspace = AVCOL_SPC_FCC;
389  } else if (s && strstr(s, "smpte240m")) {
390  colorspace = AVCOL_SPC_SMPTE240M;
391  } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
392  colorspace = AVCOL_SPC_BT470BG;
393  } else if (s && strstr(s, "bt2020")) {
394  colorspace = AVCOL_SPC_BT2020_NCL;
395  }
396 
397  if (colorspace < 1 || colorspace > 10 || colorspace == 8) {
398  colorspace = AVCOL_SPC_BT470BG;
399  }
400 
401  return sws_getCoefficients(colorspace);
402 }
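/* Illustrative option usage mapping onto the tables returned here, e.g.
 *     scale=in_color_matrix=bt601:out_color_matrix=bt709:out_range=tv
 * selects the BT.601 coefficients for the input, BT.709 for the output and
 * limited range for the destination (applied via sws_setColorspaceDetails()).
 */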
403 
404 static int scale_eval_dimensions(AVFilterContext *ctx)
405 {
406  ScaleContext *scale = ctx->priv;
407  const char scale2ref = ctx->filter == &ff_vf_scale2ref;
408  const AVFilterLink *inlink = scale2ref ? ctx->inputs[1] : ctx->inputs[0];
409  const AVFilterLink *outlink = ctx->outputs[0];
410  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
411  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
412  char *expr;
413  int eval_w, eval_h;
414  int ret;
415  double res;
416  const AVPixFmtDescriptor *main_desc;
417  const AVFilterLink *main_link;
418 
419  if (scale2ref) {
420  main_link = ctx->inputs[0];
421  main_desc = av_pix_fmt_desc_get(main_link->format);
422  }
423 
424  scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w;
425  scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h;
426  scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
427  scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
428  scale->var_values[VAR_A] = (double) inlink->w / inlink->h;
429  scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
430  (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
431  scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
432  scale->var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
433  scale->var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
434  scale->var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
435  scale->var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;
436 
437  if (scale2ref) {
438  scale->var_values[VAR_S2R_MAIN_W] = main_link->w;
439  scale->var_values[VAR_S2R_MAIN_H] = main_link->h;
440  scale->var_values[VAR_S2R_MAIN_A] = (double) main_link->w / main_link->h;
441  scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
442  (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
443  scale->var_values[VAR_S2R_MAIN_DAR] = scale->var_values[VAR_S2R_MDAR] =
444  scale->var_values[VAR_S2R_MAIN_A] * scale->var_values[VAR_S2R_MAIN_SAR];
445  scale->var_values[VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w;
446  scale->var_values[VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h;
447  }
448 
449  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
450  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;
451 
452  res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
453  if (isnan(res)) {
454  expr = scale->h_expr;
455  ret = AVERROR(EINVAL);
456  goto fail;
457  }
458  eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int) res == 0 ? inlink->h : (int) res;
459 
460  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
461  if (isnan(res)) {
462  expr = scale->w_expr;
463  ret = AVERROR(EINVAL);
464  goto fail;
465  }
466  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;
467 
468  scale->w = eval_w;
469  scale->h = eval_h;
470 
471  return 0;
472 
473 fail:
474  av_log(ctx, AV_LOG_ERROR,
475  "Error when evaluating the expression '%s'.\n", expr);
476  return ret;
477 }
478 
479 static int config_props(AVFilterLink *outlink)
480 {
481  AVFilterContext *ctx = outlink->src;
482  AVFilterLink *inlink0 = outlink->src->inputs[0];
483  AVFilterLink *inlink = ctx->filter == &ff_vf_scale2ref ?
484  outlink->src->inputs[1] :
485  outlink->src->inputs[0];
486  enum AVPixelFormat outfmt = outlink->format;
487  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
488  ScaleContext *scale = ctx->priv;
489  int ret;
490 
491  if ((ret = scale_eval_dimensions(ctx)) < 0)
492  goto fail;
493 
494  ff_scale_adjust_dimensions(inlink, &scale->w, &scale->h,
495  scale->force_original_aspect_ratio,
496  scale->force_divisible_by);
497 
498  if (scale->w > INT_MAX ||
499  scale->h > INT_MAX ||
500  (scale->h * inlink->w) > INT_MAX ||
501  (scale->w * inlink->h) > INT_MAX)
502  av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
503 
504  outlink->w = scale->w;
505  outlink->h = scale->h;
506 
507  /* TODO: make algorithm configurable */
508 
509  scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL;
510  if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
511  scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL;
512 
513  if (scale->sws)
514  sws_freeContext(scale->sws);
515  if (scale->isws[0])
516  sws_freeContext(scale->isws[0]);
517  if (scale->isws[1])
518  sws_freeContext(scale->isws[1]);
519  scale->isws[0] = scale->isws[1] = scale->sws = NULL;
520  if (inlink0->w == outlink->w &&
521  inlink0->h == outlink->h &&
522  !scale->out_color_matrix &&
523  scale->in_range == scale->out_range &&
524  inlink0->format == outlink->format)
525  ;
526  else {
527  struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
528  int i;
529 
530  for (i = 0; i < 3; i++) {
531  int in_v_chr_pos = scale->in_v_chr_pos, out_v_chr_pos = scale->out_v_chr_pos;
532  struct SwsContext *const s = sws_alloc_context();
533  if (!s)
534  return AVERROR(ENOMEM);
535  *swscs[i] = s;
536 
537  av_opt_set_int(s, "srcw", inlink0 ->w, 0);
538  av_opt_set_int(s, "srch", inlink0 ->h >> !!i, 0);
539  av_opt_set_int(s, "src_format", inlink0->format, 0);
540  av_opt_set_int(s, "dstw", outlink->w, 0);
541  av_opt_set_int(s, "dsth", outlink->h >> !!i, 0);
542  av_opt_set_int(s, "dst_format", outfmt, 0);
543  av_opt_set_int(s, "sws_flags", scale->flags, 0);
544  av_opt_set_int(s, "param0", scale->param[0], 0);
545  av_opt_set_int(s, "param1", scale->param[1], 0);
546  av_opt_set_int(s, "threads", ff_filter_get_nb_threads(ctx), 0);
547  if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
548  av_opt_set_int(s, "src_range",
549  scale->in_range == AVCOL_RANGE_JPEG, 0);
550  else if (scale->in_frame_range != AVCOL_RANGE_UNSPECIFIED)
551  av_opt_set_int(s, "src_range",
552  scale->in_frame_range == AVCOL_RANGE_JPEG, 0);
553  if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
554  av_opt_set_int(s, "dst_range",
555  scale->out_range == AVCOL_RANGE_JPEG, 0);
556 
557  if (scale->opts) {
558  AVDictionaryEntry *e = NULL;
559  while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
560  if ((ret = av_opt_set(s, e->key, e->value, 0)) < 0)
561  return ret;
562  }
563  }
564  /* Override YUV420P default settings to have the correct (MPEG-2) chroma positions
565  * MPEG-2 chroma positions are used by convention
566  * XXX: support other 4:2:0 pixel formats */
567  if (inlink0->format == AV_PIX_FMT_YUV420P && scale->in_v_chr_pos == -513) {
568  in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
569  }
570 
571  if (outlink->format == AV_PIX_FMT_YUV420P && scale->out_v_chr_pos == -513) {
572  out_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
573  }
574 
575  av_opt_set_int(s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
576  av_opt_set_int(s, "src_v_chr_pos", in_v_chr_pos, 0);
577  av_opt_set_int(s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
578  av_opt_set_int(s, "dst_v_chr_pos", out_v_chr_pos, 0);
579 
580  if ((ret = sws_init_context(s, NULL, NULL)) < 0)
581  return ret;
582  if (!scale->interlaced)
583  break;
584  }
585  }
586 
587  if (inlink0->sample_aspect_ratio.num){
588  outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink0->w, outlink->w * inlink0->h}, inlink0->sample_aspect_ratio);
589  } else
590  outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
591 
592  av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
593  inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
594  inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
595  outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
596  outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
597  scale->flags);
598  return 0;
599 
600 fail:
601  return ret;
602 }
603 
604 static int config_props_ref(AVFilterLink *outlink)
605 {
606  AVFilterLink *inlink = outlink->src->inputs[1];
607 
608  outlink->w = inlink->w;
609  outlink->h = inlink->h;
610  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
611  outlink->time_base = inlink->time_base;
612  outlink->frame_rate = inlink->frame_rate;
613 
614  return 0;
615 }
616 
617 static int request_frame(AVFilterLink *outlink)
618 {
619  return ff_request_frame(outlink->src->inputs[0]);
620 }
621 
622 static int request_frame_ref(AVFilterLink *outlink)
623 {
624  return ff_request_frame(outlink->src->inputs[1]);
625 }
626 
627 static void frame_offset(AVFrame *frame, int dir, int is_pal)
628 {
629  for (int i = 0; i < 4 && frame->data[i]; i++) {
630  if (i == 1 && is_pal)
631  break;
632  frame->data[i] += frame->linesize[i] * dir;
633  }
634 }
635 
636 static int scale_field(ScaleContext *scale, AVFrame *dst, AVFrame *src,
637  int field)
638 {
639  int orig_h_src = src->height;
640  int orig_h_dst = dst->height;
641  int ret;
642 
643  // offset the data pointers for the bottom field
644  if (field) {
645  frame_offset(src, 1, scale->input_is_pal);
646  frame_offset(dst, 1, scale->output_is_pal);
647  }
648 
649  // take every second line
650  for (int i = 0; i < 4; i++) {
651  src->linesize[i] *= 2;
652  dst->linesize[i] *= 2;
653  }
654  src->height /= 2;
655  dst->height /= 2;
656 
657  ret = sws_scale_frame(scale->isws[field], dst, src);
658  if (ret < 0)
659  return ret;
660 
661  // undo the changes we made above
662  for (int i = 0; i < 4; i++) {
663  src->linesize[i] /= 2;
664  dst->linesize[i] /= 2;
665  }
666  src->height = orig_h_src;
667  dst->height = orig_h_dst;
668 
669  if (field) {
670  frame_offset(src, -1, scale->input_is_pal);
671  frame_offset(dst, -1, scale->output_is_pal);
672  }
673 
674  return 0;
675 }
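/* Sketch of the field addressing used above (illustrative): for a frame with
 * linesize L and 8 rows, doubling the linesizes and halving the height makes
 * rows {0,2,4,6} (the top field) visible to swscale; offsetting data[] by one
 * line first (frame_offset(..., 1, ...)) exposes rows {1,3,5,7}, the bottom
 * field, instead.
 */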
676 
677 static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
678 {
679  AVFilterContext *ctx = link->dst;
680  ScaleContext *scale = ctx->priv;
681  AVFilterLink *outlink = ctx->outputs[0];
682  AVFrame *out;
683  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
684  char buf[32];
685  int ret;
686  int in_range;
687  int frame_changed;
688 
689  *frame_out = NULL;
690  if (in->colorspace == AVCOL_SPC_YCGCO)
691  av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
692 
693  frame_changed = in->width != link->w ||
694  in->height != link->h ||
695  in->format != link->format ||
696  in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
697  in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;
698 
699  if (in->color_range != AVCOL_RANGE_UNSPECIFIED &&
700  scale->in_range == AVCOL_RANGE_UNSPECIFIED &&
701  in->color_range != scale->in_frame_range) {
702  scale->in_frame_range = in->color_range;
703  frame_changed = 1;
704  }
705 
706  if (scale->eval_mode == EVAL_MODE_FRAME || frame_changed) {
707  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
708 
709  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
710  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
711 
712  if (scale->eval_mode == EVAL_MODE_FRAME &&
713  !frame_changed &&
714  ctx->filter != &ff_vf_scale2ref &&
715  !(vars_w[VAR_N] || vars_w[VAR_T] || vars_w[VAR_POS]) &&
716  !(vars_h[VAR_N] || vars_h[VAR_T] || vars_h[VAR_POS]) &&
717  scale->w && scale->h)
718  goto scale;
719 
720  if (scale->eval_mode == EVAL_MODE_INIT) {
721  snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
722  av_opt_set(scale, "w", buf, 0);
723  snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
724  av_opt_set(scale, "h", buf, 0);
725 
726  ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
727  if (ret < 0)
728  return ret;
729 
730  ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
731  if (ret < 0)
732  return ret;
733  }
734 
735  if (ctx->filter == &ff_vf_scale2ref) {
736  scale->var_values[VAR_S2R_MAIN_N] = link->frame_count_out;
737  scale->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
738  scale->var_values[VAR_S2R_MAIN_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
739  } else {
740  scale->var_values[VAR_N] = link->frame_count_out;
741  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
742  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
743  }
744 
745  link->dst->inputs[0]->format = in->format;
746  link->dst->inputs[0]->w = in->width;
747  link->dst->inputs[0]->h = in->height;
748 
749  link->dst->inputs[0]->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
750  link->dst->inputs[0]->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
751 
752  if ((ret = config_props(outlink)) < 0)
753  return ret;
754  }
755 
756 scale:
757  if (!scale->sws) {
758  *frame_out = in;
759  return 0;
760  }
761 
762  scale->hsub = desc->log2_chroma_w;
763  scale->vsub = desc->log2_chroma_h;
764 
765  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
766  if (!out) {
767  av_frame_free(&in);
768  return AVERROR(ENOMEM);
769  }
770  *frame_out = out;
771 
772  av_frame_copy_props(out, in);
773  out->width = outlink->w;
774  out->height = outlink->h;
775 
776  // Sanity checks:
777  // 1. If the output is RGB, set the matrix coefficients to RGB.
778  // 2. If the output is not RGB and we've got the RGB/XYZ (identity)
779  // matrix configured, unset the matrix.
780  // In theory these should be in swscale itself as the AVFrame
781  // based API gets in, so that not every swscale API user has
782  // to go through duplicating such sanity checks.
783  if (av_pix_fmt_desc_get(out->format)->flags & AV_PIX_FMT_FLAG_RGB)
784  out->colorspace = AVCOL_SPC_RGB;
785  else if (out->colorspace == AVCOL_SPC_RGB)
786  out->colorspace = AVCOL_SPC_UNSPECIFIED;
787 
788  if (scale->output_is_pal)
789  avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
790 
791  in_range = in->color_range;
792 
793  if ( scale->in_color_matrix
794  || scale->out_color_matrix
795  || scale-> in_range != AVCOL_RANGE_UNSPECIFIED
796  || in_range != AVCOL_RANGE_UNSPECIFIED
797  || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
798  int in_full, out_full, brightness, contrast, saturation;
799  const int *inv_table, *table;
800 
801  sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
802  (int **)&table, &out_full,
803  &brightness, &contrast, &saturation);
804 
805  if (scale->in_color_matrix)
806  inv_table = parse_yuv_type(scale->in_color_matrix, in->colorspace);
807  if (scale->out_color_matrix)
808  table = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
809  else if (scale->in_color_matrix)
810  table = inv_table;
811 
812  if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
813  in_full = (scale-> in_range == AVCOL_RANGE_JPEG);
814  else if (in_range != AVCOL_RANGE_UNSPECIFIED)
815  in_full = (in_range == AVCOL_RANGE_JPEG);
816  if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
817  out_full = (scale->out_range == AVCOL_RANGE_JPEG);
818 
819  sws_setColorspaceDetails(scale->sws, inv_table, in_full,
820  table, out_full,
821  brightness, contrast, saturation);
822  if (scale->isws[0])
823  sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
824  table, out_full,
825  brightness, contrast, saturation);
826  if (scale->isws[1])
827  sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
828  table, out_full,
829  brightness, contrast, saturation);
830 
831  out->color_range = out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
832  }
833 
834  av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
835  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
836  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
837  INT_MAX);
838 
839  if (scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)) {
840  ret = scale_field(scale, out, in, 0);
841  if (ret >= 0)
842  ret = scale_field(scale, out, in, 1);
843  } else {
844  ret = sws_scale_frame(scale->sws, out, in);
845  }
846 
847  av_frame_free(&in);
848  if (ret < 0)
849  av_frame_free(frame_out);
850  return ret;
851 }
852 
853 static int filter_frame(AVFilterLink *link, AVFrame *in)
854 {
855  AVFilterContext *ctx = link->dst;
856  AVFilterLink *outlink = ctx->outputs[0];
857  AVFrame *out;
858  int ret;
859 
860  ret = scale_frame(link, in, &out);
861  if (out)
862  return ff_filter_frame(outlink, out);
863 
864  return ret;
865 }
866 
867 static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
868 {
869  ScaleContext *scale = link->dst->priv;
870  AVFilterLink *outlink = link->dst->outputs[1];
871  int frame_changed;
872 
873  frame_changed = in->width != link->w ||
874  in->height != link->h ||
875  in->format != link->format ||
876  in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
877  in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;
878 
879  if (frame_changed) {
880  link->format = in->format;
881  link->w = in->width;
882  link->h = in->height;
883  link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
884  link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
885 
886  config_props_ref(outlink);
887  }
888 
889  if (scale->eval_mode == EVAL_MODE_FRAME) {
890  scale->var_values[VAR_N] = link->frame_count_out;
891  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
892  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
893  }
894 
895  return ff_filter_frame(outlink, in);
896 }
897 
898 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
899  char *res, int res_len, int flags)
900 {
901  ScaleContext *scale = ctx->priv;
902  char *str_expr;
903  AVExpr **pexpr_ptr;
904  int ret, w, h;
905 
906  w = !strcmp(cmd, "width") || !strcmp(cmd, "w");
907  h = !strcmp(cmd, "height") || !strcmp(cmd, "h");
908 
909  if (w || h) {
910  str_expr = w ? scale->w_expr : scale->h_expr;
911  pexpr_ptr = w ? &scale->w_pexpr : &scale->h_pexpr;
912 
913  ret = scale_parse_expr(ctx, str_expr, pexpr_ptr, cmd, args);
914  } else
915  ret = AVERROR(ENOSYS);
916 
917  if (ret < 0)
918  av_log(ctx, AV_LOG_ERROR, "Failed to process command. Continuing with existing parameters.\n");
919 
920  return ret;
921 }
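/* Example (an assumption about typical use, not taken from this file): the
 * "w"/"width" and "h"/"height" commands handled here can be driven at runtime,
 * e.g. through the sendcmd filter with a command file along the lines of
 *     12.0 scale w 1280;
 *     12.0 scale h -2;
 * Unknown commands fall through to AVERROR(ENOSYS) above.
 */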
922 
923 static const AVClass *child_class_iterate(void **iter)
924 {
925  const AVClass *c = *iter ? NULL : sws_get_class();
926  *iter = (void*)(uintptr_t)c;
927  return c;
928 }
929 
930 #define OFFSET(x) offsetof(ScaleContext, x)
931 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
932 #define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
933 
934 static const AVOption scale_options[] = {
935  { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
936  { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
937  { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
938  { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
939  { "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "" }, .flags = FLAGS },
940  { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_BOOL, {.i64 = 0 }, -1, 1, FLAGS },
941  { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
942  { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
943  { "in_color_matrix", "set input YCbCr type", OFFSET(in_color_matrix), AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS, "color" },
944  { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS, "color"},
945  { "auto", NULL, 0, AV_OPT_TYPE_CONST, { .str = "auto" }, 0, 0, FLAGS, "color" },
946  { "bt601", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt601" }, 0, 0, FLAGS, "color" },
947  { "bt470", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt470" }, 0, 0, FLAGS, "color" },
948  { "smpte170m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte170m" }, 0, 0, FLAGS, "color" },
949  { "bt709", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt709" }, 0, 0, FLAGS, "color" },
950  { "fcc", NULL, 0, AV_OPT_TYPE_CONST, { .str = "fcc" }, 0, 0, FLAGS, "color" },
951  { "smpte240m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte240m" }, 0, 0, FLAGS, "color" },
952  { "bt2020", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt2020" }, 0, 0, FLAGS, "color" },
953  { "in_range", "set input color range", OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
954  { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
955  { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
956  { "unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
957  { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
958  { "limited",NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
959  { "jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
960  { "mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
961  { "tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
962  { "pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
963  { "in_v_chr_pos", "input vertical chroma position in luma grid/256" , OFFSET(in_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
964  { "in_h_chr_pos", "input horizontal chroma position in luma grid/256", OFFSET(in_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
965  { "out_v_chr_pos", "output vertical chroma position in luma grid/256" , OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
966  { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
967  { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
968  { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
969  { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
970  { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
971  { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1}, 1, 256, FLAGS },
972  { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
973  { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
974  { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
975  { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
976  { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
977  { NULL }
978 };
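/* Illustrative combination of the options above:
 *     scale=w=1280:h=720:force_original_aspect_ratio=decrease:flags=lanczos
 * fits the input inside 1280x720 without distorting its aspect ratio and
 * selects the Lanczos scaler via the sws flags.
 */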
979 
980 static const AVClass scale_class = {
981  .class_name = "scale(2ref)",
982  .item_name = av_default_item_name,
983  .option = scale_options,
984  .version = LIBAVUTIL_VERSION_INT,
985  .category = AV_CLASS_CATEGORY_FILTER,
986  .child_class_iterate = child_class_iterate,
987 };
988 
989 static const AVFilterPad avfilter_vf_scale_inputs[] = {
990  {
991  .name = "default",
992  .type = AVMEDIA_TYPE_VIDEO,
993  .filter_frame = filter_frame,
994  },
995 };
996 
997 static const AVFilterPad avfilter_vf_scale_outputs[] = {
998  {
999  .name = "default",
1000  .type = AVMEDIA_TYPE_VIDEO,
1001  .config_props = config_props,
1002  },
1003 };
1004 
1005 const AVFilter ff_vf_scale = {
1006  .name = "scale",
1007  .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
1008  .init_dict = init_dict,
1009  .uninit = uninit,
1010  .priv_size = sizeof(ScaleContext),
1011  .priv_class = &scale_class,
1012  FILTER_INPUTS(avfilter_vf_scale_inputs),
1013  FILTER_OUTPUTS(avfilter_vf_scale_outputs),
1014  FILTER_QUERY_FUNC(query_formats),
1015  .process_command = process_command,
1016 };
1017 
1018 static const AVFilterPad avfilter_vf_scale2ref_inputs[] = {
1019  {
1020  .name = "default",
1021  .type = AVMEDIA_TYPE_VIDEO,
1022  .filter_frame = filter_frame,
1023  },
1024  {
1025  .name = "ref",
1026  .type = AVMEDIA_TYPE_VIDEO,
1027  .filter_frame = filter_frame_ref,
1028  },
1029 };
1030 
1031 static const AVFilterPad avfilter_vf_scale2ref_outputs[] = {
1032  {
1033  .name = "default",
1034  .type = AVMEDIA_TYPE_VIDEO,
1035  .config_props = config_props,
1036  .request_frame= request_frame,
1037  },
1038  {
1039  .name = "ref",
1040  .type = AVMEDIA_TYPE_VIDEO,
1041  .config_props = config_props_ref,
1042  .request_frame= request_frame_ref,
1043  },
1044 };
1045 
1046 const AVFilter ff_vf_scale2ref = {
1047  .name = "scale2ref",
1048  .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
1049  .init_dict = init_dict,
1050  .uninit = uninit,
1051  .priv_size = sizeof(ScaleContext),
1052  .priv_class = &scale_class,
1053  FILTER_INPUTS(avfilter_vf_scale2ref_inputs),
1054  FILTER_OUTPUTS(avfilter_vf_scale2ref_outputs),
1055  FILTER_QUERY_FUNC(query_formats),
1056  .process_command = process_command,
1057 };