FFmpeg
vf_scale.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2007 Bobby Bingham
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * scale video filter
24  */
25 
26 #include <stdio.h>
27 #include <string.h>
28 
29 #include "avfilter.h"
30 #include "formats.h"
31 #include "internal.h"
32 #include "scale_eval.h"
33 #include "video.h"
34 #include "libavutil/avstring.h"
35 #include "libavutil/eval.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/opt.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/imgutils.h"
42 #include "libavutil/avassert.h"
43 #include "libswscale/swscale.h"
44 
45 static const char *const var_names[] = {
46  "in_w", "iw",
47  "in_h", "ih",
48  "out_w", "ow",
49  "out_h", "oh",
50  "a",
51  "sar",
52  "dar",
53  "hsub",
54  "vsub",
55  "ohsub",
56  "ovsub",
57  "n",
58  "t",
59  "pos",
60  "main_w",
61  "main_h",
62  "main_a",
63  "main_sar",
64  "main_dar", "mdar",
65  "main_hsub",
66  "main_vsub",
67  "main_n",
68  "main_t",
69  "main_pos",
70  NULL
71 };
72 
73 enum var_name {
99 };
100 
101 enum EvalMode {
105 };
106 
107 typedef struct ScaleContext {
108  const AVClass *class;
109  struct SwsContext *sws; ///< software scaler context
110  struct SwsContext *isws[2]; ///< software scaler context for interlaced material
112 
113  /**
114  * New dimensions. Special values are:
115  * 0 = original width/height
116  * -1 = keep original aspect
117  * -N = try to keep aspect but make sure it is divisible by N
118  */
119  int w, h;
120  char *size_str;
121  unsigned int flags; ///sws flags
122  double param[2]; // sws params
123 
124  int hsub, vsub; ///< chroma subsampling
125  int slice_y; ///< top of current output slice
126  int input_is_pal; ///< set to 1 if the input format is paletted
127  int output_is_pal; ///< set to 1 if the output format is paletted
129 
130  char *w_expr; ///< width expression string
131  char *h_expr; ///< height expression string
135 
136  char *flags_str;
137 
140 
141  int in_range;
143 
148 
151 
153 
154  int eval_mode; ///< expression evaluation mode
155 
156 } ScaleContext;
157 
159 
160 static int config_props(AVFilterLink *outlink);
161 
163 {
164  ScaleContext *scale = ctx->priv;
165  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
166 
167  if (!scale->w_pexpr && !scale->h_pexpr)
168  return AVERROR(EINVAL);
169 
170  if (scale->w_pexpr)
171  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
172  if (scale->h_pexpr)
173  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
174 
175  if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
176  av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
177  return AVERROR(EINVAL);
178  }
179 
180  if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
181  av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
182  return AVERROR(EINVAL);
183  }
184 
185  if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
186  (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
187  av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr);
188  }
189 
190  if (ctx->filter != &ff_vf_scale2ref &&
191  (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] ||
192  vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] ||
193  vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] ||
194  vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] ||
195  vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] ||
196  vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] ||
197  vars_w[VAR_S2R_MAIN_HSUB] || vars_h[VAR_S2R_MAIN_HSUB] ||
198  vars_w[VAR_S2R_MAIN_VSUB] || vars_h[VAR_S2R_MAIN_VSUB] ||
199  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
200  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
201  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
202  av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref variables are not valid in scale filter.\n");
203  return AVERROR(EINVAL);
204  }
205 
206  if (scale->eval_mode == EVAL_MODE_INIT &&
207  (vars_w[VAR_N] || vars_h[VAR_N] ||
208  vars_w[VAR_T] || vars_h[VAR_T] ||
209  vars_w[VAR_POS] || vars_h[VAR_POS] ||
210  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
211  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
212  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
213  av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', 'pos' are not valid in init eval_mode.\n");
214  return AVERROR(EINVAL);
215  }
216 
217  return 0;
218 }
219 
220 static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
221 {
222  ScaleContext *scale = ctx->priv;
223  int ret, is_inited = 0;
224  char *old_str_expr = NULL;
225  AVExpr *old_pexpr = NULL;
226 
227  if (str_expr) {
228  old_str_expr = av_strdup(str_expr);
229  if (!old_str_expr)
230  return AVERROR(ENOMEM);
231  av_opt_set(scale, var, args, 0);
232  }
233 
234  if (*pexpr_ptr) {
235  old_pexpr = *pexpr_ptr;
236  *pexpr_ptr = NULL;
237  is_inited = 1;
238  }
239 
240  ret = av_expr_parse(pexpr_ptr, args, var_names,
241  NULL, NULL, NULL, NULL, 0, ctx);
242  if (ret < 0) {
243  av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var, args);
244  goto revert;
245  }
246 
247  ret = check_exprs(ctx);
248  if (ret < 0)
249  goto revert;
250 
251  if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
252  goto revert;
253 
254  av_expr_free(old_pexpr);
255  old_pexpr = NULL;
256  av_freep(&old_str_expr);
257 
258  return 0;
259 
260 revert:
261  av_expr_free(*pexpr_ptr);
262  *pexpr_ptr = NULL;
263  if (old_str_expr) {
264  av_opt_set(scale, var, old_str_expr, 0);
265  av_free(old_str_expr);
266  }
267  if (old_pexpr)
268  *pexpr_ptr = old_pexpr;
269 
270  return ret;
271 }
272 
274 {
275  ScaleContext *scale = ctx->priv;
276  int ret;
277 
278  if (scale->size_str && (scale->w_expr || scale->h_expr)) {
279  av_log(ctx, AV_LOG_ERROR,
280  "Size and width/height expressions cannot be set at the same time.\n");
281  return AVERROR(EINVAL);
282  }
283 
284  if (scale->w_expr && !scale->h_expr)
285  FFSWAP(char *, scale->w_expr, scale->size_str);
286 
287  if (scale->size_str) {
288  char buf[32];
289  if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
290  av_log(ctx, AV_LOG_ERROR,
291  "Invalid size '%s'\n", scale->size_str);
292  return ret;
293  }
294  snprintf(buf, sizeof(buf)-1, "%d", scale->w);
295  av_opt_set(scale, "w", buf, 0);
296  snprintf(buf, sizeof(buf)-1, "%d", scale->h);
297  av_opt_set(scale, "h", buf, 0);
298  }
299  if (!scale->w_expr)
300  av_opt_set(scale, "w", "iw", 0);
301  if (!scale->h_expr)
302  av_opt_set(scale, "h", "ih", 0);
303 
304  ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
305  if (ret < 0)
306  return ret;
307 
308  ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
309  if (ret < 0)
310  return ret;
311 
312  av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
313  scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);
314 
315  scale->flags = 0;
316 
317  if (scale->flags_str) {
318  const AVClass *class = sws_get_class();
319  const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
321  int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
322  if (ret < 0)
323  return ret;
324  }
325  scale->opts = *opts;
326  *opts = NULL;
327 
328  return 0;
329 }
330 
332 {
333  ScaleContext *scale = ctx->priv;
334  av_expr_free(scale->w_pexpr);
335  av_expr_free(scale->h_pexpr);
336  scale->w_pexpr = scale->h_pexpr = NULL;
337  sws_freeContext(scale->sws);
338  sws_freeContext(scale->isws[0]);
339  sws_freeContext(scale->isws[1]);
340  scale->sws = NULL;
341  av_dict_free(&scale->opts);
342 }
343 
345 {
347  enum AVPixelFormat pix_fmt;
348  int ret;
349 
350  if (ctx->inputs[0]) {
351  const AVPixFmtDescriptor *desc = NULL;
352  formats = NULL;
353  while ((desc = av_pix_fmt_desc_next(desc))) {
354  pix_fmt = av_pix_fmt_desc_get_id(desc);
355  if ((sws_isSupportedInput(pix_fmt) ||
357  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
358  return ret;
359  }
360  }
361  if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->outcfg.formats)) < 0)
362  return ret;
363  }
364  if (ctx->outputs[0]) {
365  const AVPixFmtDescriptor *desc = NULL;
366  formats = NULL;
367  while ((desc = av_pix_fmt_desc_next(desc))) {
368  pix_fmt = av_pix_fmt_desc_get_id(desc);
369  if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
371  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
372  return ret;
373  }
374  }
375  if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->incfg.formats)) < 0)
376  return ret;
377  }
378 
379  return 0;
380 }
381 
382 static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
383 {
384  if (!s)
385  s = "bt601";
386 
387  if (s && strstr(s, "bt709")) {
388  colorspace = AVCOL_SPC_BT709;
389  } else if (s && strstr(s, "fcc")) {
390  colorspace = AVCOL_SPC_FCC;
391  } else if (s && strstr(s, "smpte240m")) {
392  colorspace = AVCOL_SPC_SMPTE240M;
393  } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
394  colorspace = AVCOL_SPC_BT470BG;
395  } else if (s && strstr(s, "bt2020")) {
396  colorspace = AVCOL_SPC_BT2020_NCL;
397  }
398 
399  if (colorspace < 1 || colorspace > 10 || colorspace == 8) {
400  colorspace = AVCOL_SPC_BT470BG;
401  }
402 
403  return sws_getCoefficients(colorspace);
404 }
405 
407 {
408  ScaleContext *scale = ctx->priv;
409  const char scale2ref = ctx->filter == &ff_vf_scale2ref;
410  const AVFilterLink *inlink = scale2ref ? ctx->inputs[1] : ctx->inputs[0];
411  const AVFilterLink *outlink = ctx->outputs[0];
413  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
414  char *expr;
415  int eval_w, eval_h;
416  int ret;
417  double res;
418  const AVPixFmtDescriptor *main_desc;
419  const AVFilterLink *main_link;
420 
421  if (scale2ref) {
422  main_link = ctx->inputs[0];
423  main_desc = av_pix_fmt_desc_get(main_link->format);
424  }
425 
426  scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w;
427  scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h;
428  scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
429  scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
430  scale->var_values[VAR_A] = (double) inlink->w / inlink->h;
431  scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
432  (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
433  scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
434  scale->var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
435  scale->var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
436  scale->var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
437  scale->var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;
438 
439  if (scale2ref) {
440  scale->var_values[VAR_S2R_MAIN_W] = main_link->w;
441  scale->var_values[VAR_S2R_MAIN_H] = main_link->h;
442  scale->var_values[VAR_S2R_MAIN_A] = (double) main_link->w / main_link->h;
443  scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
444  (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
447  scale->var_values[VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w;
448  scale->var_values[VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h;
449  }
450 
451  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
452  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;
453 
454  res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
455  if (isnan(res)) {
456  expr = scale->h_expr;
457  ret = AVERROR(EINVAL);
458  goto fail;
459  }
460  eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int) res == 0 ? inlink->h : (int) res;
461 
462  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
463  if (isnan(res)) {
464  expr = scale->w_expr;
465  ret = AVERROR(EINVAL);
466  goto fail;
467  }
468  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;
469 
470  scale->w = eval_w;
471  scale->h = eval_h;
472 
473  return 0;
474 
475 fail:
476  av_log(ctx, AV_LOG_ERROR,
477  "Error when evaluating the expression '%s'.\n", expr);
478  return ret;
479 }
480 
481 static int config_props(AVFilterLink *outlink)
482 {
483  AVFilterContext *ctx = outlink->src;
484  AVFilterLink *inlink0 = outlink->src->inputs[0];
485  AVFilterLink *inlink = ctx->filter == &ff_vf_scale2ref ?
486  outlink->src->inputs[1] :
487  outlink->src->inputs[0];
488  enum AVPixelFormat outfmt = outlink->format;
490  ScaleContext *scale = ctx->priv;
491  int ret;
492 
493  if ((ret = scale_eval_dimensions(ctx)) < 0)
494  goto fail;
495 
496  ff_scale_adjust_dimensions(inlink, &scale->w, &scale->h,
498  scale->force_divisible_by);
499 
500  if (scale->w > INT_MAX ||
501  scale->h > INT_MAX ||
502  (scale->h * inlink->w) > INT_MAX ||
503  (scale->w * inlink->h) > INT_MAX)
504  av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
505 
506  outlink->w = scale->w;
507  outlink->h = scale->h;
508 
509  /* TODO: make algorithm configurable */
510 
511  scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL;
512  if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
515 
516  if (scale->sws)
517  sws_freeContext(scale->sws);
518  if (scale->isws[0])
519  sws_freeContext(scale->isws[0]);
520  if (scale->isws[1])
521  sws_freeContext(scale->isws[1]);
522  scale->isws[0] = scale->isws[1] = scale->sws = NULL;
523  if (inlink0->w == outlink->w &&
524  inlink0->h == outlink->h &&
525  !scale->out_color_matrix &&
526  scale->in_range == scale->out_range &&
527  inlink0->format == outlink->format)
528  ;
529  else {
530  struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
531  int i;
532 
533  for (i = 0; i < 3; i++) {
534  int in_v_chr_pos = scale->in_v_chr_pos, out_v_chr_pos = scale->out_v_chr_pos;
535  struct SwsContext **s = swscs[i];
536  *s = sws_alloc_context();
537  if (!*s)
538  return AVERROR(ENOMEM);
539 
540  av_opt_set_int(*s, "srcw", inlink0 ->w, 0);
541  av_opt_set_int(*s, "srch", inlink0 ->h >> !!i, 0);
542  av_opt_set_int(*s, "src_format", inlink0->format, 0);
543  av_opt_set_int(*s, "dstw", outlink->w, 0);
544  av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
545  av_opt_set_int(*s, "dst_format", outfmt, 0);
546  av_opt_set_int(*s, "sws_flags", scale->flags, 0);
547  av_opt_set_int(*s, "param0", scale->param[0], 0);
548  av_opt_set_int(*s, "param1", scale->param[1], 0);
549  if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
550  av_opt_set_int(*s, "src_range",
551  scale->in_range == AVCOL_RANGE_JPEG, 0);
552  if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
553  av_opt_set_int(*s, "dst_range",
554  scale->out_range == AVCOL_RANGE_JPEG, 0);
555 
556  if (scale->opts) {
557  AVDictionaryEntry *e = NULL;
558  while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
559  if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
560  return ret;
561  }
562  }
563  /* Override YUV420P default settings to have the correct (MPEG-2) chroma positions
564  * MPEG-2 chroma positions are used by convention
565  * XXX: support other 4:2:0 pixel formats */
566  if (inlink0->format == AV_PIX_FMT_YUV420P && scale->in_v_chr_pos == -513) {
567  in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
568  }
569 
570  if (outlink->format == AV_PIX_FMT_YUV420P && scale->out_v_chr_pos == -513) {
571  out_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
572  }
573 
574  av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
575  av_opt_set_int(*s, "src_v_chr_pos", in_v_chr_pos, 0);
576  av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
577  av_opt_set_int(*s, "dst_v_chr_pos", out_v_chr_pos, 0);
578 
579  if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
580  return ret;
581  if (!scale->interlaced)
582  break;
583  }
584  }
585 
586  if (inlink0->sample_aspect_ratio.num){
587  outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink0->w, outlink->w * inlink0->h}, inlink0->sample_aspect_ratio);
588  } else
589  outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
590 
591  av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
592  inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
594  outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
595  outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
596  scale->flags);
597  return 0;
598 
599 fail:
600  return ret;
601 }
602 
603 static int config_props_ref(AVFilterLink *outlink)
604 {
605  AVFilterLink *inlink = outlink->src->inputs[1];
606 
607  outlink->w = inlink->w;
608  outlink->h = inlink->h;
609  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
610  outlink->time_base = inlink->time_base;
611  outlink->frame_rate = inlink->frame_rate;
612 
613  return 0;
614 }
615 
616 static int request_frame(AVFilterLink *outlink)
617 {
618  return ff_request_frame(outlink->src->inputs[0]);
619 }
620 
621 static int request_frame_ref(AVFilterLink *outlink)
622 {
623  return ff_request_frame(outlink->src->inputs[1]);
624 }
625 
626 static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
627 {
628  ScaleContext *scale = link->dst->priv;
629  const uint8_t *in[4];
630  uint8_t *out[4];
631  int in_stride[4],out_stride[4];
632  int i;
633 
634  for (i=0; i<4; i++) {
635  int vsub= ((i+1)&2) ? scale->vsub : 0;
636  in_stride[i] = cur_pic->linesize[i] * mul;
637  out_stride[i] = out_buf->linesize[i] * mul;
638  in[i] = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i];
639  out[i] = out_buf->data[i] + field * out_buf->linesize[i];
640  }
641  if (scale->input_is_pal)
642  in[1] = cur_pic->data[1];
643  if (scale->output_is_pal)
644  out[1] = out_buf->data[1];
645 
646  return sws_scale(sws, in, in_stride, y/mul, h,
647  out,out_stride);
648 }
649 
650 static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
651 {
652  AVFilterContext *ctx = link->dst;
653  ScaleContext *scale = ctx->priv;
654  AVFilterLink *outlink = ctx->outputs[0];
655  AVFrame *out;
657  char buf[32];
658  int in_range;
659  int frame_changed;
660 
661  *frame_out = NULL;
662  if (in->colorspace == AVCOL_SPC_YCGCO)
663  av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
664 
665  frame_changed = in->width != link->w ||
666  in->height != link->h ||
667  in->format != link->format ||
670 
671  if (scale->eval_mode == EVAL_MODE_FRAME || frame_changed) {
672  int ret;
673  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
674 
675  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
676  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
677 
678  if (scale->eval_mode == EVAL_MODE_FRAME &&
679  !frame_changed &&
680  ctx->filter != &ff_vf_scale2ref &&
681  !(vars_w[VAR_N] || vars_w[VAR_T] || vars_w[VAR_POS]) &&
682  !(vars_h[VAR_N] || vars_h[VAR_T] || vars_h[VAR_POS]) &&
683  scale->w && scale->h)
684  goto scale;
685 
686  if (scale->eval_mode == EVAL_MODE_INIT) {
687  snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
688  av_opt_set(scale, "w", buf, 0);
689  snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
690  av_opt_set(scale, "h", buf, 0);
691 
692  ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
693  if (ret < 0)
694  return ret;
695 
696  ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
697  if (ret < 0)
698  return ret;
699  }
700 
701  if (ctx->filter == &ff_vf_scale2ref) {
702  scale->var_values[VAR_S2R_MAIN_N] = link->frame_count_out;
703  scale->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
704  scale->var_values[VAR_S2R_MAIN_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
705  } else {
706  scale->var_values[VAR_N] = link->frame_count_out;
707  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
708  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
709  }
710 
711  link->dst->inputs[0]->format = in->format;
712  link->dst->inputs[0]->w = in->width;
713  link->dst->inputs[0]->h = in->height;
714 
717 
718  if ((ret = config_props(outlink)) < 0)
719  return ret;
720  }
721 
722 scale:
723  if (!scale->sws) {
724  *frame_out = in;
725  return 0;
726  }
727 
728  scale->hsub = desc->log2_chroma_w;
729  scale->vsub = desc->log2_chroma_h;
730 
731  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
732  if (!out) {
733  av_frame_free(&in);
734  return AVERROR(ENOMEM);
735  }
736  *frame_out = out;
737 
738  av_frame_copy_props(out, in);
739  out->width = outlink->w;
740  out->height = outlink->h;
741 
742  if (scale->output_is_pal)
743  avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
744 
745  in_range = in->color_range;
746 
747  if ( scale->in_color_matrix
748  || scale->out_color_matrix
749  || scale-> in_range != AVCOL_RANGE_UNSPECIFIED
750  || in_range != AVCOL_RANGE_UNSPECIFIED
751  || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
752  int in_full, out_full, brightness, contrast, saturation;
753  const int *inv_table, *table;
754 
755  sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
756  (int **)&table, &out_full,
757  &brightness, &contrast, &saturation);
758 
759  if (scale->in_color_matrix)
760  inv_table = parse_yuv_type(scale->in_color_matrix, in->colorspace);
761  if (scale->out_color_matrix)
763  else if (scale->in_color_matrix)
764  table = inv_table;
765 
766  if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
767  in_full = (scale-> in_range == AVCOL_RANGE_JPEG);
768  else if (in_range != AVCOL_RANGE_UNSPECIFIED)
769  in_full = (in_range == AVCOL_RANGE_JPEG);
770  if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
771  out_full = (scale->out_range == AVCOL_RANGE_JPEG);
772 
773  sws_setColorspaceDetails(scale->sws, inv_table, in_full,
774  table, out_full,
775  brightness, contrast, saturation);
776  if (scale->isws[0])
777  sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
778  table, out_full,
779  brightness, contrast, saturation);
780  if (scale->isws[1])
781  sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
782  table, out_full,
783  brightness, contrast, saturation);
784 
785  out->color_range = out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
786  }
787 
789  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
790  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
791  INT_MAX);
792 
793  if (scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)) {
794  scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
795  scale_slice(link, out, in, scale->isws[1], 0, link->h /2, 2, 1);
796  } else if (scale->nb_slices) {
797  int i, slice_h, slice_start, slice_end = 0;
798  const int nb_slices = FFMIN(scale->nb_slices, link->h);
799  for (i = 0; i < nb_slices; i++) {
800  slice_start = slice_end;
801  slice_end = (link->h * (i+1)) / nb_slices;
802  slice_h = slice_end - slice_start;
803  scale_slice(link, out, in, scale->sws, slice_start, slice_h, 1, 0);
804  }
805  } else {
806  scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
807  }
808 
809  av_frame_free(&in);
810  return 0;
811 }
812 
814 {
815  AVFilterContext *ctx = link->dst;
816  AVFilterLink *outlink = ctx->outputs[0];
817  AVFrame *out;
818  int ret;
819 
820  ret = scale_frame(link, in, &out);
821  if (out)
822  return ff_filter_frame(outlink, out);
823 
824  return ret;
825 }
826 
828 {
829  ScaleContext *scale = link->dst->priv;
830  AVFilterLink *outlink = link->dst->outputs[1];
831  int frame_changed;
832 
833  frame_changed = in->width != link->w ||
834  in->height != link->h ||
835  in->format != link->format ||
838 
839  if (frame_changed) {
840  link->format = in->format;
841  link->w = in->width;
842  link->h = in->height;
845 
846  config_props_ref(outlink);
847  }
848 
849  if (scale->eval_mode == EVAL_MODE_FRAME) {
850  scale->var_values[VAR_N] = link->frame_count_out;
851  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
852  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
853  }
854 
855  return ff_filter_frame(outlink, in);
856 }
857 
858 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
859  char *res, int res_len, int flags)
860 {
861  ScaleContext *scale = ctx->priv;
862  char *str_expr;
863  AVExpr **pexpr_ptr;
864  int ret, w, h;
865 
866  w = !strcmp(cmd, "width") || !strcmp(cmd, "w");
867  h = !strcmp(cmd, "height") || !strcmp(cmd, "h");
868 
869  if (w || h) {
870  str_expr = w ? scale->w_expr : scale->h_expr;
871  pexpr_ptr = w ? &scale->w_pexpr : &scale->h_pexpr;
872 
873  ret = scale_parse_expr(ctx, str_expr, pexpr_ptr, cmd, args);
874  } else
875  ret = AVERROR(ENOSYS);
876 
877  if (ret < 0)
878  av_log(ctx, AV_LOG_ERROR, "Failed to process command. Continuing with existing parameters.\n");
879 
880  return ret;
881 }
882 
#if FF_API_CHILD_CLASS_NEXT
/* Legacy child-class enumeration: the only child is swscale's AVClass. */
static const AVClass *child_class_next(const AVClass *prev)
{
    if (prev)
        return NULL;
    return sws_get_class();
}
#endif
889 
890 static const AVClass *child_class_iterate(void **iter)
891 {
892  const AVClass *c = *iter ? NULL : sws_get_class();
893  *iter = (void*)(uintptr_t)c;
894  return c;
895 }
896 
897 #define OFFSET(x) offsetof(ScaleContext, x)
898 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
899 #define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
900 
901 static const AVOption scale_options[] = {
902  { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
903  { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
904  { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
905  { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
906  { "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
907  { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_BOOL, {.i64 = 0 }, -1, 1, FLAGS },
908  { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
909  { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
910  { "in_color_matrix", "set input YCbCr type", OFFSET(in_color_matrix), AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS, "color" },
911  { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS, "color"},
912  { "auto", NULL, 0, AV_OPT_TYPE_CONST, { .str = "auto" }, 0, 0, FLAGS, "color" },
913  { "bt601", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt601" }, 0, 0, FLAGS, "color" },
914  { "bt470", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt470" }, 0, 0, FLAGS, "color" },
915  { "smpte170m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte170m" }, 0, 0, FLAGS, "color" },
916  { "bt709", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt709" }, 0, 0, FLAGS, "color" },
917  { "fcc", NULL, 0, AV_OPT_TYPE_CONST, { .str = "fcc" }, 0, 0, FLAGS, "color" },
918  { "smpte240m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte240m" }, 0, 0, FLAGS, "color" },
919  { "bt2020", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt2020" }, 0, 0, FLAGS, "color" },
920  { "in_range", "set input color range", OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
921  { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
922  { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
923  { "unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
924  { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
925  { "limited",NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
926  { "jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
927  { "mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
928  { "tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
929  { "pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
930  { "in_v_chr_pos", "input vertical chroma position in luma grid/256" , OFFSET(in_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
931  { "in_h_chr_pos", "input horizontal chroma position in luma grid/256", OFFSET(in_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
932  { "out_v_chr_pos", "output vertical chroma position in luma grid/256" , OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
933  { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
934  { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
935  { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
936  { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
937  { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
938  { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1}, 1, 256, FLAGS },
939  { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
940  { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
941  { "nb_slices", "set the number of slices (debug purpose only)", OFFSET(nb_slices), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
942  { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
943  { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
944  { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
945  { NULL }
946 };
947 
948 static const AVClass scale_class = {
949  .class_name = "scale",
950  .item_name = av_default_item_name,
951  .option = scale_options,
952  .version = LIBAVUTIL_VERSION_INT,
953  .category = AV_CLASS_CATEGORY_FILTER,
954 #if FF_API_CHILD_CLASS_NEXT
955  .child_class_next = child_class_next,
956 #endif
957  .child_class_iterate = child_class_iterate,
958 };
959 
961  {
962  .name = "default",
963  .type = AVMEDIA_TYPE_VIDEO,
964  .filter_frame = filter_frame,
965  },
966  { NULL }
967 };
968 
970  {
971  .name = "default",
972  .type = AVMEDIA_TYPE_VIDEO,
973  .config_props = config_props,
974  },
975  { NULL }
976 };
977 
979  .name = "scale",
980  .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
981  .init_dict = init_dict,
982  .uninit = uninit,
983  .query_formats = query_formats,
984  .priv_size = sizeof(ScaleContext),
985  .priv_class = &scale_class,
986  .inputs = avfilter_vf_scale_inputs,
987  .outputs = avfilter_vf_scale_outputs,
989 };
990 
991 static const AVClass scale2ref_class = {
992  .class_name = "scale2ref",
993  .item_name = av_default_item_name,
994  .option = scale_options,
995  .version = LIBAVUTIL_VERSION_INT,
996  .category = AV_CLASS_CATEGORY_FILTER,
997 #if FF_API_CHILD_CLASS_NEXT
998  .child_class_next = child_class_next,
999 #endif
1000  .child_class_iterate = child_class_iterate,
1001 };
1002 
1004  {
1005  .name = "default",
1006  .type = AVMEDIA_TYPE_VIDEO,
1007  .filter_frame = filter_frame,
1008  },
1009  {
1010  .name = "ref",
1011  .type = AVMEDIA_TYPE_VIDEO,
1012  .filter_frame = filter_frame_ref,
1013  },
1014  { NULL }
1015 };
1016 
1018  {
1019  .name = "default",
1020  .type = AVMEDIA_TYPE_VIDEO,
1021  .config_props = config_props,
1022  .request_frame= request_frame,
1023  },
1024  {
1025  .name = "ref",
1026  .type = AVMEDIA_TYPE_VIDEO,
1027  .config_props = config_props_ref,
1028  .request_frame= request_frame_ref,
1029  },
1030  { NULL }
1031 };
1032 
1033 AVFilter ff_vf_scale2ref = {
1034  .name = "scale2ref",
1035  .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
1036  .init_dict = init_dict,
1037  .uninit = uninit,
1038  .query_formats = query_formats,
1039  .priv_size = sizeof(ScaleContext),
1040  .priv_class = &scale2ref_class,
1041  .inputs = avfilter_vf_scale2ref_inputs,
1042  .outputs = avfilter_vf_scale2ref_outputs,
1044 };
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
#define NULL
Definition: coverity.c:32
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
Definition: vf_scale.c:273
static enum AVPixelFormat pix_fmt
#define FLAGS
Definition: vf_scale.c:898
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
int in_h_chr_pos
Definition: vf_scale.c:146
AVOption.
Definition: opt.h:248
int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str)
Parse str and put in width_ptr and height_ptr the detected values.
Definition: parseutils.c:148
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:579
const char * desc
Definition: libsvtav1.c:79
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Main libavfilter public API header.
AVExpr * w_pexpr
Definition: vf_scale.c:132
int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation)
Definition: utils.c:1002
#define TS2T(ts, tb)
Definition: internal.h:209
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:518
Definition: vf_scale.c:86
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
Definition: vf_scale.c:626
int num
Numerator.
Definition: rational.h:59
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
int in_v_chr_pos
Definition: vf_scale.c:147
int out_h_chr_pos
Definition: vf_scale.c:144
int eval_mode
expression evaluation mode
Definition: vf_scale.c:154
int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt)
Definition: utils.c:289
int force_original_aspect_ratio
Definition: vf_scale.c:149
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:95
Definition: vf_scale.c:85
int vsub
chroma subsampling
Definition: vf_scale.c:124
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
int output_is_pal
set to 1 if the output format is paletted
Definition: vf_scale.c:127
functionally identical to above
Definition: pixfmt.h:520
const char * name
Pad name.
Definition: internal.h:60
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:347
char * w_expr
width expression string
Definition: vf_scale.c:130
static int query_formats(AVFilterContext *ctx)
Definition: vf_scale.c:344
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1091
uint8_t
#define av_cold
Definition: attributes.h:88
av_warn_unused_result int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter)
Initialize the swscaler context sws_context.
Definition: utils.c:1174
char * h_expr
height expression string
Definition: vf_scale.c:131
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVOptions.
static const AVClass * child_class_iterate(void **iter)
Definition: vf_scale.c:890
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:512
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:176
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16.
Definition: pixfmt.h:521
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:401
Definition: eval.c:157
AVFilter ff_vf_scale2ref
Definition: vf_scale.c:158
static const int * parse_yuv_type(const char *s, enum AVColorSpace colorspace)
Definition: vf_scale.c:382
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static const AVClass scale_class
Definition: vf_scale.c:948
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:455
external API header
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
Definition: vf_scale.c:650
#define av_log(a,...)
static const uint16_t table[]
Definition: prosumer.c:206
A filter pad used for either input or output.
Definition: internal.h:54
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:308
int width
Definition: frame.h:366
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
static const AVClass scale2ref_class
Definition: vf_scale.c:991
static const AVOption scale_options[]
Definition: vf_scale.c:901
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
#define sws_isSupportedOutput(x)
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
void * priv
private data for use by the filter
Definition: avfilter.h:354
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:552
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:443
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:586
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:563
simple assert() macros that are a bit more flexible than ISO C assert().
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:333
#define fail()
Definition: checkasm.h:123
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: vf_scale.c:858
static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
Definition: vf_scale.c:220
static float mul(float src0, float src1)
int w
New dimensions.
Definition: vf_scale.c:119
static int config_props(AVFilterLink *outlink)
Definition: vf_scale.c:481
var_name
Definition: aeval.c:46
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1660
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
char * out_color_matrix
Definition: vf_scale.c:139
enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc)
Definition: pixdesc.c:2592
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_scale.c:331
#define NAN
Definition: mathematics.h:64
static const AVFilterPad avfilter_vf_scale_outputs[]
Definition: vf_scale.c:969
#define FFMIN(a, b)
Definition: common.h:96
double param[2]
sws flags
Definition: vf_scale.c:122
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:467
static int request_frame(AVFilterLink *outlink)
Definition: vf_scale.c:616
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:523
AVFormatContext * ctx
Definition: movenc.c:48
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2332
#define s(width, name)
Definition: cbs_vp9.c:257
static int check_exprs(AVFilterContext *ctx)
Definition: vf_scale.c:162
int interlaced
Definition: vf_scale.c:128
const int * sws_getCoefficients(int colorspace)
Return a pointer to yuv<->rgb coefficients for the given colorspace suitable for sws_setColorspaceDet...
Definition: yuv2rgb.c:63
#define sws_isSupportedInput(x)
int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation)
Definition: utils.c:867
char * flags_str
Definition: vf_scale.c:136
static int scale_eval_dimensions(AVFilterContext *ctx)
Definition: vf_scale.c:406
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:517
EvalMode
Definition: af_volume.h:39
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:538
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:381
static int config_props_ref(AVFilterLink *outlink)
Definition: vf_scale.c:603
static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
Definition: vf_scale.c:827
AVExpr * h_pexpr
Definition: vf_scale.c:133
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:83
#define OFFSET(x)
Definition: vf_scale.c:897
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
AVFilter ff_vf_scale
Definition: vf_scale.c:978
#define TFLAGS
Definition: vf_scale.c:899
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
struct SwsContext * sws
software scaler context
Definition: vf_scale.c:109
static int request_frame_ref(AVFilterLink *outlink)
Definition: vf_scale.c:621
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
static const AVFilterPad avfilter_vf_scale_inputs[]
Definition: vf_scale.c:960
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:396
static const AVFilterPad avfilter_vf_scale2ref_outputs[]
Definition: vf_scale.c:1017
AVDictionary * opts
Definition: vf_scale.c:111
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don&#39;t need to export the SwsContext.
Definition: swscale.c:744
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:145
Rational number (pair of numerator and denominator).
Definition: rational.h:58
#define isnan(x)
Definition: libm.h:340
const char * name
Filter name.
Definition: avfilter.h:149
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
#define snprintf
Definition: snprintf.h:34
int input_is_pal
set to 1 if the input format is paletted
Definition: vf_scale.c:126
static const char *const var_names[]
Definition: vf_scale.c:45
unsigned int flags
Definition: vf_scale.c:121
misc parsing utilities
struct SwsContext * sws_alloc_context(void)
Allocate an empty SwsContext.
Definition: utils.c:1088
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:351
int ff_scale_adjust_dimensions(AVFilterLink *inlink, int *ret_w, int *ret_h, int force_original_aspect_ratio, int force_divisible_by)
Transform evaluated width and height obtained from ff_scale_eval_dimensions into actual target width ...
Definition: scale_eval.c:113
int out_v_chr_pos
Definition: vf_scale.c:145
struct SwsContext * isws[2]
software scaler context for interlaced material
Definition: vf_scale.c:110
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_scale.c:813
int in_range
Definition: vf_scale.c:141
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:537
int
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
double var_values[VARS_NB]
Definition: vf_scale.c:134
int out_range
Definition: vf_scale.c:142
#define AV_OPT_SEARCH_FAKE_OBJ
The obj passed to av_opt_find() is fake – only a double pointer to AVClass instead of a required poi...
Definition: opt.h:569
char * size_str
Definition: vf_scale.c:120
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
#define SWS_PARAM_DEFAULT
Definition: swscale.h:73
int slice_y
top of current output slice
Definition: vf_scale.c:125
int av_expr_count_vars(AVExpr *e, unsigned *counter, int size)
Track the presence of variables and their number of occurrences in a parsed expression.
Definition: eval.c:756
Definition: vf_scale.c:78
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2039
#define FF_PSEUDOPAL
Definition: internal.h:335
#define av_free(p)
char * value
Definition: dict.h:87
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
char * in_color_matrix
Definition: vf_scale.c:138
int nb_slices
Definition: vf_scale.c:152
A list of supported formats for one end of a filter link.
Definition: formats.h:65
An instance of a filter.
Definition: avfilter.h:339
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int height
Definition: frame.h:366
FILE * out
Definition: movenc.c:54
#define av_freep(p)
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:407
formats
Definition: signature.h:48
#define FFSWAP(type, a, b)
Definition: common.h:99
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
internal API functions
int force_divisible_by
Definition: vf_scale.c:150
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:465
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:342
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:671
int i
Definition: input.c:407
static const AVFilterPad avfilter_vf_scale2ref_inputs[]
Definition: vf_scale.c:1003
simple arithmetic expression evaluator
const AVPixFmtDescriptor * av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev)
Iterate over all pixel format descriptors known to libavutil.
Definition: pixdesc.c:2580