FFmpeg
vf_scale.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2007 Bobby Bingham
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * scale video filter
24  */
25 
26 #include <stdio.h>
27 #include <string.h>
28 
29 #include "avfilter.h"
30 #include "formats.h"
31 #include "internal.h"
32 #include "scale_eval.h"
33 #include "video.h"
34 #include "libavutil/avstring.h"
35 #include "libavutil/eval.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/opt.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/imgutils.h"
42 #include "libavutil/avassert.h"
43 #include "libswscale/swscale.h"
44 
45 static const char *const var_names[] = {
46  "in_w", "iw",
47  "in_h", "ih",
48  "out_w", "ow",
49  "out_h", "oh",
50  "a",
51  "sar",
52  "dar",
53  "hsub",
54  "vsub",
55  "ohsub",
56  "ovsub",
57  "n",
58  "t",
59  "pos",
60  "main_w",
61  "main_h",
62  "main_a",
63  "main_sar",
64  "main_dar", "mdar",
65  "main_hsub",
66  "main_vsub",
67  "main_n",
68  "main_t",
69  "main_pos",
70  NULL
71 };
72 
73 enum var_name {
99 };
100 
101 enum EvalMode {
105 };
106 
107 typedef struct ScaleContext {
108  const AVClass *class;
109  struct SwsContext *sws; ///< software scaler context
110  struct SwsContext *isws[2]; ///< software scaler context for interlaced material
112 
113  /**
114  * New dimensions. Special values are:
115  * 0 = original width/height
116  * -1 = keep original aspect
117  * -N = try to keep aspect but make sure it is divisible by N
118  */
119  int w, h;
120  char *size_str;
121  unsigned int flags; ///sws flags
122  double param[2]; // sws params
123 
124  int hsub, vsub; ///< chroma subsampling
125  int slice_y; ///< top of current output slice
126  int input_is_pal; ///< set to 1 if the input format is paletted
127  int output_is_pal; ///< set to 1 if the output format is paletted
129 
130  char *w_expr; ///< width expression string
131  char *h_expr; ///< height expression string
135 
136  char *flags_str;
137 
140 
141  int in_range;
143 
148 
151 
153 
154  int eval_mode; ///< expression evaluation mode
155 
156 } ScaleContext;
157 
159 
160 static int config_props(AVFilterLink *outlink);
161 
163 {
164  ScaleContext *scale = ctx->priv;
165  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
166 
167  if (!scale->w_pexpr && !scale->h_pexpr)
168  return AVERROR(EINVAL);
169 
170  if (scale->w_pexpr)
171  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
172  if (scale->h_pexpr)
173  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
174 
175  if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
176  av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
177  return AVERROR(EINVAL);
178  }
179 
180  if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
181  av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
182  return AVERROR(EINVAL);
183  }
184 
185  if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
186  (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
187  av_log(ctx, AV_LOG_ERROR, "Circular expressions invalid for width '%s' and height '%s'.\n", scale->w_expr, scale->h_expr);
188  return AVERROR(EINVAL);
189  }
190 
191  if (ctx->filter != &ff_vf_scale2ref &&
192  (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] ||
193  vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] ||
194  vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] ||
195  vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] ||
196  vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] ||
197  vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] ||
198  vars_w[VAR_S2R_MAIN_HSUB] || vars_h[VAR_S2R_MAIN_HSUB] ||
199  vars_w[VAR_S2R_MAIN_VSUB] || vars_h[VAR_S2R_MAIN_VSUB] ||
200  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
201  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
202  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
203  av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref variables are not valid in scale filter.\n");
204  return AVERROR(EINVAL);
205  }
206 
207  if (scale->eval_mode == EVAL_MODE_INIT &&
208  (vars_w[VAR_N] || vars_h[VAR_N] ||
209  vars_w[VAR_T] || vars_h[VAR_T] ||
210  vars_w[VAR_POS] || vars_h[VAR_POS] ||
211  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
212  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] ||
213  vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) {
214  av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', 'pos' are not valid in init eval_mode.\n");
215  return AVERROR(EINVAL);
216  }
217 
218  return 0;
219 }
220 
221 static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
222 {
/* (Re)parse the width or height expression option named 'var' from 'args'.
 * The previous expression (both the option string and the parsed AVExpr)
 * is saved first so it can be restored if the new expression fails to
 * parse, fails validation, or breaks reconfiguration.
 * Returns 0 on success, a negative AVERROR code on failure (in which
 * case the previous state is restored). */
223  ScaleContext *scale = ctx->priv;
224  int ret, is_inited = 0;
225  char *old_str_expr = NULL;
226  AVExpr *old_pexpr = NULL;
227 
/* Save the current option string and install the new one so a failed
 * parse below can be reverted via av_opt_set(). */
228  if (str_expr) {
229  old_str_expr = av_strdup(str_expr);
230  if (!old_str_expr)
231  return AVERROR(ENOMEM);
232  av_opt_set(scale, var, args, 0);
233  }
234 
/* A non-NULL parsed expression means the filter was already configured
 * once; remember that so config_props() is re-run further down. */
235  if (*pexpr_ptr) {
236  old_pexpr = *pexpr_ptr;
237  *pexpr_ptr = NULL;
238  is_inited = 1;
239  }
240 
241  ret = av_expr_parse(pexpr_ptr, args, var_names,
242  NULL, NULL, NULL, NULL, 0, ctx);
243  if (ret < 0) {
244  av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var, args);
245  goto revert;
246  }
247 
/* Reject self-referencing, circular, or mode-invalid variable usage. */
248  ret = check_exprs(ctx);
249  if (ret < 0)
250  goto revert;
251 
/* If the filter was already configured, apply the new expression now. */
252  if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
253  goto revert;
254 
/* Success: the old expression and string are no longer needed. */
255  av_expr_free(old_pexpr);
256  old_pexpr = NULL;
257  av_freep(&old_str_expr);
258 
259  return 0;
260 
/* Failure path: drop the new expression and restore the previous option
 * string and parsed expression, leaving the filter state unchanged. */
261 revert:
262  av_expr_free(*pexpr_ptr);
263  *pexpr_ptr = NULL;
264  if (old_str_expr) {
265  av_opt_set(scale, var, old_str_expr, 0);
266  av_free(old_str_expr);
267  }
268  if (old_pexpr)
269  *pexpr_ptr = old_pexpr;
270 
271  return ret;
272 }
273 
275 {
276  ScaleContext *scale = ctx->priv;
277  int ret;
278 
279  if (scale->size_str && (scale->w_expr || scale->h_expr)) {
280  av_log(ctx, AV_LOG_ERROR,
281  "Size and width/height expressions cannot be set at the same time.\n");
282  return AVERROR(EINVAL);
283  }
284 
285  if (scale->w_expr && !scale->h_expr)
286  FFSWAP(char *, scale->w_expr, scale->size_str);
287 
288  if (scale->size_str) {
289  char buf[32];
290  if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
291  av_log(ctx, AV_LOG_ERROR,
292  "Invalid size '%s'\n", scale->size_str);
293  return ret;
294  }
295  snprintf(buf, sizeof(buf)-1, "%d", scale->w);
296  av_opt_set(scale, "w", buf, 0);
297  snprintf(buf, sizeof(buf)-1, "%d", scale->h);
298  av_opt_set(scale, "h", buf, 0);
299  }
300  if (!scale->w_expr)
301  av_opt_set(scale, "w", "iw", 0);
302  if (!scale->h_expr)
303  av_opt_set(scale, "h", "ih", 0);
304 
305  ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
306  if (ret < 0)
307  return ret;
308 
309  ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
310  if (ret < 0)
311  return ret;
312 
313  av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
314  scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);
315 
316  scale->flags = 0;
317 
318  if (scale->flags_str) {
319  const AVClass *class = sws_get_class();
320  const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
322  int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
323  if (ret < 0)
324  return ret;
325  }
326  scale->opts = *opts;
327  *opts = NULL;
328 
329  return 0;
330 }
331 
333 {
334  ScaleContext *scale = ctx->priv;
335  av_expr_free(scale->w_pexpr);
336  av_expr_free(scale->h_pexpr);
337  scale->w_pexpr = scale->h_pexpr = NULL;
338  sws_freeContext(scale->sws);
339  sws_freeContext(scale->isws[0]);
340  sws_freeContext(scale->isws[1]);
341  scale->sws = NULL;
342  av_dict_free(&scale->opts);
343 }
344 
346 {
348  enum AVPixelFormat pix_fmt;
349  int ret;
350 
351  if (ctx->inputs[0]) {
352  const AVPixFmtDescriptor *desc = NULL;
353  formats = NULL;
354  while ((desc = av_pix_fmt_desc_next(desc))) {
355  pix_fmt = av_pix_fmt_desc_get_id(desc);
356  if ((sws_isSupportedInput(pix_fmt) ||
358  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
359  return ret;
360  }
361  }
362  if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->out_formats)) < 0)
363  return ret;
364  }
365  if (ctx->outputs[0]) {
366  const AVPixFmtDescriptor *desc = NULL;
367  formats = NULL;
368  while ((desc = av_pix_fmt_desc_next(desc))) {
369  pix_fmt = av_pix_fmt_desc_get_id(desc);
370  if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
372  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
373  return ret;
374  }
375  }
376  if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->in_formats)) < 0)
377  return ret;
378  }
379 
380  return 0;
381 }
382 
383 static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
384 {
385  if (!s)
386  s = "bt601";
387 
388  if (s && strstr(s, "bt709")) {
389  colorspace = AVCOL_SPC_BT709;
390  } else if (s && strstr(s, "fcc")) {
391  colorspace = AVCOL_SPC_FCC;
392  } else if (s && strstr(s, "smpte240m")) {
393  colorspace = AVCOL_SPC_SMPTE240M;
394  } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
395  colorspace = AVCOL_SPC_BT470BG;
396  } else if (s && strstr(s, "bt2020")) {
397  colorspace = AVCOL_SPC_BT2020_NCL;
398  }
399 
400  if (colorspace < 1 || colorspace > 10 || colorspace == 8) {
401  colorspace = AVCOL_SPC_BT470BG;
402  }
403 
404  return sws_getCoefficients(colorspace);
405 }
406 
408 {
409  ScaleContext *scale = ctx->priv;
410  const char scale2ref = ctx->filter == &ff_vf_scale2ref;
411  const AVFilterLink *inlink = scale2ref ? ctx->inputs[1] : ctx->inputs[0];
412  const AVFilterLink *outlink = ctx->outputs[0];
414  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
415  char *expr;
416  int eval_w, eval_h;
417  int ret;
418  double res;
419  const AVPixFmtDescriptor *main_desc;
420  const AVFilterLink *main_link;
421 
422  if (scale2ref) {
423  main_link = ctx->inputs[0];
424  main_desc = av_pix_fmt_desc_get(main_link->format);
425  }
426 
427  scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w;
428  scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h;
429  scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
430  scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
431  scale->var_values[VAR_A] = (double) inlink->w / inlink->h;
432  scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
433  (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
434  scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
435  scale->var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
436  scale->var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
437  scale->var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
438  scale->var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;
439 
440  if (scale2ref) {
441  scale->var_values[VAR_S2R_MAIN_W] = main_link->w;
442  scale->var_values[VAR_S2R_MAIN_H] = main_link->h;
443  scale->var_values[VAR_S2R_MAIN_A] = (double) main_link->w / main_link->h;
444  scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
445  (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
448  scale->var_values[VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w;
449  scale->var_values[VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h;
450  }
451 
452  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
453  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;
454 
455  res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
456  if (isnan(res)) {
457  expr = scale->h_expr;
458  ret = AVERROR(EINVAL);
459  goto fail;
460  }
461  eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int) res == 0 ? inlink->h : (int) res;
462 
463  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
464  if (isnan(res)) {
465  expr = scale->w_expr;
466  ret = AVERROR(EINVAL);
467  goto fail;
468  }
469  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;
470 
471  scale->w = eval_w;
472  scale->h = eval_h;
473 
474  return 0;
475 
476 fail:
477  av_log(ctx, AV_LOG_ERROR,
478  "Error when evaluating the expression '%s'.\n", expr);
479  return ret;
480 }
481 
482 static int config_props(AVFilterLink *outlink)
483 {
484  AVFilterContext *ctx = outlink->src;
485  AVFilterLink *inlink0 = outlink->src->inputs[0];
486  AVFilterLink *inlink = ctx->filter == &ff_vf_scale2ref ?
487  outlink->src->inputs[1] :
488  outlink->src->inputs[0];
489  enum AVPixelFormat outfmt = outlink->format;
491  ScaleContext *scale = ctx->priv;
492  int ret;
493 
494  if ((ret = scale_eval_dimensions(ctx)) < 0)
495  goto fail;
496 
497  ff_scale_adjust_dimensions(inlink, &scale->w, &scale->h,
499  scale->force_divisible_by);
500 
501  if (scale->w > INT_MAX ||
502  scale->h > INT_MAX ||
503  (scale->h * inlink->w) > INT_MAX ||
504  (scale->w * inlink->h) > INT_MAX)
505  av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
506 
507  outlink->w = scale->w;
508  outlink->h = scale->h;
509 
510  /* TODO: make algorithm configurable */
511 
512  scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL;
513  if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
516 
517  if (scale->sws)
518  sws_freeContext(scale->sws);
519  if (scale->isws[0])
520  sws_freeContext(scale->isws[0]);
521  if (scale->isws[1])
522  sws_freeContext(scale->isws[1]);
523  scale->isws[0] = scale->isws[1] = scale->sws = NULL;
524  if (inlink0->w == outlink->w &&
525  inlink0->h == outlink->h &&
526  !scale->out_color_matrix &&
527  scale->in_range == scale->out_range &&
528  inlink0->format == outlink->format)
529  ;
530  else {
531  struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
532  int i;
533 
534  for (i = 0; i < 3; i++) {
535  int in_v_chr_pos = scale->in_v_chr_pos, out_v_chr_pos = scale->out_v_chr_pos;
536  struct SwsContext **s = swscs[i];
537  *s = sws_alloc_context();
538  if (!*s)
539  return AVERROR(ENOMEM);
540 
541  av_opt_set_int(*s, "srcw", inlink0 ->w, 0);
542  av_opt_set_int(*s, "srch", inlink0 ->h >> !!i, 0);
543  av_opt_set_int(*s, "src_format", inlink0->format, 0);
544  av_opt_set_int(*s, "dstw", outlink->w, 0);
545  av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
546  av_opt_set_int(*s, "dst_format", outfmt, 0);
547  av_opt_set_int(*s, "sws_flags", scale->flags, 0);
548  av_opt_set_int(*s, "param0", scale->param[0], 0);
549  av_opt_set_int(*s, "param1", scale->param[1], 0);
550  if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
551  av_opt_set_int(*s, "src_range",
552  scale->in_range == AVCOL_RANGE_JPEG, 0);
553  if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
554  av_opt_set_int(*s, "dst_range",
555  scale->out_range == AVCOL_RANGE_JPEG, 0);
556 
557  if (scale->opts) {
558  AVDictionaryEntry *e = NULL;
559  while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
560  if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
561  return ret;
562  }
563  }
564  /* Override YUV420P default settings to have the correct (MPEG-2) chroma positions
565  * MPEG-2 chroma positions are used by convention
566  * XXX: support other 4:2:0 pixel formats */
567  if (inlink0->format == AV_PIX_FMT_YUV420P && scale->in_v_chr_pos == -513) {
568  in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
569  }
570 
571  if (outlink->format == AV_PIX_FMT_YUV420P && scale->out_v_chr_pos == -513) {
572  out_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
573  }
574 
575  av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
576  av_opt_set_int(*s, "src_v_chr_pos", in_v_chr_pos, 0);
577  av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
578  av_opt_set_int(*s, "dst_v_chr_pos", out_v_chr_pos, 0);
579 
580  if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
581  return ret;
582  if (!scale->interlaced)
583  break;
584  }
585  }
586 
587  if (inlink0->sample_aspect_ratio.num){
588  outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink0->w, outlink->w * inlink0->h}, inlink0->sample_aspect_ratio);
589  } else
590  outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
591 
592  av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
593  inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
595  outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
596  outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
597  scale->flags);
598  return 0;
599 
600 fail:
601  return ret;
602 }
603 
604 static int config_props_ref(AVFilterLink *outlink)
605 {
606  AVFilterLink *inlink = outlink->src->inputs[1];
607 
608  outlink->w = inlink->w;
609  outlink->h = inlink->h;
610  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
611  outlink->time_base = inlink->time_base;
612  outlink->frame_rate = inlink->frame_rate;
613 
614  return 0;
615 }
616 
617 static int request_frame(AVFilterLink *outlink)
618 {
619  return ff_request_frame(outlink->src->inputs[0]);
620 }
621 
622 static int request_frame_ref(AVFilterLink *outlink)
623 {
624  return ff_request_frame(outlink->src->inputs[1]);
625 }
626 
627 static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
628 {
/* Scale one slice of cur_pic into out_buf with the given swscale context.
 * 'y' and 'h' select the input slice; 'mul' is the line-stride multiplier
 * (2 when processing one field of interlaced material, 1 otherwise) and
 * 'field' selects the top (0) or bottom (1) field line offset.
 * Returns the sws_scale() result (output slice height or an error). */
629  ScaleContext *scale = link->dst->priv;
630  const uint8_t *in[4];
631  uint8_t *out[4];
632  int in_stride[4],out_stride[4];
633  int i;
634 
635  for (i=0; i<4; i++) {
/* Planes 1 and 2 are chroma: apply the vertical subsampling shift. */
636  int vsub= ((i+1)&2) ? scale->vsub : 0;
/* Strides are doubled (mul==2) so each field is read/written as if it
 * were a standalone half-height picture. */
637  in_stride[i] = cur_pic->linesize[i] * mul;
638  out_stride[i] = out_buf->linesize[i] * mul;
639  in[i] = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i];
640  out[i] = out_buf->data[i] + field * out_buf->linesize[i];
641  }
/* For paletted formats data[1] holds the palette, not pixel lines, so it
 * must be passed through without the line offset applied above. */
642  if (scale->input_is_pal)
643  in[1] = cur_pic->data[1];
644  if (scale->output_is_pal)
645  out[1] = out_buf->data[1];
646 
647  return sws_scale(sws, in, in_stride, y/mul, h,
648  out,out_stride);
649 }
650 
651 #define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
652 
653 static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
654 {
655  AVFilterContext *ctx = link->dst;
656  ScaleContext *scale = ctx->priv;
657  AVFilterLink *outlink = ctx->outputs[0];
658  AVFrame *out;
660  char buf[32];
661  int in_range;
662  int frame_changed;
663 
664  *frame_out = NULL;
665  if (in->colorspace == AVCOL_SPC_YCGCO)
666  av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
667 
668  frame_changed = in->width != link->w ||
669  in->height != link->h ||
670  in->format != link->format ||
673 
674  if (scale->eval_mode == EVAL_MODE_FRAME || frame_changed) {
675  int ret;
676  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
677 
678  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
679  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
680 
681  if (scale->eval_mode == EVAL_MODE_FRAME &&
682  !frame_changed &&
683  ctx->filter != &ff_vf_scale2ref &&
684  !(vars_w[VAR_N] || vars_w[VAR_T] || vars_w[VAR_POS]) &&
685  !(vars_h[VAR_N] || vars_h[VAR_T] || vars_h[VAR_POS]) &&
686  scale->w && scale->h)
687  goto scale;
688 
689  if (scale->eval_mode == EVAL_MODE_INIT) {
690  snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
691  av_opt_set(scale, "w", buf, 0);
692  snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
693  av_opt_set(scale, "h", buf, 0);
694 
695  ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
696  if (ret < 0)
697  return ret;
698 
699  ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
700  if (ret < 0)
701  return ret;
702  }
703 
704  if (ctx->filter == &ff_vf_scale2ref) {
705  scale->var_values[VAR_S2R_MAIN_N] = link->frame_count_out;
706  scale->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
707  scale->var_values[VAR_S2R_MAIN_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
708  } else {
709  scale->var_values[VAR_N] = link->frame_count_out;
710  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
711  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
712  }
713 
714  link->dst->inputs[0]->format = in->format;
715  link->dst->inputs[0]->w = in->width;
716  link->dst->inputs[0]->h = in->height;
717 
720 
721  if ((ret = config_props(outlink)) < 0)
722  return ret;
723  }
724 
725 scale:
726  if (!scale->sws) {
727  *frame_out = in;
728  return 0;
729  }
730 
731  scale->hsub = desc->log2_chroma_w;
732  scale->vsub = desc->log2_chroma_h;
733 
734  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
735  if (!out) {
736  av_frame_free(&in);
737  return AVERROR(ENOMEM);
738  }
739  *frame_out = out;
740 
741  av_frame_copy_props(out, in);
742  out->width = outlink->w;
743  out->height = outlink->h;
744 
745  if (scale->output_is_pal)
746  avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
747 
748  in_range = in->color_range;
749 
750  if ( scale->in_color_matrix
751  || scale->out_color_matrix
752  || scale-> in_range != AVCOL_RANGE_UNSPECIFIED
753  || in_range != AVCOL_RANGE_UNSPECIFIED
754  || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
755  int in_full, out_full, brightness, contrast, saturation;
756  const int *inv_table, *table;
757 
758  sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
759  (int **)&table, &out_full,
760  &brightness, &contrast, &saturation);
761 
762  if (scale->in_color_matrix)
763  inv_table = parse_yuv_type(scale->in_color_matrix, in->colorspace);
764  if (scale->out_color_matrix)
766  else if (scale->in_color_matrix)
767  table = inv_table;
768 
769  if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
770  in_full = (scale-> in_range == AVCOL_RANGE_JPEG);
771  else if (in_range != AVCOL_RANGE_UNSPECIFIED)
772  in_full = (in_range == AVCOL_RANGE_JPEG);
773  if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
774  out_full = (scale->out_range == AVCOL_RANGE_JPEG);
775 
776  sws_setColorspaceDetails(scale->sws, inv_table, in_full,
777  table, out_full,
778  brightness, contrast, saturation);
779  if (scale->isws[0])
780  sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
781  table, out_full,
782  brightness, contrast, saturation);
783  if (scale->isws[1])
784  sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
785  table, out_full,
786  brightness, contrast, saturation);
787 
788  out->color_range = out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
789  }
790 
792  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
793  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
794  INT_MAX);
795 
796  if (scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)) {
797  scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
798  scale_slice(link, out, in, scale->isws[1], 0, link->h /2, 2, 1);
799  } else if (scale->nb_slices) {
800  int i, slice_h, slice_start, slice_end = 0;
801  const int nb_slices = FFMIN(scale->nb_slices, link->h);
802  for (i = 0; i < nb_slices; i++) {
803  slice_start = slice_end;
804  slice_end = (link->h * (i+1)) / nb_slices;
805  slice_h = slice_end - slice_start;
806  scale_slice(link, out, in, scale->sws, slice_start, slice_h, 1, 0);
807  }
808  } else {
809  scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
810  }
811 
812  av_frame_free(&in);
813  return 0;
814 }
815 
817 {
818  AVFilterContext *ctx = link->dst;
819  AVFilterLink *outlink = ctx->outputs[0];
820  AVFrame *out;
821  int ret;
822 
823  ret = scale_frame(link, in, &out);
824  if (out)
825  return ff_filter_frame(outlink, out);
826 
827  return ret;
828 }
829 
831 {
832  ScaleContext *scale = link->dst->priv;
833  AVFilterLink *outlink = link->dst->outputs[1];
834  int frame_changed;
835 
836  frame_changed = in->width != link->w ||
837  in->height != link->h ||
838  in->format != link->format ||
841 
842  if (frame_changed) {
843  link->format = in->format;
844  link->w = in->width;
845  link->h = in->height;
848 
849  config_props_ref(outlink);
850  }
851 
852  if (scale->eval_mode == EVAL_MODE_FRAME) {
853  scale->var_values[VAR_N] = link->frame_count_out;
854  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
855  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
856  }
857 
858  return ff_filter_frame(outlink, in);
859 }
860 
861 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
862  char *res, int res_len, int flags)
863 {
864  ScaleContext *scale = ctx->priv;
865  char *str_expr;
866  AVExpr **pexpr_ptr;
867  int ret, w, h;
868 
869  w = !strcmp(cmd, "width") || !strcmp(cmd, "w");
870  h = !strcmp(cmd, "height") || !strcmp(cmd, "h");
871 
872  if (w || h) {
873  str_expr = w ? scale->w_expr : scale->h_expr;
874  pexpr_ptr = w ? &scale->w_pexpr : &scale->h_pexpr;
875 
876  ret = scale_parse_expr(ctx, str_expr, pexpr_ptr, cmd, args);
877  } else
878  ret = AVERROR(ENOSYS);
879 
880  if (ret < 0)
881  av_log(ctx, AV_LOG_ERROR, "Failed to process command. Continuing with existing parameters.\n");
882 
883  return ret;
884 }
885 
886 static const AVClass *child_class_next(const AVClass *prev)
887 {
888  return prev ? NULL : sws_get_class();
889 }
890 
891 #define OFFSET(x) offsetof(ScaleContext, x)
892 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
893 #define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
894 
895 static const AVOption scale_options[] = {
/* Output size: expressions for width/height (runtime-settable, TFLAGS),
 * or a single WxH size string. */
896  { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
897  { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
898  { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
899  { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS },
/* libswscale flags and interlaced-scaling mode (-1 = auto-detect). */
900  { "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
901  { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_BOOL, {.i64 = 0 }, -1, 1, FLAGS },
902  { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
903  { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
/* YCbCr matrix selection for input and output (see parse_yuv_type()). */
904  { "in_color_matrix", "set input YCbCr type", OFFSET(in_color_matrix), AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS, "color" },
905  { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS, "color"},
906  { "auto", NULL, 0, AV_OPT_TYPE_CONST, { .str = "auto" }, 0, 0, FLAGS, "color" },
907  { "bt601", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt601" }, 0, 0, FLAGS, "color" },
908  { "bt470", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt470" }, 0, 0, FLAGS, "color" },
909  { "smpte170m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte170m" }, 0, 0, FLAGS, "color" },
910  { "bt709", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt709" }, 0, 0, FLAGS, "color" },
911  { "fcc", NULL, 0, AV_OPT_TYPE_CONST, { .str = "fcc" }, 0, 0, FLAGS, "color" },
912  { "smpte240m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte240m" }, 0, 0, FLAGS, "color" },
913  { "bt2020", NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt2020" }, 0, 0, FLAGS, "color" },
/* Color range (limited/MPEG vs full/JPEG) for input and output. */
914  { "in_range", "set input color range", OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
915  { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
916  { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
917  { "unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
918  { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
919  { "limited",NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
920  { "jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
921  { "mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
922  { "tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
923  { "pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
/* Chroma sample position overrides in 1/256 luma-grid units;
 * the default -513 means "keep the swscale default". */
924  { "in_v_chr_pos", "input vertical chroma position in luma grid/256" , OFFSET(in_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
925  { "in_h_chr_pos", "input horizontal chroma position in luma grid/256", OFFSET(in_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
926  { "out_v_chr_pos", "output vertical chroma position in luma grid/256" , OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
927  { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
/* Aspect-ratio preservation controls. */
928  { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
929  { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
930  { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
931  { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
932  { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1}, 1, 256, FLAGS },
/* Scaler tuning parameters, slice debugging, and expression eval mode. */
933  { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
934  { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
935  { "nb_slices", "set the number of slices (debug purpose only)", OFFSET(nb_slices), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
936  { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
937  { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
938  { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
939  { NULL }
940 };
941 
/* AVClass for the "scale" filter; child_class_next additionally exposes
 * the libswscale options class so sws options can be set on the filter. */
942 static const AVClass scale_class = {
943  .class_name = "scale",
944  .item_name = av_default_item_name,
945  .option = scale_options,
946  .version = LIBAVUTIL_VERSION_INT,
947  .category = AV_CLASS_CATEGORY_FILTER,
948  .child_class_next = child_class_next,
949 };
950 
952  {
953  .name = "default",
954  .type = AVMEDIA_TYPE_VIDEO,
955  .filter_frame = filter_frame,
956  },
957  { NULL }
958 };
959 
961  {
962  .name = "default",
963  .type = AVMEDIA_TYPE_VIDEO,
964  .config_props = config_props,
965  },
966  { NULL }
967 };
968 
970  .name = "scale",
971  .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
972  .init_dict = init_dict,
973  .uninit = uninit,
974  .query_formats = query_formats,
975  .priv_size = sizeof(ScaleContext),
976  .priv_class = &scale_class,
977  .inputs = avfilter_vf_scale_inputs,
978  .outputs = avfilter_vf_scale_outputs,
980 };
981 
/* AVClass for the "scale2ref" filter; shares scale_options and the
 * swscale child class with the plain scale filter. */
982 static const AVClass scale2ref_class = {
983  .class_name = "scale2ref",
984  .item_name = av_default_item_name,
985  .option = scale_options,
986  .version = LIBAVUTIL_VERSION_INT,
987  .category = AV_CLASS_CATEGORY_FILTER,
988  .child_class_next = child_class_next,
989 };
990 
992  {
993  .name = "default",
994  .type = AVMEDIA_TYPE_VIDEO,
995  .filter_frame = filter_frame,
996  },
997  {
998  .name = "ref",
999  .type = AVMEDIA_TYPE_VIDEO,
1000  .filter_frame = filter_frame_ref,
1001  },
1002  { NULL }
1003 };
1004 
1006  {
1007  .name = "default",
1008  .type = AVMEDIA_TYPE_VIDEO,
1009  .config_props = config_props,
1010  .request_frame= request_frame,
1011  },
1012  {
1013  .name = "ref",
1014  .type = AVMEDIA_TYPE_VIDEO,
1015  .config_props = config_props_ref,
1016  .request_frame= request_frame_ref,
1017  },
1018  { NULL }
1019 };
1020 
1021 AVFilter ff_vf_scale2ref = {
1022  .name = "scale2ref",
1023  .description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
1024  .init_dict = init_dict,
1025  .uninit = uninit,
1026  .query_formats = query_formats,
1027  .priv_size = sizeof(ScaleContext),
1028  .priv_class = &scale2ref_class,
1029  .inputs = avfilter_vf_scale2ref_inputs,
1030  .outputs = avfilter_vf_scale2ref_outputs,
1032 };
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:499
#define NULL
Definition: coverity.c:32
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
Definition: vf_scale.c:274
static enum AVPixelFormat pix_fmt
#define FLAGS
Definition: vf_scale.c:892
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
int in_h_chr_pos
Definition: vf_scale.c:146
AVOption.
Definition: opt.h:246
int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str)
Parse str and put in width_ptr and height_ptr the detected values.
Definition: parseutils.c:148
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:566
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Main libavfilter public API header.
AVExpr * w_pexpr
Definition: vf_scale.c:132
const char * desc
Definition: nvenc.c:68
int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation)
Definition: utils.c:993
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:503
Definition: vf_scale.c:86
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
Definition: vf_scale.c:627
int num
Numerator.
Definition: rational.h:59
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
int in_v_chr_pos
Definition: vf_scale.c:147
int out_h_chr_pos
Definition: vf_scale.c:144
int eval_mode
expression evaluation mode
Definition: vf_scale.c:154
int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt)
Definition: utils.c:283
int force_original_aspect_ratio
Definition: vf_scale.c:149
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:95
Definition: vf_scale.c:85
int vsub
chroma subsampling
Definition: vf_scale.c:124
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
int output_is_pal
set to 1 if the output format is paletted
Definition: vf_scale.c:127
functionally identical to above
Definition: pixfmt.h:505
const char * name
Pad name.
Definition: internal.h:60
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
char * w_expr
width expression string
Definition: vf_scale.c:130
static int query_formats(AVFilterContext *ctx)
Definition: vf_scale.c:345
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1075
uint8_t
#define av_cold
Definition: attributes.h:82
av_warn_unused_result int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter)
Initialize the swscaler context sws_context.
Definition: utils.c:1165
char * h_expr
height expression string
Definition: vf_scale.c:131
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVOptions.
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:497
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:152
Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16.
Definition: pixfmt.h:506
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
Definition: eval.c:157
AVFilter ff_vf_scale2ref
Definition: vf_scale.c:158
static const int * parse_yuv_type(const char *s, enum AVColorSpace colorspace)
Definition: vf_scale.c:383
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static const AVClass scale_class
Definition: vf_scale.c:942
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
external API header
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
Definition: vf_scale.c:653
#define av_log(a,...)
static const uint16_t table[]
Definition: prosumer.c:206
A filter pad used for either input or output.
Definition: internal.h:54
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:308
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
static const AVClass scale2ref_class
Definition: vf_scale.c:982
static const AVOption scale_options[]
Definition: vf_scale.c:895
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define sws_isSupportedOutput(x)
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:539
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:588
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:550
simple assert() macros that are a bit more flexible than ISO C assert().
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:336
#define fail()
Definition: checkasm.h:122
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: vf_scale.c:861
static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
Definition: vf_scale.c:221
int w
New dimensions.
Definition: vf_scale.c:119
static int config_props(AVFilterLink *outlink)
Definition: vf_scale.c:482
var_name
Definition: aeval.c:46
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1662
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
char * out_color_matrix
Definition: vf_scale.c:139
enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc)
Definition: pixdesc.c:2541
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_scale.c:332
#define NAN
Definition: mathematics.h:64
static const AVFilterPad avfilter_vf_scale_outputs[]
Definition: vf_scale.c:960
#define FFMIN(a, b)
Definition: common.h:96
double param[2]
sws flags
Definition: vf_scale.c:122
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:439
static int request_frame(AVFilterLink *outlink)
Definition: vf_scale.c:617
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:508
static const AVClass * child_class_next(const AVClass *prev)
Definition: vf_scale.c:886
AVFormatContext * ctx
Definition: movenc.c:48
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2311
#define s(width, name)
Definition: cbs_vp9.c:257
static int check_exprs(AVFilterContext *ctx)
Definition: vf_scale.c:162
int interlaced
Definition: vf_scale.c:128
const int * sws_getCoefficients(int colorspace)
Return a pointer to yuv<->rgb coefficients for the given colorspace suitable for sws_setColorspaceDet...
Definition: yuv2rgb.c:63
#define sws_isSupportedInput(x)
int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation)
Definition: utils.c:858
char * flags_str
Definition: vf_scale.c:136
static int scale_eval_dimensions(AVFilterContext *ctx)
Definition: vf_scale.c:407
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:502
EvalMode
Definition: af_volume.h:39
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:523
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
static int config_props_ref(AVFilterLink *outlink)
Definition: vf_scale.c:604
static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
Definition: vf_scale.c:830
AVExpr * h_pexpr
Definition: vf_scale.c:133
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:83
#define OFFSET(x)
Definition: vf_scale.c:891
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
AVFilter ff_vf_scale
Definition: vf_scale.c:969
#define TFLAGS
Definition: vf_scale.c:893
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
struct SwsContext * sws
software scaler context
Definition: vf_scale.c:109
static int request_frame_ref(AVFilterLink *outlink)
Definition: vf_scale.c:622
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
static const AVFilterPad avfilter_vf_scale_inputs[]
Definition: vf_scale.c:951
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:383
static const AVFilterPad avfilter_vf_scale2ref_outputs[]
Definition: vf_scale.c:1005
AVDictionary * opts
Definition: vf_scale.c:111
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:753
void * buf
Definition: avisynth_c.h:766
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
Rational number (pair of numerator and denominator).
Definition: rational.h:58
#define isnan(x)
Definition: libm.h:340
const char * name
Filter name.
Definition: avfilter.h:148
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
#define snprintf
Definition: snprintf.h:34
int input_is_pal
set to 1 if the input format is paletted
Definition: vf_scale.c:126
static const char *const var_names[]
Definition: vf_scale.c:45
unsigned int flags
Definition: vf_scale.c:121
misc parsing utilities
struct SwsContext * sws_alloc_context(void)
Allocate an empty SwsContext.
Definition: utils.c:1079
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
int ff_scale_adjust_dimensions(AVFilterLink *inlink, int *ret_w, int *ret_h, int force_original_aspect_ratio, int force_divisible_by)
Transform evaluated width and height obtained from ff_scale_eval_dimensions into actual target width ...
Definition: scale_eval.c:113
int out_v_chr_pos
Definition: vf_scale.c:145
struct SwsContext * isws[2]
software scaler context for interlaced material
Definition: vf_scale.c:110
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_scale.c:816
int in_range
Definition: vf_scale.c:141
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:522
int
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
double var_values[VARS_NB]
Definition: vf_scale.c:134
int out_range
Definition: vf_scale.c:142
#define AV_OPT_SEARCH_FAKE_OBJ
The obj passed to av_opt_find() is fake – only a double pointer to AVClass instead of a required poi...
Definition: opt.h:566
char * size_str
Definition: vf_scale.c:120
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
#define SWS_PARAM_DEFAULT
Definition: swscale.h:73
int slice_y
top of current output slice
Definition: vf_scale.c:125
int av_expr_count_vars(AVExpr *e, unsigned *counter, int size)
Track the presence of variables and their number of occurrences in a parsed expression.
Definition: eval.c:756
Definition: vf_scale.c:78
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2035
#define FF_PSEUDOPAL
Definition: internal.h:367
#define av_free(p)
char * value
Definition: dict.h:87
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
char * in_color_matrix
Definition: vf_scale.c:138
int nb_slices
Definition: vf_scale.c:152
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:338
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
#define av_freep(p)
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:407
formats
Definition: signature.h:48
#define FFSWAP(type, a, b)
Definition: common.h:99
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
internal API functions
int force_divisible_by
Definition: vf_scale.c:150
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:467
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:341
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
static const AVFilterPad avfilter_vf_scale2ref_inputs[]
Definition: vf_scale.c:991
simple arithmetic expression evaluator
#define TS2T(ts, tb)
Definition: vf_scale.c:651
const AVPixFmtDescriptor * av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev)
Iterate over all pixel format descriptors known to libavutil.
Definition: pixdesc.c:2529