FFmpeg
vf_perspective.c
/*
 * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/eval.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

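/*
 * Editor's note on the fixed-point layout used below: remapped source
 * coordinates carry SUB_PIXEL_BITS fractional bits (so SUB_PIXELS steps
 * per pixel), and interpolation kernel weights are quantized to
 * COEFF_BITS bits.
 */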
#define SUB_PIXEL_BITS 8
#define SUB_PIXELS     (1 << SUB_PIXEL_BITS)
#define COEFF_BITS     11

#define LINEAR 0
#define CUBIC  1

typedef struct PerspectiveContext {
    const AVClass *class;
    char *expr_str[4][2];
    double ref[4][2];
    int32_t (*pv)[2];
    int32_t coeff[SUB_PIXELS][4];
    int interpolation;
    int linesize[4];
    int height[4];
    int hsub, vsub;
    int nb_planes;
    int sense;
    int eval_mode;

    int (*perspective)(AVFilterContext *ctx,
                       void *arg, int job, int nb_jobs);
} PerspectiveContext;

#define OFFSET(x) offsetof(PerspectiveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

enum PERSPECTIVESense {
    PERSPECTIVE_SENSE_SOURCE      = 0, ///< coordinates give locations in source of corners of destination.
    PERSPECTIVE_SENSE_DESTINATION = 1, ///< coordinates give locations in destination of corners of source.
};

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

static const AVOption perspective_options[] = {
    { "x0", "set top left x coordinate",     OFFSET(expr_str[0][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "y0", "set top left y coordinate",     OFFSET(expr_str[0][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "x1", "set top right x coordinate",    OFFSET(expr_str[1][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
    { "y1", "set top right y coordinate",    OFFSET(expr_str[1][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "x2", "set bottom left x coordinate",  OFFSET(expr_str[2][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "y2", "set bottom left y coordinate",  OFFSET(expr_str[2][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
    { "x3", "set bottom right x coordinate", OFFSET(expr_str[3][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
    { "y3", "set bottom right y coordinate", OFFSET(expr_str[3][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
    { "interpolation", "set interpolation", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=LINEAR}, 0, 1, FLAGS, "interpolation" },
    {      "linear", "", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "interpolation" },
    {      "cubic",  "", 0, AV_OPT_TYPE_CONST, {.i64=CUBIC},  0, 0, FLAGS, "interpolation" },
    { "sense", "specify the sense of the coordinates", OFFSET(sense), AV_OPT_TYPE_INT, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 1, FLAGS, "sense"},
    {       "source", "specify locations in source to send to corners in destination",
                0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 0, FLAGS, "sense"},
    {       "destination", "specify locations in destination to send corners of source",
                0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_DESTINATION}, 0, 0, FLAGS, "sense"},
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
         { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
         { "frame", "eval expressions per-frame",                  0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },

    { NULL }
};

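/*
 * Illustrative use (editor's example, not part of this file): pull the
 * top-left corner of the source 20 pixels inward and re-evaluate the
 * corner expressions on every frame:
 *
 *   ffmpeg -i in.mp4 -vf "perspective=x0=20:y0=20:sense=source:eval=frame" out.mp4
 *
 * W and H may appear in the corner expressions, as in x1=W-20.
 */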
AVFILTER_DEFINE_CLASS(perspective);

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
    };

    return ff_set_common_formats_from_list(ctx, pix_fmts);
}

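/*
 * 4-tap cubic interpolation weight for a sample at signed distance d
 * from the target position; this appears to be the standard Keys-style
 * bicubic kernel with A = -0.60, zero for |d| >= 2.
 */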
static inline double get_coeff(double d)
{
    double coeff, A = -0.60;

    d = fabs(d);

    if (d < 1.0)
        coeff = (1.0 - (A + 3.0) * d * d + (A + 2.0) * d * d * d);
    else if (d < 2.0)
        coeff = (-4.0 * A + 8.0 * A * d - 5.0 * A * d * d + A * d * d * d);
    else
        coeff = 0.0;

    return coeff;
}

static const char *const var_names[] = { "W", "H", "in", "on",   NULL };
enum                                   { VAR_W, VAR_H, VAR_IN, VAR_ON, VAR_VARS_NB };

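/*
 * Evaluate the eight corner expressions and build the per-pixel lookup
 * table s->pv. Each destination pixel (x, y) is mapped to a subpixel
 * source position through the projective transform
 *
 *   u = SUB_PIXELS * (x0*x + x1*y + x2) / (x6*x + x7*y + x8)
 *   v = SUB_PIXELS * (x3*x + x4*y + x5) / (x6*x + x7*y + x8)
 *
 * where x0..x8 are derived from the corner coordinates according to the
 * selected sense.
 */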
static int calc_persp_luts(AVFilterContext *ctx, AVFilterLink *inlink)
{
    PerspectiveContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    double (*ref)[2]      = s->ref;

    double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h,
                                   [VAR_IN] = inlink->frame_count_out + 1,
                                   [VAR_ON] = outlink->frame_count_in + 1 };
    const int h = values[VAR_H];
    const int w = values[VAR_W];
    double x0, x1, x2, x3, x4, x5, x6, x7, x8, q;
    double t0, t1, t2, t3;
    int x, y, i, j, ret;

    for (i = 0; i < 4; i++) {
        for (j = 0; j < 2; j++) {
            if (!s->expr_str[i][j])
                return AVERROR(EINVAL);
            ret = av_expr_parse_and_eval(&s->ref[i][j], s->expr_str[i][j],
                                         var_names, &values[0],
                                         NULL, NULL, NULL, NULL,
                                         0, 0, ctx);
            if (ret < 0)
                return ret;
        }
    }

    switch (s->sense) {
    case PERSPECTIVE_SENSE_SOURCE:
        x6 = ((ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
              (ref[2][1] - ref[3][1]) -
             ( ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
              (ref[2][0] - ref[3][0])) * h;
        x7 = ((ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
              (ref[1][0] - ref[3][0]) -
             ( ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
              (ref[1][1] - ref[3][1])) * w;
        q =  ( ref[1][0] - ref[3][0]) * (ref[2][1] - ref[3][1]) -
             ( ref[2][0] - ref[3][0]) * (ref[1][1] - ref[3][1]);

        x0 = q * (ref[1][0] - ref[0][0]) * h + x6 * ref[1][0];
        x1 = q * (ref[2][0] - ref[0][0]) * w + x7 * ref[2][0];
        x2 = q *  ref[0][0] * w * h;
        x3 = q * (ref[1][1] - ref[0][1]) * h + x6 * ref[1][1];
        x4 = q * (ref[2][1] - ref[0][1]) * w + x7 * ref[2][1];
        x5 = q *  ref[0][1] * w * h;
        x8 = q * w * h;
        break;
    case PERSPECTIVE_SENSE_DESTINATION:
        t0 = ref[0][0] * (ref[3][1] - ref[1][1]) +
             ref[1][0] * (ref[0][1] - ref[3][1]) +
             ref[3][0] * (ref[1][1] - ref[0][1]);
        t1 = ref[1][0] * (ref[2][1] - ref[3][1]) +
             ref[2][0] * (ref[3][1] - ref[1][1]) +
             ref[3][0] * (ref[1][1] - ref[2][1]);
        t2 = ref[0][0] * (ref[3][1] - ref[2][1]) +
             ref[2][0] * (ref[0][1] - ref[3][1]) +
             ref[3][0] * (ref[2][1] - ref[0][1]);
        t3 = ref[0][0] * (ref[1][1] - ref[2][1]) +
             ref[1][0] * (ref[2][1] - ref[0][1]) +
             ref[2][0] * (ref[0][1] - ref[1][1]);

        x0 = t0 * t1 * w * (ref[2][1] - ref[0][1]);
        x1 = t0 * t1 * w * (ref[0][0] - ref[2][0]);
        x2 = t0 * t1 * w * (ref[0][1] * ref[2][0] - ref[0][0] * ref[2][1]);
        x3 = t1 * t2 * h * (ref[1][1] - ref[0][1]);
        x4 = t1 * t2 * h * (ref[0][0] - ref[1][0]);
        x5 = t1 * t2 * h * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]);
        x6 = t1 * t2 * (ref[1][1] - ref[0][1]) +
             t0 * t3 * (ref[2][1] - ref[3][1]);
        x7 = t1 * t2 * (ref[0][0] - ref[1][0]) +
             t0 * t3 * (ref[3][0] - ref[2][0]);
        x8 = t1 * t2 * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]) +
             t0 * t3 * (ref[2][0] * ref[3][1] - ref[2][1] * ref[3][0]);
        break;
    default:
        av_assert0(0);
    }

    for (y = 0; y < h; y++){
        for (x = 0; x < w; x++){
            int u, v;

            u = lrint(SUB_PIXELS * (x0 * x + x1 * y + x2) /
                                   (x6 * x + x7 * y + x8));
            v = lrint(SUB_PIXELS * (x3 * x + x4 * y + x5) /
                                   (x6 * x + x7 * y + x8));

            s->pv[x + y * w][0] = u;
            s->pv[x + y * w][1] = v;
        }
    }

    return 0;
}

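/*
 * Per-link setup: cache chroma subsampling and plane sizes, (re)allocate
 * the coordinate LUT, optionally fill it once (eval=init), and build the
 * SUB_PIXELS x 4 table of fixed-point kernel weights, normalized so that
 * each row sums to 1 << COEFF_BITS.
 */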
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    PerspectiveContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int h = inlink->h;
    int w = inlink->w;
    int i, j, ret;
    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
    if (!s->pv)
        return AVERROR(ENOMEM);

    if (s->eval_mode == EVAL_MODE_INIT) {
        if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
            return ret;
        }
    }

    for (i = 0; i < SUB_PIXELS; i++){
        double d = i / (double)SUB_PIXELS;
        double temp[4];
        double sum = 0;

        for (j = 0; j < 4; j++)
            temp[j] = get_coeff(j - d - 1);

        for (j = 0; j < 4; j++)
            sum += temp[j];

        for (j = 0; j < 4; j++)
            s->coeff[i][j] = lrint((1 << COEFF_BITS) * temp[j] / sum);
    }

    return 0;
}

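/* Per-plane slice description handed to the threaded resamplers. */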
typedef struct ThreadData {
    uint8_t *dst;
    int dst_linesize;
    uint8_t *src;
    int src_linesize;
    int w, h;
    int hsub, vsub;
} ThreadData;

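/*
 * Cubic resampler: for each destination pixel, fetch the precomputed
 * subpixel source position and apply the separable 4x4 kernel. The
 * unchecked fast path runs when the 4x4 support lies fully inside the
 * plane; otherwise taps are clamped to the nearest edge sample.
 */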
static int resample_cubic(AVFilterContext *ctx, void *arg,
                          int job, int nb_jobs)
{
    PerspectiveContext *s = ctx->priv;
    ThreadData *td = arg;
    uint8_t *dst = td->dst;
    int dst_linesize = td->dst_linesize;
    uint8_t *src = td->src;
    int src_linesize = td->src_linesize;
    int w = td->w;
    int h = td->h;
    int hsub = td->hsub;
    int vsub = td->vsub;
    int start = (h * job) / nb_jobs;
    int end   = (h * (job+1)) / nb_jobs;
    const int linesize = s->linesize[0];
    int x, y;

    for (y = start; y < end; y++) {
        int sy = y << vsub;
        for (x = 0; x < w; x++) {
            int u, v, subU, subV, sum, sx;

            sx   = x << hsub;
            u    = s->pv[sx + sy * linesize][0] >> hsub;
            v    = s->pv[sx + sy * linesize][1] >> vsub;
            subU = u & (SUB_PIXELS - 1);
            subV = v & (SUB_PIXELS - 1);
            u  >>= SUB_PIXEL_BITS;
            v  >>= SUB_PIXEL_BITS;

            if (u > 0 && v > 0 && u < w - 2 && v < h - 2){
                const int index = u + v*src_linesize;
                const int a = s->coeff[subU][0];
                const int b = s->coeff[subU][1];
                const int c = s->coeff[subU][2];
                const int d = s->coeff[subU][3];

                sum = s->coeff[subV][0] * (a * src[index - 1 - src_linesize] + b * src[index - 0 - src_linesize] +
                                           c * src[index + 1 - src_linesize] + d * src[index + 2 - src_linesize]) +
                      s->coeff[subV][1] * (a * src[index - 1] + b * src[index - 0] +
                                           c * src[index + 1] + d * src[index + 2]) +
                      s->coeff[subV][2] * (a * src[index - 1 + src_linesize] + b * src[index - 0 + src_linesize] +
                                           c * src[index + 1 + src_linesize] + d * src[index + 2 + src_linesize]) +
                      s->coeff[subV][3] * (a * src[index - 1 + 2 * src_linesize] + b * src[index - 0 + 2 * src_linesize] +
                                           c * src[index + 1 + 2 * src_linesize] + d * src[index + 2 + 2 * src_linesize]);
            } else {
                int dx, dy;

                sum = 0;

                for (dy = 0; dy < 4; dy++) {
                    int iy = v + dy - 1;

                    if (iy < 0)
                        iy = 0;
                    else if (iy >= h)
                        iy = h-1;
                    for (dx = 0; dx < 4; dx++) {
                        int ix = u + dx - 1;

                        if (ix < 0)
                            ix = 0;
                        else if (ix >= w)
                            ix = w - 1;

                        sum += s->coeff[subU][dx] * s->coeff[subV][dy] * src[ix + iy * src_linesize];
                    }
                }
            }

            sum = (sum + (1<<(COEFF_BITS * 2 - 1))) >> (COEFF_BITS * 2);
            sum = av_clip_uint8(sum);
            dst[x + y * dst_linesize] = sum;
        }
    }
    return 0;
}

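/*
 * Linear resampler: bilinear blend of the 2x2 neighbourhood around the
 * remapped position, degrading to 1-D interpolation along the last
 * row/column and to a single nearest sample at the corners.
 */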
static int resample_linear(AVFilterContext *ctx, void *arg,
                           int job, int nb_jobs)
{
    PerspectiveContext *s = ctx->priv;
    ThreadData *td = arg;
    uint8_t *dst = td->dst;
    int dst_linesize = td->dst_linesize;
    uint8_t *src = td->src;
    int src_linesize = td->src_linesize;
    int w = td->w;
    int h = td->h;
    int hsub = td->hsub;
    int vsub = td->vsub;
    int start = (h * job) / nb_jobs;
    int end   = (h * (job+1)) / nb_jobs;
    const int linesize = s->linesize[0];
    int x, y;

    for (y = start; y < end; y++){
        int sy = y << vsub;
        for (x = 0; x < w; x++){
            int u, v, subU, subV, sum, sx, index, subUI, subVI;

            sx   = x << hsub;
            u    = s->pv[sx + sy * linesize][0] >> hsub;
            v    = s->pv[sx + sy * linesize][1] >> vsub;
            subU = u & (SUB_PIXELS - 1);
            subV = v & (SUB_PIXELS - 1);
            u  >>= SUB_PIXEL_BITS;
            v  >>= SUB_PIXEL_BITS;

            index = u + v * src_linesize;
            subUI = SUB_PIXELS - subU;
            subVI = SUB_PIXELS - subV;

            if ((unsigned)u < (unsigned)(w - 1)){
                if ((unsigned)v < (unsigned)(h - 1)){
                    sum = subVI * (subUI * src[index] + subU * src[index + 1]) +
                          subV  * (subUI * src[index + src_linesize] + subU * src[index + src_linesize + 1]);
                    sum = (sum + (1 << (SUB_PIXEL_BITS * 2 - 1))) >> (SUB_PIXEL_BITS * 2);
                } else {
                    if (v < 0)
                        v = 0;
                    else
                        v = h - 1;
                    index = u + v * src_linesize;
                    sum   = subUI * src[index] + subU * src[index + 1];
                    sum   = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
                }
            } else {
                if (u < 0)
                    u = 0;
                else
                    u = w - 1;
                if ((unsigned)v < (unsigned)(h - 1)){
                    index = u + v * src_linesize;
                    sum   = subVI * src[index] + subV * src[index + src_linesize];
                    sum   = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
                } else {
                    if (v < 0)
                        v = 0;
                    else
                        v = h - 1;
                    index = u + v * src_linesize;
                    sum   = src[index];
                }
            }

            sum = av_clip_uint8(sum);
            dst[x + y * dst_linesize] = sum;
        }
    }
    return 0;
}

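/* Bind the resampler selected by the "interpolation" option. */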
static av_cold int init(AVFilterContext *ctx)
{
    PerspectiveContext *s = ctx->priv;

    switch (s->interpolation) {
    case LINEAR: s->perspective = resample_linear; break;
    case CUBIC:  s->perspective = resample_cubic;  break;
    }

    return 0;
}

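/*
 * Per-frame entry point: optionally rebuild the LUT (eval=frame), then
 * run the selected resampler over each plane, sliced across the
 * filter's worker threads.
 */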
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    PerspectiveContext *s = ctx->priv;
    AVFrame *out;
    int plane;
    int ret;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, frame);

    if (s->eval_mode == EVAL_MODE_FRAME) {
        if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
            av_frame_free(&out);
            return ret;
        }
    }

    for (plane = 0; plane < s->nb_planes; plane++) {
        int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
        ThreadData td = {.dst = out->data[plane],
                         .dst_linesize = out->linesize[plane],
                         .src = frame->data[plane],
                         .src_linesize = frame->linesize[plane],
                         .w = s->linesize[plane],
                         .h = s->height[plane],
                         .hsub = hsub,
                         .vsub = vsub };
        ff_filter_execute(ctx, s->perspective, &td, NULL,
                          FFMIN(td.h, ff_filter_get_nb_threads(ctx)));
    }

    av_frame_free(&frame);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    PerspectiveContext *s = ctx->priv;

    av_freep(&s->pv);
}

static const AVFilterPad perspective_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

static const AVFilterPad perspective_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};

const AVFilter ff_vf_perspective = {
    .name          = "perspective",
    .description   = NULL_IF_CONFIG_SMALL("Correct the perspective of video."),
    .priv_size     = sizeof(PerspectiveContext),
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(perspective_inputs),
    FILTER_OUTPUTS(perspective_outputs),
    FILTER_QUERY_FUNC(query_formats),
    .priv_class    = &perspective_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};