FFmpeg
vf_lensfun.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2007 by Andrew Zabolotny (author of lensfun, from which this filter derives from)
3  * Copyright (C) 2018 Stephen Seo
4  *
5  * This file is part of FFmpeg.
6  *
7  * This program is free software: you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation, either version 3 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program. If not, see <https://www.gnu.org/licenses/>.
19  */
20 
21 /**
22  * @file
23  * Lensfun filter, applies lens correction with parameters from the lensfun database
24  *
25  * @see https://lensfun.sourceforge.net/
26  */
27 
28 #include <float.h>
29 #include <math.h>
30 
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/opt.h"
34 #include "libswscale/swscale.h"
35 #include "avfilter.h"
36 #include "formats.h"
37 #include "internal.h"
38 #include "video.h"
39 
40 #include <lensfun.h>
41 
42 #define LANCZOS_RESOLUTION 256
43 
44 enum Mode {
45  VIGNETTING = 0x1,
48 };
49 
54 };
55 
56 typedef struct VignettingThreadData {
57  int width, height;
61  lfModifier *modifier;
63 
65  int width, height;
66  const float *distortion_coords;
67  const uint8_t *data_in;
70  const float *interpolation;
71  int mode;
74 
75 typedef struct LensfunContext {
76  const AVClass *class;
77  const char *make, *model, *lens_model;
78  int mode;
79  float focal_length;
80  float aperture;
82  float scale;
84  int reverse;
86 
88  float *interpolation;
89 
90  lfLens *lens;
91  lfCamera *camera;
92  lfModifier *modifier;
94 
95 #define OFFSET(x) offsetof(LensfunContext, x)
96 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption lensfun_options[] = {
    /* database selection: camera maker/model and lens model are looked up in
     * the lensfun database at init time */
    { "make", "set camera maker", OFFSET(make), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "model", "set camera model", OFFSET(model), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "lens_model", "set lens model", OFFSET(lens_model), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    /* "mode" is a bit mask: any combination of the three correction flags */
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=GEOMETRY_DISTORTION}, 0, VIGNETTING | GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION, FLAGS, "mode" },
    { "vignetting", "fix lens vignetting", 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING}, 0, 0, FLAGS, "mode" },
    { "geometry", "correct geometry distortion", 0, AV_OPT_TYPE_CONST, {.i64=GEOMETRY_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "subpixel", "fix chromatic aberrations", 0, AV_OPT_TYPE_CONST, {.i64=SUBPIXEL_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "vig_geo", "fix lens vignetting and correct geometry distortion", 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING | GEOMETRY_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "vig_subpixel", "fix lens vignetting and chromatic aberrations", 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING | SUBPIXEL_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "distortion", "correct geometry distortion and chromatic aberrations", 0, AV_OPT_TYPE_CONST, {.i64=GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION}, 0, 0, FLAGS, "mode" },
    { "all", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VIGNETTING | GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION}, 0, 0, FLAGS, "mode" },
    /* shooting parameters, constant for the whole stream */
    { "focal_length", "focal length of video (zoom; constant for the duration of the use of this filter)", OFFSET(focal_length), AV_OPT_TYPE_FLOAT, {.dbl=18}, 0.0, DBL_MAX, FLAGS },
    { "aperture", "aperture (constant for the duration of the use of this filter)", OFFSET(aperture), AV_OPT_TYPE_FLOAT, {.dbl=3.5}, 0.0, DBL_MAX, FLAGS },
    { "focus_distance", "focus distance (constant for the duration of the use of this filter)", OFFSET(focus_distance), AV_OPT_TYPE_FLOAT, {.dbl=1000.0f}, 0.0, DBL_MAX, FLAGS },
    { "scale", "scale factor applied after corrections (0.0 means automatic scaling)", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, DBL_MAX, FLAGS },
    /* projection the corrected image should be mapped to (lensfun lfLensType values) */
    { "target_geometry", "target geometry of the lens correction (only when geometry correction is enabled)", OFFSET(target_geometry), AV_OPT_TYPE_INT, {.i64=LF_RECTILINEAR}, 0, INT_MAX, FLAGS, "lens_geometry" },
    { "rectilinear", "rectilinear lens (default)", 0, AV_OPT_TYPE_CONST, {.i64=LF_RECTILINEAR}, 0, 0, FLAGS, "lens_geometry" },
    { "fisheye", "fisheye lens", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE}, 0, 0, FLAGS, "lens_geometry" },
    { "panoramic", "panoramic (cylindrical)", 0, AV_OPT_TYPE_CONST, {.i64=LF_PANORAMIC}, 0, 0, FLAGS, "lens_geometry" },
    { "equirectangular", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=LF_EQUIRECTANGULAR}, 0, 0, FLAGS, "lens_geometry" },
    { "fisheye_orthographic", "orthographic fisheye", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_ORTHOGRAPHIC}, 0, 0, FLAGS, "lens_geometry" },
    { "fisheye_stereographic", "stereographic fisheye", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_STEREOGRAPHIC}, 0, 0, FLAGS, "lens_geometry" },
    { "fisheye_equisolid", "equisolid fisheye", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_EQUISOLID}, 0, 0, FLAGS, "lens_geometry" },
    { "fisheye_thoby", "fisheye as measured by thoby", 0, AV_OPT_TYPE_CONST, {.i64=LF_FISHEYE_THOBY}, 0, 0, FLAGS, "lens_geometry" },
    { "reverse", "Does reverse correction (regular image to lens distorted)", OFFSET(reverse), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    /* resampling kernel used when remapping pixels */
    { "interpolation", "Type of interpolation", OFFSET(interpolation_type), AV_OPT_TYPE_INT, {.i64=LINEAR}, 0, LANCZOS, FLAGS, "interpolation" },
    { "nearest", NULL, 0, AV_OPT_TYPE_CONST, {.i64=NEAREST}, 0, 0, FLAGS, "interpolation" },
    { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "interpolation" },
    { "lanczos", NULL, 0, AV_OPT_TYPE_CONST, {.i64=LANCZOS}, 0, 0, FLAGS, "interpolation" },
    { NULL }
};
129 
130 AVFILTER_DEFINE_CLASS(lensfun);
131 
133 {
134  LensfunContext *lensfun = ctx->priv;
135  lfDatabase *db;
136  const lfCamera **cameras;
137  const lfLens **lenses;
138 
139  db = lf_db_create();
140  if (lf_db_load(db) != LF_NO_ERROR) {
141  lf_db_destroy(db);
142  av_log(ctx, AV_LOG_FATAL, "Failed to load lensfun database\n");
143  return AVERROR_INVALIDDATA;
144  }
145 
146  if (!lensfun->make || !lensfun->model) {
147  const lfCamera *const *cameras = lf_db_get_cameras(db);
148 
149  av_log(ctx, AV_LOG_FATAL, "Option \"make\" or option \"model\" not specified\n");
150  av_log(ctx, AV_LOG_INFO, "Available values for \"make\" and \"model\":\n");
151  for (int i = 0; cameras && cameras[i]; i++)
152  av_log(ctx, AV_LOG_INFO, "\t%s\t%s\n", cameras[i]->Maker, cameras[i]->Model);
153  lf_db_destroy(db);
154  return AVERROR(EINVAL);
155  } else if (!lensfun->lens_model) {
156  const lfLens *const *lenses = lf_db_get_lenses(db);
157 
158  av_log(ctx, AV_LOG_FATAL, "Option \"lens_model\" not specified\n");
159  av_log(ctx, AV_LOG_INFO, "Available values for \"lens_model\":\n");
160  for (int i = 0; lenses && lenses[i]; i++)
161  av_log(ctx, AV_LOG_INFO, "\t%s\t(make %s)\n", lenses[i]->Model, lenses[i]->Maker);
162  lf_db_destroy(db);
163  return AVERROR(EINVAL);
164  }
165 
166  lensfun->lens = lf_lens_create();
167  lensfun->camera = lf_camera_create();
168 
169  cameras = lf_db_find_cameras(db, lensfun->make, lensfun->model);
170  if (cameras && *cameras) {
171  lf_camera_copy(lensfun->camera, *cameras);
172  av_log(ctx, AV_LOG_INFO, "Using camera %s\n", lensfun->camera->Model);
173  } else {
174  lf_free(cameras);
175  lf_db_destroy(db);
176  av_log(ctx, AV_LOG_FATAL, "Failed to find camera in lensfun database\n");
177  return AVERROR_INVALIDDATA;
178  }
179  lf_free(cameras);
180 
181  lenses = lf_db_find_lenses(db, lensfun->camera, NULL, lensfun->lens_model, 0);
182  if (lenses && *lenses) {
183  lf_lens_copy(lensfun->lens, *lenses);
184  av_log(ctx, AV_LOG_INFO, "Using lens %s\n", lensfun->lens->Model);
185  } else {
186  lf_free(lenses);
187  lf_db_destroy(db);
188  av_log(ctx, AV_LOG_FATAL, "Failed to find lens in lensfun database\n");
189  return AVERROR_INVALIDDATA;
190  }
191  lf_free(lenses);
192 
193  lf_db_destroy(db);
194  return 0;
195 }
196 
198 {
199  // Some of the functions provided by lensfun require pixels in RGB format
200  static const enum AVPixelFormat fmts[] = {AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE};
201  AVFilterFormats *fmts_list = ff_make_format_list(fmts);
202  return ff_set_common_formats(ctx, fmts_list);
203 }
204 
205 static float lanczos_kernel(float x)
206 {
207  if (x == 0.0f) {
208  return 1.0f;
209  } else if (x > -2.0f && x < 2.0f) {
210  return (2.0f * sin(M_PI * x) * sin(M_PI / 2.0f * x)) / (M_PI * M_PI * x * x);
211  } else {
212  return 0.0f;
213  }
214 }
215 
217 {
218  AVFilterContext *ctx = inlink->dst;
219  LensfunContext *lensfun = ctx->priv;
220  int index;
221  float a;
222 
223  if (!lensfun->modifier) {
224  if (lensfun->camera && lensfun->lens) {
225  lensfun->modifier = lf_modifier_create(lensfun->lens,
226  lensfun->focal_length,
227  lensfun->camera->CropFactor,
228  inlink->w,
229  inlink->h, LF_PF_U8, lensfun->reverse);
230  if (lensfun->mode & VIGNETTING)
231  lf_modifier_enable_vignetting_correction(lensfun->modifier, lensfun->aperture, lensfun->focus_distance);
232  if (lensfun->mode & GEOMETRY_DISTORTION) {
233  lf_modifier_enable_distortion_correction(lensfun->modifier);
234  lf_modifier_enable_projection_transform(lensfun->modifier, lensfun->target_geometry);
235  lf_modifier_enable_scaling(lensfun->modifier, lensfun->scale);
236  }
237  if (lensfun->mode & SUBPIXEL_DISTORTION)
238  lf_modifier_enable_tca_correction(lensfun->modifier);
239  } else {
240  // lensfun->camera and lensfun->lens should have been initialized
241  return AVERROR_BUG;
242  }
243  }
244 
245  if (!lensfun->distortion_coords) {
246  if (lensfun->mode & SUBPIXEL_DISTORTION) {
247  lensfun->distortion_coords = av_malloc_array(inlink->w * inlink->h, sizeof(float) * 2 * 3);
248  if (!lensfun->distortion_coords)
249  return AVERROR(ENOMEM);
250  if (lensfun->mode & GEOMETRY_DISTORTION) {
251  // apply both geometry and subpixel distortion
252  lf_modifier_apply_subpixel_geometry_distortion(lensfun->modifier,
253  0, 0,
254  inlink->w, inlink->h,
255  lensfun->distortion_coords);
256  } else {
257  // apply only subpixel distortion
258  lf_modifier_apply_subpixel_distortion(lensfun->modifier,
259  0, 0,
260  inlink->w, inlink->h,
261  lensfun->distortion_coords);
262  }
263  } else if (lensfun->mode & GEOMETRY_DISTORTION) {
264  lensfun->distortion_coords = av_malloc_array(inlink->w * inlink->h, sizeof(float) * 2);
265  if (!lensfun->distortion_coords)
266  return AVERROR(ENOMEM);
267  // apply only geometry distortion
268  lf_modifier_apply_geometry_distortion(lensfun->modifier,
269  0, 0,
270  inlink->w, inlink->h,
271  lensfun->distortion_coords);
272  }
273  }
274 
275  if (!lensfun->interpolation)
276  if (lensfun->interpolation_type == LANCZOS) {
277  lensfun->interpolation = av_malloc_array(LANCZOS_RESOLUTION, sizeof(float) * 4);
278  if (!lensfun->interpolation)
279  return AVERROR(ENOMEM);
280  for (index = 0; index < 4 * LANCZOS_RESOLUTION; ++index) {
281  if (index == 0) {
282  lensfun->interpolation[index] = 1.0f;
283  } else {
284  a = sqrtf((float)index / LANCZOS_RESOLUTION);
285  lensfun->interpolation[index] = lanczos_kernel(a);
286  }
287  }
288  }
289 
290  return 0;
291 }
292 
293 static int vignetting_filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
294 {
296  const int slice_start = thread_data->height * jobnr / nb_jobs;
297  const int slice_end = thread_data->height * (jobnr + 1) / nb_jobs;
298 
299  lf_modifier_apply_color_modification(thread_data->modifier,
300  thread_data->data_in + slice_start * thread_data->linesize_in,
301  0,
302  slice_start,
303  thread_data->width,
304  slice_end - slice_start,
305  thread_data->pixel_composition,
306  thread_data->linesize_in);
307 
308  return 0;
309 }
310 
/** Square of a float; helper for the 2-D Lanczos weight computation. */
static float square(float value)
{
    const float squared = value * value;
    return squared;
}
315 
316 static int distortion_correction_filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
317 {
319  const int slice_start = thread_data->height * jobnr / nb_jobs;
320  const int slice_end = thread_data->height * (jobnr + 1) / nb_jobs;
321 
322  int x, y, i, j, rgb_index;
323  float interpolated, new_x, new_y, d, norm;
324  int new_x_int, new_y_int;
325  for (y = slice_start; y < slice_end; ++y)
326  for (x = 0; x < thread_data->width; ++x)
327  for (rgb_index = 0; rgb_index < 3; ++rgb_index) {
328  if (thread_data->mode & SUBPIXEL_DISTORTION) {
329  // subpixel (and possibly geometry) distortion correction was applied, correct distortion
330  switch(thread_data->interpolation_type) {
331  case NEAREST:
332  new_x_int = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2] + 0.5f;
333  new_y_int = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2 + 1] + 0.5f;
334  if (new_x_int < 0 || new_x_int >= thread_data->width || new_y_int < 0 || new_y_int >= thread_data->height) {
335  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
336  } else {
337  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = thread_data->data_in[new_x_int * 3 + rgb_index + new_y_int * thread_data->linesize_in];
338  }
339  break;
340  case LINEAR:
341  interpolated = 0.0f;
342  new_x = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2];
343  new_x_int = new_x;
344  new_y = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2 + 1];
345  new_y_int = new_y;
346  if (new_x_int < 0 || new_x_int + 1 >= thread_data->width || new_y_int < 0 || new_y_int + 1 >= thread_data->height) {
347  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
348  } else {
349  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] =
350  thread_data->data_in[ new_x_int * 3 + rgb_index + new_y_int * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y_int + 1 - new_y)
351  + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index + new_y_int * thread_data->linesize_in] * (new_x - new_x_int) * (new_y_int + 1 - new_y)
352  + thread_data->data_in[ new_x_int * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y - new_y_int)
353  + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x - new_x_int) * (new_y - new_y_int);
354  }
355  break;
356  case LANCZOS:
357  interpolated = 0.0f;
358  norm = 0.0f;
359  new_x = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2];
360  new_x_int = new_x;
361  new_y = thread_data->distortion_coords[x * 2 * 3 + y * thread_data->width * 2 * 3 + rgb_index * 2 + 1];
362  new_y_int = new_y;
363  for (j = 0; j < 4; ++j)
364  for (i = 0; i < 4; ++i) {
365  if (new_x_int + i - 2 < 0 || new_x_int + i - 2 >= thread_data->width || new_y_int + j - 2 < 0 || new_y_int + j - 2 >= thread_data->height)
366  continue;
367  d = square(new_x - (new_x_int + i - 2)) * square(new_y - (new_y_int + j - 2));
368  if (d >= 4.0f)
369  continue;
370  d = thread_data->interpolation[(int)(d * LANCZOS_RESOLUTION)];
371  norm += d;
372  interpolated += thread_data->data_in[(new_x_int + i - 2) * 3 + rgb_index + (new_y_int + j - 2) * thread_data->linesize_in] * d;
373  }
374  if (norm == 0.0f) {
375  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
376  } else {
377  interpolated /= norm;
378  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = interpolated < 0.0f ? 0.0f : interpolated > 255.0f ? 255.0f : interpolated;
379  }
380  break;
381  }
382  } else if (thread_data->mode & GEOMETRY_DISTORTION) {
383  // geometry distortion correction was applied, correct distortion
384  switch(thread_data->interpolation_type) {
385  case NEAREST:
386  new_x_int = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2] + 0.5f;
387  new_y_int = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2 + 1] + 0.5f;
388  if (new_x_int < 0 || new_x_int >= thread_data->width || new_y_int < 0 || new_y_int >= thread_data->height) {
389  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
390  } else {
391  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = thread_data->data_in[new_x_int * 3 + rgb_index + new_y_int * thread_data->linesize_in];
392  }
393  break;
394  case LINEAR:
395  interpolated = 0.0f;
396  new_x = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2];
397  new_x_int = new_x;
398  new_y = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2 + 1];
399  new_y_int = new_y;
400  if (new_x_int < 0 || new_x_int + 1 >= thread_data->width || new_y_int < 0 || new_y_int + 1 >= thread_data->height) {
401  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
402  } else {
403  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] =
404  thread_data->data_in[ new_x_int * 3 + rgb_index + new_y_int * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y_int + 1 - new_y)
405  + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index + new_y_int * thread_data->linesize_in] * (new_x - new_x_int) * (new_y_int + 1 - new_y)
406  + thread_data->data_in[ new_x_int * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x_int + 1 - new_x) * (new_y - new_y_int)
407  + thread_data->data_in[(new_x_int + 1) * 3 + rgb_index + (new_y_int + 1) * thread_data->linesize_in] * (new_x - new_x_int) * (new_y - new_y_int);
408  }
409  break;
410  case LANCZOS:
411  interpolated = 0.0f;
412  norm = 0.0f;
413  new_x = thread_data->distortion_coords[x * 2 + y * thread_data->width * 2];
414  new_x_int = new_x;
415  new_y = thread_data->distortion_coords[x * 2 + 1 + y * thread_data->width * 2];
416  new_y_int = new_y;
417  for (j = 0; j < 4; ++j)
418  for (i = 0; i < 4; ++i) {
419  if (new_x_int + i - 2 < 0 || new_x_int + i - 2 >= thread_data->width || new_y_int + j - 2 < 0 || new_y_int + j - 2 >= thread_data->height)
420  continue;
421  d = square(new_x - (new_x_int + i - 2)) * square(new_y - (new_y_int + j - 2));
422  if (d >= 4.0f)
423  continue;
424  d = thread_data->interpolation[(int)(d * LANCZOS_RESOLUTION)];
425  norm += d;
426  interpolated += thread_data->data_in[(new_x_int + i - 2) * 3 + rgb_index + (new_y_int + j - 2) * thread_data->linesize_in] * d;
427  }
428  if (norm == 0.0f) {
429  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = 0;
430  } else {
431  interpolated /= norm;
432  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = interpolated < 0.0f ? 0.0f : interpolated > 255.0f ? 255.0f : interpolated;
433  }
434  break;
435  }
436  } else {
437  // no distortion correction was applied
438  thread_data->data_out[x * 3 + rgb_index + y * thread_data->linesize_out] = thread_data->data_in[x * 3 + rgb_index + y * thread_data->linesize_in];
439  }
440  }
441 
442  return 0;
443 }
444 
446 {
447  AVFilterContext *ctx = inlink->dst;
448  LensfunContext *lensfun = ctx->priv;
449  AVFilterLink *outlink = ctx->outputs[0];
450  AVFrame *out;
451  VignettingThreadData vignetting_thread_data;
452  DistortionCorrectionThreadData distortion_correction_thread_data;
453 
454  if (lensfun->mode & VIGNETTING) {
456 
457  vignetting_thread_data = (VignettingThreadData) {
458  .width = inlink->w,
459  .height = inlink->h,
460  .data_in = in->data[0],
461  .linesize_in = in->linesize[0],
462  .pixel_composition = LF_CR_3(RED, GREEN, BLUE),
463  .modifier = lensfun->modifier
464  };
465 
466  ctx->internal->execute(ctx,
468  &vignetting_thread_data,
469  NULL,
470  FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
471  }
472 
473  if (lensfun->mode & (GEOMETRY_DISTORTION | SUBPIXEL_DISTORTION)) {
474  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
475  if (!out) {
476  av_frame_free(&in);
477  return AVERROR(ENOMEM);
478  }
479  av_frame_copy_props(out, in);
480 
481  distortion_correction_thread_data = (DistortionCorrectionThreadData) {
482  .width = inlink->w,
483  .height = inlink->h,
484  .distortion_coords = lensfun->distortion_coords,
485  .data_in = in->data[0],
486  .data_out = out->data[0],
487  .linesize_in = in->linesize[0],
488  .linesize_out = out->linesize[0],
489  .interpolation = lensfun->interpolation,
490  .mode = lensfun->mode,
491  .interpolation_type = lensfun->interpolation_type
492  };
493 
494  ctx->internal->execute(ctx,
496  &distortion_correction_thread_data,
497  NULL,
498  FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
499 
500  av_frame_free(&in);
501  return ff_filter_frame(outlink, out);
502  } else {
503  return ff_filter_frame(outlink, in);
504  }
505 }
506 
508 {
509  LensfunContext *lensfun = ctx->priv;
510 
511  if (lensfun->camera)
512  lf_camera_destroy(lensfun->camera);
513  if (lensfun->lens)
514  lf_lens_destroy(lensfun->lens);
515  if (lensfun->modifier)
516  lf_modifier_destroy(lensfun->modifier);
517  av_freep(&lensfun->distortion_coords);
518  av_freep(&lensfun->interpolation);
519 }
520 
/* single video input; per-link setup in config_props, frames handled by filter_frame */
static const AVFilterPad lensfun_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};
530 
/* single video output, same format as the input (RGB24 via query_formats) */
static const AVFilterPad lensfun_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
538 
540  .name = "lensfun",
541  .description = NULL_IF_CONFIG_SMALL("Apply correction to an image based on info derived from the lensfun database."),
542  .priv_size = sizeof(LensfunContext),
543  .init = init,
544  .uninit = uninit,
546  .inputs = lensfun_inputs,
547  .outputs = lensfun_outputs,
548  .priv_class = &lensfun_class,
550 };
#define NULL
Definition: coverity.c:32
#define OFFSET(x)
Definition: vf_lensfun.c:95
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
Definition: rpzaenc.c:55
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
AVOption.
Definition: opt.h:248
misc image utilities
Main libavfilter public API header.
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
Definition: mss12.h:40
static const AVFilterPad lensfun_outputs[]
Definition: vf_lensfun.c:531
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
Definition: rpzaenc.c:54
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:287
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:126
const char * name
Pad name.
Definition: internal.h:60
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1093
Definition: rpzaenc.c:53
static float lanczos_kernel(float x)
Definition: vf_lensfun.c:205
uint8_t
#define av_cold
Definition: attributes.h:88
const char * lens_model
Definition: vf_lensfun.c:77
AVOptions.
#define f(width, name)
Definition: cbs_vp9.c:255
const char * model
Definition: vf_lensfun.c:77
static int query_formats(AVFilterContext *ctx)
Definition: vf_lensfun.c:197
Mode
Frame type (Table 1a in 3GPP TS 26.101)
Definition: amrnbdata.h:39
InterpolationType
Definition: vf_lensfun.c:50
const float * distortion_coords
Definition: vf_lensfun.c:66
static uint32_t reverse(uint32_t num, int bits)
Definition: speedhqenc.c:51
external API header
#define av_log(a,...)
A filter pad used for either input or output.
Definition: internal.h:54
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:588
static int distortion_correction_filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_lensfun.c:316
static av_cold int init(AVFilterContext *ctx)
Definition: vf_lensfun.c:132
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
lfModifier * modifier
Definition: vf_lensfun.c:92
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:115
void * priv
private data for use by the filter
Definition: avfilter.h:356
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
const char * arg
Definition: jacosubdec.c:66
simple assert() macros that are a bit more flexible than ISO C assert().
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_lensfun.c:507
static float square(float x)
Definition: vf_lensfun.c:311
int target_geometry
Definition: vf_lensfun.c:83
lfModifier * modifier
Definition: vf_lensfun.c:61
static const AVOption lensfun_options[]
Definition: vf_lensfun.c:97
AVFilter ff_vf_lensfun
Definition: vf_lensfun.c:539
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:801
#define FFMIN(a, b)
Definition: common.h:105
int interpolation_type
Definition: vf_lensfun.c:85
static int config_props(AVFilterLink *inlink)
Definition: vf_lensfun.c:216
float focal_length
Definition: vf_lensfun.c:79
AVFILTER_DEFINE_CLASS(lensfun)
AVFormatContext * ctx
Definition: movenc.c:48
float aperture
Definition: vf_lensfun.c:80
float * distortion_coords
Definition: vf_lensfun.c:87
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
static int vignetting_filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_lensfun.c:293
const char * make
Definition: vf_lensfun.c:77
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:145
lfCamera * camera
Definition: vf_lensfun.c:91
int index
Definition: gxfenc.c:89
#define FLAGS
Definition: vf_lensfun.c:96
#define LANCZOS_RESOLUTION
Definition: vf_lensfun.c:42
const char * name
Filter name.
Definition: avfilter.h:149
static const AVFilterPad lensfun_inputs[]
Definition: vf_lensfun.c:521
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:353
float * interpolation
Definition: vf_lensfun.c:88
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:611
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:381
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
int
lfLens * lens
Definition: vf_lensfun.c:90
avfilter_execute_func * execute
Definition: internal.h:136
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2033
A list of supported formats for one end of a filter link.
Definition: formats.h:65
An instance of a filter.
Definition: avfilter.h:341
float focus_distance
Definition: vf_lensfun.c:81
FILE * out
Definition: movenc.c:54
#define av_freep(p)
#define M_PI
Definition: mathematics.h:52
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:188
#define av_malloc_array(a, b)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_lensfun.c:445
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
int i
Definition: input.c:407