FFmpeg
vf_colorconstancy.c
1 /*
2  * Copyright (c) 2018 Mina Sami
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Color Constancy filter
24  *
25  * @see http://colorconstancy.com/
26  *
27  * @cite
28  * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
29  */
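/*
 * Rough sketch of the idea behind the grey-edge framework (see the reference
 * above): for every channel c the illuminant component is estimated as being
 * proportional to a Minkowski norm of smoothed image derivatives,
 *
 *     e_c ~ ( sum_x |d^n f_c,sigma(x)|^p )^(1/p)
 *
 * where n is the differentiation order (difford), p the Minkowski norm
 * (minknorm, 0 meaning "take the maximum") and sigma the Gaussian smoothing.
 * A hypothetical command line exercising the options defined below could be:
 *
 *     ffmpeg -i in.png -vf greyedge=difford=1:minknorm=5:sigma=2 out.png
 *
 * (the filter works on planar GBR, so a pixel format conversion may be
 * inserted automatically by the filter graph).
 */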
30 
31 #include "libavutil/imgutils.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 
35 #include "avfilter.h"
36 #include "formats.h"
37 #include "internal.h"
38 #include "video.h"
39 
40 #include <math.h>
41 
42 #define GREY_EDGE "greyedge"
43 
44 #define SQRT3 1.73205080757
45 
46 #define NUM_PLANES 3
47 #define MAX_DIFF_ORD 2
48 #define MAX_META_DATA 4
49 #define MAX_DATA 4
50 
51 #define INDEX_TEMP 0
52 #define INDEX_DX 1
53 #define INDEX_DY 2
54 #define INDEX_DXY 3
55 #define INDEX_NORM INDEX_DX
56 #define INDEX_SRC 0
57 #define INDEX_DST 1
58 #define INDEX_ORD 2
59 #define INDEX_DIR 3
60 #define DIR_X 0
61 #define DIR_Y 1
62 
63 /**
64  * Used for passing data between threads.
65  */
66 typedef struct ThreadData {
67  AVFrame *in, *out;
68  int meta_data[MAX_META_DATA];
69  double *data[MAX_DATA][NUM_PLANES];
70 } ThreadData;
71 
72 /**
73  * Common struct for all algorithms contexts.
74  */
75 typedef struct ColorConstancyContext {
76  const AVClass *class;
77 
78  int difford;
79  int minknorm; /**< @minknorm = 0 : getMax instead */
80  double sigma;
81 
82  int nb_threads;
83  int planeheight[4];
84  int planewidth[4];
85 
86  int filtersize;
87  double *gauss[MAX_DIFF_ORD+1];
88 
89  double white[NUM_PLANES];
90 } ColorConstancyContext;
91 
92 #define OFFSET(x) offsetof(ColorConstancyContext, x)
93 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
94 
95 #define GINDX(s, i) ( (i) - ((s) >> 2) )
96 
97 /**
98  * Sets the gauss filters used for calculating gauss derivatives. The filter
99  * size depends on sigma, which is a user option, hence these filters are
100  * recalculated each time. Each higher order also depends on the lower ones.
101  * Sigma can be zero only when difford = 0; in that case the data is only
102  * converted to double instead.
103  *
104  * @param ctx the filter context.
105  *
106  * @return 0 in case of success, a negative value corresponding to an
107  * AVERROR code in case of failure.
108  */
109 static int set_gauss(AVFilterContext *ctx)
110 {
111  ColorConstancyContext *s = ctx->priv;
112  int filtersize = s->filtersize;
113  int difford = s->difford;
114  double sigma = s->sigma;
115  double sum1, sum2;
116  int i;
117 
118  for (i = 0; i <= difford; ++i) {
119  s->gauss[i] = av_calloc(filtersize, sizeof(*s->gauss[i]));
120  if (!s->gauss[i]) {
121  for (; i >= 0; --i) {
122  av_freep(&s->gauss[i]);
123  }
124  return AVERROR(ENOMEM);
125  }
126  }
127 
128  // Order 0
129  av_log(ctx, AV_LOG_TRACE, "Setting 0-d gauss with filtersize = %d.\n", filtersize);
130  sum1 = 0.0;
131  if (!sigma) {
132  s->gauss[0][0] = 1; // Copying data to double instead of convolution
133  } else {
134  for (i = 0; i < filtersize; ++i) {
135  s->gauss[0][i] = exp(- pow(GINDX(filtersize, i), 2.) / (2 * sigma * sigma)) / ( sqrt(2 * M_PI) * sigma );
136  sum1 += s->gauss[0][i];
137  }
138  for (i = 0; i < filtersize; ++i) {
139  s->gauss[0][i] /= sum1;
140  }
141  }
142  // Order 1
143  if (difford > 0) {
144  av_log(ctx, AV_LOG_TRACE, "Setting 1-d gauss with filtersize = %d.\n", filtersize);
145  sum1 = 0.0;
146  for (i = 0; i < filtersize; ++i) {
147  s->gauss[1][i] = - (GINDX(filtersize, i) / pow(sigma, 2)) * s->gauss[0][i];
148  sum1 += s->gauss[1][i] * GINDX(filtersize, i);
149  }
150 
151  for (i = 0; i < filtersize; ++i) {
152  s->gauss[1][i] /= sum1;
153  }
154 
155  // Order 2
156  if (difford > 1) {
157  av_log(ctx, AV_LOG_TRACE, "Setting 2-d gauss with filtersize = %d.\n", filtersize);
158  sum1 = 0.0;
159  for (i = 0; i < filtersize; ++i) {
160  s->gauss[2][i] = ( pow(GINDX(filtersize, i), 2) / pow(sigma, 4) - 1/pow(sigma, 2) )
161  * s->gauss[0][i];
162  sum1 += s->gauss[2][i];
163  }
164 
165  sum2 = 0.0;
166  for (i = 0; i < filtersize; ++i) {
167  s->gauss[2][i] -= sum1 / (filtersize);
168  sum2 += (0.5 * GINDX(filtersize, i) * GINDX(filtersize, i) * s->gauss[2][i]);
169  }
170  for (i = 0; i < filtersize ; ++i) {
171  s->gauss[2][i] /= sum2;
172  }
173  }
174  }
175  return 0;
176 }
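/*
 * For reference, up to the normalizations applied in the loops above the taps
 * are sampled from (with x = GINDX(filtersize, i)):
 *   order 0:  g(x)   = exp(-x^2 / (2*sigma^2)) / (sqrt(2*PI) * sigma), scaled to sum to 1
 *   order 1:  g'(x)  = -(x / sigma^2) * g(x), scaled so that sum(x * g'(x)) = 1
 *   order 2:  g''(x) = (x^2 / sigma^4 - 1 / sigma^2) * g(x), made zero-mean and
 *             scaled so that sum(0.5 * x^2 * g''(x)) = 1
 */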
177 
178 /**
179  * Frees up the buffers used by grey edge for storing final and intermediate
180  * derivative results. The number of buffers and the number of planes of
181  * the last buffer are given so that it can be called safely at allocation
182  * failure instances.
183  *
184  * @param td holds the buffers.
185  * @param nb_buff number of buffers to be freed.
186  * @param nb_planes number of planes for last buffer to be freed.
187  */
188 static void cleanup_derivative_buffers(ThreadData *td, int nb_buff, int nb_planes)
189 {
190  int b, p;
191 
192  for (b = 0; b < nb_buff; ++b) {
193  for (p = 0; p < NUM_PLANES; ++p) {
194  av_freep(&td->data[b][p]);
195  }
196  }
197  // The final buffer may not be fully allocated in failure cases
198  for (p = 0; p < nb_planes; ++p) {
199  av_freep(&td->data[b][p]);
200  }
201 }
202 
203 /**
204  * Allocates the buffers used by grey edge for storing final and intermediate
205  * derivative results.
206  *
207  * @param ctx the filter context.
208  * @param td holds the buffers.
209  *
210  * @return 0 in case of success, a negative value corresponding to an
211  * AVERROR code in case of failure.
212  */
213 static int setup_derivative_buffers(AVFilterContext *ctx, ThreadData *td)
214 {
215  ColorConstancyContext *s = ctx->priv;
216  int nb_buff = s->difford + 1;
217  int b, p;
218 
219  av_log(ctx, AV_LOG_TRACE, "Allocating %d buffer(s) for grey edge.\n", nb_buff);
220  for (b = 0; b <= nb_buff; ++b) { // We need difford + 1 buffers
221  for (p = 0; p < NUM_PLANES; ++p) {
222  td->data[b][p] = av_calloc(s->planeheight[p] * s->planewidth[p],
223  sizeof(*td->data[b][p]));
224  if (!td->data[b][p]) {
225  cleanup_derivative_buffers(td, b + 1, p);
226  return AVERROR(ENOMEM);
227  }
228  }
229  }
230  return 0;
231 }
232 
233 #define CLAMP(x, mx) av_clip((x), 0, (mx-1))
234 #define INDX2D(r, c, w) ( (r) * (w) + (c) )
235 #define GAUSS(s, sr, sc, sls, sh, sw, g) ( (s)[ INDX2D(CLAMP((sr), (sh)), CLAMP((sc), (sw)), (sls)) ] * (g) )
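/*
 * Illustration only: for a horizontal tap on a uint8_t plane, GAUSS() expands
 * to a clamped, weighted sample read. A hypothetical helper spelling that out
 * (not used by the filter) would be:
 */
#if 0
static double example_weighted_tap(const uint8_t *src, int r, int c,
                                   int linesize, int h, int w, double weight)
{
    /* coordinates are clamped to the plane, i.e. borders are replicated */
    return src[CLAMP(r, h) * linesize + CLAMP(c, w)] * weight;
}
#endif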
236 
237 /**
238  * Slice calculation of gaussian derivatives. Applies a 1-D gaussian derivative
239  * filter either horizontally or vertically according to the metadata given in
240  * the thread data. When convolving horizontally, the source is always the input
241  * frame within the thread data; when convolving vertically, the source is a buffer.
242  *
243  * @param ctx the filter context.
244  * @param arg data to be passed between threads.
245  * @param jobnr current job number.
246  * @param nb_jobs total number of jobs.
247  *
248  * @return 0.
249  */
250 static int slice_get_derivative(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
251 {
252  ColorConstancyContext *s = ctx->priv;
253  ThreadData *td = arg;
254  AVFrame *in = td->in;
255  const int ord = td->meta_data[INDEX_ORD];
256  const int dir = td->meta_data[INDEX_DIR];
257  const int src_index = td->meta_data[INDEX_SRC];
258  const int dst_index = td->meta_data[INDEX_DST];
259  const int filtersize = s->filtersize;
260  const double *gauss = s->gauss[ord];
261  int plane;
262 
263  for (plane = 0; plane < NUM_PLANES; ++plane) {
264  const int height = s->planeheight[plane];
265  const int width = s->planewidth[plane];
266  const int in_linesize = in->linesize[plane];
267  double *dst = td->data[dst_index][plane];
268  int slice_start, slice_end;
269  int r, c, g;
270 
271  if (dir == DIR_X) {
272  /** Applying gauss horizontally along each row */
273  const uint8_t *src = in->data[plane];
274  slice_start = (height * jobnr ) / nb_jobs;
275  slice_end = (height * (jobnr + 1)) / nb_jobs;
276 
277  for (r = slice_start; r < slice_end; ++r) {
278  for (c = 0; c < width; ++c) {
279  dst[INDX2D(r, c, width)] = 0;
280  for (g = 0; g < filtersize; ++g) {
281  dst[INDX2D(r, c, width)] += GAUSS(src, r, c + GINDX(filtersize, g),
282  in_linesize, height, width, gauss[g]);
283  }
284  }
285  }
286  } else {
287  /** Applying gauss vertically along each column */
288  const double *src = td->data[src_index][plane];
289  slice_start = (width * jobnr ) / nb_jobs;
290  slice_end = (width * (jobnr + 1)) / nb_jobs;
291 
292  for (c = slice_start; c < slice_end; ++c) {
293  for (r = 0; r < height; ++r) {
294  dst[INDX2D(r, c, width)] = 0;
295  for (g = 0; g < filtersize; ++g) {
296  dst[INDX2D(r, c, width)] += GAUSS(src, r + GINDX(filtersize, g), c,
297  width, height, width, gauss[g]);
298  }
299  }
300  }
301  }
302 
303  }
304  return 0;
305 }
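/*
 * Note that samples requested outside the plane are handled by the CLAMP()
 * inside GAUSS(), which effectively replicates the border pixels.
 */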
306 
307 /**
308  * Slice Frobenius normalization of gaussian derivatives. Only called for difford values of
309  * 1 or 2.
310  *
311  * @param ctx the filter context.
312  * @param arg data to be passed between threads.
313  * @param jobnr current job number.
314  * @param nb_jobs total number of jobs.
315  *
316  * @return 0.
317  */
318 static int slice_normalize(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
319 {
320  ColorConstancyContext *s = ctx->priv;
321  ThreadData *td = arg;
322  const int difford = s->difford;
323  int plane;
324 
325  for (plane = 0; plane < NUM_PLANES; ++plane) {
326  const int height = s->planeheight[plane];
327  const int width = s->planewidth[plane];
328  const int64_t numpixels = width * (int64_t)height;
329  const int slice_start = (numpixels * jobnr ) / nb_jobs;
330  const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
331  const double *dx = td->data[INDEX_DX][plane];
332  const double *dy = td->data[INDEX_DY][plane];
333  double *norm = td->data[INDEX_NORM][plane];
334  int i;
335 
336  if (difford == 1) {
337  for (i = slice_start; i < slice_end; ++i) {
338  norm[i] = sqrt( pow(dx[i], 2) + pow(dy[i], 2));
339  }
340  } else {
341  const double *dxy = td->data[INDEX_DXY][plane];
342  for (i = slice_start; i < slice_end; ++i) {
343  norm[i] = sqrt( pow(dx[i], 2) + 4 * pow(dxy[i], 2) + pow(dy[i], 2) );
344  }
345  }
346  }
347 
348  return 0;
349 }
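/*
 * That is, for difford 1 the result is the gradient magnitude
 * sqrt(dx^2 + dy^2), while for difford 2 the second derivatives are combined
 * as sqrt(dx^2 + 4*dxy^2 + dy^2), with dx/dy then holding d2/dx2 and d2/dy2.
 */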
350 
351 /**
352  * Utility function for setting up differentiation data/metadata.
353  *
354  * @param ctx the filter context.
355  * @param td to be used for passing data between threads.
356  * @param ord order of differentiation.
357  * @param dir direction of differentiation.
358  * @param src index of source used for differentiation.
359  * @param dst index of the destination used for saving the differentiation result.
360  * @param dim maximum dimension in current direction.
361  * @param nb_threads number of threads to use.
362  */
363 static void av_always_inline
364 get_deriv(AVFilterContext *ctx, ThreadData *td, int ord, int dir,
365  int src, int dst, int dim, int nb_threads) {
366  td->meta_data[INDEX_ORD] = ord;
367  td->meta_data[INDEX_DIR] = dir;
368  td->meta_data[INDEX_SRC] = src;
369  td->meta_data[INDEX_DST] = dst;
370  ff_filter_execute(ctx, slice_get_derivative, td,
371  NULL, FFMIN(dim, nb_threads));
372 }
373 
374 /**
375  * Main control function for calculating gaussian derivatives.
376  *
377  * @param ctx the filter context.
378  * @param td holds the buffers used for storing results.
379  *
380  * @return 0 in case of success, a negative value corresponding to an
381  * AVERROR code in case of failure.
382  */
383 static int get_derivative(AVFilterContext *ctx, ThreadData *td)
384 {
385  ColorConstancyContext *s = ctx->priv;
386  int nb_threads = s->nb_threads;
387  int height = s->planeheight[1];
388  int width = s->planewidth[1];
389 
390  switch(s->difford) {
391  case 0:
392  if (!s->sigma) { // Only copy once
393  get_deriv(ctx, td, 0, DIR_X, 0 , INDEX_NORM, height, nb_threads);
394  } else {
395  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
396  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_NORM, width , nb_threads);
397  // Save to INDEX_NORM because this will not be normalized and
398  // the grey edge filter expects the result to be found in INDEX_NORM
399  }
400  return 0;
401 
402  case 1:
403  get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
404  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
405 
406  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
407  get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
408  return 0;
409 
410  case 2:
411  get_deriv(ctx, td, 2, DIR_X, 0, INDEX_TEMP, height, nb_threads);
412  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
413 
414  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
415  get_deriv(ctx, td, 2, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
416 
417  get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
418  get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DXY, width , nb_threads);
419  return 0;
420 
421  default:
422  av_log(ctx, AV_LOG_ERROR, "Unsupported difford value: %d.\n", s->difford);
423  return AVERROR(EINVAL);
424  }
425 
426 }
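/*
 * Each derivative above is computed separably: a pass with the kernel of the
 * requested order in one direction followed by a pass with the order-0
 * (smoothing) kernel in the other. For difford 1, for instance, d/dx uses the
 * order-1 kernel horizontally and the order-0 kernel vertically, and d/dy
 * swaps the two.
 */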
427 
428 /**
429  * Slice function for grey edge algorithm that does partial summing/maximizing
430  * of gaussian derivatives.
431  *
432  * @param ctx the filter context.
433  * @param arg data to be passed between threads.
434  * @param jobnr current job nubmer.
435  * @param nb_jobs total number of jobs.
436  *
437  * @return 0.
438  */
439 static int filter_slice_grey_edge(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
440 {
441  ColorConstancyContext *s = ctx->priv;
442  ThreadData *td = arg;
443  AVFrame *in = td->in;
444  int minknorm = s->minknorm;
445  const uint8_t thresh = 255;
446  int plane;
447 
448  for (plane = 0; plane < NUM_PLANES; ++plane) {
449  const int height = s->planeheight[plane];
450  const int width = s->planewidth[plane];
451  const int in_linesize = in->linesize[plane];
452  const int slice_start = (height * jobnr) / nb_jobs;
453  const int slice_end = (height * (jobnr+1)) / nb_jobs;
454  const uint8_t *img_data = in->data[plane];
455  const double *src = td->data[INDEX_NORM][plane];
456  double *dst = td->data[INDEX_DST][plane];
457  int r, c;
458 
459  dst[jobnr] = 0;
460  if (!minknorm) {
461  for (r = slice_start; r < slice_end; ++r) {
462  for (c = 0; c < width; ++c) {
463  dst[jobnr] = FFMAX( dst[jobnr], fabs(src[INDX2D(r, c, width)])
464  * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
465  }
466  }
467  } else {
468  for (r = slice_start; r < slice_end; ++r) {
469  for (c = 0; c < width; ++c) {
470  dst[jobnr] += ( pow( fabs(src[INDX2D(r, c, width)] / 255.), minknorm)
471  * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
472  }
473  }
474  }
475  }
476  return 0;
477 }
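/*
 * Each job leaves one partial result per plane in dst[jobnr]: a running
 * maximum of |src| when minknorm is 0, otherwise a partial sum of
 * |src / 255|^minknorm. Samples whose input value has reached the saturation
 * threshold (255) are skipped. The partial results are merged, and the
 * 1/minknorm power applied, in filter_grey_edge() below.
 */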
478 
479 /**
480  * Main control function for grey edge algorithm.
481  *
482  * @param ctx the filter context.
483  * @param in frame to perform grey edge on.
484  *
485  * @return 0 in case of success, a negative value corresponding to an
486  * AVERROR code in case of failure.
487  */
488 static int filter_grey_edge(AVFilterContext *ctx, AVFrame *in)
489 {
490  ColorConstancyContext *s = ctx->priv;
491  ThreadData td;
492  int minknorm = s->minknorm;
493  int difford = s->difford;
494  double *white = s->white;
495  int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
496  int plane, job, ret;
497 
498  td.in = in;
499  ret = setup_derivative_buffers(ctx, &td);
500  if (ret) {
501  return ret;
502  }
503  get_derivative(ctx, &td);
504  if (difford > 0) {
505  ff_filter_execute(ctx, slice_normalize, &td, NULL, nb_jobs);
506  }
507 
508  ff_filter_execute(ctx, filter_slice_grey_edge, &td, NULL, nb_jobs);
509  if (!minknorm) {
510  for (plane = 0; plane < NUM_PLANES; ++plane) {
511  white[plane] = 0; // All values are absolute
512  for (job = 0; job < nb_jobs; ++job) {
513  white[plane] = FFMAX(white[plane] , td.data[INDEX_DST][plane][job]);
514  }
515  }
516  } else {
517  for (plane = 0; plane < NUM_PLANES; ++plane) {
518  white[plane] = 0;
519  for (job = 0; job < nb_jobs; ++job) {
520  white[plane] += td.data[INDEX_DST][plane][job];
521  }
522  white[plane] = pow(white[plane], 1./minknorm);
523  }
524  }
525 
526  cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
527  return 0;
528 }
529 
530 /**
531  * Normalizes estimated illumination since only illumination vector
532  * direction is required for color constancy.
533  *
534  * @param light the estimated illumination to be normalized in place
535  */
536 static void normalize_light(double *light)
537 {
538  double abs_val = pow( pow(light[0], 2.0) + pow(light[1], 2.0) + pow(light[2], 2.0), 0.5);
539  int plane;
540 
541  // TODO: check if setting to 1.0 when estimated = 0.0 is the best thing to do
542 
543  if (!abs_val) {
544  for (plane = 0; plane < NUM_PLANES; ++plane) {
545  light[plane] = 1.0;
546  }
547  } else {
548  for (plane = 0; plane < NUM_PLANES; ++plane) {
549  light[plane] = (light[plane] / abs_val);
550  if (!light[plane]) { // to avoid division by zero when correcting
551  light[plane] = 1.0;
552  }
553  }
554  }
555 }
556 
557 /**
558  * Redirects to corresponding algorithm estimation function and performs normalization
559  * after estimation.
560  *
561  * @param ctx the filter context.
562  * @param in frame to perform estimation on.
563  *
564  * @return 0 in case of success, a negative value corresponding to an
565  * AVERROR code in case of failure.
566  */
567 static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
568 {
569  ColorConstancyContext *s = ctx->priv;
570  int ret;
571 
572  ret = filter_grey_edge(ctx, in);
573 
574  av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
575  s->white[0], s->white[1], s->white[2]);
576  normalize_light(s->white);
577  av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
578  s->white[0], s->white[1], s->white[2]);
579 
580  return ret;
581 }
582 
583 /**
584  * Performs simple correction via diagonal transformation model.
585  *
586  * @param ctx the filter context.
587  * @param arg data to be passed between threads.
588  * @param jobnr current job number.
589  * @param nb_jobs total number of jobs.
590  *
591  * @return 0.
592  */
593 static int diagonal_transformation(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
594 {
595  ColorConstancyContext *s = ctx->priv;
596  ThreadData *td = arg;
597  AVFrame *in = td->in;
598  AVFrame *out = td->out;
599  int plane;
600 
601  for (plane = 0; plane < NUM_PLANES; ++plane) {
602  const int height = s->planeheight[plane];
603  const int width = s->planewidth[plane];
604  const int64_t numpixels = width * (int64_t)height;
605  const int slice_start = (numpixels * jobnr) / nb_jobs;
606  const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
607  const uint8_t *src = in->data[plane];
608  uint8_t *dst = out->data[plane];
609  double temp;
610  unsigned i;
611 
612  for (i = slice_start; i < slice_end; ++i) {
613  temp = src[i] / (s->white[plane] * SQRT3);
614  dst[i] = av_clip_uint8((int)(temp + 0.5));
615  }
616  }
617  return 0;
618 }
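/*
 * Since normalize_light() scales the estimate to unit length, a neutral
 * illuminant is (1/sqrt(3), 1/sqrt(3), 1/sqrt(3)) and the SQRT3 factor turns
 * that case into an identity mapping. A hypothetical per-pixel version of the
 * same diagonal (von Kries style) correction, for illustration only:
 */
#if 0
static void example_correct_pixel(uint8_t rgb[NUM_PLANES],
                                  const double white[NUM_PLANES])
{
    for (int p = 0; p < NUM_PLANES; p++)
        rgb[p] = av_clip_uint8((int)(rgb[p] / (white[p] * SQRT3) + 0.5));
}
#endif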
619 
620 /**
621  * Main control function for correcting scene illumination based on
622  * estimated illumination.
623  *
624  * @param ctx the filter context.
625  * @param in holds frame to correct
626  * @param out holds corrected frame
627  */
628 static void chromatic_adaptation(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
629 {
630  ColorConstancyContext *s = ctx->priv;
631  ThreadData td;
632  int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
633 
634  td.in = in;
635  td.out = out;
636  ff_filter_execute(ctx, diagonal_transformation, &td, NULL, nb_jobs);
637 }
638 
639 static int config_props(AVFilterLink *inlink)
640 {
641  AVFilterContext *ctx = inlink->dst;
642  ColorConstancyContext *s = ctx->priv;
643  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
644  const double break_off_sigma = 3.0;
645  double sigma = s->sigma;
646  int ret;
647 
648  if (!floor(break_off_sigma * sigma + 0.5) && s->difford) {
649  av_log(ctx, AV_LOG_ERROR, "floor(%f * sigma) must be > 0 when difford > 0.\n", break_off_sigma);
650  return AVERROR(EINVAL);
651  }
652 
653  s->filtersize = 2 * floor(break_off_sigma * sigma + 0.5) + 1;
654  if ((ret = set_gauss(ctx))) {
655  return ret;
656  }
657 
658  s->nb_threads = ff_filter_get_nb_threads(ctx);
659  s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
660  s->planewidth[0] = s->planewidth[3] = inlink->w;
661  s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
662  s->planeheight[0] = s->planeheight[3] = inlink->h;
663 
664  return 0;
665 }
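/*
 * Worked example: with the default sigma = 1 the kernel has
 * 2 * floor(3.0 * 1 + 0.5) + 1 = 7 taps, with sigma = 2 it has 13 taps, and
 * sigma = 0 (only allowed when difford = 0) degenerates to a single tap,
 * i.e. a plain conversion to double.
 */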
666 
667 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
668 {
669  AVFilterContext *ctx = inlink->dst;
670  AVFilterLink *outlink = ctx->outputs[0];
671  AVFrame *out;
672  int ret;
673  int direct = 0;
674 
675  ret = illumination_estimation(ctx, in);
676  if (ret) {
677  av_frame_free(&in);
678  return ret;
679  }
680 
681  if (av_frame_is_writable(in)) {
682  direct = 1;
683  out = in;
684  } else {
685  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
686  if (!out) {
687  av_frame_free(&in);
688  return AVERROR(ENOMEM);
689  }
690  av_frame_copy_props(out, in);
691  }
692  chromatic_adaptation(ctx, in, out);
693 
694  if (!direct)
695  av_frame_free(&in);
696 
697  return ff_filter_frame(outlink, out);
698 }
699 
700 static av_cold void uninit(AVFilterContext *ctx)
701 {
702  ColorConstancyContext *s = ctx->priv;
703  int difford = s->difford;
704  int i;
705 
706  for (i = 0; i <= difford; ++i) {
707  av_freep(&s->gauss[i]);
708  }
709 }
710 
711 static const AVFilterPad colorconstancy_inputs[] = {
712  {
713  .name = "default",
714  .type = AVMEDIA_TYPE_VIDEO,
715  .config_props = config_props,
716  .filter_frame = filter_frame,
717  },
718 };
719 
720 static const AVFilterPad colorconstancy_outputs[] = {
721  {
722  .name = "default",
723  .type = AVMEDIA_TYPE_VIDEO,
724  },
725 };
726 
727 #if CONFIG_GREYEDGE_FILTER
728 
729 static const AVOption greyedge_options[] = {
730  { "difford", "set differentiation order", OFFSET(difford), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, FLAGS },
731  { "minknorm", "set Minkowski norm", OFFSET(minknorm), AV_OPT_TYPE_INT, {.i64=1}, 0, 20, FLAGS },
732  { "sigma", "set sigma", OFFSET(sigma), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0, 1024.0, FLAGS },
733  { NULL }
734 };
735 
736 AVFILTER_DEFINE_CLASS(greyedge);
737 
738 const AVFilter ff_vf_greyedge = {
739  .name = GREY_EDGE,
740  .description = NULL_IF_CONFIG_SMALL("Estimates scene illumination by grey edge assumption."),
741  .priv_size = sizeof(ColorConstancyContext),
742  .priv_class = &greyedge_class,
743  .uninit = uninit,
744  FILTER_INPUTS(colorconstancy_inputs),
745  FILTER_OUTPUTS(colorconstancy_outputs),
746  // TODO: support more formats
747  // FIXME: error when saving to .jpg
748  FILTER_SINGLE_PIXFMT(AV_PIX_FMT_GBRP),
749  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
750 };
751 
752 #endif /* CONFIG_GREYEDGE_FILTER */