FFmpeg
vf_colorconstancy.c
1 /*
2  * Copyright (c) 2018 Mina Sami
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Color Constancy filter
24  *
25  * @see http://colorconstancy.com/
26  *
27  * @cite
28  * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
29  */
30 
31 #include "config_components.h"
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 
37 #include "avfilter.h"
38 #include "formats.h"
39 #include "internal.h"
40 #include "video.h"
41 
42 #include <math.h>
43 
44 #define GREY_EDGE "greyedge"
45 
46 #define SQRT3 1.73205080757
47 
48 #define NUM_PLANES 3
49 #define MAX_DIFF_ORD 2
50 #define MAX_META_DATA 4
51 #define MAX_DATA 4
52 
53 #define INDEX_TEMP 0
54 #define INDEX_DX 1
55 #define INDEX_DY 2
56 #define INDEX_DXY 3
57 #define INDEX_NORM INDEX_DX
58 #define INDEX_SRC 0
59 #define INDEX_DST 1
60 #define INDEX_ORD 2
61 #define INDEX_DIR 3
62 #define DIR_X 0
63 #define DIR_Y 1
64 
65 /**
66  * Used for passing data between threads.
67  */
68 typedef struct ThreadData {
69  AVFrame *in, *out;
70  int meta_data[MAX_META_DATA];
71  double *data[MAX_DATA][NUM_PLANES];
72 } ThreadData;
73 
74 /**
75  * Common struct for all algorithms contexts.
76  */
77 typedef struct ColorConstancyContext {
78  const AVClass *class;
79 
80  int difford;
81  int minknorm; /**< @minknorm = 0 : getMax instead */
82  double sigma;
83 
84  int nb_threads;
85  int planeheight[4];
86  int planewidth[4];
87 
88  int filtersize;
89  double *gauss[MAX_DIFF_ORD+1];
90 
91  double white[NUM_PLANES];
92 } ColorConstancyContext;
93 
94 #define OFFSET(x) offsetof(ColorConstancyContext, x)
95 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
96 
97 #define GINDX(s, i) ( (i) - ((s) >> 2) )
98 
99 /**
100  * Sets gauss filters used for calculating gauss derivatives. Filter size
101  * depends on sigma, which is a user option, hence we calculate these
102  * filters each time. Also each higher order depends on lower ones. Sigma
103  * can be zero only at difford = 0, then we only convert data to double
104  * instead.
105  *
106  * @param ctx the filter context.
107  *
108  * @return 0 in case of success, a negative value corresponding to an
109  * AVERROR code in case of failure.
110  */
111 static int set_gauss(AVFilterContext *ctx)
112 {
113  ColorConstancyContext *s = ctx->priv;
114  int filtersize = s->filtersize;
115  int difford = s->difford;
116  double sigma = s->sigma;
117  double sum1, sum2;
118  int i;
119 
120  for (i = 0; i <= difford; ++i) {
121  s->gauss[i] = av_calloc(filtersize, sizeof(*s->gauss[i]));
122  if (!s->gauss[i]) {
123  for (; i >= 0; --i) {
124  av_freep(&s->gauss[i]);
125  }
126  return AVERROR(ENOMEM);
127  }
128  }
129 
130  // Order 0
131  av_log(ctx, AV_LOG_TRACE, "Setting 0-d gauss with filtersize = %d.\n", filtersize);
132  sum1 = 0.0;
133  if (!sigma) {
134  s->gauss[0][0] = 1; // Copying data to double instead of convolution
135  } else {
136  for (i = 0; i < filtersize; ++i) {
137  s->gauss[0][i] = exp(- pow(GINDX(filtersize, i), 2.) / (2 * sigma * sigma)) / ( sqrt(2 * M_PI) * sigma );
138  sum1 += s->gauss[0][i];
139  }
140  for (i = 0; i < filtersize; ++i) {
141  s->gauss[0][i] /= sum1;
142  }
143  }
144  // Order 1
145  if (difford > 0) {
146  av_log(ctx, AV_LOG_TRACE, "Setting 1-d gauss with filtersize = %d.\n", filtersize);
147  sum1 = 0.0;
148  for (i = 0; i < filtersize; ++i) {
149  s->gauss[1][i] = - (GINDX(filtersize, i) / pow(sigma, 2)) * s->gauss[0][i];
150  sum1 += s->gauss[1][i] * GINDX(filtersize, i);
151  }
152 
153  for (i = 0; i < filtersize; ++i) {
154  s->gauss[1][i] /= sum1;
155  }
156 
157  // Order 2
158  if (difford > 1) {
159  av_log(ctx, AV_LOG_TRACE, "Setting 2-d gauss with filtersize = %d.\n", filtersize);
160  sum1 = 0.0;
161  for (i = 0; i < filtersize; ++i) {
162  s->gauss[2][i] = ( pow(GINDX(filtersize, i), 2) / pow(sigma, 4) - 1/pow(sigma, 2) )
163  * s->gauss[0][i];
164  sum1 += s->gauss[2][i];
165  }
166 
167  sum2 = 0.0;
168  for (i = 0; i < filtersize; ++i) {
169  s->gauss[2][i] -= sum1 / (filtersize);
170  sum2 += (0.5 * GINDX(filtersize, i) * GINDX(filtersize, i) * s->gauss[2][i]);
171  }
172  for (i = 0; i < filtersize ; ++i) {
173  s->gauss[2][i] /= sum2;
174  }
175  }
176  }
177  return 0;
178 }
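/*
 * Editor's note, a sketch of the kernels built above (not part of the
 * original source): with x = GINDX(filtersize, i) and before the
 * normalization loops,
 *   order 0: g0(x) = exp(-x^2 / (2*sigma^2)) / (sqrt(2*PI) * sigma)
 *   order 1: g1(x) = -(x / sigma^2) * g0(x)
 *   order 2: g2(x) = (x^2 / sigma^4 - 1/sigma^2) * g0(x)
 * i.e. a sampled Gaussian and its first and second derivatives in x.
 */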
179 
180 /**
181  * Frees up buffers used by grey edge for storing final and
182  * intermediate derivative results. Number of buffers and number of planes
183  * for last buffer are given so it can be safely called at allocation
184  * failure instances.
185  *
186  * @param td holds the buffers.
187  * @param nb_buff number of buffers to be freed.
188  * @param nb_planes number of planes for last buffer to be freed.
189  */
190 static void cleanup_derivative_buffers(ThreadData *td, int nb_buff, int nb_planes)
191 {
192  int b, p;
193 
194  for (b = 0; b < nb_buff; ++b) {
195  for (p = 0; p < NUM_PLANES; ++p) {
196  av_freep(&td->data[b][p]);
197  }
198  }
199  // Final buffer may not be fully allocated at fail cases
200  for (p = 0; p < nb_planes; ++p) {
201  av_freep(&td->data[b][p]);
202  }
203 }
204 
205 /**
206  * Allocates buffers used by grey edge for storing final and
207  * intermediate derivative results.
208  *
209  * @param ctx the filter context.
210  * @param td holds the buffers.
211  *
212  * @return 0 in case of success, a negative value corresponding to an
213  * AVERROR code in case of failure.
214  */
215 static int setup_derivative_buffers(AVFilterContext *ctx, ThreadData *td)
216 {
217  ColorConstancyContext *s = ctx->priv;
218  int nb_buff = s->difford + 1;
219  int b, p;
220 
221  av_log(ctx, AV_LOG_TRACE, "Allocating %d buffer(s) for grey edge.\n", nb_buff);
222  for (b = 0; b <= nb_buff; ++b) { // We need difford + 1 buffers
223  for (p = 0; p < NUM_PLANES; ++p) {
224  td->data[b][p] = av_calloc(s->planeheight[p] * s->planewidth[p],
225  sizeof(*td->data[b][p]));
226  if (!td->data[b][p]) {
227  cleanup_derivative_buffers(td, b + 1, p);
228  return AVERROR(ENOMEM);
229  }
230  }
231  }
232  return 0;
233 }
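/*
 * Editor's note (not part of the original source): these buffers back
 * td->data[INDEX_TEMP/INDEX_DX/INDEX_DY/INDEX_DXY][plane]. Note that
 * INDEX_NORM aliases INDEX_DX, so the normalized result later overwrites
 * the dx buffer in place.
 */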
234 
235 #define CLAMP(x, mx) av_clip((x), 0, (mx-1))
236 #define INDX2D(r, c, w) ( (r) * (w) + (c) )
237 #define GAUSS(s, sr, sc, sls, sh, sw, g) ( (s)[ INDX2D(CLAMP((sr), (sh)), CLAMP((sc), (sw)), (sls)) ] * (g) )
238 
239 /**
240  * Slice calculation of gaussian derivatives. Applies 1-D gaussian derivative filter
241  * either horizontally or vertically according to meta data given in thread data.
242  * When convolving horizontally the source is always the input frame within thread
243  * data, while when convolving vertically the source is a buffer.
244  *
245  * @param ctx the filter context.
246  * @param arg data to be passed between threads.
247  * @param jobnr current job number.
248  * @param nb_jobs total number of jobs.
249  *
250  * @return 0.
251  */
252 static int slice_get_derivative(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
253 {
254  ColorConstancyContext *s = ctx->priv;
255  ThreadData *td = arg;
256  AVFrame *in = td->in;
257  const int ord = td->meta_data[INDEX_ORD];
258  const int dir = td->meta_data[INDEX_DIR];
259  const int src_index = td->meta_data[INDEX_SRC];
260  const int dst_index = td->meta_data[INDEX_DST];
261  const int filtersize = s->filtersize;
262  const double *gauss = s->gauss[ord];
263  int plane;
264 
265  for (plane = 0; plane < NUM_PLANES; ++plane) {
266  const int height = s->planeheight[plane];
267  const int width = s->planewidth[plane];
268  const int in_linesize = in->linesize[plane];
269  double *dst = td->data[dst_index][plane];
270  int slice_start, slice_end;
271  int r, c, g;
272 
273  if (dir == DIR_X) {
274  /** Applying gauss horizontally along each row */
275  const uint8_t *src = in->data[plane];
276  slice_start = (height * jobnr ) / nb_jobs;
277  slice_end = (height * (jobnr + 1)) / nb_jobs;
278 
279  for (r = slice_start; r < slice_end; ++r) {
280  for (c = 0; c < width; ++c) {
281  dst[INDX2D(r, c, width)] = 0;
282  for (g = 0; g < filtersize; ++g) {
283  dst[INDX2D(r, c, width)] += GAUSS(src, r, c + GINDX(filtersize, g),
284  in_linesize, height, width, gauss[g]);
285  }
286  }
287  }
288  } else {
289  /** Applying gauss vertically along each column */
290  const double *src = td->data[src_index][plane];
291  slice_start = (width * jobnr ) / nb_jobs;
292  slice_end = (width * (jobnr + 1)) / nb_jobs;
293 
294  for (c = slice_start; c < slice_end; ++c) {
295  for (r = 0; r < height; ++r) {
296  dst[INDX2D(r, c, width)] = 0;
297  for (g = 0; g < filtersize; ++g) {
298  dst[INDX2D(r, c, width)] += GAUSS(src, r + GINDX(filtersize, g), c,
299  width, height, width, gauss[g]);
300  }
301  }
302  }
303  }
304 
305  }
306  return 0;
307 }
308 
309 /**
310  * Slice Frobenius normalization of gaussian derivatives. Only called for difford values of
311  * 1 or 2.
312  *
313  * @param ctx the filter context.
314  * @param arg data to be passed between threads.
315  * @param jobnr current job number.
316  * @param nb_jobs total number of jobs.
317  *
318  * @return 0.
319  */
320 static int slice_normalize(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
321 {
322  ColorConstancyContext *s = ctx->priv;
323  ThreadData *td = arg;
324  const int difford = s->difford;
325  int plane;
326 
327  for (plane = 0; plane < NUM_PLANES; ++plane) {
328  const int height = s->planeheight[plane];
329  const int width = s->planewidth[plane];
330  const int64_t numpixels = width * (int64_t)height;
331  const int slice_start = (numpixels * jobnr ) / nb_jobs;
332  const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
333  const double *dx = td->data[INDEX_DX][plane];
334  const double *dy = td->data[INDEX_DY][plane];
335  double *norm = td->data[INDEX_NORM][plane];
336  int i;
337 
338  if (difford == 1) {
339  for (i = slice_start; i < slice_end; ++i) {
340  norm[i] = sqrt( pow(dx[i], 2) + pow(dy[i], 2));
341  }
342  } else {
343  const double *dxy = td->data[INDEX_DXY][plane];
344  for (i = slice_start; i < slice_end; ++i) {
345  norm[i] = sqrt( pow(dx[i], 2) + 4 * pow(dxy[i], 2) + pow(dy[i], 2) );
346  }
347  }
348  }
349 
350  return 0;
351 }
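/*
 * Editor's note (sketch, not part of the original source): per pixel the
 * code above computes
 *   difford == 1: norm = sqrt(dx^2 + dy^2)
 *   difford == 2: norm = sqrt(dx^2 + 4*dxy^2 + dy^2)
 * i.e. the gradient magnitude, respectively a combined norm over the
 * second-order derivative responses.
 */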
352 
353 /**
354  * Utility function for setting up differentiation data/metadata.
355  *
356  * @param ctx the filter context.
357  * @param td to be used for passing data between threads.
358  * @param ord order of differentiation.
359  * @param dir direction of differentiation.
360  * @param src index of source used for differentiation.
361  * @param dst index of destination used for saving differentiation result.
362  * @param dim maximum dimension in current direction.
363  * @param nb_threads number of threads to use.
364  */
365 static void av_always_inline
366 get_deriv(AVFilterContext *ctx, ThreadData *td, int ord, int dir,
367  int src, int dst, int dim, int nb_threads) {
368  td->meta_data[INDEX_ORD] = ord;
369  td->meta_data[INDEX_DIR] = dir;
370  td->meta_data[INDEX_SRC] = src;
371  td->meta_data[INDEX_DST] = dst;
372  ff_filter_execute(ctx, slice_get_derivative, td,
373  NULL, FFMIN(dim, nb_threads));
374 }
375 
376 /**
377  * Main control function for calculating gaussian derivatives.
378  *
379  * @param ctx the filter context.
380  * @param td holds the buffers used for storing results.
381  *
382  * @return 0 in case of success, a negative value corresponding to an
383  * AVERROR code in case of failure.
384  */
385 static int get_derivative(AVFilterContext *ctx, ThreadData *td)
386 {
387  ColorConstancyContext *s = ctx->priv;
388  int nb_threads = s->nb_threads;
389  int height = s->planeheight[1];
390  int width = s->planewidth[1];
391 
392  switch(s->difford) {
393  case 0:
394  if (!s->sigma) { // Only copy once
395  get_deriv(ctx, td, 0, DIR_X, 0 , INDEX_NORM, height, nb_threads);
396  } else {
397  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
398  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_NORM, width , nb_threads);
399  // save to INDEX_NORM because this will not be normalized and
400  // the grey edge filter expects the result to be found in INDEX_NORM
401  }
402  return 0;
403 
404  case 1:
405  get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
406  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
407 
408  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
409  get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
410  return 0;
411 
412  case 2:
413  get_deriv(ctx, td, 2, DIR_X, 0, INDEX_TEMP, height, nb_threads);
414  get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX, width , nb_threads);
415 
416  get_deriv(ctx, td, 0, DIR_X, 0, INDEX_TEMP, height, nb_threads);
417  get_deriv(ctx, td, 2, DIR_Y, INDEX_TEMP, INDEX_DY, width , nb_threads);
418 
419  get_deriv(ctx, td, 1, DIR_X, 0, INDEX_TEMP, height, nb_threads);
420  get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DXY, width , nb_threads);
421  return 0;
422 
423  default:
424  av_log(ctx, AV_LOG_ERROR, "Unsupported difford value: %d.\n", s->difford);
425  return AVERROR(EINVAL);
426  }
427 
428 }
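/*
 * Editor's note (not part of the original source): each 2-D derivative is
 * built separably: a 1-D pass along rows of the input frame into INDEX_TEMP,
 * then a 1-D pass along columns of INDEX_TEMP into the destination buffer.
 * For example, dx uses an order-1 kernel along x followed by an order-0
 * (smoothing) kernel along y.
 */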
429 
430 /**
431  * Slice function for grey edge algorithm that does partial summing/maximizing
432  * of gaussian derivatives.
433  *
434  * @param ctx the filter context.
435  * @param arg data to be passed between threads.
436  * @param jobnr current job number.
437  * @param nb_jobs total number of jobs.
438  *
439  * @return 0.
440  */
441 static int filter_slice_grey_edge(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
442 {
443  ColorConstancyContext *s = ctx->priv;
444  ThreadData *td = arg;
445  AVFrame *in = td->in;
446  int minknorm = s->minknorm;
447  const uint8_t thresh = 255;
448  int plane;
449 
450  for (plane = 0; plane < NUM_PLANES; ++plane) {
451  const int height = s->planeheight[plane];
452  const int width = s->planewidth[plane];
453  const int in_linesize = in->linesize[plane];
454  const int slice_start = (height * jobnr) / nb_jobs;
455  const int slice_end = (height * (jobnr+1)) / nb_jobs;
456  const uint8_t *img_data = in->data[plane];
457  const double *src = td->data[INDEX_NORM][plane];
458  double *dst = td->data[INDEX_DST][plane];
459  int r, c;
460 
461  dst[jobnr] = 0;
462  if (!minknorm) {
463  for (r = slice_start; r < slice_end; ++r) {
464  for (c = 0; c < width; ++c) {
465  dst[jobnr] = FFMAX( dst[jobnr], fabs(src[INDX2D(r, c, width)])
466  * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
467  }
468  }
469  } else {
470  for (r = slice_start; r < slice_end; ++r) {
471  for (c = 0; c < width; ++c) {
472  dst[jobnr] += ( pow( fabs(src[INDX2D(r, c, width)] / 255.), minknorm)
473  * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
474  }
475  }
476  }
477  }
478  return 0;
479 }
480 
481 /**
482  * Main control function for grey edge algorithm.
483  *
484  * @param ctx the filter context.
485  * @param in frame to perform grey edge on.
486  *
487  * @return 0 in case of success, a negative value corresponding to an
488  * AVERROR code in case of failure.
489  */
490 static int filter_grey_edge(AVFilterContext *ctx, AVFrame *in)
491 {
492  ColorConstancyContext *s = ctx->priv;
493  ThreadData td;
494  int minknorm = s->minknorm;
495  int difford = s->difford;
496  double *white = s->white;
497  int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
498  int plane, job, ret;
499 
500  td.in = in;
501  ret = setup_derivative_buffers(ctx, &td);
502  if (ret) {
503  return ret;
504  }
505  get_derivative(ctx, &td);
506  if (difford > 0) {
507  ff_filter_execute(ctx, slice_normalize, &td, NULL, nb_jobs);
508  }
509 
510  ff_filter_execute(ctx, filter_slice_grey_edge, &td, NULL, nb_jobs);
511  if (!minknorm) {
512  for (plane = 0; plane < NUM_PLANES; ++plane) {
513  white[plane] = 0; // All values are absolute
514  for (job = 0; job < nb_jobs; ++job) {
515  white[plane] = FFMAX(white[plane] , td.data[INDEX_DST][plane][job]);
516  }
517  }
518  } else {
519  for (plane = 0; plane < NUM_PLANES; ++plane) {
520  white[plane] = 0;
521  for (job = 0; job < nb_jobs; ++job) {
522  white[plane] += td.data[INDEX_DST][plane][job];
523  }
524  white[plane] = pow(white[plane], 1./minknorm);
525  }
526  }
527 
528  cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
529  return 0;
530 }
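/*
 * Editor's note (sketch based on the cited paper, not part of the original
 * source): for minknorm = p > 0 the per-channel estimate accumulated above
 * corresponds to a Minkowski p-norm over derivative magnitudes,
 *   e_c ~ ( sum_x |d^n f_c(x)|^p )^(1/p),
 * while minknorm == 0 takes the maximum derivative response instead.
 */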
531 
532 /**
533  * Normalizes estimated illumination since only illumination vector
534  * direction is required for color constancy.
535  *
536  * @param light the estimated illumination to be normalized in place
537  */
538 static void normalize_light(double *light)
539 {
540  double abs_val = pow( pow(light[0], 2.0) + pow(light[1], 2.0) + pow(light[2], 2.0), 0.5);
541  int plane;
542 
543  // TODO: check if setting to 1.0 when estimated = 0.0 is the best thing to do
544 
545  if (!abs_val) {
546  for (plane = 0; plane < NUM_PLANES; ++plane) {
547  light[plane] = 1.0;
548  }
549  } else {
550  for (plane = 0; plane < NUM_PLANES; ++plane) {
551  light[plane] = (light[plane] / abs_val);
552  if (!light[plane]) { // to avoid division by zero when correcting
553  light[plane] = 1.0;
554  }
555  }
556  }
557 }
558 
559 /**
560  * Redirects to corresponding algorithm estimation function and performs normalization
561  * after estimation.
562  *
563  * @param ctx the filter context.
564  * @param in frame to perform estimation on.
565  *
566  * @return 0 in case of success, a negative value corresponding to an
567  * AVERROR code in case of failure.
568  */
569 static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
570 {
571  ColorConstancyContext *s = ctx->priv;
572  int ret;
573 
574  ret = filter_grey_edge(ctx, in);
575 
576  av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
577  s->white[0], s->white[1], s->white[2]);
578  normalize_light(s->white);
579  av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
580  s->white[0], s->white[1], s->white[2]);
581 
582  return ret;
583 }
584 
585 /**
586  * Performs simple correction via diagonal transformation model.
587  *
588  * @param ctx the filter context.
589  * @param arg data to be passed between threads.
590  * @param jobnr current job number.
591  * @param nb_jobs total number of jobs.
592  *
593  * @return 0.
594  */
595 static int diagonal_transformation(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
596 {
597  ColorConstancyContext *s = ctx->priv;
598  ThreadData *td = arg;
599  AVFrame *in = td->in;
600  AVFrame *out = td->out;
601  int plane;
602 
603  for (plane = 0; plane < NUM_PLANES; ++plane) {
604  const int height = s->planeheight[plane];
605  const int width = s->planewidth[plane];
606  const int64_t numpixels = width * (int64_t)height;
607  const int slice_start = (numpixels * jobnr) / nb_jobs;
608  const int slice_end = (numpixels * (jobnr+1)) / nb_jobs;
609  const uint8_t *src = in->data[plane];
610  uint8_t *dst = out->data[plane];
611  double temp;
612  unsigned i;
613 
614  for (i = slice_start; i < slice_end; ++i) {
615  temp = src[i] / (s->white[plane] * SQRT3);
616  dst[i] = av_clip_uint8((int)(temp + 0.5));
617  }
618  }
619  return 0;
620 }
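/*
 * Editor's note (worked example, not part of the original source): each
 * channel is scaled by 1 / (white[plane] * sqrt(3)). Since the illuminant
 * estimate is normalized to unit length, a perfectly grey estimate gives
 * white[plane] = 1/sqrt(3) for every plane, and the transform then leaves
 * the frame unchanged.
 */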
621 
622 /**
623  * Main control function for correcting scene illumination based on
624  * estimated illumination.
625  *
626  * @param ctx the filter context.
627  * @param in holds frame to correct
628  * @param out holds corrected frame
629  */
630 static void chromatic_adaptation(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
631 {
632  ColorConstancyContext *s = ctx->priv;
633  ThreadData td;
634  int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
635 
636  td.in = in;
637  td.out = out;
638  ff_filter_execute(ctx, diagonal_transformation, &td, NULL, nb_jobs);
639 }
640 
641 static int config_props(AVFilterLink *inlink)
642 {
643  AVFilterContext *ctx = inlink->dst;
644  ColorConstancyContext *s = ctx->priv;
645  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
646  const double break_off_sigma = 3.0;
647  double sigma = s->sigma;
648  int ret;
649 
650  if (!floor(break_off_sigma * sigma + 0.5) && s->difford) {
651  av_log(ctx, AV_LOG_ERROR, "floor(%f * sigma) must be > 0 when difford > 0.\n", break_off_sigma);
652  return AVERROR(EINVAL);
653  }
654 
655  s->filtersize = 2 * floor(break_off_sigma * sigma + 0.5) + 1;
656  if ((ret = set_gauss(ctx))) {
657  return ret;
658  }
659 
660  s->nb_threads = ff_filter_get_nb_threads(ctx);
661  s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
662  s->planewidth[0] = s->planewidth[3] = inlink->w;
663  s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
664  s->planeheight[0] = s->planeheight[3] = inlink->h;
665 
666  return 0;
667 }
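/*
 * Editor's note (worked example, not part of the original source): with the
 * default sigma = 1.0 the kernel size is
 *   filtersize = 2 * floor(3.0 * 1.0 + 0.5) + 1 = 2 * 3 + 1 = 7.
 * sigma = 0 is only accepted when difford = 0, in which case set_gauss()
 * degenerates the kernel to a single 1.0 tap (plain copy to double).
 */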
668 
669 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
670 {
671  AVFilterContext *ctx = inlink->dst;
672  AVFilterLink *outlink = ctx->outputs[0];
673  AVFrame *out;
674  int ret;
675  int direct = 0;
676 
677  ret = illumination_estimation(ctx, in);
678  if (ret) {
679  av_frame_free(&in);
680  return ret;
681  }
682 
683  if (av_frame_is_writable(in)) {
684  direct = 1;
685  out = in;
686  } else {
687  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
688  if (!out) {
689  av_frame_free(&in);
690  return AVERROR(ENOMEM);
691  }
692  av_frame_copy_props(out, in);
693  }
694  chromatic_adaptation(ctx, in, out);
695 
696  if (!direct)
697  av_frame_free(&in);
698 
699  return ff_filter_frame(outlink, out);
700 }
701 
702 static av_cold void uninit(AVFilterContext *ctx)
703 {
704  ColorConstancyContext *s = ctx->priv;
705  int difford = s->difford;
706  int i;
707 
708  for (i = 0; i <= difford; ++i) {
709  av_freep(&s->gauss[i]);
710  }
711 }
712 
713 static const AVFilterPad colorconstancy_inputs[] = {
714  {
715  .name = "default",
716  .type = AVMEDIA_TYPE_VIDEO,
717  .config_props = config_props,
718  .filter_frame = filter_frame,
719  },
720 };
721 
722 static const AVFilterPad colorconstancy_outputs[] = {
723  {
724  .name = "default",
725  .type = AVMEDIA_TYPE_VIDEO,
726  },
727 };
728 
729 #if CONFIG_GREYEDGE_FILTER
730 
731 static const AVOption greyedge_options[] = {
732  { "difford", "set differentiation order", OFFSET(difford), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, FLAGS },
733  { "minknorm", "set Minkowski norm", OFFSET(minknorm), AV_OPT_TYPE_INT, {.i64=1}, 0, 20, FLAGS },
734  { "sigma", "set sigma", OFFSET(sigma), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0, 1024.0, FLAGS },
735  { NULL }
736 };
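/*
 * Editor's note (usage sketch, not part of the original source; the exact
 * command line is an assumption): the filter can be invoked from the ffmpeg
 * CLI with the options defined above, e.g.
 *
 *   ffmpeg -i input.png -vf greyedge=difford=1:minknorm=5:sigma=2 output.png
 */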
737 
738 AVFILTER_DEFINE_CLASS(greyedge);
739 
740 const AVFilter ff_vf_greyedge = {
741  .name = GREY_EDGE,
742  .description = NULL_IF_CONFIG_SMALL("Estimates scene illumination by grey edge assumption."),
743  .priv_size = sizeof(ColorConstancyContext),
744  .priv_class = &greyedge_class,
745  .uninit = uninit,
746  FILTER_INPUTS(colorconstancy_inputs),
747  FILTER_OUTPUTS(colorconstancy_outputs),
748  // TODO: support more formats
749  // FIXME: error when saving to .jpg
750  FILTER_SINGLE_PIXFMT(AV_PIX_FMT_GBRP),
751  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC|AVFILTER_FLAG_SLICE_THREADS,
752 };
753 
754 #endif /* CONFIG_GREYEDGE_FILTER */