vf_colorconstancy.c
/*
 * Copyright (c) 2018 Mina Sami
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Color Constancy filter
 *
 * @see http://colorconstancy.com/
 *
 * @cite
 * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
 */
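
/*
 * Note (summary of the cited paper, not part of the original comments): the
 * grey-edge framework estimates the illuminant per channel as the Minkowski
 * p-norm of the n-th order, Gaussian-smoothed image derivative, roughly
 * e_c ~ ( sum_x |d^n f_c,sigma(x)|^p )^(1/p). In this filter n corresponds to
 * the "difford" option, p to the "minknorm" option (0 selects the maximum)
 * and sigma to the "sigma" option defined below.
 */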

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#include <math.h>

#define GREY_EDGE "greyedge"

#define SQRT3 1.73205080757

#define NUM_PLANES    3
#define MAX_DIFF_ORD  2
#define MAX_META_DATA 4
#define MAX_DATA      4

#define INDEX_TEMP 0
#define INDEX_DX   1
#define INDEX_DY   2
#define INDEX_DXY  3
#define INDEX_NORM INDEX_DX
#define INDEX_SRC  0
#define INDEX_DST  1
#define INDEX_ORD  2
#define INDEX_DIR  3
#define DIR_X      0
#define DIR_Y      1

/**
 * Used for passing data between threads.
 */
typedef struct ThreadData {
    AVFrame *in, *out;
    int meta_data[MAX_META_DATA];
    double *data[MAX_DATA][NUM_PLANES];
} ThreadData;

/**
 * Common struct for all algorithms contexts.
 */
typedef struct ColorConstancyContext {
    const AVClass *class;

    int difford;
    int minknorm; /**< Minkowski norm; minknorm = 0 means use max instead */
    double sigma;

    int nb_threads;
    int planeheight[4];
    int planewidth[4];

    int filtersize;
    double *gauss[MAX_DIFF_ORD+1];

    double white[NUM_PLANES];
} ColorConstancyContext;

#define OFFSET(x) offsetof(ColorConstancyContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

#define GINDX(s, i) ( (i) - ((s) >> 2) )

/**
 * Sets gauss filters used for calculating gauss derivatives. Filter size
 * depends on sigma, which is a user option, hence we calculate these
 * filters each time. Each higher order also depends on the lower ones.
 * Sigma can be zero only at difford = 0; in that case we only convert the
 * data to double instead.
 *
 * @param ctx the filter context.
 *
 * @return 0 in case of success, a negative value corresponding to an
 * AVERROR code in case of failure.
 */
static int set_gauss(AVFilterContext *ctx)
{
    ColorConstancyContext *s = ctx->priv;
    int filtersize = s->filtersize;
    int difford    = s->difford;
    double sigma   = s->sigma;
    double sum1, sum2;
    int i;

    for (i = 0; i <= difford; ++i) {
        s->gauss[i] = av_mallocz_array(filtersize, sizeof(*s->gauss[i]));
        if (!s->gauss[i]) {
            for (; i >= 0; --i) {
                av_freep(&s->gauss[i]);
            }
            av_log(ctx, AV_LOG_ERROR, "Out of memory while allocating gauss buffers.\n");
            return AVERROR(ENOMEM);
        }
    }

    // Order 0
    av_log(ctx, AV_LOG_TRACE, "Setting 0-d gauss with filtersize = %d.\n", filtersize);
    sum1 = 0.0;
    if (!sigma) {
        s->gauss[0][0] = 1; // Copying data to double instead of convolution
    } else {
        for (i = 0; i < filtersize; ++i) {
            s->gauss[0][i] = exp(- pow(GINDX(filtersize, i), 2.) / (2 * sigma * sigma)) / ( sqrt(2 * M_PI) * sigma );
            sum1 += s->gauss[0][i];
        }
        for (i = 0; i < filtersize; ++i) {
            s->gauss[0][i] /= sum1;
        }
    }
    // Order 1
    if (difford > 0) {
        av_log(ctx, AV_LOG_TRACE, "Setting 1-d gauss with filtersize = %d.\n", filtersize);
        sum1 = 0.0;
        for (i = 0; i < filtersize; ++i) {
            s->gauss[1][i] = - (GINDX(filtersize, i) / pow(sigma, 2)) * s->gauss[0][i];
            sum1 += s->gauss[1][i] * GINDX(filtersize, i);
        }

        for (i = 0; i < filtersize; ++i) {
            s->gauss[1][i] /= sum1;
        }

        // Order 2
        if (difford > 1) {
            av_log(ctx, AV_LOG_TRACE, "Setting 2-d gauss with filtersize = %d.\n", filtersize);
            sum1 = 0.0;
            for (i = 0; i < filtersize; ++i) {
                s->gauss[2][i] = ( pow(GINDX(filtersize, i), 2) / pow(sigma, 4) - 1/pow(sigma, 2) )
                                 * s->gauss[0][i];
                sum1 += s->gauss[2][i];
            }

            sum2 = 0.0;
            for (i = 0; i < filtersize; ++i) {
                s->gauss[2][i] -= sum1 / (filtersize);
                sum2 += (0.5 * GINDX(filtersize, i) * GINDX(filtersize, i) * s->gauss[2][i]);
            }
            for (i = 0; i < filtersize ; ++i) {
                s->gauss[2][i] /= sum2;
            }
        }
    }
    return 0;
}
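
/*
 * Worked example (illustrative, derived from config_props() below): with the
 * default sigma = 1.0 the kernel size is 2 * floor(3.0 * 1.0 + 0.5) + 1 = 7
 * taps, so gauss[0] holds a normalized 7-tap Gaussian while gauss[1] (and,
 * for difford = 2, gauss[2]) holds its derivative kernel.
 */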

/**
 * Frees up the buffers used by grey edge for storing final and intermediate
 * derivative results. The number of buffers and the number of planes for the
 * last buffer are given so it can be safely called in allocation failure
 * cases.
 *
 * @param td holds the buffers.
 * @param nb_buff number of buffers to be freed.
 * @param nb_planes number of planes for last buffer to be freed.
 */
static void cleanup_derivative_buffers(ThreadData *td, int nb_buff, int nb_planes)
{
    int b, p;

    for (b = 0; b < nb_buff; ++b) {
        for (p = 0; p < NUM_PLANES; ++p) {
            av_freep(&td->data[b][p]);
        }
    }
    // Final buffer may not be fully allocated at fail cases
    for (p = 0; p < nb_planes; ++p) {
        av_freep(&td->data[b][p]);
    }
}

/**
 * Allocates buffers used by grey edge for storing final and intermediate
 * derivative results.
 *
 * @param ctx the filter context.
 * @param td holds the buffers.
 *
 * @return 0 in case of success, a negative value corresponding to an
 * AVERROR code in case of failure.
 */
static int setup_derivative_buffers(AVFilterContext *ctx, ThreadData *td)
{
    ColorConstancyContext *s = ctx->priv;
    int nb_buff = s->difford + 1;
    int b, p;

    av_log(ctx, AV_LOG_TRACE, "Allocating %d buffer(s) for grey edge.\n", nb_buff);
    for (b = 0; b <= nb_buff; ++b) { // We need difford + 1 buffers
        for (p = 0; p < NUM_PLANES; ++p) {
            td->data[b][p] = av_mallocz_array(s->planeheight[p] * s->planewidth[p], sizeof(*td->data[b][p]));
            if (!td->data[b][p]) {
                cleanup_derivative_buffers(td, b, p);
                av_log(ctx, AV_LOG_ERROR, "Out of memory while allocating derivatives buffers.\n");
                return AVERROR(ENOMEM);
            }
        }
    }
    return 0;
}

#define CLAMP(x, mx) av_clip((x), 0, (mx-1))
#define INDX2D(r, c, w) ( (r) * (w) + (c) )
#define GAUSS(s, sr, sc, sls, sh, sw, g) ( (s)[ INDX2D(CLAMP((sr), (sh)), CLAMP((sc), (sw)), (sls)) ] * (g) )
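
/*
 * Note (not part of the original comments): GAUSS() evaluates a single tap of
 * the 1-D convolutions below. It reads one sample from a plane with line size
 * sls, clamping the row/column indices to the plane bounds (replicate-border
 * handling), and multiplies it by one kernel weight g.
 */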

/**
 * Slice calculation of gaussian derivatives. Applies a 1-D gaussian
 * derivative filter either horizontally or vertically according to the meta
 * data given in thread data. When convolving horizontally the source is
 * always the in frame within thread data, while when convolving vertically
 * the source is a buffer.
 *
 * @param ctx the filter context.
 * @param arg data to be passed between threads.
 * @param jobnr current job number.
 * @param nb_jobs total number of jobs.
 *
 * @return 0.
 */
static int slice_get_derivative(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
{
    ColorConstancyContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    const int ord = td->meta_data[INDEX_ORD];
    const int dir = td->meta_data[INDEX_DIR];
    const int src_index = td->meta_data[INDEX_SRC];
    const int dst_index = td->meta_data[INDEX_DST];
    const int filtersize = s->filtersize;
    const double *gauss = s->gauss[ord];
    int plane;

    for (plane = 0; plane < NUM_PLANES; ++plane) {
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        const int in_linesize = in->linesize[plane];
        double *dst = td->data[dst_index][plane];
        int slice_start, slice_end;
        int r, c, g;

        if (dir == DIR_X) {
            /** Applying gauss horizontally along each row */
            const uint8_t *src = in->data[plane];
            slice_start = (height * jobnr      ) / nb_jobs;
            slice_end   = (height * (jobnr + 1)) / nb_jobs;

            for (r = slice_start; r < slice_end; ++r) {
                for (c = 0; c < width; ++c) {
                    dst[INDX2D(r, c, width)] = 0;
                    for (g = 0; g < filtersize; ++g) {
                        dst[INDX2D(r, c, width)] += GAUSS(src, r, c + GINDX(filtersize, g),
                                                          in_linesize, height, width, gauss[GINDX(filtersize, g)]);
                    }
                }
            }
        } else {
            /** Applying gauss vertically along each column */
            const double *src = td->data[src_index][plane];
            slice_start = (width * jobnr      ) / nb_jobs;
            slice_end   = (width * (jobnr + 1)) / nb_jobs;

            for (c = slice_start; c < slice_end; ++c) {
                for (r = 0; r < height; ++r) {
                    dst[INDX2D(r, c, width)] = 0;
                    for (g = 0; g < filtersize; ++g) {
                        dst[INDX2D(r, c, width)] += GAUSS(src, r + GINDX(filtersize, g), c,
                                                          width, height, width, gauss[GINDX(filtersize, g)]);
                    }
                }
            }
        }

    }
    return 0;
}

/**
 * Slice Frobenius normalization of gaussian derivatives. Only called for
 * difford values of 1 or 2.
 *
 * @param ctx the filter context.
 * @param arg data to be passed between threads.
 * @param jobnr current job number.
 * @param nb_jobs total number of jobs.
 *
 * @return 0.
 */
static int slice_normalize(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
{
    ColorConstancyContext *s = ctx->priv;
    ThreadData *td = arg;
    const int difford = s->difford;
    int plane;

    for (plane = 0; plane < NUM_PLANES; ++plane) {
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        const int64_t numpixels = width * (int64_t)height;
        const int slice_start = (numpixels * jobnr    ) / nb_jobs;
        const int slice_end   = (numpixels * (jobnr+1)) / nb_jobs;
        const double *dx = td->data[INDEX_DX][plane];
        const double *dy = td->data[INDEX_DY][plane];
        double *norm = td->data[INDEX_NORM][plane];
        int i;

        if (difford == 1) {
            for (i = slice_start; i < slice_end; ++i) {
                norm[i] = sqrt( pow(dx[i], 2) + pow(dy[i], 2));
            }
        } else {
            const double *dxy = td->data[INDEX_DXY][plane];
            for (i = slice_start; i < slice_end; ++i) {
                norm[i] = sqrt( pow(dx[i], 2) + 4 * pow(dxy[i], 2) + pow(dy[i], 2) );
            }
        }
    }

    return 0;
}

/**
 * Utility function for setting up differentiation data/metadata.
 *
 * @param ctx the filter context.
 * @param td to be used for passing data between threads.
 * @param ord order of differentiation.
 * @param dir direction of differentiation.
 * @param src index of source used for differentiation.
 * @param dst index of destination used for saving differentiation result.
 * @param dim maximum dimension in current direction.
 * @param nb_threads number of threads to use.
 */
static void av_always_inline
get_deriv(AVFilterContext *ctx, ThreadData *td, int ord, int dir,
          int src, int dst, int dim, int nb_threads) {
    td->meta_data[INDEX_ORD] = ord;
    td->meta_data[INDEX_DIR] = dir;
    td->meta_data[INDEX_SRC] = src;
    td->meta_data[INDEX_DST] = dst;
    ctx->internal->execute(ctx, slice_get_derivative, td, NULL, FFMIN(dim, nb_threads));
}

/**
 * Main control function for calculating gaussian derivatives.
 *
 * @param ctx the filter context.
 * @param td holds the buffers used for storing results.
 *
 * @return 0 in case of success, a negative value corresponding to an
 * AVERROR code in case of failure.
 */
static int get_derivative(AVFilterContext *ctx, ThreadData *td)
{
    ColorConstancyContext *s = ctx->priv;
    int nb_threads = s->nb_threads;
    int height = s->planeheight[1];
    int width  = s->planewidth[1];

    switch(s->difford) {
    case 0:
        if (!s->sigma) { // Only copy once
            get_deriv(ctx, td, 0, DIR_X, 0,          INDEX_NORM, height, nb_threads);
        } else {
            get_deriv(ctx, td, 0, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
            get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_NORM, width , nb_threads);
            // save to INDEX_NORM because this will not be normalized and
            // the grey edge filter expects the result to be found in INDEX_NORM
        }
        return 0;

    case 1:
        get_deriv(ctx, td, 1, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
        get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX,   width , nb_threads);

        get_deriv(ctx, td, 0, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
        get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DY,   width , nb_threads);
        return 0;

    case 2:
        get_deriv(ctx, td, 2, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
        get_deriv(ctx, td, 0, DIR_Y, INDEX_TEMP, INDEX_DX,   width , nb_threads);

        get_deriv(ctx, td, 0, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
        get_deriv(ctx, td, 2, DIR_Y, INDEX_TEMP, INDEX_DY,   width , nb_threads);

        get_deriv(ctx, td, 1, DIR_X, 0,          INDEX_TEMP, height, nb_threads);
        get_deriv(ctx, td, 1, DIR_Y, INDEX_TEMP, INDEX_DXY,  width , nb_threads);
        return 0;

    default:
        av_log(ctx, AV_LOG_ERROR, "Unsupported difford value: %d.\n", s->difford);
        return AVERROR(EINVAL);
    }

}
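
/*
 * Reasoning note (not in the original source): the 2-D Gaussian derivatives
 * are separable, so each result above is built from two 1-D passes. For
 * difford = 1, for instance, the x-derivative convolves rows with gauss[1]
 * and columns with gauss[0], while the y-derivative uses the transposed pair.
 */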

/**
 * Slice function for grey edge algorithm that does partial summing/maximizing
 * of gaussian derivatives.
 *
 * @param ctx the filter context.
 * @param arg data to be passed between threads.
 * @param jobnr current job number.
 * @param nb_jobs total number of jobs.
 *
 * @return 0.
 */
static int filter_slice_grey_edge(AVFilterContext* ctx, void* arg, int jobnr, int nb_jobs)
{
    ColorConstancyContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    int minknorm = s->minknorm;
    const uint8_t thresh = 255;
    int plane;

    for (plane = 0; plane < NUM_PLANES; ++plane) {
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        const int in_linesize = in->linesize[plane];
        const int slice_start = (height * jobnr) / nb_jobs;
        const int slice_end   = (height * (jobnr+1)) / nb_jobs;
        const uint8_t *img_data = in->data[plane];
        const double *src = td->data[INDEX_NORM][plane];
        double *dst = td->data[INDEX_DST][plane];
        int r, c;

        dst[jobnr] = 0;
        if (!minknorm) {
            for (r = slice_start; r < slice_end; ++r) {
                for (c = 0; c < width; ++c) {
                    dst[jobnr] = FFMAX( dst[jobnr], fabs(src[INDX2D(r, c, width)])
                                        * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
                }
            }
        } else {
            for (r = slice_start; r < slice_end; ++r) {
                for (c = 0; c < width; ++c) {
                    dst[jobnr] += ( pow( fabs(src[INDX2D(r, c, width)] / 255.), minknorm)
                                    * (img_data[INDX2D(r, c, in_linesize)] < thresh) );
                }
            }
        }
    }
    return 0;
}

/**
 * Main control function for grey edge algorithm.
 *
 * @param ctx the filter context.
 * @param in frame to perform grey edge on.
 *
 * @return 0 in case of success, a negative value corresponding to an
 * AVERROR code in case of failure.
 */
static int filter_grey_edge(AVFilterContext *ctx, AVFrame *in)
{
    ColorConstancyContext *s = ctx->priv;
    ThreadData td;
    int minknorm  = s->minknorm;
    int difford   = s->difford;
    double *white = s->white;
    int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);
    int plane, job, ret;

    td.in = in;
    ret = setup_derivative_buffers(ctx, &td);
    if (ret) {
        return ret;
    }
    get_derivative(ctx, &td);
    if (difford > 0) {
        ctx->internal->execute(ctx, slice_normalize, &td, NULL, nb_jobs);
    }

    ctx->internal->execute(ctx, filter_slice_grey_edge, &td, NULL, nb_jobs);
    if (!minknorm) {
        for (plane = 0; plane < NUM_PLANES; ++plane) {
            white[plane] = 0; // All values are absolute
            for (job = 0; job < nb_jobs; ++job) {
                white[plane] = FFMAX(white[plane], td.data[INDEX_DST][plane][job]);
            }
        }
    } else {
        for (plane = 0; plane < NUM_PLANES; ++plane) {
            white[plane] = 0;
            for (job = 0; job < nb_jobs; ++job) {
                white[plane] += td.data[INDEX_DST][plane][job];
            }
            white[plane] = pow(white[plane], 1./minknorm);
        }
    }

    cleanup_derivative_buffers(&td, difford + 1, NUM_PLANES);
    return 0;
}
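
/*
 * Note (restating the code above, not original comments): with minknorm = 0
 * the per-plane estimate is the maximum of the derivative norm over the
 * frame; otherwise each slice accumulates sum(|norm/255|^minknorm), pixels at
 * the saturation threshold (255) being ignored, and the final estimate is
 * that sum raised to 1/minknorm, i.e. an unnormalized Minkowski p-norm with
 * p = minknorm.
 */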

/**
 * Normalizes estimated illumination since only illumination vector
 * direction is required for color constancy.
 *
 * @param light the estimated illumination to be normalized in place
 */
static void normalize_light(double *light)
{
    double abs_val = pow( pow(light[0], 2.0) + pow(light[1], 2.0) + pow(light[2], 2.0), 0.5);
    int plane;

    // TODO: check if setting to 1.0 when estimated = 0.0 is the best thing to do

    if (!abs_val) {
        for (plane = 0; plane < NUM_PLANES; ++plane) {
            light[plane] = 1.0;
        }
    } else {
        for (plane = 0; plane < NUM_PLANES; ++plane) {
            light[plane] = (light[plane] / abs_val);
            if (!light[plane]) { // to avoid division by zero when correcting
                light[plane] = 1.0;
            }
        }
    }
}

/**
 * Redirects to corresponding algorithm estimation function and performs
 * normalization after estimation.
 *
 * @param ctx the filter context.
 * @param in frame to perform estimation on.
 *
 * @return 0 in case of success, a negative value corresponding to an
 * AVERROR code in case of failure.
 */
static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
{
    ColorConstancyContext *s = ctx->priv;
    int ret;

    ret = filter_grey_edge(ctx, in);

    av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
           s->white[0], s->white[1], s->white[2]);
    normalize_light(s->white);
    av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
           s->white[0], s->white[1], s->white[2]);

    return ret;
}

/**
 * Performs simple correction via diagonal transformation model.
 *
 * @param ctx the filter context.
 * @param arg data to be passed between threads.
 * @param jobnr current job number.
 * @param nb_jobs total number of jobs.
 *
 * @return 0.
 */
static int diagonal_transformation(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorConstancyContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    int plane;

    for (plane = 0; plane < NUM_PLANES; ++plane) {
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        const int64_t numpixels = width * (int64_t)height;
        const int slice_start = (numpixels * jobnr) / nb_jobs;
        const int slice_end   = (numpixels * (jobnr+1)) / nb_jobs;
        const uint8_t *src = in->data[plane];
        uint8_t *dst = out->data[plane];
        double temp;
        unsigned i;

        for (i = slice_start; i < slice_end; ++i) {
            temp = src[i] / (s->white[plane] * SQRT3);
            dst[i] = av_clip_uint8((int)(temp + 0.5));
        }
    }
    return 0;
}
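
/*
 * Note (reasoning, not original comments): normalize_light() scales the
 * estimate to unit length, so a perfectly neutral illuminant is
 * (1/sqrt(3), 1/sqrt(3), 1/sqrt(3)). Dividing by white[plane] * SQRT3 then
 * leaves such a frame unchanged, while channels estimated stronger than
 * neutral are attenuated and weaker ones are boosted.
 */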

/**
 * Main control function for correcting scene illumination based on
 * estimated illumination.
 *
 * @param ctx the filter context.
 * @param in holds frame to correct
 * @param out holds corrected frame
 */
static void chromatic_adaptation(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
{
    ColorConstancyContext *s = ctx->priv;
    ThreadData td;
    int nb_jobs = FFMIN3(s->planeheight[1], s->planewidth[1], s->nb_threads);

    td.in  = in;
    td.out = out;
    ctx->internal->execute(ctx, diagonal_transformation, &td, NULL, nb_jobs);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        // TODO: support more formats
        // FIXME: error when saving to .jpg
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}

static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ColorConstancyContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const double break_off_sigma = 3.0;
    double sigma = s->sigma;
    int ret;

    if (!floor(break_off_sigma * sigma + 0.5) && s->difford) {
        av_log(ctx, AV_LOG_ERROR, "floor(%f * sigma) must be > 0 when difford > 0.\n", break_off_sigma);
        return AVERROR(EINVAL);
    }

    s->filtersize = 2 * floor(break_off_sigma * sigma + 0.5) + 1;
    if ((ret = set_gauss(ctx))) {
        return ret;
    }

    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int ret;

    ret = illumination_estimation(ctx, in);
    if (ret) {
        return ret;
    }

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_log(ctx, AV_LOG_ERROR, "Out of memory while allocating output video buffer.\n");
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    chromatic_adaptation(ctx, in, out);

    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ColorConstancyContext *s = ctx->priv;
    int difford = s->difford;
    int i;

    for (i = 0; i <= difford; ++i) {
        av_freep(&s->gauss[i]);
    }
}

static const AVFilterPad colorconstancy_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad colorconstancy_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

#if CONFIG_GREYEDGE_FILTER

static const AVOption greyedge_options[] = {
    { "difford",  "set differentiation order", OFFSET(difford),  AV_OPT_TYPE_INT,    {.i64=1}, 0,   2,      FLAGS },
    { "minknorm", "set Minkowski norm",        OFFSET(minknorm), AV_OPT_TYPE_INT,    {.i64=1}, 0,   20,     FLAGS },
    { "sigma",    "set sigma",                 OFFSET(sigma),    AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0, 1024.0, FLAGS },
    { NULL }
};
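
/*
 * Example usage (illustrative; file names are placeholders):
 *   ffmpeg -i input.png -vf greyedge=difford=1:minknorm=5:sigma=2 output.png
 * estimates the illuminant with first-order derivatives, a Minkowski norm of
 * 5 and sigma = 2, then white-balances the frame accordingly.
 */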

AVFILTER_DEFINE_CLASS(greyedge);

AVFilter ff_vf_greyedge = {
    .name          = GREY_EDGE,
    .description   = NULL_IF_CONFIG_SMALL("Estimates scene illumination by grey edge assumption."),
    .priv_size     = sizeof(ColorConstancyContext),
    .priv_class    = &greyedge_class,
    .query_formats = query_formats,
    .uninit        = uninit,
    .inputs        = colorconstancy_inputs,
    .outputs       = colorconstancy_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_GREYEDGE_FILTER */