FFmpeg
cms.c
1 /*
2  * Copyright (C) 2024 Niklas Haas
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 #include <string.h>
23 
24 #include "libavutil/attributes.h"
25 #include "libavutil/avassert.h"
26 #include "libavutil/csp.h"
27 #include "libavutil/slicethread.h"
28 
29 #include "cms.h"
30 #include "csputils.h"
31 #include "libswscale/swscale.h"
32 #include "utils.h"
33 
34 bool sws_color_map_noop(const SwsColorMap *map)
35 {
36  /* If the encoding space is different, we must go through a conversion */
37  if (map->src.prim != map->dst.prim || map->src.trc != map->dst.trc)
38  return false;
39 
40  /* If the black point changes, we have to perform black point compensation */
41  if (av_cmp_q(map->src.min_luma, map->dst.min_luma))
42  return false;
43 
44  switch (map->intent) {
45  case SWS_INTENT_RELATIVE_COLORIMETRIC:
46  case SWS_INTENT_ABSOLUTE_COLORIMETRIC:
47  return ff_prim_superset(&map->dst.gamut, &map->src.gamut) &&
48  av_cmp_q(map->src.max_luma, map->dst.max_luma) <= 0;
49  case SWS_INTENT_SATURATION:
50  case SWS_INTENT_PERCEPTUAL:
51  return ff_prim_equal(&map->dst.gamut, &map->src.gamut) &&
52  !av_cmp_q(map->src.max_luma, map->dst.max_luma);
53  default:
54  av_assert0(!"Invalid gamut mapping intent?");
55  return true;
56  }
57 }
58 
59 /* Approximation of gamut hull at a given intensity level */
60 static const float hull(float I)
61 {
62  return ((I - 6.0f) * I + 9.0f) * I;
63 }
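As a side note, the cubic above factors neatly: hull(I) = ((I - 6)*I + 9)*I = I^3 - 6*I^2 + 9*I = I*(I - 3)^2. Within this file it is only ever used as the ratio hull(I_out) / hull(I_in) inside tone_map_apply() below, so any constant scale factor of the approximation cancels; only the shape of the curve matters.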
64 
65 /* For some minimal type safety, and code cleanliness */
66 typedef struct RGB {
67  float R, G, B; /* nits */
68 } RGB;
69 
70 typedef struct IPT {
71  float I, P, T;
72 } IPT;
73 
74 typedef struct ICh {
75  float I, C, h;
76 } ICh;
77 
78 static av_always_inline ICh ipt2ich(IPT c)
79 {
80  return (ICh) {
81  .I = c.I,
82  .C = sqrtf(c.P * c.P + c.T * c.T),
83  .h = atan2f(c.T, c.P),
84  };
85 }
86 
87 static av_always_inline IPT ich2ipt(ICh c)
88 {
89  return (IPT) {
90  .I = c.I,
91  .P = c.C * cosf(c.h),
92  .T = c.C * sinf(c.h),
93  };
94 }
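For illustration, the two helpers above are just a Cartesian/polar round trip in the P-T plane, so composing them recovers the input up to float rounding. A standalone sketch (plain C99 with libm, not part of cms.c; the structs mirror the definitions above):

    #include <math.h>
    #include <stdio.h>

    typedef struct { float I, P, T; } IPT;
    typedef struct { float I, C, h; } ICh;

    static ICh ipt2ich(IPT c) { return (ICh){ c.I, sqrtf(c.P * c.P + c.T * c.T), atan2f(c.T, c.P) }; }
    static IPT ich2ipt(ICh c) { return (IPT){ c.I, c.C * cosf(c.h), c.C * sinf(c.h) }; }

    int main(void)
    {
        const IPT in  = { .I = 0.5f, .P = 0.02f, .T = -0.01f };
        const IPT out = ich2ipt(ipt2ich(in));
        /* differences are on the order of 1e-9, i.e. pure rounding error */
        printf("dP=%g dT=%g\n", out.P - in.P, out.T - in.T);
        return 0;
    }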
95 
96 /* Helper struct containing pre-computed cached values describing a gamut */
97 typedef struct Gamut {
98  SwsMatrix3x3 encoding2lms;
99  SwsMatrix3x3 lms2encoding;
100  SwsMatrix3x3 lms2content;
101  SwsMatrix3x3 content2lms;
102  av_csp_eotf_function eotf;
103  av_csp_eotf_function eotf_inv;
104  float Iavg_frame;
105  float Imax_frame;
106  float Imin, Imax;
107  float Lb, Lw;
108  AVCIExy wp;
109  ICh peak; /* updated as needed in loop body when hue changes */
110 } Gamut;
111 
112 static Gamut gamut_from_colorspace(SwsColor fmt)
113 {
114  const AVColorPrimariesDesc *encoding = av_csp_primaries_desc_from_id(fmt.prim);
115  const AVColorPrimariesDesc content = {
116  .prim = fmt.gamut,
117  .wp = encoding->wp,
118  };
119 
120  const float Lw = av_q2d(fmt.max_luma), Lb = av_q2d(fmt.min_luma);
121  const float Imax = pq_oetf(Lw);
122 
123  return (Gamut) {
124  .encoding2lms = ff_sws_ipt_rgb2lms(encoding),
125  .lms2encoding = ff_sws_ipt_lms2rgb(encoding),
126  .lms2content = ff_sws_ipt_lms2rgb(&content),
127  .content2lms = ff_sws_ipt_rgb2lms(&content),
128  .eotf = av_csp_itu_eotf(fmt.trc),
129  .eotf_inv = av_csp_itu_eotf_inv(fmt.trc),
130  .wp = encoding->wp,
131  .Imin = pq_oetf(Lb),
132  .Imax = Imax,
133  .Imax_frame = fmt.frame_peak.den ? pq_oetf(av_q2d(fmt.frame_peak)) : Imax,
134  .Iavg_frame = fmt.frame_avg.den ? pq_oetf(av_q2d(fmt.frame_avg)) : 0.0f,
135  .Lb = Lb,
136  .Lw = Lw,
137  };
138 }
139 
140 static av_always_inline IPT rgb2ipt(RGB c, const SwsMatrix3x3 rgb2lms)
141 {
142  const float L = rgb2lms.m[0][0] * c.R +
143  rgb2lms.m[0][1] * c.G +
144  rgb2lms.m[0][2] * c.B;
145  const float M = rgb2lms.m[1][0] * c.R +
146  rgb2lms.m[1][1] * c.G +
147  rgb2lms.m[1][2] * c.B;
148  const float S = rgb2lms.m[2][0] * c.R +
149  rgb2lms.m[2][1] * c.G +
150  rgb2lms.m[2][2] * c.B;
151  const float Lp = pq_oetf(L);
152  const float Mp = pq_oetf(M);
153  const float Sp = pq_oetf(S);
154  return (IPT) {
155  .I = 0.4000f * Lp + 0.4000f * Mp + 0.2000f * Sp,
156  .P = 4.4550f * Lp - 4.8510f * Mp + 0.3960f * Sp,
157  .T = 0.8056f * Lp + 0.3572f * Mp - 1.1628f * Sp,
158  };
159 }
160 
161 static av_always_inline RGB ipt2rgb(IPT c, const SwsMatrix3x3 lms2rgb)
162 {
163  const float Lp = c.I + 0.0975689f * c.P + 0.205226f * c.T;
164  const float Mp = c.I - 0.1138760f * c.P + 0.133217f * c.T;
165  const float Sp = c.I + 0.0326151f * c.P - 0.676887f * c.T;
166  const float L = pq_eotf(Lp);
167  const float M = pq_eotf(Mp);
168  const float S = pq_eotf(Sp);
169  return (RGB) {
170  .R = lms2rgb.m[0][0] * L +
171  lms2rgb.m[0][1] * M +
172  lms2rgb.m[0][2] * S,
173  .G = lms2rgb.m[1][0] * L +
174  lms2rgb.m[1][1] * M +
175  lms2rgb.m[1][2] * S,
176  .B = lms2rgb.m[2][0] * L +
177  lms2rgb.m[2][1] * M +
178  lms2rgb.m[2][2] * S,
179  };
180 }
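For reference, the hard-coded coefficients in rgb2ipt() and ipt2rgb() are the usual IPT opponent-color transform applied to PQ-encoded LMS, and (to the quoted precision) its inverse:

    [I]   [ 0.4000   0.4000   0.2000 ] [L']
    [P] = [ 4.4550  -4.8510   0.3960 ] [M']
    [T]   [ 0.8056   0.3572  -1.1628 ] [S']

rgb2ipt() applies the caller-supplied RGB-to-LMS matrix, the per-channel PQ OETF, then this matrix; ipt2rgb() applies the inverse matrix, the PQ EOTF, and the caller-supplied LMS-to-RGB matrix.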
181 
182 static inline bool ingamut(IPT c, Gamut gamut)
183 {
184  const float min_rgb = gamut.Lb - 1e-4f;
185  const float max_rgb = gamut.Lw + 1e-2f;
186  const float Lp = c.I + 0.0975689f * c.P + 0.205226f * c.T;
187  const float Mp = c.I - 0.1138760f * c.P + 0.133217f * c.T;
188  const float Sp = c.I + 0.0326151f * c.P - 0.676887f * c.T;
189  if (Lp < gamut.Imin || Lp > gamut.Imax ||
190  Mp < gamut.Imin || Mp > gamut.Imax ||
191  Sp < gamut.Imin || Sp > gamut.Imax)
192  {
193  /* Values outside legal LMS range */
194  return false;
195  } else {
196  const float L = pq_eotf(Lp);
197  const float M = pq_eotf(Mp);
198  const float S = pq_eotf(Sp);
199  RGB rgb = {
200  .R = gamut.lms2content.m[0][0] * L +
201  gamut.lms2content.m[0][1] * M +
202  gamut.lms2content.m[0][2] * S,
203  .G = gamut.lms2content.m[1][0] * L +
204  gamut.lms2content.m[1][1] * M +
205  gamut.lms2content.m[1][2] * S,
206  .B = gamut.lms2content.m[2][0] * L +
207  gamut.lms2content.m[2][1] * M +
208  gamut.lms2content.m[2][2] * S,
209  };
210  return rgb.R >= min_rgb && rgb.R <= max_rgb &&
211  rgb.G >= min_rgb && rgb.G <= max_rgb &&
212  rgb.B >= min_rgb && rgb.B <= max_rgb;
213  }
214 }
215 
216 static const float maxDelta = 5e-5f;
217 
218 // Find gamut intersection using specified bounds
219 static inline ICh
220 desat_bounded(float I, float h, float Cmin, float Cmax, Gamut gamut)
221 {
222  if (I <= gamut.Imin)
223  return (ICh) { .I = gamut.Imin, .C = 0, .h = h };
224  else if (I >= gamut.Imax)
225  return (ICh) { .I = gamut.Imax, .C = 0, .h = h };
226  else {
227  const float maxDI = I * maxDelta;
228  ICh res = { .I = I, .C = (Cmin + Cmax) / 2, .h = h };
229  do {
230  if (ingamut(ich2ipt(res), gamut)) {
231  Cmin = res.C;
232  } else {
233  Cmax = res.C;
234  }
235  res.C = (Cmin + Cmax) / 2;
236  } while (Cmax - Cmin > maxDI);
237 
238  return res;
239  }
240 }
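This is a plain bisection on chroma: the [Cmin, Cmax] interval is halved each pass until its width drops below I * maxDelta, so the loop runs roughly log2((Cmax - Cmin) / (I * maxDelta)) iterations. For a search over C in [0, 0.5] at I = 0.5 with maxDelta = 5e-5 that is about 15 passes, each costing one ingamut() evaluation.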
241 
242 // Finds maximally saturated in-gamut color (for given hue)
243 static inline ICh saturate(float hue, Gamut gamut)
244 {
245  static const float invphi = 0.6180339887498948f;
246  static const float invphi2 = 0.38196601125010515f;
247 
248  ICh lo = { .I = gamut.Imin, .h = hue };
249  ICh hi = { .I = gamut.Imax, .h = hue };
250  float de = hi.I - lo.I;
251  ICh a = { .I = lo.I + invphi2 * de };
252  ICh b = { .I = lo.I + invphi * de };
253  a = desat_bounded(a.I, hue, 0.0f, 0.5f, gamut);
254  b = desat_bounded(b.I, hue, 0.0f, 0.5f, gamut);
255 
256  while (de > maxDelta) {
257  de *= invphi;
258  if (a.C > b.C) {
259  hi = b;
260  b = a;
261  a.I = lo.I + invphi2 * de;
262  a = desat_bounded(a.I, hue, lo.C - maxDelta, 0.5f, gamut);
263  } else {
264  lo = a;
265  a = b;
266  b.I = lo.I + invphi * de;
267  b = desat_bounded(b.I, hue, hi.C - maxDelta, 0.5f, gamut);
268  }
269  }
270 
271  return a.C > b.C ? a : b;
272 }
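saturate() is a golden-section search over intensity for the point of maximum attainable chroma at the given hue: the bracket width de shrinks by a factor of invphi (about 0.618) per iteration, so reaching maxDelta from a full [Imin, Imax] bracket of roughly [0, 1] takes about ln((Imax - Imin) / maxDelta) / ln(1 / invphi), i.e. around 20 iterations, each of which runs one desat_bounded() bisection. The Cmin arguments lo.C - maxDelta and hi.C - maxDelta reuse the chroma already found at the retained bracket endpoint as a lower bound for the inner bisection.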
273 
274 static float softclip(float value, float source, float target)
275 {
276  const float j = SOFTCLIP_KNEE;
277  float peak, x, a, b, scale;
278  if (!target)
279  return 0.0f;
280 
281  peak = source / target;
282  x = fminf(value / target, peak);
283  if (x <= j || peak <= 1.0)
284  return value;
285 
286  /* Apply simple mobius function */
287  a = -j*j * (peak - 1.0f) / (j*j - 2.0f * j + peak);
288  b = (j*j - 2.0f * j * peak + peak) / fmaxf(1e-6f, peak - 1.0f);
289  scale = (b*b + 2.0f * b*j + j*j) / (b - a);
290 
291  return scale * (x + a) / (x + b) * target;
292 }
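The a, b and scale coefficients correspond to the classic Mobius soft-clipping curve (the same family used by, e.g., libplacebo's tone mapping). With x normalized to the target, values up to the knee j pass through unchanged, while above it f(x) = scale * (x + a) / (x + b) is constructed so that, as can be checked by substitution, f(j) = j, f'(j) = 1 and f(peak) = 1: the curve joins the identity segment with matching value and slope at the knee and lands exactly on the target peak, keeping the per-channel soft clip in perceptual() below smooth at the transition.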
293 
294 /**
295  * Something like fmixf(base, c, x) but follows an exponential curve; note
296  * that this can be used to extend 'c' outwards for x > 1.
297  */
298 static inline ICh mix_exp(ICh c, float x, float gamma, float base)
299 {
300  return (ICh) {
301  .I = base + (c.I - base) * powf(x, gamma),
302  .C = c.C * x,
303  .h = c.h,
304  };
305 }
306 
307 /**
308  * Drop gamma for colors approaching black and achromatic to avoid numerical
309  * instabilities, and excessive brightness boosting of grain, while also
310  * strongly boosting gamma for values exceeding the target peak
311  */
312 static inline float scale_gamma(float gamma, ICh ich, Gamut gamut)
313 {
314  const float Imin = gamut.Imin;
315  const float Irel = fmaxf((ich.I - Imin) / (gamut.peak.I - Imin), 0.0f);
316  return gamma * powf(Irel, 3) * fminf(ich.C / gamut.peak.C, 1.0f);
317 }
318 
319 /* Clip a color along the exponential curve given by `gamma` */
320 static inline IPT clip_gamma(IPT ipt, float gamma, Gamut gamut)
321 {
322  float lo = 0.0f, hi = 1.0f, x = 0.5f;
323  const float maxDI = fmaxf(ipt.I * maxDelta, 1e-7f);
324  ICh ich;
325 
326  if (ipt.I <= gamut.Imin)
327  return (IPT) { .I = gamut.Imin };
328  if (ingamut(ipt, gamut))
329  return ipt;
330 
331  ich = ipt2ich(ipt);
332  if (!gamma)
333  return ich2ipt(desat_bounded(ich.I, ich.h, 0.0f, ich.C, gamut));
334 
335  gamma = scale_gamma(gamma, ich, gamut);
336  do {
337  ICh test = mix_exp(ich, x, gamma, gamut.peak.I);
338  if (ingamut(ich2ipt(test), gamut)) {
339  lo = x;
340  } else {
341  hi = x;
342  }
343  x = (lo + hi) / 2.0f;
344  } while (hi - lo > maxDI);
345 
346  return ich2ipt(mix_exp(ich, x, gamma, gamut.peak.I));
347 }
348 
349 typedef struct CmsCtx CmsCtx;
350 struct CmsCtx {
351  /* Tone mapping parameters */
352  float Qa, Qb, Qc, Pa, Pb, src_knee, dst_knee; /* perceptual */
353  float I_scale, I_offset; /* linear methods */
354 
355  /* Colorspace parameters */
356  Gamut src;
357  Gamut tmp; /* after tone mapping */
358  Gamut dst;
359  SwsMatrix3x3 adaptation; /* for absolute intent */
360 
361  /* Invocation parameters */
362  SwsColorMap map;
363  float (*tone_map)(const CmsCtx *ctx, float I);
364  IPT (*adapt_colors)(const CmsCtx *ctx, IPT ipt);
365  v3u16_t *input;
366  v3u16_t *output;
367 
368  /* Threading parameters */
369  int slice_size;
370  int size_input;
371  int size_output_I;
372  int size_output_PT;
373 };
374 
375 /**
376  * Helper function to pick a knee point based on the HDR10+ brightness
377  * metadata and scene brightness average matching.
378  *
379  * Inspired by SMPTE ST2094-10, with some modifications
380  */
381 static void st2094_pick_knee(float src_max, float src_min, float src_avg,
382  float dst_max, float dst_min,
383  float *out_src_knee, float *out_dst_knee)
384 {
385  const float min_knee = PERCEPTUAL_KNEE_MIN;
386  const float max_knee = PERCEPTUAL_KNEE_MAX;
387  const float def_knee = PERCEPTUAL_KNEE_DEF;
388  const float src_knee_min = fmixf(src_min, src_max, min_knee);
389  const float src_knee_max = fmixf(src_min, src_max, max_knee);
390  const float dst_knee_min = fmixf(dst_min, dst_max, min_knee);
391  const float dst_knee_max = fmixf(dst_min, dst_max, max_knee);
392  float src_knee, target, adapted, tuning, adaptation, dst_knee;
393 
394  /* Choose source knee based on dynamic source scene brightness */
395  src_knee = src_avg ? src_avg : fmixf(src_min, src_max, def_knee);
396  src_knee = av_clipf(src_knee, src_knee_min, src_knee_max);
397 
398  /* Choose target adaptation point based on linearly re-scaling source knee */
399  target = (src_knee - src_min) / (src_max - src_min);
400  adapted = fmixf(dst_min, dst_max, target);
401 
402  /**
403  * Choose the destination knee by picking the perceptual adaptation point
404  * between the source knee and the desired target. This moves the knee
405  * point, on the vertical axis, closer to the 1:1 (neutral) line.
406  *
407  * Adjust the adaptation strength towards 1 based on how close the knee
408  * point is to its extreme values (min/max knee)
409  */
410  tuning = smoothstepf(max_knee, def_knee, target) *
411  smoothstepf(min_knee, def_knee, target);
412  adaptation = fmixf(1.0f, PERCEPTUAL_ADAPTATION, tuning);
413  dst_knee = fmixf(src_knee, adapted, adaptation);
414  dst_knee = av_clipf(dst_knee, dst_knee_min, dst_knee_max);
415 
416  *out_src_knee = src_knee;
417  *out_dst_knee = dst_knee;
418 }
419 
420 static void tone_map_setup(CmsCtx *ctx, bool dynamic)
421 {
422  const float dst_min = ctx->dst.Imin;
423  const float dst_max = ctx->dst.Imax;
424  const float src_min = ctx->src.Imin;
425  const float src_max = dynamic ? ctx->src.Imax_frame : ctx->src.Imax;
426  const float src_avg = dynamic ? ctx->src.Iavg_frame : 0.0f;
427  float slope, ratio, in_min, in_max, out_min, out_max, t;
428 
429  switch (ctx->map.intent) {
430  case SWS_INTENT_PERCEPTUAL:
431  st2094_pick_knee(src_max, src_min, src_avg, dst_max, dst_min,
432  &ctx->src_knee, &ctx->dst_knee);
433 
434  /* Solve for linear knee (Pa = 0) */
435  slope = (ctx->dst_knee - dst_min) / (ctx->src_knee - src_min);
436 
437  /**
438  * Tune the slope at the knee point slightly: raise it to a user-provided
439  * gamma exponent, multiplied by an extra tuning coefficient designed to
440  * make the slope closer to 1.0 when the difference in peaks is low, and
441  * closer to linear when the difference between peaks is high.
442  */
443  ratio = src_max / dst_max - 1.0f;
444  ratio = av_clipf(SLOPE_TUNING * ratio, SLOPE_OFFSET, 1.0f + SLOPE_OFFSET);
445  slope = powf(slope, (1.0f - PERCEPTUAL_CONTRAST) * ratio);
446 
447  /* Normalize everything relative to the pivot to make the math easier */
448  in_min = src_min - ctx->src_knee;
449  in_max = src_max - ctx->src_knee;
450  out_min = dst_min - ctx->dst_knee;
451  out_max = dst_max - ctx->dst_knee;
452 
453  /**
454  * Solve P of order 2 for:
455  * P(in_min) = out_min
456  * P'(0.0) = slope
457  * P(0.0) = 0.0
458  */
459  ctx->Pa = (out_min - slope * in_min) / (in_min * in_min);
460  ctx->Pb = slope;
461 
462  /**
463  * Solve Q of order 3 for:
464  * Q(in_max) = out_max
465  * Q''(in_max) = 0.0
466  * Q(0.0) = 0.0
467  * Q'(0.0) = slope
468  */
469  t = 2 * in_max * in_max;
470  ctx->Qa = (slope * in_max - out_max) / (in_max * t);
471  ctx->Qb = -3 * (slope * in_max - out_max) / t;
472  ctx->Qc = slope;
473  break;
474  case SWS_INTENT_SATURATION:
475  /* Linear stretch */
476  ctx->I_scale = (dst_max - dst_min) / (src_max - src_min);
477  ctx->I_offset = dst_min - src_min * ctx->I_scale;
478  break;
479  case SWS_INTENT_RELATIVE_COLORIMETRIC:
480  /* Pure black point adaptation */
481  ctx->I_scale = src_max / (src_max - src_min) /
482  (dst_max / (dst_max - dst_min));
483  ctx->I_offset = dst_min - src_min * ctx->I_scale;
484  break;
485  case SWS_INTENT_ABSOLUTE_COLORIMETRIC:
486  /* Hard clip */
487  ctx->I_scale = 1.0f;
488  ctx->I_offset = 0.0f;
489  break;
490  }
491 }
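For reference, the closed forms in the perceptual branch follow directly from the stated constraints, with x measured relative to the knee point:

    P(x) = Pa*x^2 + Pb*x:        P(0) = 0 by construction; P'(0) = Pb = slope;
                                 P(in_min) = out_min  =>  Pa = (out_min - slope*in_min) / in_min^2

    Q(x) = Qa*x^3 + Qb*x^2 + Qc*x:   Q(0) = 0; Q'(0) = Qc = slope;
                                 Q''(in_max) = 6*Qa*in_max + 2*Qb = 0  =>  Qb = -3*Qa*in_max;
                                 Q(in_max) = out_max  =>  Qa = (slope*in_max - out_max) / (2*in_max^3)

which matches the code above with t = 2*in_max^2. The result is a spline that is linear at the knee, quadratic towards the black point and cubic towards the peak, with zero second derivative at in_max.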
492 
493 static av_always_inline IPT tone_map_apply(const CmsCtx *ctx, IPT ipt)
494 {
495  float I = ipt.I, desat;
496 
497  if (ctx->map.intent == SWS_INTENT_PERCEPTUAL) {
498  const float Pa = ctx->Pa, Pb = ctx->Pb;
499  const float Qa = ctx->Qa, Qb = ctx->Qb, Qc = ctx->Qc;
500  I -= ctx->src_knee;
501  I = I > 0 ? ((Qa * I + Qb) * I + Qc) * I : (Pa * I + Pb) * I;
502  I += ctx->dst_knee;
503  } else {
504  I = ctx->I_scale * I + ctx->I_offset;
505  }
506 
507  /**
508  * Avoids raising saturation excessively when raising brightness, and
509  * also desaturates when reducing brightness greatly to account for the
510  * reduction in gamut volume.
511  */
512  desat = fminf(ipt.I / I, hull(I) / hull(ipt.I));
513  return (IPT) {
514  .I = I,
515  .P = ipt.P * desat,
516  .T = ipt.T * desat,
517  };
518 }
519 
520 static IPT perceptual(const CmsCtx *ctx, IPT ipt)
521 {
522  ICh ich = ipt2ich(ipt);
523  IPT mapped = rgb2ipt(ipt2rgb(ipt, ctx->tmp.lms2content), ctx->dst.content2lms);
524  RGB rgb;
525  float maxRGB;
526 
527  /* Protect in gamut region */
528  const float maxC = fmaxf(ctx->tmp.peak.C, ctx->dst.peak.C);
529  float k = smoothstepf(PERCEPTUAL_DEADZONE, 1.0f, ich.C / maxC);
530  k *= PERCEPTUAL_STRENGTH;
531  ipt.I = fmixf(ipt.I, mapped.I, k);
532  ipt.P = fmixf(ipt.P, mapped.P, k);
533  ipt.T = fmixf(ipt.T, mapped.T, k);
534 
535  rgb = ipt2rgb(ipt, ctx->dst.lms2content);
536  maxRGB = fmaxf(rgb.R, fmaxf(rgb.G, rgb.B));
537  rgb.R = fmaxf(softclip(rgb.R, maxRGB, ctx->dst.Lw), ctx->dst.Lb);
538  rgb.G = fmaxf(softclip(rgb.G, maxRGB, ctx->dst.Lw), ctx->dst.Lb);
539  rgb.B = fmaxf(softclip(rgb.B, maxRGB, ctx->dst.Lw), ctx->dst.Lb);
540 
541  return rgb2ipt(rgb, ctx->dst.content2lms);
542 }
543 
544 static IPT relative(const CmsCtx *ctx, IPT ipt)
545 {
546  return clip_gamma(ipt, COLORIMETRIC_GAMMA, ctx->dst);
547 }
548 
549 static IPT absolute(const CmsCtx *ctx, IPT ipt)
550 {
551  RGB rgb = ipt2rgb(ipt, ctx->dst.lms2encoding);
552  float c[3] = { rgb.R, rgb.G, rgb.B };
553  ff_sws_matrix3x3_apply(&ctx->adaptation, c);
554  ipt = rgb2ipt((RGB) { c[0], c[1], c[2] }, ctx->dst.encoding2lms);
555 
556  return clip_gamma(ipt, COLORIMETRIC_GAMMA, ctx->dst);
557 }
558 
559 static IPT saturation(const CmsCtx * ctx, IPT ipt)
560 {
561  RGB rgb = ipt2rgb(ipt, ctx->tmp.lms2content);
562  return rgb2ipt(rgb, ctx->dst.content2lms);
563 }
564 
565 static av_always_inline av_const uint16_t av_round16f(float x)
566 {
567  return av_clip_uint16(x * (UINT16_MAX - 1) + 0.5f);
568 }
569 
570 /* Call this whenever the hue changes inside the loop body */
571 static av_always_inline void update_hue_peaks(CmsCtx *ctx, float P, float T)
572 {
573  const float hue = atan2f(T, P);
574  switch (ctx->map.intent) {
575  case SWS_INTENT_PERCEPTUAL:
576  ctx->tmp.peak = saturate(hue, ctx->tmp);
577  /* fall through */
578  case SWS_INTENT_RELATIVE_COLORIMETRIC:
579  case SWS_INTENT_ABSOLUTE_COLORIMETRIC:
580  ctx->dst.peak = saturate(hue, ctx->dst);
581  return;
582  default:
583  return;
584  }
585 }
586 
587 static void generate_slice(void *priv, int jobnr, int threadnr, int nb_jobs,
588  int nb_threads)
589 {
590  CmsCtx ctx = *(const CmsCtx *) priv;
591 
592  const int slice_start = jobnr * ctx.slice_size;
593  const int slice_stride = ctx.size_input * ctx.size_input;
594  const int slice_end = FFMIN((jobnr + 1) * ctx.slice_size, ctx.size_input);
595  v3u16_t *input = &ctx.input[slice_start * slice_stride];
596 
597  const int output_slice_h = (ctx.size_output_PT + nb_jobs - 1) / nb_jobs;
598  const int output_start = jobnr * output_slice_h;
599  const int output_stride = ctx.size_output_PT * ctx.size_output_I;
600  const int output_end = FFMIN((jobnr + 1) * output_slice_h, ctx.size_output_PT);
601  v3u16_t *output = ctx.output ? &ctx.output[output_start * output_stride] : NULL;
602 
603  const float I_scale = 1.0f / (ctx.src.Imax - ctx.src.Imin);
604  const float I_offset = -ctx.src.Imin * I_scale;
605  const float PT_offset = (float) (1 << 15) / (UINT16_MAX - 1);
606 
607  const float input_scale = 1.0f / (ctx.size_input - 1);
608  const float output_scale_PT = 1.0f / (ctx.size_output_PT - 1);
609  const float output_scale_I = (ctx.tmp.Imax - ctx.tmp.Imin) /
610  (ctx.size_output_I - 1);
611 
612  for (int Bx = slice_start; Bx < slice_end; Bx++) {
613  const float B = input_scale * Bx;
614  for (int Gx = 0; Gx < ctx.size_input; Gx++) {
615  const float G = input_scale * Gx;
616  for (int Rx = 0; Rx < ctx.size_input; Rx++) {
617  double c[3] = { input_scale * Rx, G, B };
618  RGB rgb;
619  IPT ipt;
620 
621  ctx.src.eotf(ctx.src.Lw, ctx.src.Lb, c);
622  rgb = (RGB) { c[0], c[1], c[2] };
623  ipt = rgb2ipt(rgb, ctx.src.encoding2lms);
624 
625  if (output) {
626  /* Save intermediate value to 3DLUT */
627  *input++ = (v3u16_t) {
628  av_round16f(I_scale * ipt.I + I_offset),
629  av_round16f(ipt.P + PT_offset),
630  av_round16f(ipt.T + PT_offset),
631  };
632  } else {
633  update_hue_peaks(&ctx, ipt.P, ipt.T);
634 
635  ipt = tone_map_apply(&ctx, ipt);
636  ipt = ctx.adapt_colors(&ctx, ipt);
637  rgb = ipt2rgb(ipt, ctx.dst.lms2encoding);
638 
639  c[0] = rgb.R;
640  c[1] = rgb.G;
641  c[2] = rgb.B;
642  ctx.dst.eotf_inv(ctx.dst.Lw, ctx.dst.Lb, c);
643  *input++ = (v3u16_t) {
644  av_round16f(c[0]),
645  av_round16f(c[1]),
646  av_round16f(c[2]),
647  };
648  }
649  }
650  }
651  }
652 
653  if (!output)
654  return;
655 
656  /* Generate split gamut mapping LUT */
657  for (int Tx = output_start; Tx < output_end; Tx++) {
658  const float T = output_scale_PT * Tx - PT_offset;
659  for (int Px = 0; Px < ctx.size_output_PT; Px++) {
660  const float P = output_scale_PT * Px - PT_offset;
661  update_hue_peaks(&ctx, P, T);
662 
663  for (int Ix = 0; Ix < ctx.size_output_I; Ix++) {
664  const float I = output_scale_I * Ix + ctx.tmp.Imin;
665  IPT ipt = ctx.adapt_colors(&ctx, (IPT) { I, P, T });
666  RGB rgb = ipt2rgb(ipt, ctx.dst.lms2encoding);
667  double c[3] = { rgb.R, rgb.G, rgb.B };
668  ctx.dst.eotf_inv(ctx.dst.Lw, ctx.dst.Lb, c);
669  *output++ = (v3u16_t) {
670  av_round16f(c[0]),
671  av_round16f(c[1]),
672  av_round16f(c[2]),
673  };
674  }
675  }
676  }
677 }
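To make the memory layout explicit: the R/G/B input LUT is written with B as the slowest-varying axis and R as the fastest, so entry (Rx, Gx, Bx) lives at index (Bx * size_input + Gx) * size_input + Rx. A minimal lookup sketch (illustrative only, not part of cms.c; the v3u16 stand-in and the nearest-neighbor fetch are simplifications, real consumers would interpolate):

    #include <stdint.h>

    typedef struct { uint16_t x, y, z; } v3u16; /* stand-in for v3u16_t from csputils.h */

    /* Nearest-neighbor fetch from a size*size*size LUT filled as in generate_slice(),
     * taking 16-bit normalized R, G, B as input. */
    static v3u16 lut3d_lookup_nearest(const v3u16 *lut, int size,
                                      uint16_t R, uint16_t G, uint16_t B)
    {
        const int Rx = (int)(((int64_t) R * (size - 1) + 32767) / 65535);
        const int Gx = (int)(((int64_t) G * (size - 1) + 32767) / 65535);
        const int Bx = (int)(((int64_t) B * (size - 1) + 32767) / 65535);
        return lut[(Bx * size + Gx) * size + Rx];
    }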
678 
679 int sws_color_map_generate_static(v3u16_t *lut, int size, const SwsColorMap *map)
680 {
681  return sws_color_map_generate_dynamic(lut, NULL, size, 1, 1, map);
682 }
683 
684 int sws_color_map_generate_dynamic(v3u16_t *input, v3u16_t *output,
685  int size_input, int size_I, int size_PT,
686  const SwsColorMap *map)
687 {
688  AVSliceThread *slicethread;
689  int ret, num_slices;
690 
691  CmsCtx ctx = {
692  .map = *map,
693  .input = input,
694  .output = output,
695  .size_input = size_input,
696  .size_output_I = size_I,
697  .size_output_PT = size_PT,
698  .src = gamut_from_colorspace(map->src),
699  .dst = gamut_from_colorspace(map->dst),
700  };
701 
702  switch (ctx.map.intent) {
703  case SWS_INTENT_PERCEPTUAL: ctx.adapt_colors = perceptual; break;
704  case SWS_INTENT_RELATIVE_COLORIMETRIC: ctx.adapt_colors = relative; break;
705  case SWS_INTENT_SATURATION: ctx.adapt_colors = saturation; break;
706  case SWS_INTENT_ABSOLUTE_COLORIMETRIC: ctx.adapt_colors = absolute; break;
707  default: return AVERROR(EINVAL);
708  }
709 
710  if (!output) {
711  /* Tone mapping is handled in a separate step when using dynamic TM */
712  tone_map_setup(&ctx, false);
713  }
714 
715  /* Intermediate color space after tone mapping */
716  ctx.tmp = ctx.src;
717  ctx.tmp.Lb = ctx.dst.Lb;
718  ctx.tmp.Lw = ctx.dst.Lw;
719  ctx.tmp.Imin = ctx.dst.Imin;
720  ctx.tmp.Imax = ctx.dst.Imax;
721 
722  if (ctx.map.intent == SWS_INTENT_ABSOLUTE_COLORIMETRIC) {
723  /**
724  * The IPT transform already implies an explicit white point adaptation
725  * from src to dst, so to get absolute colorimetric semantics we have
726  * to explicitly undo this adaptation with a corresponding inverse.
727  */
728  ctx.adaptation = ff_sws_get_adaptation(&ctx.map.dst.gamut,
729  ctx.dst.wp, ctx.src.wp);
730  }
731 
732  ret = avpriv_slicethread_create(&slicethread, &ctx, generate_slice, NULL, 0);
733  if (ret < 0)
734  return ret;
735 
736  ctx.slice_size = (ctx.size_input + ret - 1) / ret;
737  num_slices = (ctx.size_input + ctx.slice_size - 1) / ctx.slice_size;
738  avpriv_slicethread_execute(slicethread, num_slices, 0);
739  avpriv_slicethread_free(&slicethread);
740  return 0;
741 }
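A hypothetical caller-side sketch (not taken from FFmpeg; the helper name and error handling are illustrative) showing how the static variant would typically be driven, including the no-op early out:

    static int build_static_lut(const SwsColorMap *map, int size, v3u16_t **out_lut)
    {
        v3u16_t *lut;
        int ret;

        *out_lut = NULL;
        if (sws_color_map_noop(map))
            return 0; /* end-to-end transform is an identity; skip the CMS pass */

        lut = av_malloc_array((size_t) size * size * size, sizeof(*lut));
        if (!lut)
            return AVERROR(ENOMEM);

        ret = sws_color_map_generate_static(lut, size, map);
        if (ret < 0) {
            av_free(lut);
            return ret;
        }

        *out_lut = lut;
        return 0;
    }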
742 
743 void sws_tone_map_generate(v2u16_t *lut, int size, const SwsColorMap *map)
744 {
745  CmsCtx ctx = {
746  .map = *map,
747  .src = gamut_from_colorspace(map->src),
748  .dst = gamut_from_colorspace(map->dst),
749  };
750 
751  const float src_scale = (ctx.src.Imax - ctx.src.Imin) / (size - 1);
752  const float src_offset = ctx.src.Imin;
753  const float dst_scale = 1.0f / (ctx.dst.Imax - ctx.dst.Imin);
754  const float dst_offset = -ctx.dst.Imin * dst_scale;
755 
756  tone_map_setup(&ctx, true);
757 
758  for (int i = 0; i < size; i++) {
759  const float I = src_scale * i + src_offset;
760  IPT ipt = tone_map_apply(&ctx, (IPT) { I, 1.0f });
761  lut[i] = (v2u16_t) {
762  av_round16f(dst_scale * ipt.I + dst_offset),
763  av_clip_uint16(ipt.P * (1 << 15) + 0.5f),
764  };
765  }
766 }
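In other words, entry i of the generated 1D LUT covers the source intensity I_i = Imin_src + i * (Imax_src - Imin_src) / (size - 1) and stores two 16-bit values: the tone-mapped intensity re-normalized to the destination range, (I_out - Imin_dst) / (Imax_dst - Imin_dst), scaled by UINT16_MAX - 1 as in av_round16f() above, and the chroma attenuation that tone_map_apply() produces for a unit P input, stored as an unsigned Q1.15 factor (32768 represents 1.0). A consumer can therefore rescale P and T by value / 32768.0 after remapping I, matching the desaturation step inside tone_map_apply().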