FFmpeg
vf_colorspace.c
1 /*
2  * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * @file
23  * Convert between colorspaces.
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/mem_internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/pixfmt.h"
31 
32 #include "avfilter.h"
33 #include "colorspacedsp.h"
34 #include "formats.h"
35 #include "internal.h"
36 #include "video.h"
37 #include "colorspace.h"
38 
39 enum DitherMode {
40  DITHER_NONE,
41  DITHER_FSB,
42  DITHER_NB,
43 };
44 
45 enum Colorspace {
46  CS_UNSPECIFIED,
47  CS_BT470M,
48  CS_BT470BG,
49  CS_BT601_6_525,
50  CS_BT601_6_625,
51  CS_BT709,
52  CS_SMPTE170M,
53  CS_SMPTE240M,
54  CS_BT2020,
55  CS_NB,
56 };
57 
58 enum Whitepoint {
59  WP_D65,
60  WP_C,
61  WP_DCI,
62  WP_E,
63  WP_NB,
64 };
65 
66 enum WhitepointAdaptation {
67  WP_ADAPT_BRADFORD,
68  WP_ADAPT_VON_KRIES,
69  NB_WP_ADAPT_NON_IDENTITY,
70  WP_ADAPT_IDENTITY = NB_WP_ADAPT_NON_IDENTITY,
71  NB_WP_ADAPT,
72 };
73 
85 };
86 
87 static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
98 };
99 
100 static const enum AVColorSpace default_csp[CS_NB + 1] = {
111 };
112 
113 struct ColorPrimaries {
114  enum Whitepoint wp;
115  struct PrimaryCoefficients coeff;
116 };
117 
118 struct TransferCharacteristics {
119  double alpha, beta, gamma, delta;
120 };
121 
122 typedef struct ColorSpaceContext {
123  const AVClass *class;
124 
125  ColorSpaceDSPContext dsp;
126 
127  enum Colorspace user_all, user_iall;
128  enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
129  enum AVColorRange in_rng, out_rng, user_rng, user_irng;
130  enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
131  enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
132  enum AVPixelFormat in_format, user_format;
133  int fast_mode;
134  enum DitherMode dither;
135  enum WhitepointAdaptation wp_adapt;
136 
137  int16_t *rgb[3];
138  ptrdiff_t rgb_stride;
139  unsigned rgb_sz;
140  int *dither_scratch[3][2], *dither_scratch_base[3][2];
141 
142  const struct ColorPrimaries *in_primaries, *out_primaries;
143  int lrgb2lrgb_passthrough;
144  DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];
145 
146  const struct TransferCharacteristics *in_txchr, *out_txchr;
147  int rgb2rgb_passthrough;
148  int16_t *lin_lut, *delin_lut;
149 
150  const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
151  int yuv2yuv_fastmode, yuv2yuv_passthrough;
152  DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
153  DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
154  DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
155  DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
156  yuv2rgb_fn yuv2rgb;
157  rgb2yuv_fn rgb2yuv;
158  rgb2yuv_fsb_fn rgb2yuv_fsb;
159  yuv2yuv_fn yuv2yuv;
160  double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
161  int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;
162 
163  int did_warn_range;
164 } ColorSpaceContext;
165 
166 // FIXME deal with odd width/heights
167 // FIXME faster linearize/delinearize implementation (integer pow)
168 // FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
169 // FIXME test that the values in (de)lin_lut don't exceed their container storage
170 // type size (only useful if we keep the LUT and don't move to fast integer pow)
171 // FIXME dithering if bitdepth goes down?
172 // FIXME bitexact for fate integration?
173 
174 // FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
175 // find any actual tables that document their real values...
176 // See http://www.13thmonkey.org/~boris/gammacorrection/ (first graph) for why it matters
177 static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
178  [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
179  [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
180  [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
181  [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
182  [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
183  [AVCOL_TRC_LINEAR] = { 1.0, 0.0, 1.0, 0.0 },
184  [AVCOL_TRC_IEC61966_2_1] = { 1.055, 0.0031308, 1.0 / 2.4, 12.92 },
185  [AVCOL_TRC_IEC61966_2_4] = { 1.099, 0.018, 0.45, 4.5 },
186  [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
187  [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
188 };
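/* Editor's note (illustration, not in the original source): the four values per
 * entry parameterize the piecewise transfer function applied by
 * fill_gamma_table() below, i.e. for a linear value v >= 0:
 *
 *   encoded = alpha * pow(v, gamma) - (alpha - 1)   for v >= beta
 *   encoded = delta * v                             for v <  beta
 *
 * e.g. for BT.709: 1.099 * pow(v, 0.45) - 0.099 above 0.018, and 4.5 * v below it. */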
189 
190 static const struct TransferCharacteristics *
191 get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
192 {
193  const struct TransferCharacteristics *coeffs;
194 
195  if (trc >= AVCOL_TRC_NB)
196  return NULL;
197  coeffs = &transfer_characteristics[trc];
198  if (!coeffs->alpha)
199  return NULL;
200 
201  return coeffs;
202 }
203 
204 static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB] = {
205  [WP_D65] = { 0.3127, 0.3290 },
206  [WP_C] = { 0.3100, 0.3160 },
207  [WP_DCI] = { 0.3140, 0.3510 },
208  [WP_E] = { 1/3.0f, 1/3.0f },
209 };
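/* Note (editor's addition): only the CIE xy chromaticity of each white point is
 * stored; the implicit z coordinate is recovered as z = 1 - x - y, as done in
 * fill_whitepoint_conv_table() below. */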
210 
211 static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB] = {
212  [AVCOL_PRI_BT709] = { WP_D65, { 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 } },
213  [AVCOL_PRI_BT470M] = { WP_C, { 0.670, 0.330, 0.210, 0.710, 0.140, 0.080 } },
214  [AVCOL_PRI_BT470BG] = { WP_D65, { 0.640, 0.330, 0.290, 0.600, 0.150, 0.060 } },
215  [AVCOL_PRI_SMPTE170M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
216  [AVCOL_PRI_SMPTE240M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
217  [AVCOL_PRI_SMPTE428] = { WP_E, { 0.735, 0.265, 0.274, 0.718, 0.167, 0.009 } },
218  [AVCOL_PRI_SMPTE431] = { WP_DCI, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
219  [AVCOL_PRI_SMPTE432] = { WP_D65, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
220  [AVCOL_PRI_FILM] = { WP_C, { 0.681, 0.319, 0.243, 0.692, 0.145, 0.049 } },
221  [AVCOL_PRI_BT2020] = { WP_D65, { 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 } },
222  [AVCOL_PRI_JEDEC_P22] = { WP_D65, { 0.630, 0.340, 0.295, 0.605, 0.155, 0.077 } },
223 };
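/* Note (editor's addition): each entry stores the white point plus the CIE xy
 * chromaticities of the red, green and blue primaries (xr, yr, xg, yg, xb, yb);
 * ff_fill_rgb2xyz_table() turns these into a 3x3 RGB-to-XYZ matrix in
 * create_filtergraph() below. */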
224 
225 static const struct ColorPrimaries *get_color_primaries(enum AVColorPrimaries prm)
226 {
227  const struct ColorPrimaries *p;
228 
229  if (prm >= AVCOL_PRI_NB)
230  return NULL;
231  p = &color_primaries[prm];
232  if (!p->coeff.xr)
233  return NULL;
234 
235  return p;
236 }
237 
238 static int fill_gamma_table(ColorSpaceContext *s)
239 {
240  int n;
241  double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
242  double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
243  double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
244  double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
245  double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
246 
247  s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
248  if (!s->lin_lut)
249  return AVERROR(ENOMEM);
250  s->delin_lut = &s->lin_lut[32768];
251  for (n = 0; n < 32768; n++) {
252  double v = (n - 2048.0) / 28672.0, d, l;
253 
254  // delinearize
255  if (v <= -out_beta) {
256  d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
257  } else if (v < out_beta) {
258  d = out_delta * v;
259  } else {
260  d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
261  }
262  s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
263 
264  // linearize
265  if (v <= -in_beta * in_delta) {
266  l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
267  } else if (v < in_beta * in_delta) {
268  l = v * in_idelta;
269  } else {
270  l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
271  }
272  s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
273  }
274 
275  return 0;
276 }
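/* Worked example (editor's note, not in the original source): the LUTs are
 * indexed by a biased 15-bit value n whose linear-light meaning is
 * (n - 2048) / 28672, so n = 2048 is 0.0 and n = 30720 is 1.0; indices below
 * 2048 and above 30720 absorb under-/overshoot from the YUV-to-RGB step.
 * For sRGB output (alpha 1.055, beta 0.0031308, gamma 1/2.4, delta 12.92),
 * v = 0.5 delinearizes to 1.055 * pow(0.5, 1/2.4) - 0.055 ~= 0.7354, so
 * delin_lut[2048 + 14336] ends up around 21084 on the 28672 scale. */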
277 
278 /*
279  * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
280  * This function uses the Bradford mechanism.
281  */
282 static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
283  enum Whitepoint src, enum Whitepoint dst)
284 {
285  static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
286  [WP_ADAPT_BRADFORD] = {
287  { 0.8951, 0.2664, -0.1614 },
288  { -0.7502, 1.7135, 0.0367 },
289  { 0.0389, -0.0685, 1.0296 },
290  }, [WP_ADAPT_VON_KRIES] = {
291  { 0.40024, 0.70760, -0.08081 },
292  { -0.22630, 1.16532, 0.04570 },
293  { 0.00000, 0.00000, 0.91822 },
294  },
295  };
296  const double (*ma)[3] = ma_tbl[wp_adapt];
297  const struct WhitepointCoefficients *wp_src = &whitepoint_coefficients[src];
298  double zw_src = 1.0 - wp_src->xw - wp_src->yw;
299  const struct WhitepointCoefficients *wp_dst = &whitepoint_coefficients[dst];
300  double zw_dst = 1.0 - wp_dst->xw - wp_dst->yw;
301  double mai[3][3], fac[3][3], tmp[3][3];
302  double rs, gs, bs, rd, gd, bd;
303 
304  ff_matrix_invert_3x3(ma, mai);
305  rs = ma[0][0] * wp_src->xw + ma[0][1] * wp_src->yw + ma[0][2] * zw_src;
306  gs = ma[1][0] * wp_src->xw + ma[1][1] * wp_src->yw + ma[1][2] * zw_src;
307  bs = ma[2][0] * wp_src->xw + ma[2][1] * wp_src->yw + ma[2][2] * zw_src;
308  rd = ma[0][0] * wp_dst->xw + ma[0][1] * wp_dst->yw + ma[0][2] * zw_dst;
309  gd = ma[1][0] * wp_dst->xw + ma[1][1] * wp_dst->yw + ma[1][2] * zw_dst;
310  bd = ma[2][0] * wp_dst->xw + ma[2][1] * wp_dst->yw + ma[2][2] * zw_dst;
311  fac[0][0] = rd / rs;
312  fac[1][1] = gd / gs;
313  fac[2][2] = bd / bs;
314  fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
315  ff_matrix_mul_3x3(tmp, ma, fac);
316  ff_matrix_mul_3x3(out, tmp, mai);
317 }
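/* Editor's note (illustration): the matrix built above is the standard
 * chromatic adaptation construction from the Lindbloom reference cited before
 * the function,
 *
 *   M = Ma^-1 * diag(rho_d/rho_s, gamma_d/gamma_s, beta_d/beta_s) * Ma
 *
 * where (rho, gamma, beta) = Ma * (Xw, Yw, Zw) are the cone-like responses of
 * the source and destination white points and Ma is the selected CAT matrix
 * (Bradford or von Kries). */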
318 
319 static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
320  int w, int h, const int16_t *lut)
321 {
322  int y, x, n;
323 
324  for (n = 0; n < 3; n++) {
325  int16_t *data = buf[n];
326 
327  for (y = 0; y < h; y++) {
328  for (x = 0; x < w; x++)
329  data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];
330 
331  data += stride;
332  }
333  }
334 }
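/* Note (editor's addition): the +2048 bias mirrors the layout described in
 * fill_gamma_table(): samples are signed and roughly in -2048..30719, and
 * av_clip_uintp2(..., 15) folds them into the 32768-entry LUT range. */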
335 
336 typedef struct ThreadData {
337  AVFrame *in, *out;
338  ptrdiff_t in_linesize[3], out_linesize[3];
339  int in_ss_h, out_ss_h;
340 } ThreadData;
341 
342 static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
343 {
344  const ThreadData *td = data;
345  ColorSpaceContext *s = ctx->priv;
346  uint8_t *in_data[3], *out_data[3];
347  int16_t *rgb[3];
348  int h_in = (td->in->height + 1) >> 1;
349  int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
350  int w = td->in->width, h = h2 - h1;
351 
352  in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
353  in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
354  in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
355  out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
356  out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
357  out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
358  rgb[0] = s->rgb[0] + s->rgb_stride * h1;
359  rgb[1] = s->rgb[1] + s->rgb_stride * h1;
360  rgb[2] = s->rgb[2] + s->rgb_stride * h1;
361 
362  // FIXME for simd, also make sure we do pictures with negative stride
363  // top-down so we don't overwrite lines with padding of data before it
364  // in the same buffer (same as swscale)
365 
366  if (s->yuv2yuv_fastmode) {
367  // FIXME possibly use a fast mode in case only the y range changes?
368  // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
369  // are non-zero
370  s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
371  s->yuv2yuv_coeffs, s->yuv_offset);
372  } else {
373  // FIXME maybe (for caching efficiency) do pipeline per-line instead of
374  // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
375  // 2 lines, for yuv420.)
376  /*
377  * General design:
378  * - yuv2rgb converts from whatever range the input was ([16-235/240] or
379  * [0,255] or the 10/12bpp equivalents thereof) to an integer version
380  * of RGB in pseudo-restricted 15+sign bits. That means that the float
381  * range [0.0,1.0] is in [0,28672], and the remainder of the int16_t
382  * range is used for overflow/underflow outside the representable
383  * range of this RGB type. rgb2yuv is the exact opposite.
384  * - gamma correction is done using a LUT since that appears to work
385  * fairly fast.
386  * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
387  * (or rgb2yuv conversion) uses nearest-neighbour sampling to read
388  * chroma pixels at luma resolution. If you want some more fancy
389  * filter, you can use swscale to convert to yuv444p.
390  * - all coefficients are 14bit (so in the [-2.0,2.0] range).
391  */
392  s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
393  s->yuv2rgb_coeffs, s->yuv_offset[0]);
394  if (!s->rgb2rgb_passthrough) {
395  apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
396  if (!s->lrgb2lrgb_passthrough)
397  s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
398  apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
399  }
400  if (s->dither == DITHER_FSB) {
401  s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
402  s->rgb2yuv_coeffs, s->yuv_offset[1], s->dither_scratch);
403  } else {
404  s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
405  s->rgb2yuv_coeffs, s->yuv_offset[1]);
406  }
407  }
408 
409  return 0;
410 }
411 
412 static int get_range_off(AVFilterContext *ctx, int *off,
413  int *y_rng, int *uv_rng,
414  enum AVColorRange rng, int depth)
415 {
416  switch (rng) {
417  case AVCOL_RANGE_UNSPECIFIED: {
418  ColorSpaceContext *s = ctx->priv;
419 
420  if (!s->did_warn_range) {
421  av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
422  s->did_warn_range = 1;
423  }
424  }
425  // fall-through
426  case AVCOL_RANGE_MPEG:
427  *off = 16 << (depth - 8);
428  *y_rng = 219 << (depth - 8);
429  *uv_rng = 224 << (depth - 8);
430  break;
431  case AVCOL_RANGE_JPEG:
432  *off = 0;
433  *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
434  break;
435  default:
436  return AVERROR(EINVAL);
437  }
438 
439  return 0;
440 }
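/* Worked example (editor's addition): for 10-bit limited ("tv") range this
 * yields off = 16 << 2 = 64, y_rng = 219 << 2 = 876 and uv_rng = 224 << 2 = 896;
 * for 8-bit full ("pc") range it yields off = 0 and y_rng = uv_rng = 255. */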
441 
442 static int create_filtergraph(AVFilterContext *ctx,
443  const AVFrame *in, const AVFrame *out)
444 {
445  ColorSpaceContext *s = ctx->priv;
446  const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
447  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
448  int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
449 
450 #define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
451 #define supported_subsampling(lcw, lch) \
452  (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
453 #define supported_format(d) \
454  ((d) != NULL && (d)->nb_components == 3 && \
455  !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
456  supported_depth((d)->comp[0].depth) && \
457  supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
458 
459  if (!supported_format(in_desc)) {
460  av_log(ctx, AV_LOG_ERROR,
461  "Unsupported input format %d (%s) or bitdepth (%d)\n",
462  in->format, av_get_pix_fmt_name(in->format),
463  in_desc ? in_desc->comp[0].depth : -1);
464  return AVERROR(EINVAL);
465  }
466  if (!supported_format(out_desc)) {
467  av_log(ctx, AV_LOG_ERROR,
468  "Unsupported output format %d (%s) or bitdepth (%d)\n",
469  out->format, av_get_pix_fmt_name(out->format),
470  out_desc ? out_desc->comp[0].depth : -1);
471  return AVERROR(EINVAL);
472  }
473 
474  if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
475  if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
476  if (in->color_trc != s->in_trc) s->in_txchr = NULL;
477  if (out->color_trc != s->out_trc) s->out_txchr = NULL;
478  if (in->colorspace != s->in_csp ||
479  in->color_range != s->in_rng) s->in_lumacoef = NULL;
480  if (out->colorspace != s->out_csp ||
481  out->color_range != s->out_rng) s->out_lumacoef = NULL;
482 
483  if (!s->out_primaries || !s->in_primaries) {
484  s->in_prm = in->color_primaries;
485  if (s->user_iall != CS_UNSPECIFIED)
486  s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
487  if (s->user_iprm != AVCOL_PRI_UNSPECIFIED)
488  s->in_prm = s->user_iprm;
489  s->in_primaries = get_color_primaries(s->in_prm);
490  if (!s->in_primaries) {
491  av_log(ctx, AV_LOG_ERROR,
492  "Unsupported input primaries %d (%s)\n",
493  s->in_prm, av_color_primaries_name(s->in_prm));
494  return AVERROR(EINVAL);
495  }
496  s->out_prm = out->color_primaries;
497  s->out_primaries = get_color_primaries(s->out_prm);
498  if (!s->out_primaries) {
499  if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
500  if (s->user_all == CS_UNSPECIFIED) {
501  av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
502  } else {
503  av_log(ctx, AV_LOG_ERROR,
504  "Unsupported output color property %d\n", s->user_all);
505  }
506  } else {
507  av_log(ctx, AV_LOG_ERROR,
508  "Unsupported output primaries %d (%s)\n",
509  s->out_prm, av_color_primaries_name(s->out_prm));
510  }
511  return AVERROR(EINVAL);
512  }
513  s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
514  sizeof(*s->in_primaries));
515  if (!s->lrgb2lrgb_passthrough) {
516  double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
517  const struct WhitepointCoefficients *wp_out, *wp_in;
518 
519  wp_out = &whitepoint_coefficients[s->out_primaries->wp];
520  wp_in = &whitepoint_coefficients[s->in_primaries->wp];
521  ff_fill_rgb2xyz_table(&s->out_primaries->coeff, wp_out, rgb2xyz);
522  ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
523  ff_fill_rgb2xyz_table(&s->in_primaries->coeff, wp_in, rgb2xyz);
524  if (s->out_primaries->wp != s->in_primaries->wp &&
525  s->wp_adapt != WP_ADAPT_IDENTITY) {
526  double wpconv[3][3], tmp[3][3];
527 
528  fill_whitepoint_conv_table(wpconv, s->wp_adapt, s->in_primaries->wp,
529  s->out_primaries->wp);
530  ff_matrix_mul_3x3(tmp, rgb2xyz, wpconv);
531  ff_matrix_mul_3x3(rgb2rgb, tmp, xyz2rgb);
532  } else {
533  ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
534  }
535  for (m = 0; m < 3; m++)
536  for (n = 0; n < 3; n++) {
537  s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
538  for (o = 1; o < 8; o++)
539  s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
540  }
541 
542  emms = 1;
543  }
544  }
545 
546  if (!s->in_txchr) {
547  av_freep(&s->lin_lut);
548  s->in_trc = in->color_trc;
549  if (s->user_iall != CS_UNSPECIFIED)
550  s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
551  if (s->user_itrc != AVCOL_TRC_UNSPECIFIED)
552  s->in_trc = s->user_itrc;
553  s->in_txchr = get_transfer_characteristics(s->in_trc);
554  if (!s->in_txchr) {
555  av_log(ctx, AV_LOG_ERROR,
556  "Unsupported input transfer characteristics %d (%s)\n",
557  s->in_trc, av_color_transfer_name(s->in_trc));
558  return AVERROR(EINVAL);
559  }
560  }
561 
562  if (!s->out_txchr) {
563  av_freep(&s->lin_lut);
564  s->out_trc = out->color_trc;
565  s->out_txchr = get_transfer_characteristics(s->out_trc);
566  if (!s->out_txchr) {
567  if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
568  if (s->user_all == CS_UNSPECIFIED) {
569  av_log(ctx, AV_LOG_ERROR,
570  "Please specify output transfer characteristics\n");
571  } else {
572  av_log(ctx, AV_LOG_ERROR,
573  "Unsupported output color property %d\n", s->user_all);
574  }
575  } else {
576  av_log(ctx, AV_LOG_ERROR,
577  "Unsupported output transfer characteristics %d (%s)\n",
578  s->out_trc, av_color_transfer_name(s->out_trc));
579  }
580  return AVERROR(EINVAL);
581  }
582  }
583 
584  s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
585  !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
586  if (!s->rgb2rgb_passthrough && !s->lin_lut) {
587  res = fill_gamma_table(s);
588  if (res < 0)
589  return res;
590  emms = 1;
591  }
592 
593  if (!s->in_lumacoef) {
594  s->in_csp = in->colorspace;
595  if (s->user_iall != CS_UNSPECIFIED)
596  s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
597  if (s->user_icsp != AVCOL_SPC_UNSPECIFIED)
598  s->in_csp = s->user_icsp;
599  s->in_rng = in->color_range;
600  if (s->user_irng != AVCOL_RANGE_UNSPECIFIED)
601  s->in_rng = s->user_irng;
602  s->in_lumacoef = ff_get_luma_coefficients(s->in_csp);
603  if (!s->in_lumacoef) {
604  av_log(ctx, AV_LOG_ERROR,
605  "Unsupported input colorspace %d (%s)\n",
606  s->in_csp, av_color_space_name(s->in_csp));
607  return AVERROR(EINVAL);
608  }
609  redo_yuv2rgb = 1;
610  }
611 
612  if (!s->out_lumacoef) {
613  s->out_csp = out->colorspace;
614  s->out_rng = out->color_range;
615  s->out_lumacoef = ff_get_luma_coefficients(s->out_csp);
616  if (!s->out_lumacoef) {
617  if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
618  if (s->user_all == CS_UNSPECIFIED) {
619  av_log(ctx, AV_LOG_ERROR,
620  "Please specify output colorspace\n");
621  } else {
622  av_log(ctx, AV_LOG_ERROR,
623  "Unsupported output color property %d\n", s->user_all);
624  }
625  } else {
626  av_log(ctx, AV_LOG_ERROR,
627  "Unsupported output colorspace %d (%s)\n",
628  s->out_csp, av_color_space_name(s->out_csp));
629  }
630  return AVERROR(EINVAL);
631  }
632  redo_rgb2yuv = 1;
633  }
634 
635  fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
636  in_desc->log2_chroma_w == out_desc->log2_chroma_w;
637  s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
638  s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
639  !memcmp(s->in_lumacoef, s->out_lumacoef,
640  sizeof(*s->in_lumacoef)) &&
641  in_desc->comp[0].depth == out_desc->comp[0].depth;
642  if (!s->yuv2yuv_passthrough) {
643  if (redo_yuv2rgb) {
644  double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
645  int off, bits, in_rng;
646 
647  res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
648  s->in_rng, in_desc->comp[0].depth);
649  if (res < 0) {
650  av_log(ctx, AV_LOG_ERROR,
651  "Unsupported input color range %d (%s)\n",
652  s->in_rng, av_color_range_name(s->in_rng));
653  return res;
654  }
655  for (n = 0; n < 8; n++)
656  s->yuv_offset[0][n] = off;
657  ff_fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
659  bits = 1 << (in_desc->comp[0].depth - 1);
660  for (n = 0; n < 3; n++) {
661  for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
662  s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
663  for (o = 1; o < 8; o++)
664  s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
665  }
666  }
667  av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
668  av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
669  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
670  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
671  s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
672  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
673  emms = 1;
674  }
675 
676  if (redo_rgb2yuv) {
677  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
678  int off, out_rng, bits;
679 
680  res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
681  s->out_rng, out_desc->comp[0].depth);
682  if (res < 0) {
683  av_log(ctx, AV_LOG_ERROR,
684  "Unsupported output color range %d (%s)\n",
685  s->out_rng, av_color_range_name(s->out_rng));
686  return res;
687  }
688  for (n = 0; n < 8; n++)
689  s->yuv_offset[1][n] = off;
690  ff_fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
691  bits = 1 << (29 - out_desc->comp[0].depth);
692  for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
693  for (m = 0; m < 3; m++) {
694  s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
695  for (o = 1; o < 8; o++)
696  s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
697  }
698  }
699  av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
700  s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
701  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
702  s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
703  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
704  emms = 1;
705  }
706 
707  if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
708  int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
709  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
710  double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
711  double yuv2yuv[3][3];
712  int in_rng, out_rng;
713 
714  ff_matrix_mul_3x3(yuv2yuv, rgb2yuv, yuv2rgb);
715  for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
716  for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
717  s->yuv2yuv_coeffs[m][n][0] =
718  lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
719  (in_rng * (1 << odepth)));
720  for (o = 1; o < 8; o++)
721  s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
722  }
723  }
724  av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
725  av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
726  s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
727  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
728  }
729  }
730 
731  if (emms)
732  emms_c();
733 
734  return 0;
735 }
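/* Summary of the fixed-point formats set up above (editor's addition):
 * - lrgb2lrgb_coeffs: Q14, i.e. 16384 == 1.0 (gamut mapping in linear RGB);
 * - yuv2rgb_coeffs:   28672 * 2^(depth-1) / range, mapping dequantized YUV
 *                     into the 28672-scale RGB domain used by the LUTs;
 * - rgb2yuv_coeffs:   2^(29-depth) * range / 28672, the inverse mapping;
 * - yuv2yuv_coeffs:   Q14 scaled by out_range/in_range and 2^(idepth-odepth)
 *                     for the direct fast-mode YUV-to-YUV path.
 * Each coefficient is replicated 8 times so SIMD code can load it directly. */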
736 
737 static av_cold int init(AVFilterContext *ctx)
738 {
739  ColorSpaceContext *s = ctx->priv;
740 
741  ff_colorspacedsp_init(&s->dsp);
742 
743  return 0;
744 }
745 
746 static void uninit(AVFilterContext *ctx)
747 {
748  ColorSpaceContext *s = ctx->priv;
749 
750  av_freep(&s->rgb[0]);
751  av_freep(&s->rgb[1]);
752  av_freep(&s->rgb[2]);
753  s->rgb_sz = 0;
754  av_freep(&s->dither_scratch_base[0][0]);
755  av_freep(&s->dither_scratch_base[0][1]);
756  av_freep(&s->dither_scratch_base[1][0]);
757  av_freep(&s->dither_scratch_base[1][1]);
758  av_freep(&s->dither_scratch_base[2][0]);
759  av_freep(&s->dither_scratch_base[2][1]);
760 
761  av_freep(&s->lin_lut);
762 }
763 
764 static int filter_frame(AVFilterLink *link, AVFrame *in)
765 {
766  AVFilterContext *ctx = link->dst;
767  AVFilterLink *outlink = ctx->outputs[0];
768  ColorSpaceContext *s = ctx->priv;
769  // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
770  // input one if it is writable *OR* the actual literal values of in_*
771  // and out_* are identical (not just their respective properties)
772  AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
773  int res;
774  ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
775  unsigned rgb_sz = rgb_stride * in->height;
776  ThreadData td;
777 
778  if (!out) {
779  av_frame_free(&in);
780  return AVERROR(ENOMEM);
781  }
782  res = av_frame_copy_props(out, in);
783  if (res < 0) {
784  av_frame_free(&in);
785  av_frame_free(&out);
786  return res;
787  }
788 
789  out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
790  default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
791  if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
792  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
793 
794  out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
795  if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
796  out->color_trc = AVCOL_TRC_BT2020_12;
797  } else {
798  out->color_trc = s->user_trc;
799  }
800  out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
801  default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
802  out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
803  in->color_range : s->user_rng;
804  if (rgb_sz != s->rgb_sz) {
805  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(in->format);
806  int uvw = in->width >> desc->log2_chroma_w;
807 
808  av_freep(&s->rgb[0]);
809  av_freep(&s->rgb[1]);
810  av_freep(&s->rgb[2]);
811  s->rgb_sz = 0;
812  av_freep(&s->dither_scratch_base[0][0]);
813  av_freep(&s->dither_scratch_base[0][1]);
814  av_freep(&s->dither_scratch_base[1][0]);
815  av_freep(&s->dither_scratch_base[1][1]);
816  av_freep(&s->dither_scratch_base[2][0]);
817  av_freep(&s->dither_scratch_base[2][1]);
818 
819  s->rgb[0] = av_malloc(rgb_sz);
820  s->rgb[1] = av_malloc(rgb_sz);
821  s->rgb[2] = av_malloc(rgb_sz);
822  s->dither_scratch_base[0][0] =
823  av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
824  s->dither_scratch_base[0][1] =
825  av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
826  s->dither_scratch_base[1][0] =
827  av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
828  s->dither_scratch_base[1][1] =
829  av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
830  s->dither_scratch_base[2][0] =
831  av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
832  s->dither_scratch_base[2][1] =
833  av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
834  s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
835  s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
836  s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
837  s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
838  s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
839  s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
840  if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
841  !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
842  !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
843  !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
844  uninit(ctx);
845  av_frame_free(&in);
846  av_frame_free(&out);
847  return AVERROR(ENOMEM);
848  }
849  s->rgb_sz = rgb_sz;
850  }
851  res = create_filtergraph(ctx, in, out);
852  if (res < 0) {
853  av_frame_free(&in);
854  av_frame_free(&out);
855  return res;
856  }
857  s->rgb_stride = rgb_stride / sizeof(int16_t);
858  td.in = in;
859  td.out = out;
860  td.in_linesize[0] = in->linesize[0];
861  td.in_linesize[1] = in->linesize[1];
862  td.in_linesize[2] = in->linesize[2];
863  td.out_linesize[0] = out->linesize[0];
864  td.out_linesize[1] = out->linesize[1];
865  td.out_linesize[2] = out->linesize[2];
866  td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h;
867  td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h;
868  if (s->yuv2yuv_passthrough) {
869  res = av_frame_copy(out, in);
870  if (res < 0) {
871  av_frame_free(&in);
872  av_frame_free(&out);
873  return res;
874  }
875  } else {
876  ff_filter_execute(ctx, convert, &td, NULL,
877  FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
878  }
879  av_frame_free(&in);
880 
881  return ff_filter_frame(outlink, out);
882 }
883 
884 static int query_formats(AVFilterContext *ctx)
885 {
886  static const enum AVPixelFormat pix_fmts[] = {
887  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
888  AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
889  AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
890  AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
891  AV_PIX_FMT_NONE
892  };
893  int res;
894  ColorSpaceContext *s = ctx->priv;
895  AVFilterFormats *formats = ff_make_format_list(pix_fmts);
896 
897  if (!formats)
898  return AVERROR(ENOMEM);
899  if (s->user_format == AV_PIX_FMT_NONE)
900  return ff_set_common_formats(ctx, formats);
901  res = ff_formats_ref(formats, &ctx->inputs[0]->outcfg.formats);
902  if (res < 0)
903  return res;
904  formats = NULL;
905  res = ff_add_format(&formats, s->user_format);
906  if (res < 0)
907  return res;
908 
909  return ff_formats_ref(formats, &ctx->outputs[0]->incfg.formats);
910 }
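/* Note (editor's addition): format negotiation offers the same planar YUV list
 * on both sides of the filter unless an output format was requested, in which
 * case the input keeps the full list while the output is pinned to the single
 * user-selected format. */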
911 
912 static int config_props(AVFilterLink *outlink)
913 {
914  AVFilterContext *ctx = outlink->dst;
915  AVFilterLink *inlink = outlink->src->inputs[0];
916 
917  if (inlink->w % 2 || inlink->h % 2) {
918  av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
919  inlink->w, inlink->h);
920  return AVERROR_PATCHWELCOME;
921  }
922 
923  outlink->w = inlink->w;
924  outlink->h = inlink->h;
925  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
926  outlink->time_base = inlink->time_base;
927 
928  return 0;
929 }
930 
931 #define OFFSET(x) offsetof(ColorSpaceContext, x)
932 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
933 #define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }
934 
935 static const AVOption colorspace_options[] = {
936  { "all", "Set all color properties together",
937  OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
938  CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
939  ENUM("bt470m", CS_BT470M, "all"),
940  ENUM("bt470bg", CS_BT470BG, "all"),
941  ENUM("bt601-6-525", CS_BT601_6_525, "all"),
942  ENUM("bt601-6-625", CS_BT601_6_625, "all"),
943  ENUM("bt709", CS_BT709, "all"),
944  ENUM("smpte170m", CS_SMPTE170M, "all"),
945  ENUM("smpte240m", CS_SMPTE240M, "all"),
946  ENUM("bt2020", CS_BT2020, "all"),
947 
948  { "space", "Output colorspace",
949  OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
950  AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, "csp"},
951  ENUM("bt709", AVCOL_SPC_BT709, "csp"),
952  ENUM("fcc", AVCOL_SPC_FCC, "csp"),
953  ENUM("bt470bg", AVCOL_SPC_BT470BG, "csp"),
954  ENUM("smpte170m", AVCOL_SPC_SMPTE170M, "csp"),
955  ENUM("smpte240m", AVCOL_SPC_SMPTE240M, "csp"),
956  ENUM("ycgco", AVCOL_SPC_YCGCO, "csp"),
957  ENUM("gbr", AVCOL_SPC_RGB, "csp"),
958  ENUM("bt2020nc", AVCOL_SPC_BT2020_NCL, "csp"),
959  ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),
960 
961  { "range", "Output color range",
962  OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
963  AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
964  ENUM("tv", AVCOL_RANGE_MPEG, "rng"),
965  ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
966  ENUM("pc", AVCOL_RANGE_JPEG, "rng"),
967  ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),
968 
969  { "primaries", "Output color primaries",
970  OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
971  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
972  ENUM("bt709", AVCOL_PRI_BT709, "prm"),
973  ENUM("bt470m", AVCOL_PRI_BT470M, "prm"),
974  ENUM("bt470bg", AVCOL_PRI_BT470BG, "prm"),
975  ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
976  ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
977  ENUM("smpte428", AVCOL_PRI_SMPTE428, "prm"),
978  ENUM("film", AVCOL_PRI_FILM, "prm"),
979  ENUM("smpte431", AVCOL_PRI_SMPTE431, "prm"),
980  ENUM("smpte432", AVCOL_PRI_SMPTE432, "prm"),
981  ENUM("bt2020", AVCOL_PRI_BT2020, "prm"),
982  ENUM("jedec-p22", AVCOL_PRI_JEDEC_P22, "prm"),
983  ENUM("ebu3213", AVCOL_PRI_EBU3213, "prm"),
984 
985  { "trc", "Output transfer characteristics",
986  OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
987  AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
988  ENUM("bt709", AVCOL_TRC_BT709, "trc"),
989  ENUM("bt470m", AVCOL_TRC_GAMMA22, "trc"),
990  ENUM("gamma22", AVCOL_TRC_GAMMA22, "trc"),
991  ENUM("bt470bg", AVCOL_TRC_GAMMA28, "trc"),
992  ENUM("gamma28", AVCOL_TRC_GAMMA28, "trc"),
993  ENUM("smpte170m", AVCOL_TRC_SMPTE170M, "trc"),
994  ENUM("smpte240m", AVCOL_TRC_SMPTE240M, "trc"),
995  ENUM("linear", AVCOL_TRC_LINEAR, "trc"),
996  ENUM("srgb", AVCOL_TRC_IEC61966_2_1, "trc"),
997  ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
998  ENUM("xvycc", AVCOL_TRC_IEC61966_2_4, "trc"),
999  ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
1000  ENUM("bt2020-10", AVCOL_TRC_BT2020_10, "trc"),
1001  ENUM("bt2020-12", AVCOL_TRC_BT2020_12, "trc"),
1002 
1003  { "format", "Output pixel format",
1004  OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
1005  AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, "fmt" },
1006  ENUM("yuv420p", AV_PIX_FMT_YUV420P, "fmt"),
1007  ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
1008  ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
1009  ENUM("yuv422p", AV_PIX_FMT_YUV422P, "fmt"),
1010  ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
1011  ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
1012  ENUM("yuv444p", AV_PIX_FMT_YUV444P, "fmt"),
1013  ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
1014  ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
1015 
1016  { "fast", "Ignore primary chromaticity and gamma correction",
1017  OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
1018  0, 1, FLAGS },
1019 
1020  { "dither", "Dithering mode",
1021  OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
1022  DITHER_NONE, DITHER_NB - 1, FLAGS, "dither" },
1023  ENUM("none", DITHER_NONE, "dither"),
1024  ENUM("fsb", DITHER_FSB, "dither"),
1025 
1026  { "wpadapt", "Whitepoint adaptation method",
1027  OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
1028  WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, "wpadapt" },
1029  ENUM("bradford", WP_ADAPT_BRADFORD, "wpadapt"),
1030  ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
1031  ENUM("identity", WP_ADAPT_IDENTITY, "wpadapt"),
1032 
1033  { "iall", "Set all input color properties together",
1034  OFFSET(user_iall), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
1035  CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
1036  { "ispace", "Input colorspace",
1037  OFFSET(user_icsp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
1038  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "csp" },
1039  { "irange", "Input color range",
1040  OFFSET(user_irng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
1041  AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
1042  { "iprimaries", "Input color primaries",
1043  OFFSET(user_iprm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
1044  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
1045  { "itrc", "Input transfer characteristics",
1046  OFFSET(user_itrc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
1047  AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
1048 
1049  { NULL }
1050 };
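/* Usage sketch (editor's addition, option names as defined above), e.g. tagging
 * untagged BT.601 input and converting it to BT.709 with dithering:
 *
 *   ffmpeg -i in.mkv -vf colorspace=iall=bt601-6-625:all=bt709:dither=fsb out.mkv
 */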
1051 
1052 AVFILTER_DEFINE_CLASS(colorspace);
1053 
1054 static const AVFilterPad inputs[] = {
1055  {
1056  .name = "default",
1057  .type = AVMEDIA_TYPE_VIDEO,
1058  .filter_frame = filter_frame,
1059  },
1060 };
1061 
1062 static const AVFilterPad outputs[] = {
1063  {
1064  .name = "default",
1065  .type = AVMEDIA_TYPE_VIDEO,
1066  .config_props = config_props,
1067  },
1068 };
1069 
1070 const AVFilter ff_vf_colorspace = {
1071  .name = "colorspace",
1072  .description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
1073  .init = init,
1074  .uninit = uninit,
1075  .priv_size = sizeof(ColorSpaceContext),
1076  .priv_class = &colorspace_class,
1077  FILTER_INPUTS(inputs),
1078  FILTER_OUTPUTS(outputs),
1079  FILTER_QUERY_FUNC(query_formats),
1080  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
1081 };