FFmpeg
vf_colorspace.c
1 /*
2  * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Convert between colorspaces.
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/pixfmt.h"
30 
31 #include "avfilter.h"
32 #include "colorspacedsp.h"
33 #include "formats.h"
34 #include "internal.h"
35 #include "video.h"
36 #include "colorspace.h"
37 
38 enum DitherMode {
39  DITHER_NONE,
40  DITHER_FSB,
41  DITHER_NB,
42 };
43 
44 enum Colorspace {
45  CS_UNSPECIFIED,
46  CS_BT470M,
47  CS_BT470BG,
48  CS_BT601_6_525,
49  CS_BT601_6_625,
50  CS_BT709,
51  CS_SMPTE170M,
52  CS_SMPTE240M,
53  CS_BT2020,
54  CS_NB,
55 };
56 
57 enum Whitepoint {
58  WP_D65,
59  WP_C,
60  WP_DCI,
61  WP_E,
62  WP_NB,
63 };
64 
65 enum WhitepointAdaptation {
66  WP_ADAPT_BRADFORD,
67  WP_ADAPT_VON_KRIES,
68  NB_WP_ADAPT_NON_IDENTITY,
69  WP_ADAPT_IDENTITY = NB_WP_ADAPT_NON_IDENTITY,
70  NB_WP_ADAPT,
71 };
72 
73 static const enum AVColorTransferCharacteristic default_trc[CS_NB + 1] = {
74  [CS_UNSPECIFIED] = AVCOL_TRC_UNSPECIFIED,
75  [CS_BT470M] = AVCOL_TRC_GAMMA22,
76  [CS_BT470BG] = AVCOL_TRC_GAMMA28,
77  [CS_BT601_6_525] = AVCOL_TRC_SMPTE170M,
78  [CS_BT601_6_625] = AVCOL_TRC_SMPTE170M,
79  [CS_BT709] = AVCOL_TRC_BT709,
80  [CS_SMPTE170M] = AVCOL_TRC_SMPTE170M,
81  [CS_SMPTE240M] = AVCOL_TRC_SMPTE240M,
82  [CS_BT2020] = AVCOL_TRC_BT2020_10,
83  [CS_NB] = AVCOL_TRC_UNSPECIFIED,
84 };
85 
86 static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
87  [CS_UNSPECIFIED] = AVCOL_PRI_UNSPECIFIED,
88  [CS_BT470M] = AVCOL_PRI_BT470M,
89  [CS_BT470BG] = AVCOL_PRI_BT470BG,
90  [CS_BT601_6_525] = AVCOL_PRI_SMPTE170M,
91  [CS_BT601_6_625] = AVCOL_PRI_BT470BG,
92  [CS_BT709] = AVCOL_PRI_BT709,
93  [CS_SMPTE170M] = AVCOL_PRI_SMPTE170M,
94  [CS_SMPTE240M] = AVCOL_PRI_SMPTE240M,
95  [CS_BT2020] = AVCOL_PRI_BT2020,
96  [CS_NB] = AVCOL_PRI_UNSPECIFIED,
97 };
98 
99 static const enum AVColorSpace default_csp[CS_NB + 1] = {
100  [CS_UNSPECIFIED] = AVCOL_SPC_UNSPECIFIED,
101  [CS_BT470M] = AVCOL_SPC_SMPTE170M,
102  [CS_BT470BG] = AVCOL_SPC_BT470BG,
103  [CS_BT601_6_525] = AVCOL_SPC_SMPTE170M,
104  [CS_BT601_6_625] = AVCOL_SPC_BT470BG,
105  [CS_BT709] = AVCOL_SPC_BT709,
106  [CS_SMPTE170M] = AVCOL_SPC_SMPTE170M,
107  [CS_SMPTE240M] = AVCOL_SPC_SMPTE240M,
108  [CS_BT2020] = AVCOL_SPC_BT2020_NCL,
109  [CS_NB] = AVCOL_SPC_UNSPECIFIED,
110 };
111 
112 struct ColorPrimaries {
113  enum Whitepoint wp;
114  struct PrimaryCoefficients coeff;
115 };
116 
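/*
 * Parameters of the piecewise transfer functions handled below: for linear
 * values v >= beta the encoded value is alpha * pow(v, gamma) - (alpha - 1);
 * for v < beta it is the linear segment delta * v (see fill_gamma_table()).
 */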
117 struct TransferCharacteristics {
118  double alpha, beta, gamma, delta;
119 };
120 
121 typedef struct ColorSpaceContext {
122  const AVClass *class;
123 
124  ColorSpaceDSPContext dsp;
125 
126  enum Colorspace user_all, user_iall;
127  enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
128  enum AVColorRange in_rng, out_rng, user_rng, user_irng;
129  enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
130  enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
131  enum AVPixelFormat in_format, user_format;
132  int fast_mode;
133  enum DitherMode dither;
134  enum WhitepointAdaptation wp_adapt;
135 
136  int16_t *rgb[3];
137  ptrdiff_t rgb_stride;
138  unsigned rgb_sz;
139  int *dither_scratch[3][2], *dither_scratch_base[3][2];
140 
141  const struct ColorPrimaries *in_primaries, *out_primaries;
142  int lrgb2lrgb_passthrough;
143  DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];
144 
145  const struct TransferCharacteristics *in_txchr, *out_txchr;
146  int rgb2rgb_passthrough;
147  int16_t *lin_lut, *delin_lut;
148 
149  const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
150  int yuv2yuv_passthrough, yuv2yuv_fastmode;
151  DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
152  DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
153  DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
154  DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
155  yuv2rgb_fn yuv2rgb;
156  rgb2yuv_fn rgb2yuv;
157  rgb2yuv_fsb_fn rgb2yuv_fsb;
158  yuv2yuv_fn yuv2yuv;
159  double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
160  int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;
161 
162  int did_warn_range;
163 } ColorSpaceContext;
164 
165 // FIXME deal with odd width/heights
166 // FIXME faster linearize/delinearize implementation (integer pow)
167 // FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
168 // FIXME test that the values in (de)lin_lut don't exceed their container storage
169 // type size (only useful if we keep the LUT and don't move to fast integer pow)
170 // FIXME dithering if bitdepth goes down?
171 // FIXME bitexact for fate integration?
172 
173 // FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
174 // find any actual tables that document their real values...
175 // See the first graph at http://www.13thmonkey.org/~boris/gammacorrection/ for why this matters
176 static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
177  [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
178  [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
179  [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
180  [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
181  [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
182  [AVCOL_TRC_IEC61966_2_1] = { 1.055, 0.0031308, 1.0 / 2.4, 12.92 },
183  [AVCOL_TRC_IEC61966_2_4] = { 1.099, 0.018, 0.45, 4.5 },
184  [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
185  [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
186 };
187 
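/*
 * TRCs with no entry in the table above keep alpha == 0.0 and are therefore
 * rejected as unsupported by get_transfer_characteristics().
 */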
188 static const struct TransferCharacteristics *
189 get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
190 {
191  const struct TransferCharacteristics *coeffs;
192 
193  if (trc >= AVCOL_TRC_NB)
194  return NULL;
195  coeffs = &transfer_characteristics[trc];
196  if (!coeffs->alpha)
197  return NULL;
198 
199  return coeffs;
200 }
201 
202 static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB] = {
203  [WP_D65] = { 0.3127, 0.3290 },
204  [WP_C] = { 0.3100, 0.3160 },
205  [WP_DCI] = { 0.3140, 0.3510 },
206  [WP_E] = { 1/3.0f, 1/3.0f },
207 };
208 
209 static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB] = {
210  [AVCOL_PRI_BT709] = { WP_D65, { 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 } },
211  [AVCOL_PRI_BT470M] = { WP_C, { 0.670, 0.330, 0.210, 0.710, 0.140, 0.080 } },
212  [AVCOL_PRI_BT470BG] = { WP_D65, { 0.640, 0.330, 0.290, 0.600, 0.150, 0.060 } },
213  [AVCOL_PRI_SMPTE170M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
214  [AVCOL_PRI_SMPTE240M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
215  [AVCOL_PRI_SMPTE428] = { WP_E, { 0.735, 0.265, 0.274, 0.718, 0.167, 0.009 } },
216  [AVCOL_PRI_SMPTE431] = { WP_DCI, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
217  [AVCOL_PRI_SMPTE432] = { WP_D65, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
218  [AVCOL_PRI_FILM] = { WP_C, { 0.681, 0.319, 0.243, 0.692, 0.145, 0.049 } },
219  [AVCOL_PRI_BT2020] = { WP_D65, { 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 } },
220  [AVCOL_PRI_JEDEC_P22] = { WP_D65, { 0.630, 0.340, 0.295, 0.605, 0.155, 0.077 } },
221 };
222 
223 static const struct ColorPrimaries *get_color_primaries(enum AVColorPrimaries prm)
224 {
225  const struct ColorPrimaries *p;
226 
227  if (prm >= AVCOL_PRI_NB)
228  return NULL;
229  p = &color_primaries[prm];
230  if (!p->coeff.xr)
231  return NULL;
232 
233  return p;
234 }
235 
236 static int fill_gamma_table(ColorSpaceContext *s)
237 {
238  int n;
239  double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
240  double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
241  double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
242  double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
243  double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
244 
245  s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
246  if (!s->lin_lut)
247  return AVERROR(ENOMEM);
248  s->delin_lut = &s->lin_lut[32768];
249  for (n = 0; n < 32768; n++) {
250  double v = (n - 2048.0) / 28672.0, d, l;
251 
252  // delinearize
253  if (v <= -out_beta) {
254  d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
255  } else if (v < out_beta) {
256  d = out_delta * v;
257  } else {
258  d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
259  }
260  s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
261 
262  // linearize
263  if (v <= -in_beta * in_delta) {
264  l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
265  } else if (v < in_beta * in_delta) {
266  l = v * in_idelta;
267  } else {
268  l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
269  }
270  s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
271  }
272 
273  return 0;
274 }
275 
276 /*
277  * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
278  * This function uses the Bradford mechanism.
279  */
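/*
 * Concretely: both white points are mapped into the cone-response domain of
 * the chosen method (matrix ma), each response is scaled by the
 * destination/source ratio (the diagonal entries of fac), and the result is
 * mapped back through the inverse matrix mai.
 */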
280 static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
281  enum Whitepoint src, enum Whitepoint dst)
282 {
283  static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
284  [WP_ADAPT_BRADFORD] = {
285  { 0.8951, 0.2664, -0.1614 },
286  { -0.7502, 1.7135, 0.0367 },
287  { 0.0389, -0.0685, 1.0296 },
288  }, [WP_ADAPT_VON_KRIES] = {
289  { 0.40024, 0.70760, -0.08081 },
290  { -0.22630, 1.16532, 0.04570 },
291  { 0.00000, 0.00000, 0.91822 },
292  },
293  };
294  const double (*ma)[3] = ma_tbl[wp_adapt];
295  const struct WhitepointCoefficients *wp_src = &whitepoint_coefficients[src];
296  double zw_src = 1.0 - wp_src->xw - wp_src->yw;
297  const struct WhitepointCoefficients *wp_dst = &whitepoint_coefficients[dst];
298  double zw_dst = 1.0 - wp_dst->xw - wp_dst->yw;
299  double mai[3][3], fac[3][3], tmp[3][3];
300  double rs, gs, bs, rd, gd, bd;
301 
302  ff_matrix_invert_3x3(ma, mai);
303  rs = ma[0][0] * wp_src->xw + ma[0][1] * wp_src->yw + ma[0][2] * zw_src;
304  gs = ma[1][0] * wp_src->xw + ma[1][1] * wp_src->yw + ma[1][2] * zw_src;
305  bs = ma[2][0] * wp_src->xw + ma[2][1] * wp_src->yw + ma[2][2] * zw_src;
306  rd = ma[0][0] * wp_dst->xw + ma[0][1] * wp_dst->yw + ma[0][2] * zw_dst;
307  gd = ma[1][0] * wp_dst->xw + ma[1][1] * wp_dst->yw + ma[1][2] * zw_dst;
308  bd = ma[2][0] * wp_dst->xw + ma[2][1] * wp_dst->yw + ma[2][2] * zw_dst;
309  fac[0][0] = rd / rs;
310  fac[1][1] = gd / gs;
311  fac[2][2] = bd / bs;
312  fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
313  ff_matrix_mul_3x3(tmp, ma, fac);
314  ff_matrix_mul_3x3(out, tmp, mai);
315 }
316 
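/*
 * The LUTs are indexed with the same fixed-point layout the RGB buffers use:
 * a +2048 offset clipped to 15 bits, with 28672 representing 1.0.
 */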
317 static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
318  int w, int h, const int16_t *lut)
319 {
320  int y, x, n;
321 
322  for (n = 0; n < 3; n++) {
323  int16_t *data = buf[n];
324 
325  for (y = 0; y < h; y++) {
326  for (x = 0; x < w; x++)
327  data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];
328 
329  data += stride;
330  }
331  }
332 }
333 
334 struct ThreadData {
335  AVFrame *in, *out;
336  ptrdiff_t in_linesize[3], out_linesize[3];
337  int in_ss_h, out_ss_h;
338 };
339 
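/*
 * Slice worker: each job converts a horizontal band of the frame. Band
 * boundaries h1/h2 are forced to even luma rows so that 4:2:0 chroma lines
 * never straddle two jobs.
 */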
340 static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
341 {
342  struct ThreadData *td = data;
343  ColorSpaceContext *s = ctx->priv;
344  uint8_t *in_data[3], *out_data[3];
345  int16_t *rgb[3];
346  int h_in = (td->in->height + 1) >> 1;
347  int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
348  int w = td->in->width, h = h2 - h1;
349 
350  in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
351  in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
352  in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
353  out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
354  out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
355  out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
356  rgb[0] = s->rgb[0] + s->rgb_stride * h1;
357  rgb[1] = s->rgb[1] + s->rgb_stride * h1;
358  rgb[2] = s->rgb[2] + s->rgb_stride * h1;
359 
360  // FIXME for simd, also make sure we do pictures with negative stride
361  // top-down so we don't overwrite lines with padding of data before it
362  // in the same buffer (same as swscale)
363 
364  if (s->yuv2yuv_fastmode) {
365  // FIXME possibly use a fast mode in case only the y range changes?
366  // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
367  // are non-zero
368  s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
369  s->yuv2yuv_coeffs, s->yuv_offset);
370  } else {
371  // FIXME maybe (for caching efficiency) do pipeline per-line instead of
372  // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
373  // 2 lines, for yuv420.)
374  /*
375  * General design:
376  * - yuv2rgb converts from whatever range the input was ([16-235/240] or
377  * [0,255] or the 10/12bpp equivalents thereof) to an integer version
378  * of RGB in pseudo-restricted 15+sign bits. That means that the float
379  * range [0.0,1.0] is in [0,28672], and the remainder of the int16_t
380  * range is used for overflow/underflow outside the representable
381  * range of this RGB type. rgb2yuv is the exact opposite.
382  * - gamma correction is done using a LUT since that appears to work
383  * fairly fast.
384  * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
385  * (or rgb2yuv conversion) uses nearest-neighbour sampling to read
386  * chroma pixels at luma resolution. If you want a fancier filter,
387  * you can use swscale to convert to yuv444p first.
388  * - all coefficients are 14bit (so in the [-2.0,2.0] range).
389  */
390  s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
391  s->yuv2rgb_coeffs, s->yuv_offset[0]);
392  if (!s->rgb2rgb_passthrough) {
393  apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
394  if (!s->lrgb2lrgb_passthrough)
395  s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
396  apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
397  }
398  if (s->dither == DITHER_FSB) {
399  s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
400  s->rgb2yuv_coeffs, s->yuv_offset[1], s->dither_scratch);
401  } else {
402  s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
403  s->rgb2yuv_coeffs, s->yuv_offset[1]);
404  }
405  }
406 
407  return 0;
408 }
409 
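/*
 * At 8-bit depth this yields off=16, y_rng=219, uv_rng=224 for limited
 * ("tv"/"mpeg") range and off=0, y_rng=uv_rng=255 for full ("pc"/"jpeg")
 * range; limited-range values scale with 2^(depth-8), full range uses
 * 2^depth - 1.
 */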
410 static int get_range_off(AVFilterContext *ctx, int *off,
411  int *y_rng, int *uv_rng,
412  enum AVColorRange rng, int depth)
413 {
414  switch (rng) {
415  case AVCOL_RANGE_UNSPECIFIED: {
416  ColorSpaceContext *s = ctx->priv;
417 
418  if (!s->did_warn_range) {
419  av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
420  s->did_warn_range = 1;
421  }
422  }
423  // fall-through
424  case AVCOL_RANGE_MPEG:
425  *off = 16 << (depth - 8);
426  *y_rng = 219 << (depth - 8);
427  *uv_rng = 224 << (depth - 8);
428  break;
429  case AVCOL_RANGE_JPEG:
430  *off = 0;
431  *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
432  break;
433  default:
434  return AVERROR(EINVAL);
435  }
436 
437  return 0;
438 }
439 
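/*
 * (Re)derives the conversion state for the current in/out frame properties:
 * the lrgb2lrgb primaries matrix, the gamma LUTs, the yuv2rgb/rgb2yuv
 * coefficient tables and, when no RGB step is needed and subsampling
 * matches, the direct yuv2yuv fast path. Only parts whose properties
 * changed since the last frame are recomputed.
 */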
440 static int create_filtergraph(AVFilterContext *ctx,
441  const AVFrame *in, const AVFrame *out)
442 {
443  ColorSpaceContext *s = ctx->priv;
444  const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
445  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
446  int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
447 
448 #define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
449 #define supported_subsampling(lcw, lch) \
450  (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
451 #define supported_format(d) \
452  ((d) != NULL && (d)->nb_components == 3 && \
453  !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
454  supported_depth((d)->comp[0].depth) && \
455  supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
456 
457  if (!supported_format(in_desc)) {
458  av_log(ctx, AV_LOG_ERROR,
459  "Unsupported input format %d (%s) or bitdepth (%d)\n",
460  in->format, av_get_pix_fmt_name(in->format),
461  in_desc ? in_desc->comp[0].depth : -1);
462  return AVERROR(EINVAL);
463  }
464  if (!supported_format(out_desc)) {
465  av_log(ctx, AV_LOG_ERROR,
466  "Unsupported output format %d (%s) or bitdepth (%d)\n",
467  out->format, av_get_pix_fmt_name(out->format),
468  out_desc ? out_desc->comp[0].depth : -1);
469  return AVERROR(EINVAL);
470  }
471 
472  if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
473  if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
474  if (in->color_trc != s->in_trc) s->in_txchr = NULL;
475  if (out->color_trc != s->out_trc) s->out_txchr = NULL;
476  if (in->colorspace != s->in_csp ||
477  in->color_range != s->in_rng) s->in_lumacoef = NULL;
478  if (out->colorspace != s->out_csp ||
479  out->color_range != s->out_rng) s->out_lumacoef = NULL;
480 
481  if (!s->out_primaries || !s->in_primaries) {
482  s->in_prm = in->color_primaries;
483  if (s->user_iall != CS_UNSPECIFIED)
484  s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
485  if (s->user_iprm != AVCOL_PRI_UNSPECIFIED)
486  s->in_prm = s->user_iprm;
487  s->in_primaries = get_color_primaries(s->in_prm);
488  if (!s->in_primaries) {
489  av_log(ctx, AV_LOG_ERROR,
490  "Unsupported input primaries %d (%s)\n",
491  s->in_prm, av_color_primaries_name(s->in_prm));
492  return AVERROR(EINVAL);
493  }
494  s->out_prm = out->color_primaries;
495  s->out_primaries = get_color_primaries(s->out_prm);
496  if (!s->out_primaries) {
497  if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
498  if (s->user_all == CS_UNSPECIFIED) {
499  av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
500  } else {
501  av_log(ctx, AV_LOG_ERROR,
502  "Unsupported output color property %d\n", s->user_all);
503  }
504  } else {
505  av_log(ctx, AV_LOG_ERROR,
506  "Unsupported output primaries %d (%s)\n",
507  s->out_prm, av_color_primaries_name(s->out_prm));
508  }
509  return AVERROR(EINVAL);
510  }
511  s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
512  sizeof(*s->in_primaries));
513  if (!s->lrgb2lrgb_passthrough) {
514  double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
515  const struct WhitepointCoefficients *wp_out, *wp_in;
516 
517  wp_out = &whitepoint_coefficients[s->out_primaries->wp];
518  wp_in = &whitepoint_coefficients[s->in_primaries->wp];
519  ff_fill_rgb2xyz_table(&s->out_primaries->coeff, wp_out, rgb2xyz);
520  ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
521  ff_fill_rgb2xyz_table(&s->in_primaries->coeff, wp_in, rgb2xyz);
522  if (s->out_primaries->wp != s->in_primaries->wp &&
523  s->wp_adapt != WP_ADAPT_IDENTITY) {
524  double wpconv[3][3], tmp[3][3];
525 
526  fill_whitepoint_conv_table(wpconv, s->wp_adapt, s->in_primaries->wp,
527  s->out_primaries->wp);
528  ff_matrix_mul_3x3(tmp, rgb2xyz, wpconv);
529  ff_matrix_mul_3x3(rgb2rgb, tmp, xyz2rgb);
530  } else {
531  ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
532  }
533  for (m = 0; m < 3; m++)
534  for (n = 0; n < 3; n++) {
535  s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
536  for (o = 1; o < 8; o++)
537  s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
538  }
539 
540  emms = 1;
541  }
542  }
543 
544  if (!s->in_txchr) {
545  av_freep(&s->lin_lut);
546  s->in_trc = in->color_trc;
547  if (s->user_iall != CS_UNSPECIFIED)
548  s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
549  if (s->user_itrc != AVCOL_TRC_UNSPECIFIED)
550  s->in_trc = s->user_itrc;
551  s->in_txchr = get_transfer_characteristics(s->in_trc);
552  if (!s->in_txchr) {
553  av_log(ctx, AV_LOG_ERROR,
554  "Unsupported input transfer characteristics %d (%s)\n",
555  s->in_trc, av_color_transfer_name(s->in_trc));
556  return AVERROR(EINVAL);
557  }
558  }
559 
560  if (!s->out_txchr) {
561  av_freep(&s->lin_lut);
562  s->out_trc = out->color_trc;
563  s->out_txchr = get_transfer_characteristics(s->out_trc);
564  if (!s->out_txchr) {
565  if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
566  if (s->user_all == CS_UNSPECIFIED) {
567  av_log(ctx, AV_LOG_ERROR,
568  "Please specify output transfer characteristics\n");
569  } else {
570  av_log(ctx, AV_LOG_ERROR,
571  "Unsupported output color property %d\n", s->user_all);
572  }
573  } else {
574  av_log(ctx, AV_LOG_ERROR,
575  "Unsupported output transfer characteristics %d (%s)\n",
576  s->out_trc, av_color_transfer_name(s->out_trc));
577  }
578  return AVERROR(EINVAL);
579  }
580  }
581 
582  s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
583  !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
584  if (!s->rgb2rgb_passthrough && !s->lin_lut) {
585  res = fill_gamma_table(s);
586  if (res < 0)
587  return res;
588  emms = 1;
589  }
590 
591  if (!s->in_lumacoef) {
592  s->in_csp = in->colorspace;
593  if (s->user_iall != CS_UNSPECIFIED)
594  s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
595  if (s->user_icsp != AVCOL_SPC_UNSPECIFIED)
596  s->in_csp = s->user_icsp;
597  s->in_rng = in->color_range;
598  if (s->user_irng != AVCOL_RANGE_UNSPECIFIED)
599  s->in_rng = s->user_irng;
600  s->in_lumacoef = ff_get_luma_coefficients(s->in_csp);
601  if (!s->in_lumacoef) {
602  av_log(ctx, AV_LOG_ERROR,
603  "Unsupported input colorspace %d (%s)\n",
604  s->in_csp, av_color_space_name(s->in_csp));
605  return AVERROR(EINVAL);
606  }
607  redo_yuv2rgb = 1;
608  }
609 
610  if (!s->out_lumacoef) {
611  s->out_csp = out->colorspace;
612  s->out_rng = out->color_range;
613  s->out_lumacoef = ff_get_luma_coefficients(s->out_csp);
614  if (!s->out_lumacoef) {
615  if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
616  if (s->user_all == CS_UNSPECIFIED) {
617  av_log(ctx, AV_LOG_ERROR,
618  "Please specify output colorspace\n");
619  } else {
620  av_log(ctx, AV_LOG_ERROR,
621  "Unsupported output color property %d\n", s->user_all);
622  }
623  } else {
624  av_log(ctx, AV_LOG_ERROR,
625  "Unsupported output colorspace %d (%s)\n",
626  s->out_csp, av_color_space_name(s->out_csp));
627  }
628  return AVERROR(EINVAL);
629  }
630  redo_rgb2yuv = 1;
631  }
632 
633  fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
634  in_desc->log2_chroma_w == out_desc->log2_chroma_w;
635  s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
636  s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
637  !memcmp(s->in_lumacoef, s->out_lumacoef,
638  sizeof(*s->in_lumacoef)) &&
639  in_desc->comp[0].depth == out_desc->comp[0].depth;
640  if (!s->yuv2yuv_passthrough) {
641  if (redo_yuv2rgb) {
642  double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
643  int off, bits, in_rng;
644 
645  res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
646  s->in_rng, in_desc->comp[0].depth);
647  if (res < 0) {
648  av_log(ctx, AV_LOG_ERROR,
649  "Unsupported input color range %d (%s)\n",
650  s->in_rng, av_color_range_name(s->in_rng));
651  return res;
652  }
653  for (n = 0; n < 8; n++)
654  s->yuv_offset[0][n] = off;
655  ff_fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
656  ff_matrix_invert_3x3(rgb2yuv, yuv2rgb);
657  bits = 1 << (in_desc->comp[0].depth - 1);
658  for (n = 0; n < 3; n++) {
659  for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
660  s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
661  for (o = 1; o < 8; o++)
662  s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
663  }
664  }
665  av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
666  av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
667  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
668  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
669  s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
670  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
671  emms = 1;
672  }
673 
674  if (redo_rgb2yuv) {
675  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
676  int off, out_rng, bits;
677 
678  res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
679  s->out_rng, out_desc->comp[0].depth);
680  if (res < 0) {
681  av_log(ctx, AV_LOG_ERROR,
682  "Unsupported output color range %d (%s)\n",
683  s->out_rng, av_color_range_name(s->out_rng));
684  return res;
685  }
686  for (n = 0; n < 8; n++)
687  s->yuv_offset[1][n] = off;
688  ff_fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
689  bits = 1 << (29 - out_desc->comp[0].depth);
690  for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
691  for (m = 0; m < 3; m++) {
692  s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
693  for (o = 1; o < 8; o++)
694  s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
695  }
696  }
697  av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
698  s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
699  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
700  s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
701  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
702  emms = 1;
703  }
704 
705  if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
706  int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
707  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
708  double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
709  double yuv2yuv[3][3];
710  int in_rng, out_rng;
711 
712  ff_matrix_mul_3x3(yuv2yuv, rgb2yuv, yuv2rgb);
713  for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
714  for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
715  s->yuv2yuv_coeffs[m][n][0] =
716  lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
717  (in_rng * (1 << odepth)));
718  for (o = 1; o < 8; o++)
719  s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
720  }
721  }
722  av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
723  av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
724  s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
725  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
726  }
727  }
728 
729  if (emms)
730  emms_c();
731 
732  return 0;
733 }
734 
735 static av_cold int init(AVFilterContext *ctx)
736 {
737  ColorSpaceContext *s = ctx->priv;
738 
739  ff_colorspacedsp_init(&s->dsp);
740 
741  return 0;
742 }
743 
744 static av_cold void uninit(AVFilterContext *ctx)
745 {
746  ColorSpaceContext *s = ctx->priv;
747 
748  av_freep(&s->rgb[0]);
749  av_freep(&s->rgb[1]);
750  av_freep(&s->rgb[2]);
751  s->rgb_sz = 0;
752  av_freep(&s->dither_scratch_base[0][0]);
753  av_freep(&s->dither_scratch_base[0][1]);
754  av_freep(&s->dither_scratch_base[1][0]);
755  av_freep(&s->dither_scratch_base[1][1]);
756  av_freep(&s->dither_scratch_base[2][0]);
757  av_freep(&s->dither_scratch_base[2][1]);
758 
759  av_freep(&s->lin_lut);
760 }
761 
762 static int filter_frame(AVFilterLink *link, AVFrame *in)
763 {
764  AVFilterContext *ctx = link->dst;
765  AVFilterLink *outlink = ctx->outputs[0];
766  ColorSpaceContext *s = ctx->priv;
767  // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
768  // input one if it is writable *OR* the actual literal values of in_*
769  // and out_* are identical (not just their respective properties)
770  AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
771  int res;
772  ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
773  unsigned rgb_sz = rgb_stride * in->height;
774  struct ThreadData td;
775 
776  if (!out) {
777  av_frame_free(&in);
778  return AVERROR(ENOMEM);
779  }
780  res = av_frame_copy_props(out, in);
781  if (res < 0) {
782  av_frame_free(&in);
783  av_frame_free(&out);
784  return res;
785  }
786 
787  out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
788  default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
789  if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
790  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
791 
792  out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
793  if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
794  out->color_trc = AVCOL_TRC_BT2020_12;
795  } else {
796  out->color_trc = s->user_trc;
797  }
798  out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
799  default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
800  out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
801  in->color_range : s->user_rng;
802  if (rgb_sz != s->rgb_sz) {
803  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(in->format);
804  int uvw = in->width >> desc->log2_chroma_w;
805 
806  av_freep(&s->rgb[0]);
807  av_freep(&s->rgb[1]);
808  av_freep(&s->rgb[2]);
809  s->rgb_sz = 0;
810  av_freep(&s->dither_scratch_base[0][0]);
811  av_freep(&s->dither_scratch_base[0][1]);
812  av_freep(&s->dither_scratch_base[1][0]);
813  av_freep(&s->dither_scratch_base[1][1]);
814  av_freep(&s->dither_scratch_base[2][0]);
815  av_freep(&s->dither_scratch_base[2][1]);
816 
817  s->rgb[0] = av_malloc(rgb_sz);
818  s->rgb[1] = av_malloc(rgb_sz);
819  s->rgb[2] = av_malloc(rgb_sz);
820  s->dither_scratch_base[0][0] =
821  av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
822  s->dither_scratch_base[0][1] =
823  av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
824  s->dither_scratch_base[1][0] =
825  av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
826  s->dither_scratch_base[1][1] =
827  av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
828  s->dither_scratch_base[2][0] =
829  av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
830  s->dither_scratch_base[2][1] =
831  av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
832  s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
833  s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
834  s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
835  s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
836  s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
837  s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
838  if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
839  !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
840  !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
841  !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
842  uninit(ctx);
843  av_frame_free(&in);
844  av_frame_free(&out);
845  return AVERROR(ENOMEM);
846  }
847  s->rgb_sz = rgb_sz;
848  }
849  res = create_filtergraph(ctx, in, out);
850  if (res < 0) {
851  av_frame_free(&in);
852  av_frame_free(&out);
853  return res;
854  }
855  s->rgb_stride = rgb_stride / sizeof(int16_t);
856  td.in = in;
857  td.out = out;
858  td.in_linesize[0] = in->linesize[0];
859  td.in_linesize[1] = in->linesize[1];
860  td.in_linesize[2] = in->linesize[2];
861  td.out_linesize[0] = out->linesize[0];
862  td.out_linesize[1] = out->linesize[1];
863  td.out_linesize[2] = out->linesize[2];
864  td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h;
865  td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h;
866  if (s->yuv2yuv_passthrough) {
867  res = av_frame_copy(out, in);
868  if (res < 0) {
869  av_frame_free(&in);
870  av_frame_free(&out);
871  return res;
872  }
873  } else {
874  ctx->internal->execute(ctx, convert, &td, NULL,
875  FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
876  }
877  av_frame_free(&in);
878 
879  return ff_filter_frame(outlink, out);
880 }
881 
882 static int query_formats(AVFilterContext *ctx)
883 {
884  static const enum AVPixelFormat pix_fmts[] = {
885  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
886  AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
887  AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
888  AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
889  AV_PIX_FMT_NONE
890  };
891  int res;
892  ColorSpaceContext *s = ctx->priv;
893  AVFilterFormats *formats = ff_make_format_list(pix_fmts);
894 
895  if (!formats)
896  return AVERROR(ENOMEM);
897  if (s->user_format == AV_PIX_FMT_NONE)
898  return ff_set_common_formats(ctx, formats);
899  res = ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
900  if (res < 0)
901  return res;
902  formats = NULL;
903  res = ff_add_format(&formats, s->user_format);
904  if (res < 0)
905  return res;
906 
907  return ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
908 }
909 
910 static int config_props(AVFilterLink *outlink)
911 {
912  AVFilterContext *ctx = outlink->dst;
913  AVFilterLink *inlink = outlink->src->inputs[0];
914 
915  if (inlink->w % 2 || inlink->h % 2) {
916  av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
917  inlink->w, inlink->h);
918  return AVERROR_PATCHWELCOME;
919  }
920 
921  outlink->w = inlink->w;
922  outlink->h = inlink->h;
923  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
924  outlink->time_base = inlink->time_base;
925 
926  return 0;
927 }
928 
929 #define OFFSET(x) offsetof(ColorSpaceContext, x)
930 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
931 #define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }
932 
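/*
 * Example command line (one possible invocation) using the options declared
 * below; it converts the input to BT.709 primaries/transfer/matrix and
 * 8-bit 4:2:0 output:
 *
 *   ffmpeg -i in.mp4 -vf colorspace=all=bt709:format=yuv420p out.mp4
 */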
933 static const AVOption colorspace_options[] = {
934  { "all", "Set all color properties together",
935  OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
936  CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
937  ENUM("bt470m", CS_BT470M, "all"),
938  ENUM("bt470bg", CS_BT470BG, "all"),
939  ENUM("bt601-6-525", CS_BT601_6_525, "all"),
940  ENUM("bt601-6-625", CS_BT601_6_625, "all"),
941  ENUM("bt709", CS_BT709, "all"),
942  ENUM("smpte170m", CS_SMPTE170M, "all"),
943  ENUM("smpte240m", CS_SMPTE240M, "all"),
944  ENUM("bt2020", CS_BT2020, "all"),
945 
946  { "space", "Output colorspace",
947  OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
948  AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, "csp"},
949  ENUM("bt709", AVCOL_SPC_BT709, "csp"),
950  ENUM("fcc", AVCOL_SPC_FCC, "csp"),
951  ENUM("bt470bg", AVCOL_SPC_BT470BG, "csp"),
952  ENUM("smpte170m", AVCOL_SPC_SMPTE170M, "csp"),
953  ENUM("smpte240m", AVCOL_SPC_SMPTE240M, "csp"),
954  ENUM("ycgco", AVCOL_SPC_YCGCO, "csp"),
955  ENUM("gbr", AVCOL_SPC_RGB, "csp"),
956  ENUM("bt2020nc", AVCOL_SPC_BT2020_NCL, "csp"),
957  ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),
958 
959  { "range", "Output color range",
960  OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
961  AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
962  ENUM("tv", AVCOL_RANGE_MPEG, "rng"),
963  ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
964  ENUM("pc", AVCOL_RANGE_JPEG, "rng"),
965  ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),
966 
967  { "primaries", "Output color primaries",
968  OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
969  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
970  ENUM("bt709", AVCOL_PRI_BT709, "prm"),
971  ENUM("bt470m", AVCOL_PRI_BT470M, "prm"),
972  ENUM("bt470bg", AVCOL_PRI_BT470BG, "prm"),
973  ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
974  ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
975  ENUM("smpte428", AVCOL_PRI_SMPTE428, "prm"),
976  ENUM("film", AVCOL_PRI_FILM, "prm"),
977  ENUM("smpte431", AVCOL_PRI_SMPTE431, "prm"),
978  ENUM("smpte432", AVCOL_PRI_SMPTE432, "prm"),
979  ENUM("bt2020", AVCOL_PRI_BT2020, "prm"),
980  ENUM("jedec-p22", AVCOL_PRI_JEDEC_P22, "prm"),
981 
982  { "trc", "Output transfer characteristics",
983  OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
984  AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
985  ENUM("bt709", AVCOL_TRC_BT709, "trc"),
986  ENUM("bt470m", AVCOL_TRC_GAMMA22, "trc"),
987  ENUM("gamma22", AVCOL_TRC_GAMMA22, "trc"),
988  ENUM("bt470bg", AVCOL_TRC_GAMMA28, "trc"),
989  ENUM("gamma28", AVCOL_TRC_GAMMA28, "trc"),
990  ENUM("smpte170m", AVCOL_TRC_SMPTE170M, "trc"),
991  ENUM("smpte240m", AVCOL_TRC_SMPTE240M, "trc"),
992  ENUM("srgb", AVCOL_TRC_IEC61966_2_1, "trc"),
993  ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
994  ENUM("xvycc", AVCOL_TRC_IEC61966_2_4, "trc"),
995  ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
996  ENUM("bt2020-10", AVCOL_TRC_BT2020_10, "trc"),
997  ENUM("bt2020-12", AVCOL_TRC_BT2020_12, "trc"),
998 
999  { "format", "Output pixel format",
1000  OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
1001  AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, "fmt" },
1002  ENUM("yuv420p", AV_PIX_FMT_YUV420P, "fmt"),
1003  ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
1004  ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
1005  ENUM("yuv422p", AV_PIX_FMT_YUV422P, "fmt"),
1006  ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
1007  ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
1008  ENUM("yuv444p", AV_PIX_FMT_YUV444P, "fmt"),
1009  ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
1010  ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
1011 
1012  { "fast", "Ignore primary chromaticity and gamma correction",
1013  OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
1014  0, 1, FLAGS },
1015 
1016  { "dither", "Dithering mode",
1017  OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
1018  DITHER_NONE, DITHER_NB - 1, FLAGS, "dither" },
1019  ENUM("none", DITHER_NONE, "dither"),
1020  ENUM("fsb", DITHER_FSB, "dither"),
1021 
1022  { "wpadapt", "Whitepoint adaptation method",
1023  OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
1024  WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, "wpadapt" },
1025  ENUM("bradford", WP_ADAPT_BRADFORD, "wpadapt"),
1026  ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
1027  ENUM("identity", WP_ADAPT_IDENTITY, "wpadapt"),
1028 
1029  { "iall", "Set all input color properties together",
1030  OFFSET(user_iall), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
1031  CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
1032  { "ispace", "Input colorspace",
1033  OFFSET(user_icsp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
1034  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "csp" },
1035  { "irange", "Input color range",
1036  OFFSET(user_irng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
1037  AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
1038  { "iprimaries", "Input color primaries",
1039  OFFSET(user_iprm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
1040  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
1041  { "itrc", "Input transfer characteristics",
1042  OFFSET(user_itrc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
1043  AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
1044 
1045  { NULL }
1046 };
1047 
1048 AVFILTER_DEFINE_CLASS(colorspace);
1049 
1050 static const AVFilterPad inputs[] = {
1051  {
1052  .name = "default",
1053  .type = AVMEDIA_TYPE_VIDEO,
1054  .filter_frame = filter_frame,
1055  },
1056  { NULL }
1057 };
1058 
1059 static const AVFilterPad outputs[] = {
1060  {
1061  .name = "default",
1062  .type = AVMEDIA_TYPE_VIDEO,
1063  .config_props = config_props,
1064  },
1065  { NULL }
1066 };
1067 
1068 AVFilter ff_vf_colorspace = {
1069  .name = "colorspace",
1070  .description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
1071  .init = init,
1072  .uninit = uninit,
1073  .query_formats = query_formats,
1074  .priv_size = sizeof(ColorSpaceContext),
1075  .priv_class = &colorspace_class,
1076  .inputs = inputs,
1077  .outputs = outputs,
1078  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
1079 };
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
stride
int stride
Definition: mace.c:144
ColorSpaceContext::fast_mode
int fast_mode
Definition: vf_colorspace.c:132
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
ColorSpaceContext::yuv2yuv_passthrough
int yuv2yuv_passthrough
Definition: vf_colorspace.c:150
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ColorSpaceContext::rgb2yuv_fsb
rgb2yuv_fsb_fn rgb2yuv_fsb
Definition: vf_colorspace.c:157
WP_ADAPT_VON_KRIES
@ WP_ADAPT_VON_KRIES
Definition: vf_colorspace.c:67
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
ColorSpaceContext::user_format
enum AVPixelFormat in_format user_format
Definition: vf_colorspace.c:131
ColorSpaceContext::delin_lut
int16_t * delin_lut
Definition: vf_colorspace.c:147
AVColorTransferCharacteristic
AVColorTransferCharacteristic
Color Transfer Characteristic.
Definition: pixfmt.h:467
rgb2yuv
static void fn() rgb2yuv(uint8_t *_yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t s, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp_template.c:130
out
FILE * out
Definition: movenc.c:54
WP_NB
@ WP_NB
Definition: vf_colorspace.c:62
n
int n
Definition: avisynth_c.h:760
NB_WP_ADAPT
@ NB_WP_ADAPT
Definition: vf_colorspace.c:70
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
ColorSpaceContext::dither_scratch_base
int * dither_scratch_base[3][2]
Definition: vf_colorspace.c:139
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
ff_matrix_invert_3x3
void ff_matrix_invert_3x3(const double in[3][3], double out[3][3])
Definition: colorspace.c:27
ColorSpaceContext::yuv2rgb
yuv2rgb_fn yuv2rgb
Definition: vf_colorspace.c:155
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
ColorSpaceContext::out_txchr
const struct TransferCharacteristics * out_txchr
Definition: vf_colorspace.c:145
CS_SMPTE240M
@ CS_SMPTE240M
Definition: vf_colorspace.c:52
TransferCharacteristics::gamma
double gamma
Definition: vf_colorspace.c:118
WP_ADAPT_BRADFORD
@ WP_ADAPT_BRADFORD
Definition: vf_colorspace.c:66
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
ColorSpaceContext::rgb_sz
unsigned rgb_sz
Definition: vf_colorspace.c:138
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(colorspace)
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
AVCOL_TRC_NB
@ AVCOL_TRC_NB
Not part of ABI.
Definition: pixfmt.h:489
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:522
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:58
AVOption
AVOption.
Definition: opt.h:246
AVCOL_SPC_NB
@ AVCOL_SPC_NB
Not part of ABI.
Definition: pixfmt.h:513
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:470
data
const char data[16]
Definition: mxf.c:91
LumaCoefficients
Definition: colorspace.h:28
ColorSpaceContext::rgb2yuv_dbl_coeffs
double rgb2yuv_dbl_coeffs[3][3]
Definition: vf_colorspace.c:159
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
convert
Definition: convert.py:1
AVCOL_PRI_JEDEC_P22
@ AVCOL_PRI_JEDEC_P22
JEDEC P22 phosphors.
Definition: pixfmt.h:459
ThreadData::out_ss_h
int out_ss_h
Definition: vf_colorspace.c:337
ColorSpaceContext::in_uv_rng
int in_uv_rng
Definition: vf_colorspace.c:160
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:497
AVCOL_TRC_BT2020_12
@ AVCOL_TRC_BT2020_12
ITU-R BT2020 for 12-bit system.
Definition: pixfmt.h:483
CS_BT709
@ CS_BT709
Definition: vf_colorspace.c:50
WP_ADAPT_IDENTITY
@ WP_ADAPT_IDENTITY
Definition: vf_colorspace.c:69
ColorSpaceContext::lrgb2lrgb_coeffs
int16_t lrgb2lrgb_coeffs[3][3][8]
Definition: vf_colorspace.c:143
AVColorPrimaries
AVColorPrimaries
Chromaticity coordinates of the source primaries.
Definition: pixfmt.h:443
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
WhitepointCoefficients::xw
double xw
Definition: colorspace.h:37
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:488
get_transfer_characteristics
static const struct TransferCharacteristics * get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
Definition: vf_colorspace.c:189
video.h
AVFormatContext::internal
AVFormatInternal * internal
An opaque field for libavformat internal usage.
Definition: avformat.h:1795
ColorSpaceContext::wp_adapt
enum WhitepointAdaptation wp_adapt
Definition: vf_colorspace.c:134
colorspace_options
static const AVOption colorspace_options[]
Definition: vf_colorspace.c:933
Colorspace
Colorspace
Definition: vf_colorspace.c:44
ColorSpaceContext::rgb2rgb_passthrough
int rgb2rgb_passthrough
Definition: vf_colorspace.c:146
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
AV_PIX_FMT_GBRAP12LE
@ AV_PIX_FMT_GBRAP12LE
planar GBR 4:4:4:4 48bpp, little-endian
Definition: pixfmt.h:288
DITHER_FSB
@ DITHER_FSB
Definition: vf_colorspace.c:40
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:502
AVCOL_TRC_IEC61966_2_1
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
Definition: pixfmt.h:481
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:2915
ColorPrimaries::wp
enum Whitepoint wp
Definition: vf_colorspace.c:113
WP_D65
@ WP_D65
Definition: vf_colorspace.c:58
WP_DCI
@ WP_DCI
Definition: vf_colorspace.c:60
ThreadData::out_linesize
ptrdiff_t out_linesize[3]
Definition: vf_colorspace.c:336
colorspace.h
AVCOL_RANGE_NB
@ AVCOL_RANGE_NB
Not part of ABI.
Definition: pixfmt.h:523
AVCOL_TRC_GAMMA28
@ AVCOL_TRC_GAMMA28
also ITU-R BT470BG
Definition: pixfmt.h:473
ColorSpaceContext
Definition: vf_colorspace.c:121
PrimaryCoefficients
Definition: colorspace.h:32
whitepoint_coefficients
static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB]
Definition: vf_colorspace.c:202
CS_BT2020
@ CS_BT2020
Definition: vf_colorspace.c:53
CS_BT601_6_525
@ CS_BT601_6_525
Definition: vf_colorspace.c:48
src
#define src
Definition: vp8dsp.c:254
AVCOL_TRC_GAMMA22
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:472
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
ColorSpaceContext::yuv_offset
int16_t yuv_offset[2][8]
Definition: vf_colorspace.c:154
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
ff_get_luma_coefficients
const struct LumaCoefficients * ff_get_luma_coefficients(enum AVColorSpace csp)
Definition: colorspace.c:128
avassert.h
lrint
#define lrint
Definition: tablegen.h:53
fill_whitepoint_conv_table
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt, enum Whitepoint src, enum Whitepoint dst)
Definition: vf_colorspace.c:280
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
buf
void * buf
Definition: avisynth_c.h:766
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
AVCOL_PRI_RESERVED0
@ AVCOL_PRI_RESERVED0
Definition: pixfmt.h:444
ColorSpaceContext::in_lumacoef
const struct LumaCoefficients * in_lumacoef
Definition: vf_colorspace.c:149
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
s
#define s(width, name)
Definition: cbs_vp9.c:257
DITHER_NB
@ DITHER_NB
Definition: vf_colorspace.c:41
AVCOL_PRI_NB
@ AVCOL_PRI_NB
Not part of ABI.
Definition: pixfmt.h:460
CS_BT470BG
@ CS_BT470BG
Definition: vf_colorspace.c:47
CS_UNSPECIFIED
@ CS_UNSPECIFIED
Definition: vf_colorspace.c:45
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:503
ColorSpaceContext::yuv2rgb_coeffs
int16_t yuv2rgb_coeffs[3][3][8]
Definition: vf_colorspace.c:151
get_range_off
static int get_range_off(AVFilterContext *ctx, int *off, int *y_rng, int *uv_rng, enum AVColorRange rng, int depth)
Definition: vf_colorspace.c:410
ff_fill_rgb2yuv_table
void ff_fill_rgb2yuv_table(const struct LumaCoefficients *coeffs, double rgb2yuv[3][3])
Definition: colorspace.c:141
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:440
ColorSpaceDSPContext
Definition: colorspacedsp.h:59
bits
uint8_t bits
Definition: vp3data.h:202
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_colorspace.c:762
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
PrimaryCoefficients::xr
double xr
Definition: colorspace.h:33
default_trc
static enum AVColorTransferCharacteristic default_trc[CS_NB+1]
Definition: vf_colorspace.c:73
ctx
AVFormatContext * ctx
Definition: movenc.c:48
AVCOL_PRI_SMPTE428
@ AVCOL_PRI_SMPTE428
SMPTE ST 428-1 (CIE 1931 XYZ)
Definition: pixfmt.h:455
Whitepoint
Whitepoint
Definition: vf_colorspace.c:57
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCOL_PRI_SMPTE240M
@ AVCOL_PRI_SMPTE240M
functionally identical to above
Definition: pixfmt.h:452
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:446
ColorSpaceContext::yuv2yuv
yuv2yuv_fn yuv2yuv
Definition: vf_colorspace.c:158
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:450
ColorSpaceContext::rgb2yuv_coeffs
int16_t rgb2yuv_coeffs[3][3][8]
Definition: vf_colorspace.c:152
AVCOL_PRI_SMPTE170M
@ AVCOL_PRI_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:451
ColorSpaceContext::user_irng
enum AVColorRange in_rng out_rng user_rng user_irng
Definition: vf_colorspace.c:128
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:2848
ColorSpaceContext::yuv2yuv_coeffs
int16_t yuv2yuv_coeffs[3][3][8]
Definition: vf_colorspace.c:153
ff_matrix_mul_3x3
void ff_matrix_mul_3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
Definition: colorspace.c:54
config_props
static int config_props(AVFilterLink *outlink)
Definition: vf_colorspace.c:910
CS_NB
@ CS_NB
Definition: vf_colorspace.c:54
AVCOL_TRC_RESERVED0
@ AVCOL_TRC_RESERVED0
Definition: pixfmt.h:468
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
TransferCharacteristics::alpha
double alpha
Definition: vf_colorspace.c:118
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
ff_vf_colorspace
AVFilter ff_vf_colorspace
Definition: vf_colorspace.c:1068
ff_fill_rgb2xyz_table
void ff_fill_rgb2xyz_table(const struct PrimaryCoefficients *coeffs, const struct WhitepointCoefficients *wp, double rgb2xyz[3][3])
Definition: colorspace.c:68
CS_SMPTE170M
@ CS_SMPTE170M
Definition: vf_colorspace.c:51
ColorSpaceContext::user_itrc
enum AVColorTransferCharacteristic in_trc out_trc user_trc user_itrc
Definition: vf_colorspace.c:129
AVCOL_TRC_IEC61966_2_4
@ AVCOL_TRC_IEC61966_2_4
IEC 61966-2-4.
Definition: pixfmt.h:479
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVFilterContext::inputs
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
init
static int init(AVFilterContext *ctx)
Definition: vf_colorspace.c:735
AVCOL_PRI_BT709
@ AVCOL_PRI_BT709
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
Definition: pixfmt.h:445
ff_add_format
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:337
fill_gamma_table
static int fill_gamma_table(ColorSpaceContext *s)
Definition: vf_colorspace.c:236
ColorSpaceContext::lin_lut
int16_t * lin_lut
Definition: vf_colorspace.c:147
ColorSpaceContext::out_primaries
const struct ColorPrimaries * out_primaries
Definition: vf_colorspace.c:141
av_color_primaries_name
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:2867
AVCOL_TRC_BT2020_10
@ AVCOL_TRC_BT2020_10
ITU-R BT2020 for 10-bit system.
Definition: pixfmt.h:482
AVCOL_SPC_YCGCO
@ AVCOL_SPC_YCGCO
Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16.
Definition: pixfmt.h:505
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
ColorSpaceContext::in_txchr
const struct TransferCharacteristics * in_txchr
Definition: vf_colorspace.c:145
ColorSpaceContext::user_iall
enum Colorspace user_all user_iall
Definition: vf_colorspace.c:126
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:520
AVCOL_PRI_BT2020
@ AVCOL_PRI_BT2020
ITU-R BT2020.
Definition: pixfmt.h:454
uninit
static void uninit(AVFilterContext *ctx)
Definition: vf_colorspace.c:744
ColorSpaceContext::out_y_rng
int out_y_rng
Definition: vf_colorspace.c:160
ColorSpaceContext::lrgb2lrgb_passthrough
int lrgb2lrgb_passthrough
Definition: vf_colorspace.c:142
AVCOL_PRI_SMPTE431
@ AVCOL_PRI_SMPTE431
SMPTE ST 431-2 (2011) / DCI P3.
Definition: pixfmt.h:457
desc
const char * desc
Definition: nvenc.c:68
yuv2yuv_fn
void(* yuv2yuv_fn)(uint8_t *yuv_out[3], const ptrdiff_t yuv_out_stride[3], uint8_t *yuv_in[3], const ptrdiff_t yuv_in_stride[3], int w, int h, const int16_t yuv2yuv_coeffs[3][3][8], const int16_t yuv_offset[2][8])
Definition: colorspacedsp.h:40
ThreadData::m
AVFrame * m
Definition: vf_maskedclamp.c:34
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
AVCOL_TRC_SMPTE240M
@ AVCOL_TRC_SMPTE240M
Definition: pixfmt.h:475
AVCOL_PRI_FILM
@ AVCOL_PRI_FILM
colour filters using Illuminant C
Definition: pixfmt.h:453
av_frame_copy
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:792
ColorSpaceContext::yuv2yuv_fastmode
int yuv2yuv_fastmode
Definition: vf_colorspace.c:150
OFFSET
#define OFFSET(x)
Definition: vf_colorspace.c:929
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
ColorSpaceContext::in_primaries
const struct ColorPrimaries * in_primaries
Definition: vf_colorspace.c:141
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
transfer_characteristics
static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB]
Definition: vf_colorspace.c:176
TransferCharacteristics
Definition: vf_colorspace.c:117
ColorSpaceContext::rgb2yuv
rgb2yuv_fn rgb2yuv
Definition: vf_colorspace.c:156
WP_E
@ WP_E
Definition: vf_colorspace.c:61
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
ColorSpaceContext::in_y_rng
int in_y_rng
Definition: vf_colorspace.c:160
ColorSpaceContext::yuv2rgb_dbl_coeffs
double yuv2rgb_dbl_coeffs[3][3]
Definition: vf_colorspace.c:159
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:469
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:125
WhitepointCoefficients
Definition: colorspace.h:36
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
functionally identical to above
Definition: pixfmt.h:504
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:112
convert
static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
Definition: vf_colorspace.c:340
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! *ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
yuv2yuv
static void fn() yuv2yuv(uint8_t *_dst[3], const ptrdiff_t dst_stride[3], uint8_t *_src[3], const ptrdiff_t src_stride[3], int w, int h, const int16_t c[3][3][8], const int16_t yuv_offset[2][8])
Definition: colorspacedsp_yuv2yuv_template.c:40
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: vf_colorspace.c:882
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
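A hedged usage sketch (the checked condition is an assumption, not taken from this filter): av_assert2() only expands to a runtime check at higher assert levels, so it is cheap enough for speed-critical paths.

    /* Compiled out unless FFmpeg is built with a sufficiently high ASSERT_LEVEL */
    av_assert2(depth == 8 || depth == 10 || depth == 12);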
WhitepointCoefficients::yw
double yw
Definition: colorspace.h:37
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:507
ColorPrimaries::coeff
struct PrimaryCoefficients coeff
Definition: vf_colorspace.c:114
ColorSpaceContext::dither_scratch
int * dither_scratch[3][2]
Definition: vf_colorspace.c:139
ColorPrimaries
Definition: vf_colorspace.c:112
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:496
CS_BT601_6_625
@ CS_BT601_6_625
Definition: vf_colorspace.c:49
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
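A sketch of the usual slice-threading pattern around this helper (the variable names and band granularity are assumptions, and the actual dispatch hook differs between FFmpeg versions):

    /* Split the frame into horizontal bands, but never into more jobs than threads */
    int nb_jobs = FFMIN((frame_height + 1) >> 1, ff_filter_get_nb_threads(ctx));
    /* each job then runs the convert() worker on its own band */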
ThreadData
Used for passing data between threads.
Definition: af_adeclick.c:487
uint8_t
uint8_t
Definition: audio_convert.c:194
DitherMode
DitherMode
Definition: vf_colorspace.c:38
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:499
FLAGS
#define FLAGS
Definition: vf_colorspace.c:930
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:521
AVFilter
Filter definition.
Definition: avfilter.h:144
ColorSpaceContext::dsp
ColorSpaceDSPContext dsp
Definition: vf_colorspace.c:124
NB_WP_ADAPT_NON_IDENTITY
@ NB_WP_ADAPT_NON_IDENTITY
Definition: vf_colorspace.c:68
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:448
pixfmt.h
outputs
static const AVFilterPad outputs[]
Definition: vf_colorspace.c:1059
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
CS_BT470M
@ CS_BT470M
Definition: vf_colorspace.c:46
yuv2rgb_fn
void(* yuv2rgb_fn)(int16_t *rgb[3], ptrdiff_t rgb_stride, uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int w, int h, const int16_t yuv2rgb_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:27
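The DSP context exposes one pointer of this type per bit depth and chroma subsampling; a hedged call sketch (the field and variable names are placeholders, not the filter's exact code):

    /* yuv2rgb was selected at setup time to match the input depth and subsampling */
    s->yuv2rgb(s->rgb, s->rgb_stride, in_planes, in_linesize,
               w, h, s->yuv2rgb_coeffs, s->yuv_offset[0]);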
ColorSpaceContext::out_lumacoef
const struct LumaCoefficients * out_lumacoef
Definition: vf_colorspace.c:149
ColorSpaceContext::user_icsp
enum AVColorSpace in_csp out_csp user_csp user_icsp
Definition: vf_colorspace.c:127
ColorSpaceContext::dither
enum DitherMode dither
Definition: vf_colorspace.c:133
default_csp
static enum AVColorSpace default_csp[CS_NB+1]
Definition: vf_colorspace.c:99
default_prm
static enum AVColorPrimaries default_prm[CS_NB+1]
Definition: vf_colorspace.c:86
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVCOL_SPC_FCC
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:501
WP_C
@ WP_C
Definition: vf_colorspace.c:59
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
avfilter.h
colorspacedsp.h
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
rgb2yuv_fn
void(* rgb2yuv_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:31
ColorSpaceContext::out_uv_rng
int out_uv_rng
Definition: vf_colorspace.c:160
AVCOL_TRC_SMPTE170M
@ AVCOL_TRC_SMPTE170M
also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
Definition: pixfmt.h:474
inputs
static const AVFilterPad inputs[]
Definition: vf_colorspace.c:1050
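The pad tables are plain static arrays; a hedged sketch of their shape (the callback names are assumptions and the exact AVFilterPad fields vary between FFmpeg versions):

    static const AVFilterPad inputs[] = {
        {
            .name         = "default",
            .type         = AVMEDIA_TYPE_VIDEO,
            .filter_frame = filter_frame,   /* per-frame entry point (assumed name) */
        },
        { NULL }                            /* sentinel used by this API generation */
    };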
get_color_primaries
static const struct ColorPrimaries * get_color_primaries(enum AVColorPrimaries prm)
Definition: vf_colorspace.c:223
ThreadData::in_ss_h
int in_ss_h
Definition: vf_colorspace.c:337
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
ColorSpaceContext::did_warn_range
int did_warn_range
Definition: vf_colorspace.c:162
WhitepointAdaptation
WhitepointAdaptation
Definition: vf_colorspace.c:65
ColorSpaceContext::user_iprm
enum AVColorPrimaries in_prm out_prm user_prm user_iprm
Definition: vf_colorspace.c:130
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concurrently.
Definition: avfilter.h:116
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ENUM
#define ENUM(x, y, z)
Definition: vf_colorspace.c:931
ThreadData::in
AVFrame * in
Definition: af_afftdn.c:1082
rgb2yuv_fsb_fn
void(* rgb2yuv_fsb_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8], int *rnd[3][2])
Definition: colorspacedsp.h:35
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
AVCOL_PRI_SMPTE432
@ AVCOL_PRI_SMPTE432
SMPTE ST 432-1 (2010) / P3 D65 / Display P3.
Definition: pixfmt.h:458
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
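FFALIGN rounds a value up to a multiple of a power of two; a sketch of padding the intermediate RGB row size (the 32-byte alignment is an assumption):

    /* round each row of the 16-bit intermediate planes up to a 32-byte boundary */
    ptrdiff_t stride = FFALIGN(width * sizeof(int16_t), 32);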
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
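av_freep() frees through a pointer-to-pointer and resets it to NULL, which keeps cleanup idempotent; a hedged sketch reusing the context fields listed on this page:

    for (int i = 0; i < 3; i++)
        av_freep(&s->rgb[i]);   /* pointer is NULLed, so a second uninit is safe */
    s->rgb_sz = 0;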
DITHER_NONE
@ DITHER_NONE
Definition: vf_colorspace.c:39
TransferCharacteristics::beta
double beta
Definition: vf_colorspace.c:118
ThreadData::o
AVFrame * o
Definition: vf_maskedclamp.c:34
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ma
#define ma
Definition: vf_colormatrix.c:100
TransferCharacteristics::delta
double delta
Definition: vf_colorspace.c:118
h
h
Definition: vp9dsp_template.c:2038
supported_format
#define supported_format(d)
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:498
AVColorRange
AVColorRange
MPEG vs JPEG YUV range.
Definition: pixfmt.h:519
create_filtergraph
static int create_filtergraph(AVFilterContext *ctx, const AVFrame *in, const AVFrame *out)
Definition: vf_colorspace.c:440
ThreadData::in_linesize
ptrdiff_t in_linesize[3]
Definition: vf_colorspace.c:336
yuv2rgb
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:280
color_primaries
static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB]
Definition: vf_colorspace.c:209
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2891
ff_colorspacedsp_init
void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp)
Definition: colorspacedsp.c:101
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
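The chroma plane height follows from this shift; a hedged example using AV_CEIL_RSHIFT so odd frame heights round up rather than truncate:

    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int chroma_h = AV_CEIL_RSHIFT(frame->height, desc->log2_chroma_h);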
ColorSpaceContext::rgb_stride
ptrdiff_t rgb_stride
Definition: vf_colorspace.c:137
ColorSpaceContext::rgb
int16_t * rgb[3]
Definition: vf_colorspace.c:136
apply_lut
static void apply_lut(int16_t *buf[3], ptrdiff_t stride, int w, int h, const int16_t *lut)
Definition: vf_colorspace.c:317
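A minimal sketch of what a pass with this signature amounts to (illustrative only, not the filter's exact implementation): each 15-bit intermediate sample in the three planes is replaced by its table entry, with a clip guarding the index.

    for (int p = 0; p < 3; p++)
        for (int y = 0; y < h; y++) {
            int16_t *row = buf[p] + y * stride;
            for (int x = 0; x < w; x++)
                row[x] = lut[av_clip_uintp2(row[x], 15)];
        }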
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
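Typically used for human-readable diagnostics; a hedged example (the message text is illustrative):

    av_log(ctx, AV_LOG_ERROR, "unsupported input format %s\n",
           av_get_pix_fmt_name(frame->format));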
dither
static const uint8_t dither[8][8]
Definition: vf_fspp.c:57