/*
 * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Convert between colorspaces.
 */

#include "libavutil/avassert.h"
#include "libavutil/csp.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"

#include "avfilter.h"
#include "colorspacedsp.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "colorspace.h"

enum DitherMode {
    DITHER_NONE,
    DITHER_FSB,
    DITHER_NB,
};

enum Colorspace {
    CS_UNSPECIFIED,
    CS_BT470M,
    CS_BT470BG,
    CS_BT601_6_525,
    CS_BT601_6_625,
    CS_BT709,
    CS_SMPTE170M,
    CS_SMPTE240M,
    CS_BT2020,
    CS_NB,
};

enum WhitepointAdaptation {
    WP_ADAPT_BRADFORD,
    WP_ADAPT_VON_KRIES,
    NB_WP_ADAPT_NON_IDENTITY,
    WP_ADAPT_IDENTITY = NB_WP_ADAPT_NON_IDENTITY,
    NB_WP_ADAPT,
};

static const enum AVColorTransferCharacteristic default_trc[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_TRC_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_TRC_GAMMA22,
    [CS_BT470BG]     = AVCOL_TRC_GAMMA28,
    [CS_BT601_6_525] = AVCOL_TRC_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_TRC_SMPTE170M,
    [CS_BT709]       = AVCOL_TRC_BT709,
    [CS_SMPTE170M]   = AVCOL_TRC_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_TRC_SMPTE240M,
    [CS_BT2020]      = AVCOL_TRC_BT2020_10,
    [CS_NB]          = AVCOL_TRC_UNSPECIFIED,
};

static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_PRI_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_PRI_BT470M,
    [CS_BT470BG]     = AVCOL_PRI_BT470BG,
    [CS_BT601_6_525] = AVCOL_PRI_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_PRI_BT470BG,
    [CS_BT709]       = AVCOL_PRI_BT709,
    [CS_SMPTE170M]   = AVCOL_PRI_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_PRI_SMPTE240M,
    [CS_BT2020]      = AVCOL_PRI_BT2020,
    [CS_NB]          = AVCOL_PRI_UNSPECIFIED,
};

static const enum AVColorSpace default_csp[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_SPC_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_SPC_SMPTE170M,
    [CS_BT470BG]     = AVCOL_SPC_BT470BG,
    [CS_BT601_6_525] = AVCOL_SPC_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_SPC_BT470BG,
    [CS_BT709]       = AVCOL_SPC_BT709,
    [CS_SMPTE170M]   = AVCOL_SPC_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_SPC_SMPTE240M,
    [CS_BT2020]      = AVCOL_SPC_BT2020_NCL,
    [CS_NB]          = AVCOL_SPC_UNSPECIFIED,
};

struct TransferCharacteristics {
    double alpha, beta, gamma, delta;
};

typedef struct ColorSpaceContext {
    const AVClass *class;

    ColorSpaceDSPContext dsp;

    enum Colorspace user_all, user_iall;
    enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
    enum AVColorRange in_rng, out_rng, user_rng, user_irng;
    enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
    enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
    enum AVPixelFormat in_format, user_format;
    int fast_mode;
    enum DitherMode dither;
    enum WhitepointAdaptation wp_adapt;

    int16_t *rgb[3];
    ptrdiff_t rgb_stride;
    unsigned rgb_sz;
    int *dither_scratch[3][2], *dither_scratch_base[3][2];

    const AVColorPrimariesDesc *in_primaries, *out_primaries;
    int lrgb2lrgb_passthrough;
    DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];

    const struct TransferCharacteristics *in_txchr, *out_txchr;
    int rgb2rgb_passthrough;
    int16_t *lin_lut, *delin_lut;

    const AVLumaCoefficients *in_lumacoef, *out_lumacoef;
    int yuv2yuv_passthrough, yuv2yuv_fastmode;
    DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
    yuv2rgb_fn yuv2rgb;
    rgb2yuv_fn rgb2yuv;
    rgb2yuv_fsb_fn rgb2yuv_fsb;
    yuv2yuv_fn yuv2yuv;
    double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
    int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;

    int did_warn_range;
} ColorSpaceContext;

// FIXME deal with odd width/heights
// FIXME faster linearize/delinearize implementation (integer pow)
// FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
// FIXME test that the values in (de)lin_lut don't exceed their container storage
// type size (only useful if we keep the LUT and don't move to fast integer pow)
// FIXME dithering if bitdepth goes down?
// FIXME bitexact for fate integration?

// FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
// find any actual tables that document their real values...
// See http://www.13thmonkey.org/~boris/gammacorrection/ first graph for why it matters
static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
    [AVCOL_TRC_BT709]        = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_GAMMA22]      = { 1.0,    0.0,       1.0 / 2.2, 0.0 },
    [AVCOL_TRC_GAMMA28]      = { 1.0,    0.0,       1.0 / 2.8, 0.0 },
    [AVCOL_TRC_SMPTE170M]    = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_SMPTE240M]    = { 1.1115, 0.0228,    0.45,      4.0 },
    [AVCOL_TRC_LINEAR]       = { 1.0,    0.0,       1.0,       0.0 },
    [AVCOL_TRC_IEC61966_2_1] = { 1.055,  0.0031308, 1.0 / 2.4, 12.92 },
    [AVCOL_TRC_IEC61966_2_4] = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_BT2020_10]    = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_BT2020_12]    = { 1.0993, 0.0181,    0.45,      4.5 },
};
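
/* The four parameters above describe, roughly, the piecewise transfer
 * function applied in fill_gamma_table() below:
 *
 *     encoded = alpha * linear^gamma - (alpha - 1)   for linear >= beta
 *     encoded = delta * linear                       for linear <  beta
 *
 * For example, with the BT.709 entry (alpha = 1.099, beta = 0.018,
 * gamma = 0.45, delta = 4.5) a linear value of 0.018 encodes to about
 * 4.5 * 0.018 = 0.081, and a linear value of 1.0 encodes to
 * 1.099 * 1.0^0.45 - 0.099 = 1.0.
 */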

static const struct TransferCharacteristics *
    get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
{
    const struct TransferCharacteristics *coeffs;

    if (trc >= AVCOL_TRC_NB)
        return NULL;
    coeffs = &transfer_characteristics[trc];
    if (!coeffs->alpha)
        return NULL;

    return coeffs;
}

static int fill_gamma_table(ColorSpaceContext *s)
{
    int n;
    double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
    double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
    double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
    double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
    double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;

    s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
    if (!s->lin_lut)
        return AVERROR(ENOMEM);
    s->delin_lut = &s->lin_lut[32768];
    for (n = 0; n < 32768; n++) {
        double v = (n - 2048.0) / 28672.0, d, l;

        // delinearize
        if (v <= -out_beta) {
            d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
        } else if (v < out_beta) {
            d = out_delta * v;
        } else {
            d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
        }
        s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));

        // linearize
        if (v <= -in_beta * in_delta) {
            l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
        } else if (v < in_beta * in_delta) {
            l = v * in_idelta;
        } else {
            l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
        }
        s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
    }

    return 0;
}
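
/* A small worked example of the fixed-point convention used by these LUTs
 * (numbers are approximate): samples are signed 15-bit fixed point with
 * 1.0 == 28672 and the tables are indexed at 2048 + sample (see apply_lut()
 * below). For an sRGB output (alpha = 1.055, gamma = 1/2.4), a linear sample
 * of 14336 (i.e. 0.5) delinearizes to roughly
 * 1.055 * 0.5^(1/2.4) - 0.055 = 0.735, stored as about 0.735 * 28672 = 21087.
 */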

/*
 * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
 * This function uses the Bradford mechanism.
 */
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
                                       const AVWhitepointCoefficients *wp_src,
                                       const AVWhitepointCoefficients *wp_dst)
{
    static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
        [WP_ADAPT_BRADFORD] = {
            {  0.8951,  0.2664, -0.1614 },
            { -0.7502,  1.7135,  0.0367 },
            {  0.0389, -0.0685,  1.0296 },
        }, [WP_ADAPT_VON_KRIES] = {
            {  0.40024,  0.70760, -0.08081 },
            { -0.22630,  1.16532,  0.04570 },
            {  0.00000,  0.00000,  0.91822 },
        },
    };
    const double (*ma)[3] = ma_tbl[wp_adapt];
    double xw_src = av_q2d(wp_src->x), yw_src = av_q2d(wp_src->y);
    double xw_dst = av_q2d(wp_dst->x), yw_dst = av_q2d(wp_dst->y);
    double zw_src = 1.0 - xw_src - yw_src;
    double zw_dst = 1.0 - xw_dst - yw_dst;
    double mai[3][3], fac[3][3], tmp[3][3];
    double rs, gs, bs, rd, gd, bd;

    ff_matrix_invert_3x3(ma, mai);
    rs = ma[0][0] * xw_src + ma[0][1] * yw_src + ma[0][2] * zw_src;
    gs = ma[1][0] * xw_src + ma[1][1] * yw_src + ma[1][2] * zw_src;
    bs = ma[2][0] * xw_src + ma[2][1] * yw_src + ma[2][2] * zw_src;
    rd = ma[0][0] * xw_dst + ma[0][1] * yw_dst + ma[0][2] * zw_dst;
    gd = ma[1][0] * xw_dst + ma[1][1] * yw_dst + ma[1][2] * zw_dst;
    bd = ma[2][0] * xw_dst + ma[2][1] * yw_dst + ma[2][2] * zw_dst;
    fac[0][0] = rd / rs;
    fac[1][1] = gd / gs;
    fac[2][2] = bd / bs;
    fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
    ff_matrix_mul_3x3(tmp, ma, fac);
    ff_matrix_mul_3x3(out, tmp, mai);
}
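
/* Informal sketch of what the function above computes, following the
 * Lindbloom page linked in its comment: the source and destination white
 * points are mapped into a cone-response domain with ma, each channel is
 * scaled by the destination/source response ratio (the diagonal matrix fac
 * with rd/rs, gd/gs, bd/bs), and the result is mapped back with the inverse
 * matrix mai. The returned 3x3 matrix is later folded into the linear
 * RGB-to-RGB conversion in create_filtergraph().
 */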

static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
                      int w, int h, const int16_t *lut)
{
    int y, x, n;

    for (n = 0; n < 3; n++) {
        int16_t *data = buf[n];

        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++)
                data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];

            data += stride;
        }
    }
}

typedef struct ThreadData {
    AVFrame *in, *out;
    ptrdiff_t in_linesize[3], out_linesize[3];
    int in_ss_h, out_ss_h;
} ThreadData;

static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
{
    const ThreadData *td = data;
    ColorSpaceContext *s = ctx->priv;
    uint8_t *in_data[3], *out_data[3];
    int16_t *rgb[3];
    int h_in = (td->in->height + 1) >> 1;
    int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
    int w = td->in->width, h = h2 - h1;

    in_data[0]  = td->in->data[0]  + td->in_linesize[0]  * h1;
    in_data[1]  = td->in->data[1]  + td->in_linesize[1]  * (h1 >> td->in_ss_h);
    in_data[2]  = td->in->data[2]  + td->in_linesize[2]  * (h1 >> td->in_ss_h);
    out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
    out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
    out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
    rgb[0]      = s->rgb[0] + s->rgb_stride * h1;
    rgb[1]      = s->rgb[1] + s->rgb_stride * h1;
    rgb[2]      = s->rgb[2] + s->rgb_stride * h1;

    // FIXME for simd, also make sure we do pictures with negative stride
    // top-down so we don't overwrite lines with padding of data before it
    // in the same buffer (same as swscale)

    if (s->yuv2yuv_fastmode) {
        // FIXME possibly use a fast mode in case only the y range changes?
        // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
        // are non-zero
        s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
                   s->yuv2yuv_coeffs, s->yuv_offset);
    } else {
        // FIXME maybe (for caching efficiency) do pipeline per-line instead of
        // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
        // 2 lines, for yuv420.)
        /*
         * General design:
         * - yuv2rgb converts from whatever range the input was ([16-235/240] or
         *   [0,255] or the 10/12bpp equivalents thereof) to an integer version
         *   of RGB in pseudo-restricted 15+sign bits. That means that the float
         *   range [0.0,1.0] is in [0,28672], and the remainder of the int16_t
         *   range is used for overflow/underflow outside the representable
         *   range of this RGB type. rgb2yuv is the exact opposite.
         * - gamma correction is done using a LUT since that appears to work
         *   fairly fast.
         * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
         *   (or rgb2yuv conversion) uses nearest-neighbour sampling to read
         *   chroma pixels at luma resolution. If you want a fancier filter,
         *   you can use swscale to convert to yuv444p.
         * - all coefficients are 14-bit (so in the [-2.0,2.0] range).
         */
        s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
                   s->yuv2rgb_coeffs, s->yuv_offset[0]);
        if (!s->rgb2rgb_passthrough) {
            apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
            if (!s->lrgb2lrgb_passthrough)
                s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
            apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
        }
        if (s->dither == DITHER_FSB) {
            s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
                           s->rgb2yuv_coeffs, s->yuv_offset[1], s->dither_scratch);
        } else {
            s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
                       s->rgb2yuv_coeffs, s->yuv_offset[1]);
        }
    }

    return 0;
}
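
/* Rough illustration of the slice split above: rows are handed out in pairs
 * so that 4:2:0 chroma lines are never shared between jobs. With a 1080-line
 * frame and 4 jobs, h_in is 540 row pairs, so job 0 processes rows [0,270),
 * job 1 [270,540), job 2 [540,810) and job 3 [810,1080).
 */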

static int get_range_off(AVFilterContext *ctx, int *off,
                         int *y_rng, int *uv_rng,
                         enum AVColorRange rng, int depth)
{
    switch (rng) {
    case AVCOL_RANGE_UNSPECIFIED: {
        ColorSpaceContext *s = ctx->priv;

        if (!s->did_warn_range) {
            av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
            s->did_warn_range = 1;
        }
    }
        // fall-through
    case AVCOL_RANGE_MPEG:
        *off = 16 << (depth - 8);
        *y_rng = 219 << (depth - 8);
        *uv_rng = 224 << (depth - 8);
        break;
    case AVCOL_RANGE_JPEG:
        *off = 0;
        *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
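
/* Worked numbers for the ranges above: 8-bit tv/mpeg content yields off = 16,
 * y_rng = 219 and uv_rng = 224 (Y in [16,235], chroma in [16,240]); 10-bit
 * tv/mpeg yields off = 64, y_rng = 876, uv_rng = 896; full-range (pc/jpeg)
 * 8-bit content yields off = 0 with both ranges equal to 255.
 */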

static int create_filtergraph(AVFilterContext *ctx,
                              const AVFrame *in, const AVFrame *out)
{
    ColorSpaceContext *s = ctx->priv;
    const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
    int m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;

#define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
#define supported_subsampling(lcw, lch) \
    (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
#define supported_format(d) \
    ((d) != NULL && (d)->nb_components == 3 && \
     !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
     supported_depth((d)->comp[0].depth) && \
     supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))

    if (!supported_format(in_desc)) {
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported input format %d (%s) or bitdepth (%d)\n",
               in->format, av_get_pix_fmt_name(in->format),
               in_desc ? in_desc->comp[0].depth : -1);
        return AVERROR(EINVAL);
    }
    if (!supported_format(out_desc)) {
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported output format %d (%s) or bitdepth (%d)\n",
               out->format, av_get_pix_fmt_name(out->format),
               out_desc ? out_desc->comp[0].depth : -1);
        return AVERROR(EINVAL);
    }

    if (in->color_primaries  != s->in_prm)  s->in_primaries  = NULL;
    if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
    if (in->color_trc        != s->in_trc)  s->in_txchr      = NULL;
    if (out->color_trc       != s->out_trc) s->out_txchr     = NULL;
    if (in->colorspace       != s->in_csp ||
        in->color_range      != s->in_rng)  s->in_lumacoef   = NULL;
    if (out->colorspace      != s->out_csp ||
        out->color_range     != s->out_rng) s->out_lumacoef  = NULL;

    if (!s->out_primaries || !s->in_primaries) {
        s->in_prm = in->color_primaries;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
        if (s->user_iprm != AVCOL_PRI_UNSPECIFIED)
            s->in_prm = s->user_iprm;
        s->in_primaries = av_csp_primaries_desc_from_id(s->in_prm);
        if (!s->in_primaries) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input primaries %d (%s)\n",
                   s->in_prm, av_color_primaries_name(s->in_prm));
            return AVERROR(EINVAL);
        }
        s->out_prm = out->color_primaries;
        s->out_primaries = av_csp_primaries_desc_from_id(s->out_prm);
        if (!s->out_primaries) {
            if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
                if (s->user_all == CS_UNSPECIFIED) {
                    av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
                } else {
                    av_log(ctx, AV_LOG_ERROR,
                           "Unsupported output color property %d\n", s->user_all);
                }
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output primaries %d (%s)\n",
                       s->out_prm, av_color_primaries_name(s->out_prm));
            }
            return AVERROR(EINVAL);
        }
        s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
                                           sizeof(*s->in_primaries));
        if (!s->lrgb2lrgb_passthrough) {
            double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
            const AVWhitepointCoefficients *wp_out, *wp_in;

            wp_out = &s->out_primaries->wp;
            wp_in = &s->in_primaries->wp;
            ff_fill_rgb2xyz_table(&s->out_primaries->prim, wp_out, rgb2xyz);
            ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
            ff_fill_rgb2xyz_table(&s->in_primaries->prim, wp_in, rgb2xyz);
            if (memcmp(wp_in, wp_out, sizeof(*wp_in)) != 0 &&
                s->wp_adapt != WP_ADAPT_IDENTITY) {
                double wpconv[3][3], tmp[3][3];

                fill_whitepoint_conv_table(wpconv, s->wp_adapt, &s->in_primaries->wp,
                                           &s->out_primaries->wp);
                ff_matrix_mul_3x3(tmp, rgb2xyz, wpconv);
                ff_matrix_mul_3x3(rgb2rgb, tmp, xyz2rgb);
            } else {
                ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
            }
            for (m = 0; m < 3; m++)
                for (n = 0; n < 3; n++) {
                    s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
                    for (o = 1; o < 8; o++)
                        s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
                }

        }
    }

    if (!s->in_txchr) {
        av_freep(&s->lin_lut);
        s->in_trc = in->color_trc;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
        if (s->user_itrc != AVCOL_TRC_UNSPECIFIED)
            s->in_trc = s->user_itrc;
        s->in_txchr = get_transfer_characteristics(s->in_trc);
        if (!s->in_txchr) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input transfer characteristics %d (%s)\n",
                   s->in_trc, av_color_transfer_name(s->in_trc));
            return AVERROR(EINVAL);
        }
    }

    if (!s->out_txchr) {
        av_freep(&s->lin_lut);
        s->out_trc = out->color_trc;
        s->out_txchr = get_transfer_characteristics(s->out_trc);
        if (!s->out_txchr) {
            if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
                if (s->user_all == CS_UNSPECIFIED) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Please specify output transfer characteristics\n");
                } else {
                    av_log(ctx, AV_LOG_ERROR,
                           "Unsupported output color property %d\n", s->user_all);
                }
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output transfer characteristics %d (%s)\n",
                       s->out_trc, av_color_transfer_name(s->out_trc));
            }
            return AVERROR(EINVAL);
        }
    }

    s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
                             !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
    if (!s->rgb2rgb_passthrough && !s->lin_lut) {
        res = fill_gamma_table(s);
        if (res < 0)
            return res;
    }

    if (!s->in_lumacoef) {
        s->in_csp = in->colorspace;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
        if (s->user_icsp != AVCOL_SPC_UNSPECIFIED)
            s->in_csp = s->user_icsp;
        s->in_rng = in->color_range;
        if (s->user_irng != AVCOL_RANGE_UNSPECIFIED)
            s->in_rng = s->user_irng;
        s->in_lumacoef = av_csp_luma_coeffs_from_avcsp(s->in_csp);
        if (!s->in_lumacoef) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input colorspace %d (%s)\n",
                   s->in_csp, av_color_space_name(s->in_csp));
            return AVERROR(EINVAL);
        }
        redo_yuv2rgb = 1;
    }

    if (!s->out_lumacoef) {
        s->out_csp = out->colorspace;
        s->out_rng = out->color_range;
        s->out_lumacoef = av_csp_luma_coeffs_from_avcsp(s->out_csp);
        if (!s->out_lumacoef) {
            if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
                if (s->user_all == CS_UNSPECIFIED) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Please specify output colorspace\n");
                } else {
                    av_log(ctx, AV_LOG_ERROR,
                           "Unsupported output color property %d\n", s->user_all);
                }
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output colorspace %d (%s)\n", s->out_csp,
                       av_color_space_name(s->out_csp));
            }
            return AVERROR(EINVAL);
        }
        redo_rgb2yuv = 1;
    }

    fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
                    in_desc->log2_chroma_w == out_desc->log2_chroma_w;
    s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
    s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
                             !memcmp(s->in_lumacoef, s->out_lumacoef,
                                     sizeof(*s->in_lumacoef)) &&
                             in_desc->comp[0].depth == out_desc->comp[0].depth;
    if (!s->yuv2yuv_passthrough) {
        if (redo_yuv2rgb) {
            double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
            int off, bits, in_rng;

            res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
                                s->in_rng, in_desc->comp[0].depth);
            if (res < 0) {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported input color range %d (%s)\n",
                       s->in_rng, av_color_range_name(s->in_rng));
                return res;
            }
            for (n = 0; n < 8; n++)
                s->yuv_offset[0][n] = off;
            ff_fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
            ff_matrix_invert_3x3(rgb2yuv, yuv2rgb);
            bits = 1 << (in_desc->comp[0].depth - 1);
            for (n = 0; n < 3; n++) {
                for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
                    s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
                    for (o = 1; o < 8; o++)
                        s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
                }
            }
            av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
            av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
            av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
            av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
            s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
                                       [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
        }

        if (redo_rgb2yuv) {
            double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
            int off, out_rng, bits;

            res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
                                s->out_rng, out_desc->comp[0].depth);
            if (res < 0) {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output color range %d (%s)\n",
                       s->out_rng, av_color_range_name(s->out_rng));
                return res;
            }
            for (n = 0; n < 8; n++)
                s->yuv_offset[1][n] = off;
            ff_fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
            bits = 1 << (29 - out_desc->comp[0].depth);
            for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
                for (m = 0; m < 3; m++) {
                    s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
                    for (o = 1; o < 8; o++)
                        s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
                }
            }
            av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
            s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
                                       [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
            s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
                                               [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
        }

        if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
            int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
            double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
            double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
            double yuv2yuv[3][3];
            int in_rng, out_rng;

            ff_matrix_mul_3x3(yuv2yuv, rgb2yuv, yuv2rgb);
            for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
                for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
                    s->yuv2yuv_coeffs[m][n][0] =
                        lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
                              (in_rng * (1 << odepth)));
                    for (o = 1; o < 8; o++)
                        s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
                }
            }
            av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
            av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
            s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
                                       [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
        }
    }

    return 0;
}
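
/* Worked example of the fixed-point scaling above, assuming 8-bit BT.709
 * tv-range input: ff_fill_rgb2yuv_table() gives a luma row of roughly
 * (0.2126, 0.7152, 0.0722), the inverse matrix has yuv2rgb[0][0] = 1.0, and
 * with bits = 128 and in_y_rng = 219 the stored coefficient becomes
 * lrint(28672 * 128 * 1.0 / 219) = 16759, i.e. a 14-bit factor of about
 * 1.023 that expands [16,235] luma onto the internal [0,28672] range.
 */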

static av_cold int init(AVFilterContext *ctx)
{
    ColorSpaceContext *s = ctx->priv;

    ff_colorspacedsp_init(&s->dsp);

    return 0;
}

static void uninit(AVFilterContext *ctx)
{
    ColorSpaceContext *s = ctx->priv;

    av_freep(&s->rgb[0]);
    av_freep(&s->rgb[1]);
    av_freep(&s->rgb[2]);
    s->rgb_sz = 0;
    av_freep(&s->dither_scratch_base[0][0]);
    av_freep(&s->dither_scratch_base[0][1]);
    av_freep(&s->dither_scratch_base[1][0]);
    av_freep(&s->dither_scratch_base[1][1]);
    av_freep(&s->dither_scratch_base[2][0]);
    av_freep(&s->dither_scratch_base[2][1]);

    av_freep(&s->lin_lut);
}

static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext *ctx = link->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ColorSpaceContext *s = ctx->priv;
    // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
    // input one if it is writable *OR* the actual literal values of in_*
    // and out_* are identical (not just their respective properties)
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    int res;
    ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
    unsigned rgb_sz = rgb_stride * in->height;
    ThreadData td;

    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    res = av_frame_copy_props(out, in);
    if (res < 0) {
        av_frame_free(&in);
        av_frame_free(&out);
        return res;
    }

    out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
                           default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
    if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);

        out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
        if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
            out->color_trc = AVCOL_TRC_BT2020_12;
    } else {
        out->color_trc = s->user_trc;
    }
    out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
                      default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
    out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
                       in->color_range : s->user_rng;
    if (rgb_sz != s->rgb_sz) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(in->format);
        int uvw = in->width >> desc->log2_chroma_w;

        av_freep(&s->rgb[0]);
        av_freep(&s->rgb[1]);
        av_freep(&s->rgb[2]);
        s->rgb_sz = 0;
        av_freep(&s->dither_scratch_base[0][0]);
        av_freep(&s->dither_scratch_base[0][1]);
        av_freep(&s->dither_scratch_base[1][0]);
        av_freep(&s->dither_scratch_base[1][1]);
        av_freep(&s->dither_scratch_base[2][0]);
        av_freep(&s->dither_scratch_base[2][1]);

        s->rgb[0] = av_malloc(rgb_sz);
        s->rgb[1] = av_malloc(rgb_sz);
        s->rgb[2] = av_malloc(rgb_sz);
        s->dither_scratch_base[0][0] =
            av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
        s->dither_scratch_base[0][1] =
            av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
        s->dither_scratch_base[1][0] =
            av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
        s->dither_scratch_base[1][1] =
            av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
        s->dither_scratch_base[2][0] =
            av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
        s->dither_scratch_base[2][1] =
            av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
        s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
        s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
        s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
        s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
        s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
        s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
        if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
            !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
            !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
            !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
            uninit(ctx);
            av_frame_free(&in);
            av_frame_free(&out);
            return AVERROR(ENOMEM);
        }
        s->rgb_sz = rgb_sz;
    }
    res = create_filtergraph(ctx, in, out);
    if (res < 0) {
        av_frame_free(&in);
        av_frame_free(&out);
        return res;
    }
    s->rgb_stride = rgb_stride / sizeof(int16_t);
    td.in = in;
    td.out = out;
    td.in_linesize[0] = in->linesize[0];
    td.in_linesize[1] = in->linesize[1];
    td.in_linesize[2] = in->linesize[2];
    td.out_linesize[0] = out->linesize[0];
    td.out_linesize[1] = out->linesize[1];
    td.out_linesize[2] = out->linesize[2];
    td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h;
    td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h;
    if (s->yuv2yuv_passthrough) {
        res = av_frame_copy(out, in);
        if (res < 0) {
            av_frame_free(&in);
            av_frame_free(&out);
            return res;
        }
    } else {
        ff_filter_execute(ctx, convert, &td, NULL,
                          FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
    }
    av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
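
/* Back-of-the-envelope sizing for the intermediate buffers above: for a
 * 1920x1080 input, rgb_stride is FFALIGN(1920 * 2, 32) = 3840 bytes, so each
 * of the three rgb[] planes takes 3840 * 1080 bytes (about 4 MiB), roughly
 * 12 MiB in total on top of the usual frame buffers.
 */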

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUVJ420P,  AV_PIX_FMT_YUVJ422P,  AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_NONE
    };
    int res;
    ColorSpaceContext *s = ctx->priv;
    AVFilterFormats *formats = ff_make_format_list(pix_fmts);

    if (!formats)
        return AVERROR(ENOMEM);
    if (s->user_format == AV_PIX_FMT_NONE)
        return ff_set_common_formats(ctx, formats);
    res = ff_formats_ref(formats, &ctx->inputs[0]->outcfg.formats);
    if (res < 0)
        return res;
    formats = NULL;
    res = ff_add_format(&formats, s->user_format);
    if (res < 0)
        return res;

    return ff_formats_ref(formats, &ctx->outputs[0]->incfg.formats);
}

static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->dst;
    AVFilterLink *inlink = outlink->src->inputs[0];

    if (inlink->w % 2 || inlink->h % 2) {
        av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
               inlink->w, inlink->h);
        return AVERROR_PATCHWELCOME;
    }

    outlink->w = inlink->w;
    outlink->h = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->time_base = inlink->time_base;

    return 0;
}

#define OFFSET(x) offsetof(ColorSpaceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
#define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, .unit = z }

static const AVOption colorspace_options[] = {
    { "all",        "Set all color properties together",
      OFFSET(user_all),   AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
      CS_UNSPECIFIED, CS_NB - 1, FLAGS, .unit = "all" },
    ENUM("bt470m",      CS_BT470M,      "all"),
    ENUM("bt470bg",     CS_BT470BG,     "all"),
    ENUM("bt601-6-525", CS_BT601_6_525, "all"),
    ENUM("bt601-6-625", CS_BT601_6_625, "all"),
    ENUM("bt709",       CS_BT709,       "all"),
    ENUM("smpte170m",   CS_SMPTE170M,   "all"),
    ENUM("smpte240m",   CS_SMPTE240M,   "all"),
    ENUM("bt2020",      CS_BT2020,      "all"),

    { "space",      "Output colorspace",
      OFFSET(user_csp),   AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
      AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, .unit = "csp" },
    ENUM("bt709",     AVCOL_SPC_BT709,      "csp"),
    ENUM("fcc",       AVCOL_SPC_FCC,        "csp"),
    ENUM("bt470bg",   AVCOL_SPC_BT470BG,    "csp"),
    ENUM("smpte170m", AVCOL_SPC_SMPTE170M,  "csp"),
    ENUM("smpte240m", AVCOL_SPC_SMPTE240M,  "csp"),
    ENUM("ycgco",     AVCOL_SPC_YCGCO,      "csp"),
    ENUM("gbr",       AVCOL_SPC_RGB,        "csp"),
    ENUM("bt2020nc",  AVCOL_SPC_BT2020_NCL, "csp"),
    ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),

    { "range",      "Output color range",
      OFFSET(user_rng),   AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
      AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, .unit = "rng" },
    ENUM("tv",   AVCOL_RANGE_MPEG, "rng"),
    ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
    ENUM("pc",   AVCOL_RANGE_JPEG, "rng"),
    ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),

    { "primaries",  "Output color primaries",
      OFFSET(user_prm),   AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
      AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, .unit = "prm" },
    ENUM("bt709",     AVCOL_PRI_BT709,     "prm"),
    ENUM("bt470m",    AVCOL_PRI_BT470M,    "prm"),
    ENUM("bt470bg",   AVCOL_PRI_BT470BG,   "prm"),
    ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
    ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
    ENUM("smpte428",  AVCOL_PRI_SMPTE428,  "prm"),
    ENUM("film",      AVCOL_PRI_FILM,      "prm"),
    ENUM("smpte431",  AVCOL_PRI_SMPTE431,  "prm"),
    ENUM("smpte432",  AVCOL_PRI_SMPTE432,  "prm"),
    ENUM("bt2020",    AVCOL_PRI_BT2020,    "prm"),
    ENUM("jedec-p22", AVCOL_PRI_JEDEC_P22, "prm"),
    ENUM("ebu3213",   AVCOL_PRI_EBU3213,   "prm"),

    { "trc",        "Output transfer characteristics",
      OFFSET(user_trc),   AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
      AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, .unit = "trc" },
    ENUM("bt709",        AVCOL_TRC_BT709,        "trc"),
    ENUM("bt470m",       AVCOL_TRC_GAMMA22,      "trc"),
    ENUM("gamma22",      AVCOL_TRC_GAMMA22,      "trc"),
    ENUM("bt470bg",      AVCOL_TRC_GAMMA28,      "trc"),
    ENUM("gamma28",      AVCOL_TRC_GAMMA28,      "trc"),
    ENUM("smpte170m",    AVCOL_TRC_SMPTE170M,    "trc"),
    ENUM("smpte240m",    AVCOL_TRC_SMPTE240M,    "trc"),
    ENUM("linear",       AVCOL_TRC_LINEAR,       "trc"),
    ENUM("srgb",         AVCOL_TRC_IEC61966_2_1, "trc"),
    ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
    ENUM("xvycc",        AVCOL_TRC_IEC61966_2_4, "trc"),
    ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
    ENUM("bt2020-10",    AVCOL_TRC_BT2020_10,    "trc"),
    ENUM("bt2020-12",    AVCOL_TRC_BT2020_12,    "trc"),

    { "format",     "Output pixel format",
      OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
      AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, .unit = "fmt" },
    ENUM("yuv420p",   AV_PIX_FMT_YUV420P,   "fmt"),
    ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
    ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
    ENUM("yuv422p",   AV_PIX_FMT_YUV422P,   "fmt"),
    ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
    ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
    ENUM("yuv444p",   AV_PIX_FMT_YUV444P,   "fmt"),
    ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
    ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),

    { "fast",       "Ignore primary chromaticity and gamma correction",
      OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
      0, 1, FLAGS },

    { "dither",     "Dithering mode",
      OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
      DITHER_NONE, DITHER_NB - 1, FLAGS, .unit = "dither" },
    ENUM("none", DITHER_NONE, "dither"),
    ENUM("fsb",  DITHER_FSB,  "dither"),

    { "wpadapt",    "Whitepoint adaptation method",
      OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
      WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, .unit = "wpadapt" },
    ENUM("bradford", WP_ADAPT_BRADFORD,  "wpadapt"),
    ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
    ENUM("identity", WP_ADAPT_IDENTITY,  "wpadapt"),

    { "iall",       "Set all input color properties together",
      OFFSET(user_iall),  AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
      CS_UNSPECIFIED, CS_NB - 1, FLAGS, .unit = "all" },
    { "ispace",     "Input colorspace",
      OFFSET(user_icsp),  AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
      AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, .unit = "csp" },
    { "irange",     "Input color range",
      OFFSET(user_irng),  AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
      AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, .unit = "rng" },
    { "iprimaries", "Input color primaries",
      OFFSET(user_iprm),  AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
      AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, .unit = "prm" },
    { "itrc",       "Input transfer characteristics",
      OFFSET(user_itrc),  AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
      AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, .unit = "trc" },

    { NULL }
};
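
/* Example command line (illustrative, assuming an FFmpeg build with this
 * filter enabled): convert BT.601 material to fully signalled BT.709 with a
 * 10-bit 4:2:0 output and Floyd-Steinberg dithering, taking the input side
 * from the frame metadata:
 *
 *     ffmpeg -i in.mp4 -vf colorspace=all=bt709:format=yuv420p10:dither=fsb out.mkv
 */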

AVFILTER_DEFINE_CLASS(colorspace);

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
};

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
};

const AVFilter ff_vf_colorspace = {
    .name        = "colorspace",
    .description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
    .init        = init,
    .uninit      = uninit,
    .priv_size   = sizeof(ColorSpaceContext),
    .priv_class  = &colorspace_class,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    FILTER_QUERY_FUNC(query_formats),
    .flags       = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
Definition: vf_fspp.c:60