FFmpeg
vf_colorspace.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
/**
 * @file
 * Convert between colorspaces.
 */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/pixfmt.h"
30 
31 #include "avfilter.h"
32 #include "colorspacedsp.h"
33 #include "formats.h"
34 #include "internal.h"
35 #include "video.h"
36 #include "colorspace.h"
37 
38 enum DitherMode {
42 };
43 
44 enum Colorspace {
55 };
56 
57 enum Whitepoint {
63 };
64 
71 };
72 
84 };
85 
86 static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
97 };
98 
99 static const enum AVColorSpace default_csp[CS_NB + 1] = {
110 };
111 
115 };
116 
118  double alpha, beta, gamma, delta;
119 };
120 
typedef struct ColorSpaceContext {
    const AVClass *class;

    // Color metadata: user_* hold the output properties requested via
    // options, user_i* the input-side overrides, and in_*/out_* the
    // endpoints the conversion is currently configured for.
    enum Colorspace user_all, user_iall;
    enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
    enum AVColorRange in_rng, out_rng, user_rng, user_irng;
    enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
    enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
    enum AVPixelFormat in_format, user_format;
    enum WhitepointAdaptation wp_adapt;

    // Intermediate linear-RGB planes plus per-plane dither scratch rows;
    // rgb_stride is in int16_t units (filter_frame divides the byte stride
    // by sizeof(int16_t) before storing it here).
    int16_t *rgb[3];
    ptrdiff_t rgb_stride;
    unsigned rgb_sz;
    int *dither_scratch[3][2], *dither_scratch_base[3][2];

    const struct ColorPrimaries *in_primaries, *out_primaries;
    DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8]; // 14-bit fixed point

    const struct TransferCharacteristics *in_txchr, *out_txchr;
    // Shared LUT allocation: lin_lut maps input-TRC samples to linear,
    // delin_lut points at the second half and maps linear to output TRC.
    int16_t *lin_lut, *delin_lut;

    const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
    int yuv2yuv_passthrough, yuv2yuv_fastmode;
    DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
    // double-precision originals kept so yuv2yuv can be rederived exactly
    double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
    int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;
161 
164 
165 // FIXME deal with odd width/heights
166 // FIXME faster linearize/delinearize implementation (integer pow)
167 // FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
168 // FIXME test that the values in (de)lin_lut don't exceed their container storage
169 // type size (only useful if we keep the LUT and don't move to fast integer pow)
170 // FIXME dithering if bitdepth goes down?
171 // FIXME bitexact for fate integration?
172 
173 // FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
174 // find any actual tables that document their real values...
175 // See http://www.13thmonkey.org/~boris/gammacorrection/ first graph why it matters
177  [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
178  [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
179  [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
180  [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
181  [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
182  [AVCOL_TRC_LINEAR] = { 1.0, 0.0, 1.0, 0.0 },
183  [AVCOL_TRC_IEC61966_2_1] = { 1.055, 0.0031308, 1.0 / 2.4, 12.92 },
184  [AVCOL_TRC_IEC61966_2_4] = { 1.099, 0.018, 0.45, 4.5 },
185  [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
186  [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
187 };
188 
189 static const struct TransferCharacteristics *
191 {
192  const struct TransferCharacteristics *coeffs;
193 
194  if (trc >= AVCOL_TRC_NB)
195  return NULL;
196  coeffs = &transfer_characteristics[trc];
197  if (!coeffs->alpha)
198  return NULL;
199 
200  return coeffs;
201 }
202 
204  [WP_D65] = { 0.3127, 0.3290 },
205  [WP_C] = { 0.3100, 0.3160 },
206  [WP_DCI] = { 0.3140, 0.3510 },
207  [WP_E] = { 1/3.0f, 1/3.0f },
208 };
209 
211  [AVCOL_PRI_BT709] = { WP_D65, { 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 } },
212  [AVCOL_PRI_BT470M] = { WP_C, { 0.670, 0.330, 0.210, 0.710, 0.140, 0.080 } },
213  [AVCOL_PRI_BT470BG] = { WP_D65, { 0.640, 0.330, 0.290, 0.600, 0.150, 0.060 } },
214  [AVCOL_PRI_SMPTE170M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
215  [AVCOL_PRI_SMPTE240M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
216  [AVCOL_PRI_SMPTE428] = { WP_E, { 0.735, 0.265, 0.274, 0.718, 0.167, 0.009 } },
217  [AVCOL_PRI_SMPTE431] = { WP_DCI, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
218  [AVCOL_PRI_SMPTE432] = { WP_D65, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
219  [AVCOL_PRI_FILM] = { WP_C, { 0.681, 0.319, 0.243, 0.692, 0.145, 0.049 } },
220  [AVCOL_PRI_BT2020] = { WP_D65, { 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 } },
221  [AVCOL_PRI_JEDEC_P22] = { WP_D65, { 0.630, 0.340, 0.295, 0.605, 0.155, 0.077 } },
222 };
223 
225 {
226  const struct ColorPrimaries *p;
227 
228  if (prm >= AVCOL_PRI_NB)
229  return NULL;
230  p = &color_primaries[prm];
231  if (!p->coeff.xr)
232  return NULL;
233 
234  return p;
235 }
236 
238 {
239  int n;
240  double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
241  double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
242  double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
243  double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
244  double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
245 
246  s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
247  if (!s->lin_lut)
248  return AVERROR(ENOMEM);
249  s->delin_lut = &s->lin_lut[32768];
250  for (n = 0; n < 32768; n++) {
251  double v = (n - 2048.0) / 28672.0, d, l;
252 
253  // delinearize
254  if (v <= -out_beta) {
255  d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
256  } else if (v < out_beta) {
257  d = out_delta * v;
258  } else {
259  d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
260  }
261  s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
262 
263  // linearize
264  if (v <= -in_beta * in_delta) {
265  l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
266  } else if (v < in_beta * in_delta) {
267  l = v * in_idelta;
268  } else {
269  l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
270  }
271  s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
272  }
273 
274  return 0;
275 }
276 
277 /*
278  * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
279  * This function uses the Bradford mechanism.
280  */
281 static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
282  enum Whitepoint src, enum Whitepoint dst)
283 {
284  static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
285  [WP_ADAPT_BRADFORD] = {
286  { 0.8951, 0.2664, -0.1614 },
287  { -0.7502, 1.7135, 0.0367 },
288  { 0.0389, -0.0685, 1.0296 },
289  }, [WP_ADAPT_VON_KRIES] = {
290  { 0.40024, 0.70760, -0.08081 },
291  { -0.22630, 1.16532, 0.04570 },
292  { 0.00000, 0.00000, 0.91822 },
293  },
294  };
295  const double (*ma)[3] = ma_tbl[wp_adapt];
296  const struct WhitepointCoefficients *wp_src = &whitepoint_coefficients[src];
297  double zw_src = 1.0 - wp_src->xw - wp_src->yw;
298  const struct WhitepointCoefficients *wp_dst = &whitepoint_coefficients[dst];
299  double zw_dst = 1.0 - wp_dst->xw - wp_dst->yw;
300  double mai[3][3], fac[3][3], tmp[3][3];
301  double rs, gs, bs, rd, gd, bd;
302 
303  ff_matrix_invert_3x3(ma, mai);
304  rs = ma[0][0] * wp_src->xw + ma[0][1] * wp_src->yw + ma[0][2] * zw_src;
305  gs = ma[1][0] * wp_src->xw + ma[1][1] * wp_src->yw + ma[1][2] * zw_src;
306  bs = ma[2][0] * wp_src->xw + ma[2][1] * wp_src->yw + ma[2][2] * zw_src;
307  rd = ma[0][0] * wp_dst->xw + ma[0][1] * wp_dst->yw + ma[0][2] * zw_dst;
308  gd = ma[1][0] * wp_dst->xw + ma[1][1] * wp_dst->yw + ma[1][2] * zw_dst;
309  bd = ma[2][0] * wp_dst->xw + ma[2][1] * wp_dst->yw + ma[2][2] * zw_dst;
310  fac[0][0] = rd / rs;
311  fac[1][1] = gd / gs;
312  fac[2][2] = bd / bs;
313  fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
314  ff_matrix_mul_3x3(tmp, ma, fac);
315  ff_matrix_mul_3x3(out, tmp, mai);
316 }
317 
/* Remap every sample of all three planes in place through the given LUT.
 * Samples are biased by 2048 and clipped to 15 bits to form the LUT index,
 * matching the pseudo-restricted range used by the RGB buffers. */
static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
                      int w, int h, const int16_t *lut)
{
    int p;

    for (p = 0; p < 3; p++) {
        int16_t *row = buf[p];
        int y;

        for (y = 0; y < h; y++, row += stride) {
            int x;

            for (x = 0; x < w; x++)
                row[x] = lut[av_clip_uintp2(2048 + row[x], 15)];
        }
    }
}
334 
// Per-frame data shared by all slice-threading jobs in convert().
typedef struct ThreadData {
    AVFrame *in, *out;                          // source and destination frames
    ptrdiff_t in_linesize[3], out_linesize[3];  // per-plane strides in bytes
    int in_ss_h, out_ss_h;                      // vertical chroma subsampling shift (log2)
} ThreadData;
340 
341 static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
342 {
343  const ThreadData *td = data;
344  ColorSpaceContext *s = ctx->priv;
345  uint8_t *in_data[3], *out_data[3];
346  int16_t *rgb[3];
347  int h_in = (td->in->height + 1) >> 1;
348  int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
349  int w = td->in->width, h = h2 - h1;
350 
351  in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
352  in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
353  in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
354  out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
355  out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
356  out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
357  rgb[0] = s->rgb[0] + s->rgb_stride * h1;
358  rgb[1] = s->rgb[1] + s->rgb_stride * h1;
359  rgb[2] = s->rgb[2] + s->rgb_stride * h1;
360 
361  // FIXME for simd, also make sure we do pictures with negative stride
362  // top-down so we don't overwrite lines with padding of data before it
363  // in the same buffer (same as swscale)
364 
365  if (s->yuv2yuv_fastmode) {
366  // FIXME possibly use a fast mode in case only the y range changes?
367  // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
368  // are non-zero
369  s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
370  s->yuv2yuv_coeffs, s->yuv_offset);
371  } else {
372  // FIXME maybe (for caching efficiency) do pipeline per-line instead of
373  // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
374  // 2 lines, for yuv420.)
375  /*
376  * General design:
377  * - yuv2rgb converts from whatever range the input was ([16-235/240] or
378  * [0,255] or the 10/12bpp equivalents thereof) to an integer version
379  * of RGB in psuedo-restricted 15+sign bits. That means that the float
380  * range [0.0,1.0] is in [0,28762], and the remainder of the int16_t
381  * range is used for overflow/underflow outside the representable
382  * range of this RGB type. rgb2yuv is the exact opposite.
383  * - gamma correction is done using a LUT since that appears to work
384  * fairly fast.
385  * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
386  * (or rgb2yuv conversion) uses nearest-neighbour sampling to read
387  * read chroma pixels at luma resolution. If you want some more fancy
388  * filter, you can use swscale to convert to yuv444p.
389  * - all coefficients are 14bit (so in the [-2.0,2.0] range).
390  */
391  s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
392  s->yuv2rgb_coeffs, s->yuv_offset[0]);
393  if (!s->rgb2rgb_passthrough) {
394  apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
395  if (!s->lrgb2lrgb_passthrough)
396  s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
397  apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
398  }
399  if (s->dither == DITHER_FSB) {
400  s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
402  } else {
403  s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
404  s->rgb2yuv_coeffs, s->yuv_offset[1]);
405  }
406  }
407 
408  return 0;
409 }
410 
411 static int get_range_off(AVFilterContext *ctx, int *off,
412  int *y_rng, int *uv_rng,
413  enum AVColorRange rng, int depth)
414 {
415  switch (rng) {
417  ColorSpaceContext *s = ctx->priv;
418 
419  if (!s->did_warn_range) {
420  av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
421  s->did_warn_range = 1;
422  }
423  }
424  // fall-through
425  case AVCOL_RANGE_MPEG:
426  *off = 16 << (depth - 8);
427  *y_rng = 219 << (depth - 8);
428  *uv_rng = 224 << (depth - 8);
429  break;
430  case AVCOL_RANGE_JPEG:
431  *off = 0;
432  *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
433  break;
434  default:
435  return AVERROR(EINVAL);
436  }
437 
438  return 0;
439 }
440 
442  const AVFrame *in, const AVFrame *out)
443 {
444  ColorSpaceContext *s = ctx->priv;
445  const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
446  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
447  int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
448 
449 #define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
450 #define supported_subsampling(lcw, lch) \
451  (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
452 #define supported_format(d) \
453  ((d) != NULL && (d)->nb_components == 3 && \
454  !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
455  supported_depth((d)->comp[0].depth) && \
456  supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
457 
458  if (!supported_format(in_desc)) {
459  av_log(ctx, AV_LOG_ERROR,
460  "Unsupported input format %d (%s) or bitdepth (%d)\n",
462  in_desc ? in_desc->comp[0].depth : -1);
463  return AVERROR(EINVAL);
464  }
465  if (!supported_format(out_desc)) {
466  av_log(ctx, AV_LOG_ERROR,
467  "Unsupported output format %d (%s) or bitdepth (%d)\n",
468  out->format, av_get_pix_fmt_name(out->format),
469  out_desc ? out_desc->comp[0].depth : -1);
470  return AVERROR(EINVAL);
471  }
472 
473  if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
474  if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
475  if (in->color_trc != s->in_trc) s->in_txchr = NULL;
476  if (out->color_trc != s->out_trc) s->out_txchr = NULL;
477  if (in->colorspace != s->in_csp ||
478  in->color_range != s->in_rng) s->in_lumacoef = NULL;
479  if (out->colorspace != s->out_csp ||
480  out->color_range != s->out_rng) s->out_lumacoef = NULL;
481 
482  if (!s->out_primaries || !s->in_primaries) {
483  s->in_prm = in->color_primaries;
484  if (s->user_iall != CS_UNSPECIFIED)
485  s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
487  s->in_prm = s->user_iprm;
488  s->in_primaries = get_color_primaries(s->in_prm);
489  if (!s->in_primaries) {
490  av_log(ctx, AV_LOG_ERROR,
491  "Unsupported input primaries %d (%s)\n",
492  s->in_prm, av_color_primaries_name(s->in_prm));
493  return AVERROR(EINVAL);
494  }
495  s->out_prm = out->color_primaries;
496  s->out_primaries = get_color_primaries(s->out_prm);
497  if (!s->out_primaries) {
498  if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
499  if (s->user_all == CS_UNSPECIFIED) {
500  av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
501  } else {
502  av_log(ctx, AV_LOG_ERROR,
503  "Unsupported output color property %d\n", s->user_all);
504  }
505  } else {
506  av_log(ctx, AV_LOG_ERROR,
507  "Unsupported output primaries %d (%s)\n",
508  s->out_prm, av_color_primaries_name(s->out_prm));
509  }
510  return AVERROR(EINVAL);
511  }
513  sizeof(*s->in_primaries));
514  if (!s->lrgb2lrgb_passthrough) {
515  double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
516  const struct WhitepointCoefficients *wp_out, *wp_in;
517 
518  wp_out = &whitepoint_coefficients[s->out_primaries->wp];
519  wp_in = &whitepoint_coefficients[s->in_primaries->wp];
520  ff_fill_rgb2xyz_table(&s->out_primaries->coeff, wp_out, rgb2xyz);
521  ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
522  ff_fill_rgb2xyz_table(&s->in_primaries->coeff, wp_in, rgb2xyz);
523  if (s->out_primaries->wp != s->in_primaries->wp &&
524  s->wp_adapt != WP_ADAPT_IDENTITY) {
525  double wpconv[3][3], tmp[3][3];
526 
528  s->out_primaries->wp);
529  ff_matrix_mul_3x3(tmp, rgb2xyz, wpconv);
530  ff_matrix_mul_3x3(rgb2rgb, tmp, xyz2rgb);
531  } else {
532  ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
533  }
534  for (m = 0; m < 3; m++)
535  for (n = 0; n < 3; n++) {
536  s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
537  for (o = 1; o < 8; o++)
538  s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
539  }
540 
541  emms = 1;
542  }
543  }
544 
545  if (!s->in_txchr) {
546  av_freep(&s->lin_lut);
547  s->in_trc = in->color_trc;
548  if (s->user_iall != CS_UNSPECIFIED)
549  s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
551  s->in_trc = s->user_itrc;
552  s->in_txchr = get_transfer_characteristics(s->in_trc);
553  if (!s->in_txchr) {
554  av_log(ctx, AV_LOG_ERROR,
555  "Unsupported input transfer characteristics %d (%s)\n",
556  s->in_trc, av_color_transfer_name(s->in_trc));
557  return AVERROR(EINVAL);
558  }
559  }
560 
561  if (!s->out_txchr) {
562  av_freep(&s->lin_lut);
563  s->out_trc = out->color_trc;
564  s->out_txchr = get_transfer_characteristics(s->out_trc);
565  if (!s->out_txchr) {
566  if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
567  if (s->user_all == CS_UNSPECIFIED) {
568  av_log(ctx, AV_LOG_ERROR,
569  "Please specify output transfer characteristics\n");
570  } else {
571  av_log(ctx, AV_LOG_ERROR,
572  "Unsupported output color property %d\n", s->user_all);
573  }
574  } else {
575  av_log(ctx, AV_LOG_ERROR,
576  "Unsupported output transfer characteristics %d (%s)\n",
577  s->out_trc, av_color_transfer_name(s->out_trc));
578  }
579  return AVERROR(EINVAL);
580  }
581  }
582 
584  !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
585  if (!s->rgb2rgb_passthrough && !s->lin_lut) {
586  res = fill_gamma_table(s);
587  if (res < 0)
588  return res;
589  emms = 1;
590  }
591 
592  if (!s->in_lumacoef) {
593  s->in_csp = in->colorspace;
594  if (s->user_iall != CS_UNSPECIFIED)
595  s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
597  s->in_csp = s->user_icsp;
598  s->in_rng = in->color_range;
600  s->in_rng = s->user_irng;
601  s->in_lumacoef = ff_get_luma_coefficients(s->in_csp);
602  if (!s->in_lumacoef) {
603  av_log(ctx, AV_LOG_ERROR,
604  "Unsupported input colorspace %d (%s)\n",
605  s->in_csp, av_color_space_name(s->in_csp));
606  return AVERROR(EINVAL);
607  }
608  redo_yuv2rgb = 1;
609  }
610 
611  if (!s->out_lumacoef) {
612  s->out_csp = out->colorspace;
613  s->out_rng = out->color_range;
614  s->out_lumacoef = ff_get_luma_coefficients(s->out_csp);
615  if (!s->out_lumacoef) {
616  if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
617  if (s->user_all == CS_UNSPECIFIED) {
618  av_log(ctx, AV_LOG_ERROR,
619  "Please specify output transfer characteristics\n");
620  } else {
621  av_log(ctx, AV_LOG_ERROR,
622  "Unsupported output color property %d\n", s->user_all);
623  }
624  } else {
625  av_log(ctx, AV_LOG_ERROR,
626  "Unsupported output transfer characteristics %d (%s)\n",
627  s->out_csp, av_color_space_name(s->out_csp));
628  }
629  return AVERROR(EINVAL);
630  }
631  redo_rgb2yuv = 1;
632  }
633 
634  fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
635  in_desc->log2_chroma_w == out_desc->log2_chroma_w;
636  s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
637  s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
638  !memcmp(s->in_lumacoef, s->out_lumacoef,
639  sizeof(*s->in_lumacoef)) &&
640  in_desc->comp[0].depth == out_desc->comp[0].depth;
641  if (!s->yuv2yuv_passthrough) {
642  if (redo_yuv2rgb) {
643  double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
644  int off, bits, in_rng;
645 
646  res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
647  s->in_rng, in_desc->comp[0].depth);
648  if (res < 0) {
649  av_log(ctx, AV_LOG_ERROR,
650  "Unsupported input color range %d (%s)\n",
651  s->in_rng, av_color_range_name(s->in_rng));
652  return res;
653  }
654  for (n = 0; n < 8; n++)
655  s->yuv_offset[0][n] = off;
656  ff_fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
657  ff_matrix_invert_3x3(rgb2yuv, yuv2rgb);
658  bits = 1 << (in_desc->comp[0].depth - 1);
659  for (n = 0; n < 3; n++) {
660  for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
661  s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
662  for (o = 1; o < 8; o++)
663  s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
664  }
665  }
666  av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
667  av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
668  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
669  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
670  s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
671  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
672  emms = 1;
673  }
674 
675  if (redo_rgb2yuv) {
676  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
677  int off, out_rng, bits;
678 
679  res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
680  s->out_rng, out_desc->comp[0].depth);
681  if (res < 0) {
682  av_log(ctx, AV_LOG_ERROR,
683  "Unsupported output color range %d (%s)\n",
684  s->out_rng, av_color_range_name(s->out_rng));
685  return res;
686  }
687  for (n = 0; n < 8; n++)
688  s->yuv_offset[1][n] = off;
690  bits = 1 << (29 - out_desc->comp[0].depth);
691  for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
692  for (m = 0; m < 3; m++) {
693  s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
694  for (o = 1; o < 8; o++)
695  s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
696  }
697  }
698  av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
699  s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
700  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
701  s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
702  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
703  emms = 1;
704  }
705 
706  if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
707  int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
708  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
709  double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
710  double yuv2yuv[3][3];
711  int in_rng, out_rng;
712 
713  ff_matrix_mul_3x3(yuv2yuv, yuv2rgb, rgb2yuv);
714  for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
715  for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
716  s->yuv2yuv_coeffs[m][n][0] =
717  lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
718  (in_rng * (1 << odepth)));
719  for (o = 1; o < 8; o++)
720  s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
721  }
722  }
723  av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
724  av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
725  s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
726  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
727  }
728  }
729 
730  if (emms)
731  emms_c();
732 
733  return 0;
734 }
735 
737 {
738  ColorSpaceContext *s = ctx->priv;
739 
741 
742  return 0;
743 }
744 
746 {
747  ColorSpaceContext *s = ctx->priv;
748 
749  av_freep(&s->rgb[0]);
750  av_freep(&s->rgb[1]);
751  av_freep(&s->rgb[2]);
752  s->rgb_sz = 0;
753  av_freep(&s->dither_scratch_base[0][0]);
754  av_freep(&s->dither_scratch_base[0][1]);
755  av_freep(&s->dither_scratch_base[1][0]);
756  av_freep(&s->dither_scratch_base[1][1]);
757  av_freep(&s->dither_scratch_base[2][0]);
758  av_freep(&s->dither_scratch_base[2][1]);
759 
760  av_freep(&s->lin_lut);
761 }
762 
764 {
765  AVFilterContext *ctx = link->dst;
766  AVFilterLink *outlink = ctx->outputs[0];
767  ColorSpaceContext *s = ctx->priv;
768  // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
769  // input one if it is writable *OR* the actual literal values of in_*
770  // and out_* are identical (not just their respective properties)
771  AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
772  int res;
773  ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
774  unsigned rgb_sz = rgb_stride * in->height;
775  ThreadData td;
776 
777  if (!out) {
778  av_frame_free(&in);
779  return AVERROR(ENOMEM);
780  }
781  res = av_frame_copy_props(out, in);
782  if (res < 0) {
783  av_frame_free(&in);
784  av_frame_free(&out);
785  return res;
786  }
787 
788  out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
789  default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
790  if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
792 
793  out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
794  if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
796  } else {
797  out->color_trc = s->user_trc;
798  }
799  out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
800  default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
801  out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
802  in->color_range : s->user_rng;
803  if (rgb_sz != s->rgb_sz) {
805  int uvw = in->width >> desc->log2_chroma_w;
806 
807  av_freep(&s->rgb[0]);
808  av_freep(&s->rgb[1]);
809  av_freep(&s->rgb[2]);
810  s->rgb_sz = 0;
811  av_freep(&s->dither_scratch_base[0][0]);
812  av_freep(&s->dither_scratch_base[0][1]);
813  av_freep(&s->dither_scratch_base[1][0]);
814  av_freep(&s->dither_scratch_base[1][1]);
815  av_freep(&s->dither_scratch_base[2][0]);
816  av_freep(&s->dither_scratch_base[2][1]);
817 
818  s->rgb[0] = av_malloc(rgb_sz);
819  s->rgb[1] = av_malloc(rgb_sz);
820  s->rgb[2] = av_malloc(rgb_sz);
821  s->dither_scratch_base[0][0] =
822  av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
823  s->dither_scratch_base[0][1] =
824  av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
825  s->dither_scratch_base[1][0] =
826  av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
827  s->dither_scratch_base[1][1] =
828  av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
829  s->dither_scratch_base[2][0] =
830  av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
831  s->dither_scratch_base[2][1] =
832  av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
833  s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
834  s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
835  s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
836  s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
837  s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
838  s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
839  if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
840  !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
841  !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
842  !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
843  uninit(ctx);
844  av_frame_free(&in);
845  av_frame_free(&out);
846  return AVERROR(ENOMEM);
847  }
848  s->rgb_sz = rgb_sz;
849  }
850  res = create_filtergraph(ctx, in, out);
851  if (res < 0) {
852  av_frame_free(&in);
853  av_frame_free(&out);
854  return res;
855  }
856  s->rgb_stride = rgb_stride / sizeof(int16_t);
857  td.in = in;
858  td.out = out;
859  td.in_linesize[0] = in->linesize[0];
860  td.in_linesize[1] = in->linesize[1];
861  td.in_linesize[2] = in->linesize[2];
862  td.out_linesize[0] = out->linesize[0];
863  td.out_linesize[1] = out->linesize[1];
864  td.out_linesize[2] = out->linesize[2];
867  if (s->yuv2yuv_passthrough) {
868  res = av_frame_copy(out, in);
869  if (res < 0) {
870  av_frame_free(&in);
871  av_frame_free(&out);
872  return res;
873  }
874  } else {
875  ctx->internal->execute(ctx, convert, &td, NULL,
876  FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
877  }
878  av_frame_free(&in);
879 
880  return ff_filter_frame(outlink, out);
881 }
882 
884 {
885  static const enum AVPixelFormat pix_fmts[] = {
891  };
892  int res;
893  ColorSpaceContext *s = ctx->priv;
895 
896  if (!formats)
897  return AVERROR(ENOMEM);
898  if (s->user_format == AV_PIX_FMT_NONE)
899  return ff_set_common_formats(ctx, formats);
900  res = ff_formats_ref(formats, &ctx->inputs[0]->outcfg.formats);
901  if (res < 0)
902  return res;
903  formats = NULL;
904  res = ff_add_format(&formats, s->user_format);
905  if (res < 0)
906  return res;
907 
908  return ff_formats_ref(formats, &ctx->outputs[0]->incfg.formats);
909 }
910 
911 static int config_props(AVFilterLink *outlink)
912 {
913  AVFilterContext *ctx = outlink->dst;
914  AVFilterLink *inlink = outlink->src->inputs[0];
915 
916  if (inlink->w % 2 || inlink->h % 2) {
917  av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
918  inlink->w, inlink->h);
919  return AVERROR_PATCHWELCOME;
920  }
921 
922  outlink->w = inlink->w;
923  outlink->h = inlink->h;
924  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
925  outlink->time_base = inlink->time_base;
926 
927  return 0;
928 }
929 
930 #define OFFSET(x) offsetof(ColorSpaceContext, x)
931 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
932 #define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }
933 
934 static const AVOption colorspace_options[] = {
935  { "all", "Set all color properties together",
936  OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
937  CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
938  ENUM("bt470m", CS_BT470M, "all"),
939  ENUM("bt470bg", CS_BT470BG, "all"),
940  ENUM("bt601-6-525", CS_BT601_6_525, "all"),
941  ENUM("bt601-6-625", CS_BT601_6_625, "all"),
942  ENUM("bt709", CS_BT709, "all"),
943  ENUM("smpte170m", CS_SMPTE170M, "all"),
944  ENUM("smpte240m", CS_SMPTE240M, "all"),
945  ENUM("bt2020", CS_BT2020, "all"),
946 
947  { "space", "Output colorspace",
948  OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
949  AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, "csp"},
950  ENUM("bt709", AVCOL_SPC_BT709, "csp"),
951  ENUM("fcc", AVCOL_SPC_FCC, "csp"),
952  ENUM("bt470bg", AVCOL_SPC_BT470BG, "csp"),
953  ENUM("smpte170m", AVCOL_SPC_SMPTE170M, "csp"),
954  ENUM("smpte240m", AVCOL_SPC_SMPTE240M, "csp"),
955  ENUM("ycgco", AVCOL_SPC_YCGCO, "csp"),
956  ENUM("gbr", AVCOL_SPC_RGB, "csp"),
957  ENUM("bt2020nc", AVCOL_SPC_BT2020_NCL, "csp"),
958  ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),
959 
960  { "range", "Output color range",
961  OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
963  ENUM("tv", AVCOL_RANGE_MPEG, "rng"),
964  ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
965  ENUM("pc", AVCOL_RANGE_JPEG, "rng"),
966  ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),
967 
968  { "primaries", "Output color primaries",
969  OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
970  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
971  ENUM("bt709", AVCOL_PRI_BT709, "prm"),
972  ENUM("bt470m", AVCOL_PRI_BT470M, "prm"),
973  ENUM("bt470bg", AVCOL_PRI_BT470BG, "prm"),
974  ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
975  ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
976  ENUM("smpte428", AVCOL_PRI_SMPTE428, "prm"),
977  ENUM("film", AVCOL_PRI_FILM, "prm"),
978  ENUM("smpte431", AVCOL_PRI_SMPTE431, "prm"),
979  ENUM("smpte432", AVCOL_PRI_SMPTE432, "prm"),
980  ENUM("bt2020", AVCOL_PRI_BT2020, "prm"),
981  ENUM("jedec-p22", AVCOL_PRI_JEDEC_P22, "prm"),
982  ENUM("ebu3213", AVCOL_PRI_EBU3213, "prm"),
983 
984  { "trc", "Output transfer characteristics",
985  OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
986  AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
987  ENUM("bt709", AVCOL_TRC_BT709, "trc"),
988  ENUM("bt470m", AVCOL_TRC_GAMMA22, "trc"),
989  ENUM("gamma22", AVCOL_TRC_GAMMA22, "trc"),
990  ENUM("bt470bg", AVCOL_TRC_GAMMA28, "trc"),
991  ENUM("gamma28", AVCOL_TRC_GAMMA28, "trc"),
992  ENUM("smpte170m", AVCOL_TRC_SMPTE170M, "trc"),
993  ENUM("smpte240m", AVCOL_TRC_SMPTE240M, "trc"),
994  ENUM("linear", AVCOL_TRC_LINEAR, "trc"),
995  ENUM("srgb", AVCOL_TRC_IEC61966_2_1, "trc"),
996  ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
997  ENUM("xvycc", AVCOL_TRC_IEC61966_2_4, "trc"),
998  ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
999  ENUM("bt2020-10", AVCOL_TRC_BT2020_10, "trc"),
1000  ENUM("bt2020-12", AVCOL_TRC_BT2020_12, "trc"),
1001 
1002  { "format", "Output pixel format",
1003  OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
1005  ENUM("yuv420p", AV_PIX_FMT_YUV420P, "fmt"),
1006  ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
1007  ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
1008  ENUM("yuv422p", AV_PIX_FMT_YUV422P, "fmt"),
1009  ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
1010  ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
1011  ENUM("yuv444p", AV_PIX_FMT_YUV444P, "fmt"),
1012  ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
1013  ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
1014 
1015  { "fast", "Ignore primary chromaticity and gamma correction",
1016  OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
1017  0, 1, FLAGS },
1018 
1019  { "dither", "Dithering mode",
1020  OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
1021  DITHER_NONE, DITHER_NB - 1, FLAGS, "dither" },
1022  ENUM("none", DITHER_NONE, "dither"),
1023  ENUM("fsb", DITHER_FSB, "dither"),
1024 
1025  { "wpadapt", "Whitepoint adaptation method",
1026  OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
1027  WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, "wpadapt" },
1028  ENUM("bradford", WP_ADAPT_BRADFORD, "wpadapt"),
1029  ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
1030  ENUM("identity", WP_ADAPT_IDENTITY, "wpadapt"),
1031 
1032  { "iall", "Set all input color properties together",
1033  OFFSET(user_iall), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
1034  CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
1035  { "ispace", "Input colorspace",
1036  OFFSET(user_icsp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
1037  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "csp" },
1038  { "irange", "Input color range",
1039  OFFSET(user_irng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
1041  { "iprimaries", "Input color primaries",
1042  OFFSET(user_iprm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
1043  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
1044  { "itrc", "Input transfer characteristics",
1045  OFFSET(user_itrc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
1046  AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
1047 
1048  { NULL }
1049 };
1050 
/* Generates the colorspace_class AVClass boilerplate tying the filter to
 * colorspace_options above. */
AVFILTER_DEFINE_CLASS(colorspace);
1052 
/* Single video input pad; frames are processed in filter_frame(). */
static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
1061 
/* Single video output pad; output link properties are set in config_props(). */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
    { NULL }
};
1070 
1072  .name = "colorspace",
1073  .description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
1074  .init = init,
1075  .uninit = uninit,
1076  .query_formats = query_formats,
1077  .priv_size = sizeof(ColorSpaceContext),
1078  .priv_class = &colorspace_class,
1079  .inputs = inputs,
1080  .outputs = outputs,
1082 };
ITU-R BT2020 for 12-bit system.
Definition: pixfmt.h:499
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
EBU Tech. 3213-E / JEDEC P22 phosphors.
Definition: pixfmt.h:474
#define NULL
Definition: coverity.c:32
AVFrame * out
Definition: af_adeclick.c:494
IEC 61966-2-4.
Definition: pixfmt.h:495
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
rgb2yuv_fn rgb2yuv
int16_t yuv_offset[2][8]
static enum AVColorPrimaries default_prm[CS_NB+1]
Definition: vf_colorspace.c:86
AVOption.
Definition: opt.h:248
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
"Linear transfer characteristics"
Definition: pixfmt.h:492
double yuv2rgb_dbl_coeffs[3][3]
#define ma
const char * desc
Definition: libsvtav1.c:79
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
int * dither_scratch[3][2]
Main libavfilter public API header.
enum AVColorTransferCharacteristic in_trc out_trc user_trc user_itrc
static const AVOption colorspace_options[]
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:518
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:280
SMPTE ST 432-1 (2010) / P3 D65 / Display P3.
Definition: pixfmt.h:473
int16_t yuv2rgb_coeffs[3][3][8]
ptrdiff_t in_linesize[3]
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:519
static int get_range_off(AVFilterContext *ctx, int *off, int *y_rng, int *uv_rng, enum AVColorRange rng, int depth)
SMPTE ST 431-2 (2011) / DCI P3.
Definition: pixfmt.h:472
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:403
static void fn() yuv2yuv(uint8_t *_dst[3], const ptrdiff_t dst_stride[3], uint8_t *_src[3], const ptrdiff_t src_stride[3], int w, int h, const int16_t c[3][3][8], const int16_t yuv_offset[2][8])
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:513
enum DitherMode dither
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
AVColorTransferCharacteristic
Color Transfer Characteristic.
Definition: pixfmt.h:483
functionally identical to above
Definition: pixfmt.h:520
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:2966
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:287
static const struct ColorPrimaries * get_color_primaries(enum AVColorPrimaries prm)
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:126
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:349
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1091
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
void(* multiply3x3)(int16_t *data[3], ptrdiff_t stride, int w, int h, const int16_t m[3][3][8])
Definition: colorspacedsp.h:74
uint8_t
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
float delta
AVOptions.
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:512
static av_cold int init(AVFilterContext *ctx)
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:2899
AVFilter ff_vf_colorspace
enum Colorspace user_all user_iall
Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16.
Definition: pixfmt.h:521
enum Whitepoint wp
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:488
static void uninit(AVFilterContext *ctx)
yuv2rgb_fn yuv2rgb
const struct ColorPrimaries * out_primaries
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:112
Colorspace
Definition: vf_colorspace.c:44
ptrdiff_t out_linesize[3]
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
Not part of ABI.
Definition: pixfmt.h:587
AVColorRange
Visual content value range.
Definition: pixfmt.h:551
ColorSpaceDSPContext dsp
const struct LumaCoefficients * ff_get_luma_coefficients(enum AVColorSpace csp)
Definition: colorspace.c:128
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:404
const struct ColorPrimaries * in_primaries
AVColorPrimaries
Chromaticity coordinates of the source primaries.
Definition: pixfmt.h:458
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
A filter pad used for either input or output.
Definition: internal.h:54
enum AVColorSpace in_csp out_csp user_csp user_icsp
#define src
Definition: vp8dsp.c:254
ptrdiff_t rgb_stride
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:463
int width
Definition: frame.h:372
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:588
#define td
Definition: regdef.h:70
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
double rgb2yuv_dbl_coeffs[3][3]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
void(* yuv2yuv_fn)(uint8_t *yuv_out[3], const ptrdiff_t yuv_out_stride[3], uint8_t *yuv_in[3], const ptrdiff_t yuv_in_stride[3], int w, int h, const int16_t yuv2yuv_coeffs[3][3][8], const int16_t yuv_offset[2][8])
Definition: colorspacedsp.h:40
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
const struct LumaCoefficients * out_lumacoef
static const uint8_t dither[8][8]
Definition: vf_fspp.c:57
void * priv
private data for use by the filter
Definition: avfilter.h:356
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:558
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:445
Not part of ABI.
Definition: pixfmt.h:476
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:569
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
Definition: pixfmt.h:460
simple assert() macros that are a bit more flexible than ISO C assert().
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:333
SMPTE ST 428-1 (CIE 1931 XYZ)
Definition: pixfmt.h:470
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:402
uint8_t bits
Definition: vp3data.h:202
static int create_filtergraph(AVFilterContext *ctx, const AVFrame *in, const AVFrame *out)
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:799
static const AVFilterPad inputs[]
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:2918
#define supported_format(d)
static void apply_lut(int16_t *buf[3], ptrdiff_t stride, int w, int h, const int16_t *lut)
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
struct PrimaryCoefficients coeff
#define ENUM(x, y, z)
static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB]
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:800
#define FFMIN(a, b)
Definition: common.h:96
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
colour filters using Illuminant C
Definition: pixfmt.h:468
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:467
uint8_t w
Definition: llviddspenc.c:38
static enum AVColorSpace default_csp[CS_NB+1]
Definition: vf_colorspace.c:99
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:523
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:465
static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB]
AVFormatContext * ctx
Definition: movenc.c:48
static const AVFilterPad outputs[]
int16_t * rgb[3]
int16_t lrgb2lrgb_coeffs[3][3][8]
#define s(width, name)
Definition: cbs_vp9.c:257
#define FLAGS
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:517
Full range content.
Definition: pixfmt.h:586
planar GBR 4:4:4:4 48bpp, little-endian
Definition: pixfmt.h:288
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
enum AVColorPrimaries in_prm out_prm user_prm user_iprm
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:387
also ITU-R BT1361
Definition: pixfmt.h:485
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt, enum Whitepoint src, enum Whitepoint dst)
void ff_fill_rgb2yuv_table(const struct LumaCoefficients *coeffs, double rgb2yuv[3][3])
Definition: colorspace.c:141
also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
Definition: pixfmt.h:490
static int query_formats(AVFilterContext *ctx)
int16_t yuv2yuv_coeffs[3][3][8]
functionally identical to above
Definition: pixfmt.h:467
Used for passing data between threads.
Definition: dsddec.c:67
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
static const int16_t alpha[]
Definition: ilbcdata.h:55
rgb2yuv_fsb_fn rgb2yuv_fsb
WhitepointAdaptation
Definition: vf_colorspace.c:65
yuv2yuv_fn yuv2yuv[NB_BPP][NB_BPP][NB_SS]
Definition: colorspacedsp.h:70
static int fill_gamma_table(ColorSpaceContext *s)
Whitepoint
Definition: vf_colorspace.c:57
rgb2yuv_fn rgb2yuv[NB_BPP][NB_SS]
Definition: colorspacedsp.h:65
int * dither_scratch_base[3][2]
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:399
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:145
yuv2rgb_fn yuv2rgb[NB_BPP][NB_SS]
Definition: colorspacedsp.h:62
Not part of ABI.
Definition: pixfmt.h:505
const struct LumaCoefficients * in_lumacoef
void ff_matrix_invert_3x3(const double in[3][3], double out[3][3])
Definition: colorspace.c:27
const char * name
Filter name.
Definition: avfilter.h:149
void(* rgb2yuv_fsb_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8], int *rnd[3][2])
Definition: colorspacedsp.h:35
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
static enum AVColorTransferCharacteristic default_trc[CS_NB+1]
Definition: vf_colorspace.c:73
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:353
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:300
#define flags(name, subs,...)
Definition: cbs_av1.c:560
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:381
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:406
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Narrow or limited range content.
Definition: pixfmt.h:569
void(* rgb2yuv_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:31
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
IEC 61966-2-1 (sRGB or sYCC)
Definition: pixfmt.h:497
enum WhitepointAdaptation wp_adapt
enum AVColorRange in_rng out_rng user_rng user_irng
void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp)
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2942
also ITU-R BT470BG
Definition: pixfmt.h:489
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
void(* yuv2rgb_fn)(int16_t *rgb[3], ptrdiff_t rgb_stride, uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int w, int h, const int16_t yuv2rgb_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:27
enum AVPixelFormat in_format user_format
static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
avfilter_execute_func * execute
Definition: internal.h:136
static const struct TransferCharacteristics * get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
int16_t rgb2yuv_coeffs[3][3][8]
pixel format definitions
void ff_matrix_mul_3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
Definition: colorspace.c:54
const struct TransferCharacteristics * in_txchr
const struct TransferCharacteristics * out_txchr
A list of supported formats for one end of a filter link.
Definition: formats.h:65
#define lrint
Definition: tablegen.h:53
enum AVColorPrimaries color_primaries
Definition: frame.h:560
An instance of a filter.
Definition: avfilter.h:341
AVFILTER_DEFINE_CLASS(colorspace)
ITU-R BT2020 for 10-bit system.
Definition: pixfmt.h:498
static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB]
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:466
ITU-R BT2020.
Definition: pixfmt.h:469
int height
Definition: frame.h:372
FILE * out
Definition: movenc.c:54
#define av_freep(p)
RGB2YUV_SHIFT RGB2YUV_SHIFT RGB2YUV_SHIFT RGB2YUV_SHIFT RGB2YUV_SHIFT RGB2YUV_SHIFT RGB2YUV_SHIFT RGB2YUV_SHIFT uint8_t const uint8_t const uint8_t const uint8_t int uint32_t * rgb2yuv
Definition: input.c:401
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:562
AVFrame * in
Definition: af_adenorm.c:223
formats
Definition: signature.h:48
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
#define stride
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int depth
Number of bits in the component.
Definition: pixdesc.h:58
static int filter_frame(AVFilterLink *link, AVFrame *in)
static int config_props(AVFilterLink *outlink)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
Definition: rpzaenc.c:58
rgb2yuv_fsb_fn rgb2yuv_fsb[NB_BPP][NB_SS]
Definition: colorspacedsp.h:67
Not part of ABI.
Definition: pixfmt.h:529
DitherMode
Definition: vf_colorspace.c:38
yuv2yuv_fn yuv2yuv
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
void ff_fill_rgb2xyz_table(const struct PrimaryCoefficients *coeffs, const struct WhitepointCoefficients *wp, double rgb2xyz[3][3])
Definition: colorspace.c:68
#define OFFSET(x)
static uint8_t tmp[11]
Definition: aes_ctr.c:26