vf_colorspace.c

/*
 * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Convert between colorspaces.
 */

#include "libavutil/avassert.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"

#include "avfilter.h"
#include "colorspacedsp.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "colorspace.h"

enum DitherMode {
    DITHER_NONE,
    DITHER_FSB,
    DITHER_NB,
};

enum Colorspace {
    CS_UNSPECIFIED,
    CS_BT470M,
    CS_BT470BG,
    CS_BT601_6_525,
    CS_BT601_6_625,
    CS_BT709,
    CS_SMPTE170M,
    CS_SMPTE240M,
    CS_BT2020,
    CS_NB,
};

enum Whitepoint {
    WP_D65,
    WP_C,
    WP_DCI,
    WP_E,
    WP_NB,
};

enum WhitepointAdaptation {
    WP_ADAPT_BRADFORD,
    WP_ADAPT_VON_KRIES,
    NB_WP_ADAPT_NON_IDENTITY,
    WP_ADAPT_IDENTITY = NB_WP_ADAPT_NON_IDENTITY,
    NB_WP_ADAPT,
};

static const enum AVColorTransferCharacteristic default_trc[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_TRC_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_TRC_GAMMA22,
    [CS_BT470BG]     = AVCOL_TRC_GAMMA28,
    [CS_BT601_6_525] = AVCOL_TRC_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_TRC_SMPTE170M,
    [CS_BT709]       = AVCOL_TRC_BT709,
    [CS_SMPTE170M]   = AVCOL_TRC_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_TRC_SMPTE240M,
    [CS_BT2020]      = AVCOL_TRC_BT2020_10,
    [CS_NB]          = AVCOL_TRC_UNSPECIFIED,
};

static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_PRI_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_PRI_BT470M,
    [CS_BT470BG]     = AVCOL_PRI_BT470BG,
    [CS_BT601_6_525] = AVCOL_PRI_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_PRI_BT470BG,
    [CS_BT709]       = AVCOL_PRI_BT709,
    [CS_SMPTE170M]   = AVCOL_PRI_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_PRI_SMPTE240M,
    [CS_BT2020]      = AVCOL_PRI_BT2020,
    [CS_NB]          = AVCOL_PRI_UNSPECIFIED,
};

static const enum AVColorSpace default_csp[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_SPC_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_SPC_SMPTE170M,
    [CS_BT470BG]     = AVCOL_SPC_BT470BG,
    [CS_BT601_6_525] = AVCOL_SPC_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_SPC_BT470BG,
    [CS_BT709]       = AVCOL_SPC_BT709,
    [CS_SMPTE170M]   = AVCOL_SPC_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_SPC_SMPTE240M,
    [CS_BT2020]      = AVCOL_SPC_BT2020_NCL,
    [CS_NB]          = AVCOL_SPC_UNSPECIFIED,
};

struct ColorPrimaries {
    enum Whitepoint wp;
    struct PrimaryCoefficients coeff;
};

struct TransferCharacteristics {
    double alpha, beta, gamma, delta;
};

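/*
 * The four parameters describe the standard piecewise transfer function used
 * below in fill_gamma_table(): for linear v >= beta, the encoded value is
 * alpha * v^gamma - (alpha - 1); below beta, a linear segment delta * v is
 * used. BT.709, for example, is 1.099 * v^0.45 - 0.099 above v = 0.018 and
 * 4.5 * v below it.
 */
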
typedef struct ColorSpaceContext {
    const AVClass *class;

    ColorSpaceDSPContext dsp;

    enum Colorspace user_all, user_iall;
    enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
    enum AVColorRange in_rng, out_rng, user_rng, user_irng;
    enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
    enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
    enum AVPixelFormat in_format, user_format;
    int fast_mode;
    enum DitherMode dither;
    enum WhitepointAdaptation wp_adapt;

    int16_t *rgb[3];
    ptrdiff_t rgb_stride;
    unsigned rgb_sz;
    int *dither_scratch[3][2], *dither_scratch_base[3][2];

    const struct ColorPrimaries *in_primaries, *out_primaries;
    int lrgb2lrgb_passthrough;
    DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];

    const struct TransferCharacteristics *in_txchr, *out_txchr;
    int rgb2rgb_passthrough;
    int16_t *lin_lut, *delin_lut;

    const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
    int yuv2yuv_passthrough, yuv2yuv_fastmode;
    DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];

    yuv2rgb_fn yuv2rgb;
    rgb2yuv_fn rgb2yuv;
    rgb2yuv_fsb_fn rgb2yuv_fsb;
    yuv2yuv_fn yuv2yuv;

    double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
    int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;

    int did_warn_range;
} ColorSpaceContext;

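/*
 * Every fixed-point coefficient is stored eight times ([3][3][8], 16-byte
 * aligned) so the SIMD DSP routines can load a full vector of identical
 * coefficients directly instead of broadcasting one value at runtime; the
 * replication is done in the coefficient-setup loops below.
 */
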
// FIXME deal with odd width/heights
// FIXME faster linearize/delinearize implementation (integer pow)
// FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
// FIXME test that the values in (de)lin_lut don't exceed their container storage
// type size (only useful if we keep the LUT and don't move to fast integer pow)
// FIXME dithering if bitdepth goes down?
// FIXME bitexact for fate integration?

// FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
// find any actual tables that document their real values...
// See the first graph at http://www.13thmonkey.org/~boris/gammacorrection/
// for why it matters.
static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
    [AVCOL_TRC_BT709]        = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_GAMMA22]      = { 1.0,    0.0,       1.0 / 2.2, 0.0 },
    [AVCOL_TRC_GAMMA28]      = { 1.0,    0.0,       1.0 / 2.8, 0.0 },
    [AVCOL_TRC_SMPTE170M]    = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_SMPTE240M]    = { 1.1115, 0.0228,    0.45,      4.0 },
    [AVCOL_TRC_LINEAR]       = { 1.0,    0.0,       1.0,       0.0 },
    [AVCOL_TRC_IEC61966_2_1] = { 1.055,  0.0031308, 1.0 / 2.4, 12.92 },
    [AVCOL_TRC_IEC61966_2_4] = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_BT2020_10]    = { 1.099,  0.018,     0.45,      4.5 },
    [AVCOL_TRC_BT2020_12]    = { 1.0993, 0.0181,    0.45,      4.5 },
};

static const struct TransferCharacteristics *
get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
{
    const struct TransferCharacteristics *coeffs;

    if (trc >= AVCOL_TRC_NB)
        return NULL;
    coeffs = &transfer_characteristics[trc];
    if (!coeffs->alpha)
        return NULL;

    return coeffs;
}

static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB] = {
    [WP_D65] = { 0.3127, 0.3290 },
    [WP_C]   = { 0.3100, 0.3160 },
    [WP_DCI] = { 0.3140, 0.3510 },
    [WP_E]   = { 1/3.0f, 1/3.0f },
};

static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB] = {
    [AVCOL_PRI_BT709]     = { WP_D65, { 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 } },
    [AVCOL_PRI_BT470M]    = { WP_C,   { 0.670, 0.330, 0.210, 0.710, 0.140, 0.080 } },
    [AVCOL_PRI_BT470BG]   = { WP_D65, { 0.640, 0.330, 0.290, 0.600, 0.150, 0.060 } },
    [AVCOL_PRI_SMPTE170M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
    [AVCOL_PRI_SMPTE240M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
    [AVCOL_PRI_SMPTE428]  = { WP_E,   { 0.735, 0.265, 0.274, 0.718, 0.167, 0.009 } },
    [AVCOL_PRI_SMPTE431]  = { WP_DCI, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
    [AVCOL_PRI_SMPTE432]  = { WP_D65, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
    [AVCOL_PRI_FILM]      = { WP_C,   { 0.681, 0.319, 0.243, 0.692, 0.145, 0.049 } },
    [AVCOL_PRI_BT2020]    = { WP_D65, { 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 } },
    [AVCOL_PRI_JEDEC_P22] = { WP_D65, { 0.630, 0.340, 0.295, 0.605, 0.155, 0.077 } },
};

static const struct ColorPrimaries *get_color_primaries(enum AVColorPrimaries prm)
{
    const struct ColorPrimaries *p;

    if (prm >= AVCOL_PRI_NB)
        return NULL;
    p = &color_primaries[prm];
    if (!p->coeff.xr)
        return NULL;

    return p;
}

static int fill_gamma_table(ColorSpaceContext *s)
{
    int n;
    double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
    double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
    double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
    double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
    double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;

    s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
    if (!s->lin_lut)
        return AVERROR(ENOMEM);
    s->delin_lut = &s->lin_lut[32768];
    for (n = 0; n < 32768; n++) {
        double v = (n - 2048.0) / 28672.0, d, l;

        // delinearize
        if (v <= -out_beta) {
            d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
        } else if (v < out_beta) {
            d = out_delta * v;
        } else {
            d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
        }
        s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));

        // linearize
        if (v <= -in_beta * in_delta) {
            l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
        } else if (v < in_beta * in_delta) {
            l = v * in_idelta;
        } else {
            l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
        }
        s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
    }

    return 0;
}
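
/*
 * Note the LUT domain: index n covers the fixed-point samples -2048..30719,
 * so v = (n - 2048) / 28672 spans roughly [-0.071, 1.071]. This head- and
 * foot-room lets intermediate RGB values over- or undershoot [0.0, 1.0]
 * without being clipped before the final delinearization.
 */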

/*
 * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
 * This function implements the Bradford and von Kries chromatic
 * adaptation transforms.
 */
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
                                       enum Whitepoint src, enum Whitepoint dst)
{
    static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
        [WP_ADAPT_BRADFORD] = {
            {  0.8951,  0.2664, -0.1614 },
            { -0.7502,  1.7135,  0.0367 },
            {  0.0389, -0.0685,  1.0296 },
        }, [WP_ADAPT_VON_KRIES] = {
            {  0.40024,  0.70760, -0.08081 },
            { -0.22630,  1.16532,  0.04570 },
            {  0.00000,  0.00000,  0.91822 },
        },
    };
    const double (*ma)[3] = ma_tbl[wp_adapt];
    const struct WhitepointCoefficients *wp_src = &whitepoint_coefficients[src];
    double zw_src = 1.0 - wp_src->xw - wp_src->yw;
    const struct WhitepointCoefficients *wp_dst = &whitepoint_coefficients[dst];
    double zw_dst = 1.0 - wp_dst->xw - wp_dst->yw;
    double mai[3][3], fac[3][3], tmp[3][3];
    double rs, gs, bs, rd, gd, bd;

    ff_matrix_invert_3x3(ma, mai);
    rs = ma[0][0] * wp_src->xw + ma[0][1] * wp_src->yw + ma[0][2] * zw_src;
    gs = ma[1][0] * wp_src->xw + ma[1][1] * wp_src->yw + ma[1][2] * zw_src;
    bs = ma[2][0] * wp_src->xw + ma[2][1] * wp_src->yw + ma[2][2] * zw_src;
    rd = ma[0][0] * wp_dst->xw + ma[0][1] * wp_dst->yw + ma[0][2] * zw_dst;
    gd = ma[1][0] * wp_dst->xw + ma[1][1] * wp_dst->yw + ma[1][2] * zw_dst;
    bd = ma[2][0] * wp_dst->xw + ma[2][1] * wp_dst->yw + ma[2][2] * zw_dst;
    fac[0][0] = rd / rs;
    fac[1][1] = gd / gs;
    fac[2][2] = bd / bs;
    fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
    ff_matrix_mul_3x3(tmp, ma, fac);
    ff_matrix_mul_3x3(out, tmp, mai);
}
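
/*
 * The product computed above is ma * diag(rd/rs, gd/gs, bd/bs) * ma^-1:
 * source and destination whitepoints are projected into the adaptation
 * (cone response) space, scaled channel by channel, and projected back.
 */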

static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
                      int w, int h, const int16_t *lut)
{
    int y, x, n;

    for (n = 0; n < 3; n++) {
        int16_t *data = buf[n];

        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++)
                data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];

            data += stride;
        }
    }
}

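/*
 * The LUT index is the sample biased by 2048 and clipped to 15 bits, so the
 * representable range [-2048, 30719] maps onto [0, 32767], matching the
 * domain the tables were built with in fill_gamma_table().
 */
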
typedef struct ThreadData {
    AVFrame *in, *out;
    ptrdiff_t in_linesize[3], out_linesize[3];
    int in_ss_h, out_ss_h;
} ThreadData;

static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
{
    const ThreadData *td = data;
    ColorSpaceContext *s = ctx->priv;
    uint8_t *in_data[3], *out_data[3];
    int16_t *rgb[3];
    int h_in = (td->in->height + 1) >> 1;
    int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
    int w = td->in->width, h = h2 - h1;

    in_data[0]  = td->in->data[0]  + td->in_linesize[0]  * h1;
    in_data[1]  = td->in->data[1]  + td->in_linesize[1]  * (h1 >> td->in_ss_h);
    in_data[2]  = td->in->data[2]  + td->in_linesize[2]  * (h1 >> td->in_ss_h);
    out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
    out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
    out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
    rgb[0]      = s->rgb[0] + s->rgb_stride * h1;
    rgb[1]      = s->rgb[1] + s->rgb_stride * h1;
    rgb[2]      = s->rgb[2] + s->rgb_stride * h1;

    // FIXME for simd, also make sure we do pictures with negative stride
    // top-down so we don't overwrite lines with padding of data before it
    // in the same buffer (same as swscale)

    if (s->yuv2yuv_fastmode) {
        // FIXME possibly use a fast mode in case only the y range changes?
        // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
        // are non-zero
        s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
                   s->yuv2yuv_coeffs, s->yuv_offset);
    } else {
        // FIXME maybe (for caching efficiency) do pipeline per-line instead of
        // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
        // 2 lines, for yuv420.)
        /*
         * General design:
         * - yuv2rgb converts from whatever range the input was ([16-235/240] or
         *   [0,255] or the 10/12bpp equivalents thereof) to an integer version
         *   of RGB in pseudo-restricted 15+sign bits. That means that the float
         *   range [0.0,1.0] is in [0,28672], and the remainder of the int16_t
         *   range is used for overflow/underflow outside the representable
         *   range of this RGB type. rgb2yuv is the exact opposite.
         * - gamma correction is done using a LUT since that appears to work
         *   fairly fast.
         * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
         *   (or rgb2yuv conversion) uses nearest-neighbour sampling to read
         *   chroma pixels at luma resolution. If you want a fancier chroma
         *   filter, you can use swscale to convert to yuv444p first.
         * - all coefficients are 14-bit (so in the [-2.0,2.0] range).
         */
        s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
                   s->yuv2rgb_coeffs, s->yuv_offset[0]);
        if (!s->rgb2rgb_passthrough) {
            apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
            if (!s->lrgb2lrgb_passthrough)
                s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
            apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
        }
        if (s->dither == DITHER_FSB) {
            s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
                           s->rgb2yuv_coeffs, s->yuv_offset[1], s->dither_scratch);
        } else {
            s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
                       s->rgb2yuv_coeffs, s->yuv_offset[1]);
        }
    }

    return 0;
}
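
/*
 * Jobs are split on even luma-line boundaries (h1 and h2 are always even),
 * so every slice owns complete chroma lines even for 4:2:0 input and no two
 * threads ever write the same chroma row.
 */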

static int get_range_off(AVFilterContext *ctx, int *off,
                         int *y_rng, int *uv_rng,
                         enum AVColorRange rng, int depth)
{
    switch (rng) {
    case AVCOL_RANGE_UNSPECIFIED: {
        ColorSpaceContext *s = ctx->priv;

        if (!s->did_warn_range) {
            av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
            s->did_warn_range = 1;
        }
    }
        // fall-through
    case AVCOL_RANGE_MPEG:
        *off = 16 << (depth - 8);
        *y_rng = 219 << (depth - 8);
        *uv_rng = 224 << (depth - 8);
        break;
    case AVCOL_RANGE_JPEG:
        *off = 0;
        *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
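
/*
 * For example, at 10 bits tv/mpeg range yields off = 64, y_rng = 876 and
 * uv_rng = 896 (luma codes 64..940), while pc/jpeg range yields off = 0 and
 * y_rng = uv_rng = 1023.
 */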

static int create_filtergraph(AVFilterContext *ctx,
                              const AVFrame *in, const AVFrame *out)
{
    ColorSpaceContext *s = ctx->priv;
    const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
    int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;

#define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
#define supported_subsampling(lcw, lch) \
    (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
#define supported_format(d) \
    ((d) != NULL && (d)->nb_components == 3 && \
     !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
     supported_depth((d)->comp[0].depth) && \
     supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))

    if (!supported_format(in_desc)) {
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported input format %d (%s) or bitdepth (%d)\n",
               in->format, av_get_pix_fmt_name(in->format),
               in_desc ? in_desc->comp[0].depth : -1);
        return AVERROR(EINVAL);
    }
    if (!supported_format(out_desc)) {
        av_log(ctx, AV_LOG_ERROR,
               "Unsupported output format %d (%s) or bitdepth (%d)\n",
               out->format, av_get_pix_fmt_name(out->format),
               out_desc ? out_desc->comp[0].depth : -1);
        return AVERROR(EINVAL);
    }

    if (in->color_primaries  != s->in_prm)  s->in_primaries  = NULL;
    if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
    if (in->color_trc  != s->in_trc)  s->in_txchr  = NULL;
    if (out->color_trc != s->out_trc) s->out_txchr = NULL;
    if (in->colorspace  != s->in_csp ||
        in->color_range != s->in_rng)  s->in_lumacoef  = NULL;
    if (out->colorspace  != s->out_csp ||
        out->color_range != s->out_rng) s->out_lumacoef = NULL;

    if (!s->out_primaries || !s->in_primaries) {
        s->in_prm = in->color_primaries;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
        else if (s->user_iprm != AVCOL_PRI_UNSPECIFIED)
            s->in_prm = s->user_iprm;
        s->in_primaries = get_color_primaries(s->in_prm);
        if (!s->in_primaries) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input primaries %d (%s)\n",
                   s->in_prm, av_color_primaries_name(s->in_prm));
            return AVERROR(EINVAL);
        }
        s->out_prm = out->color_primaries;
        s->out_primaries = get_color_primaries(s->out_prm);
        if (!s->out_primaries) {
            if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
                if (s->user_all == CS_UNSPECIFIED) {
                    av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
                } else {
                    av_log(ctx, AV_LOG_ERROR,
                           "Unsupported output color property %d\n", s->user_all);
                }
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output primaries %d (%s)\n",
                       s->out_prm, av_color_primaries_name(s->out_prm));
            }
            return AVERROR(EINVAL);
        }
        s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
                                           sizeof(*s->in_primaries));
        if (!s->lrgb2lrgb_passthrough) {
            double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
            const struct WhitepointCoefficients *wp_out, *wp_in;

            wp_out = &whitepoint_coefficients[s->out_primaries->wp];
            wp_in = &whitepoint_coefficients[s->in_primaries->wp];
            ff_fill_rgb2xyz_table(&s->out_primaries->coeff, wp_out, rgb2xyz);
            ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
            ff_fill_rgb2xyz_table(&s->in_primaries->coeff, wp_in, rgb2xyz);
            if (s->out_primaries->wp != s->in_primaries->wp &&
                s->wp_adapt != WP_ADAPT_IDENTITY) {
                double wpconv[3][3], tmp[3][3];

                fill_whitepoint_conv_table(wpconv, s->wp_adapt, s->in_primaries->wp,
                                           s->out_primaries->wp);
                ff_matrix_mul_3x3(tmp, rgb2xyz, wpconv);
                ff_matrix_mul_3x3(rgb2rgb, tmp, xyz2rgb);
            } else {
                ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
            }
            for (m = 0; m < 3; m++)
                for (n = 0; n < 3; n++) {
                    s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
                    for (o = 1; o < 8; o++)
                        s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
                }

            emms = 1;
        }
    }

    if (!s->in_txchr) {
        av_freep(&s->lin_lut);
        s->in_trc = in->color_trc;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
        else if (s->user_itrc != AVCOL_TRC_UNSPECIFIED)
            s->in_trc = s->user_itrc;
        s->in_txchr = get_transfer_characteristics(s->in_trc);
        if (!s->in_txchr) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input transfer characteristics %d (%s)\n",
                   s->in_trc, av_color_transfer_name(s->in_trc));
            return AVERROR(EINVAL);
        }
    }

    if (!s->out_txchr) {
        av_freep(&s->lin_lut);
        s->out_trc = out->color_trc;
        s->out_txchr = get_transfer_characteristics(s->out_trc);
        if (!s->out_txchr) {
            if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
                if (s->user_all == CS_UNSPECIFIED) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Please specify output transfer characteristics\n");
                } else {
                    av_log(ctx, AV_LOG_ERROR,
                           "Unsupported output color property %d\n", s->user_all);
                }
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output transfer characteristics %d (%s)\n",
                       s->out_trc, av_color_transfer_name(s->out_trc));
            }
            return AVERROR(EINVAL);
        }
    }

    s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
                             !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
    if (!s->rgb2rgb_passthrough && !s->lin_lut) {
        res = fill_gamma_table(s);
        if (res < 0)
            return res;
        emms = 1;
    }

    if (!s->in_lumacoef) {
        s->in_csp = in->colorspace;
        if (s->user_iall != CS_UNSPECIFIED)
            s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
        else if (s->user_icsp != AVCOL_SPC_UNSPECIFIED)
            s->in_csp = s->user_icsp;
        s->in_rng = in->color_range;
        if (s->user_irng != AVCOL_RANGE_UNSPECIFIED)
            s->in_rng = s->user_irng;
        s->in_lumacoef = ff_get_luma_coefficients(s->in_csp);
        if (!s->in_lumacoef) {
            av_log(ctx, AV_LOG_ERROR,
                   "Unsupported input colorspace %d (%s)\n",
                   s->in_csp, av_color_space_name(s->in_csp));
            return AVERROR(EINVAL);
        }
        redo_yuv2rgb = 1;
    }

    if (!s->out_lumacoef) {
        s->out_csp = out->colorspace;
        s->out_rng = out->color_range;
        s->out_lumacoef = ff_get_luma_coefficients(s->out_csp);
        if (!s->out_lumacoef) {
            if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
                if (s->user_all == CS_UNSPECIFIED) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Please specify output colorspace\n");
                } else {
                    av_log(ctx, AV_LOG_ERROR,
                           "Unsupported output color property %d\n", s->user_all);
                }
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output colorspace %d (%s)\n",
                       s->out_csp, av_color_space_name(s->out_csp));
            }
            return AVERROR(EINVAL);
        }
        redo_rgb2yuv = 1;
    }

    fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
                    in_desc->log2_chroma_w == out_desc->log2_chroma_w;
    s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
    s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
                             !memcmp(s->in_lumacoef, s->out_lumacoef,
                                     sizeof(*s->in_lumacoef)) &&
                             in_desc->comp[0].depth == out_desc->comp[0].depth;
    if (!s->yuv2yuv_passthrough) {
        if (redo_yuv2rgb) {
            double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
            int off, bits, in_rng;

            res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
                                s->in_rng, in_desc->comp[0].depth);
            if (res < 0) {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported input color range %d (%s)\n",
                       s->in_rng, av_color_range_name(s->in_rng));
                return res;
            }
            for (n = 0; n < 8; n++)
                s->yuv_offset[0][n] = off;
            ff_fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
            ff_matrix_invert_3x3(rgb2yuv, yuv2rgb);
            bits = 1 << (in_desc->comp[0].depth - 1);
            for (n = 0; n < 3; n++) {
                for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
                    s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
                    for (o = 1; o < 8; o++)
                        s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
                }
            }
            av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
            av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
            av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
            av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
            s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
                                       [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
            emms = 1;
        }

        if (redo_rgb2yuv) {
            double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
            int off, out_rng, bits;

            res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
                                s->out_rng, out_desc->comp[0].depth);
            if (res < 0) {
                av_log(ctx, AV_LOG_ERROR,
                       "Unsupported output color range %d (%s)\n",
                       s->out_rng, av_color_range_name(s->out_rng));
                return res;
            }
            for (n = 0; n < 8; n++)
                s->yuv_offset[1][n] = off;
            ff_fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
            bits = 1 << (29 - out_desc->comp[0].depth);
            for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
                for (m = 0; m < 3; m++) {
                    s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
                    for (o = 1; o < 8; o++)
                        s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
                }
            }
            av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
            s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
                                       [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
            s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
                                               [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
            emms = 1;
        }

        if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
            int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
            double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
            double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
            double yuv2yuv[3][3];
            int in_rng, out_rng;

            ff_matrix_mul_3x3(yuv2yuv, yuv2rgb, rgb2yuv);
            for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
                for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
                    s->yuv2yuv_coeffs[m][n][0] =
                        lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
                              (in_rng * (1 << odepth)));
                    for (o = 1; o < 8; o++)
                        s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
                }
            }
            av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
            av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
            s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
                                       [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
        }
    }

    if (emms)
        emms_c();

    return 0;
}
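
/*
 * Scaling example: for 8-bit tv-range input the luma column of yuv2rgb is
 * 1.0, so the stored coefficient is lrint(28672 * 128 * 1.0 / 219) = 16758;
 * dividing by the 219-code luma swing stretches the input onto the internal
 * [0, 28672] RGB scale, and the factor 128 (2^(depth-1)) provides the
 * fixed-point headroom used by the DSP multiply.
 */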

static av_cold int init(AVFilterContext *ctx)
{
    ColorSpaceContext *s = ctx->priv;

    ff_colorspacedsp_init(&s->dsp);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ColorSpaceContext *s = ctx->priv;

    av_freep(&s->rgb[0]);
    av_freep(&s->rgb[1]);
    av_freep(&s->rgb[2]);
    s->rgb_sz = 0;
    av_freep(&s->dither_scratch_base[0][0]);
    av_freep(&s->dither_scratch_base[0][1]);
    av_freep(&s->dither_scratch_base[1][0]);
    av_freep(&s->dither_scratch_base[1][1]);
    av_freep(&s->dither_scratch_base[2][0]);
    av_freep(&s->dither_scratch_base[2][1]);

    av_freep(&s->lin_lut);
}

static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext *ctx = link->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ColorSpaceContext *s = ctx->priv;
    // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
    // input one if it is writable *OR* the actual literal values of in_*
    // and out_* are identical (not just their respective properties)
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    int res;
    ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
    unsigned rgb_sz = rgb_stride * in->height;
    ThreadData td;

    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    res = av_frame_copy_props(out, in);
    if (res < 0) {
        av_frame_free(&in);
        av_frame_free(&out);
        return res;
    }

    out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
                           default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
    if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);

        out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
        if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
            out->color_trc = AVCOL_TRC_BT2020_12;
    } else {
        out->color_trc = s->user_trc;
    }
    out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
                      default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
    out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
                       in->color_range : s->user_rng;
    if (rgb_sz != s->rgb_sz) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(in->format);
        int uvw = in->width >> desc->log2_chroma_w;

        av_freep(&s->rgb[0]);
        av_freep(&s->rgb[1]);
        av_freep(&s->rgb[2]);
        s->rgb_sz = 0;
        av_freep(&s->dither_scratch_base[0][0]);
        av_freep(&s->dither_scratch_base[0][1]);
        av_freep(&s->dither_scratch_base[1][0]);
        av_freep(&s->dither_scratch_base[1][1]);
        av_freep(&s->dither_scratch_base[2][0]);
        av_freep(&s->dither_scratch_base[2][1]);

        s->rgb[0] = av_malloc(rgb_sz);
        s->rgb[1] = av_malloc(rgb_sz);
        s->rgb[2] = av_malloc(rgb_sz);
        s->dither_scratch_base[0][0] =
            av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
        s->dither_scratch_base[0][1] =
            av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
        s->dither_scratch_base[1][0] =
            av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
        s->dither_scratch_base[1][1] =
            av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
        s->dither_scratch_base[2][0] =
            av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
        s->dither_scratch_base[2][1] =
            av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
        s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
        s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
        s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
        s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
        s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
        s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
        if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
            !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
            !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
            !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
            uninit(ctx);
            av_frame_free(&in);
            av_frame_free(&out);
            return AVERROR(ENOMEM);
        }
        s->rgb_sz = rgb_sz;
    }
    res = create_filtergraph(ctx, in, out);
    if (res < 0) {
        av_frame_free(&in);
        av_frame_free(&out);
        return res;
    }
    s->rgb_stride = rgb_stride / sizeof(int16_t);
    td.in = in;
    td.out = out;
    td.in_linesize[0] = in->linesize[0];
    td.in_linesize[1] = in->linesize[1];
    td.in_linesize[2] = in->linesize[2];
    td.out_linesize[0] = out->linesize[0];
    td.out_linesize[1] = out->linesize[1];
    td.out_linesize[2] = out->linesize[2];
    td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h;
    td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h;
    if (s->yuv2yuv_passthrough) {
        res = av_frame_copy(out, in);
        if (res < 0) {
            av_frame_free(&in);
            av_frame_free(&out);
            return res;
        }
    } else {
        ctx->internal->execute(ctx, convert, &td, NULL,
                               FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
    }
    av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_NONE
    };
    int res;
    ColorSpaceContext *s = ctx->priv;
    AVFilterFormats *formats = ff_make_format_list(pix_fmts);

    if (!formats)
        return AVERROR(ENOMEM);
    if (s->user_format == AV_PIX_FMT_NONE)
        return ff_set_common_formats(ctx, formats);
    res = ff_formats_ref(formats, &ctx->inputs[0]->outcfg.formats);
    if (res < 0)
        return res;
    formats = NULL;
    res = ff_add_format(&formats, s->user_format);
    if (res < 0)
        return res;

    return ff_formats_ref(formats, &ctx->outputs[0]->incfg.formats);
}
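
/*
 * Format negotiation: without a user-supplied "format" option, both links
 * share the full pix_fmts list; when a format is pinned, the input link
 * still advertises every supported format while the output link is
 * restricted to the single requested one.
 */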

static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->dst;
    AVFilterLink *inlink = outlink->src->inputs[0];

    if (inlink->w % 2 || inlink->h % 2) {
        av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
               inlink->w, inlink->h);
        return AVERROR_PATCHWELCOME;
    }

    outlink->w = inlink->w;
    outlink->h = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->time_base = inlink->time_base;

    return 0;
}

#define OFFSET(x) offsetof(ColorSpaceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
#define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }

static const AVOption colorspace_options[] = {
    { "all",        "Set all color properties together",
      OFFSET(user_all),   AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
      CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
    ENUM("bt470m",      CS_BT470M,      "all"),
    ENUM("bt470bg",     CS_BT470BG,     "all"),
    ENUM("bt601-6-525", CS_BT601_6_525, "all"),
    ENUM("bt601-6-625", CS_BT601_6_625, "all"),
    ENUM("bt709",       CS_BT709,       "all"),
    ENUM("smpte170m",   CS_SMPTE170M,   "all"),
    ENUM("smpte240m",   CS_SMPTE240M,   "all"),
    ENUM("bt2020",      CS_BT2020,      "all"),

    { "space",      "Output colorspace",
      OFFSET(user_csp),   AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
      AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, "csp" },
    ENUM("bt709",       AVCOL_SPC_BT709,      "csp"),
    ENUM("fcc",         AVCOL_SPC_FCC,        "csp"),
    ENUM("bt470bg",     AVCOL_SPC_BT470BG,    "csp"),
    ENUM("smpte170m",   AVCOL_SPC_SMPTE170M,  "csp"),
    ENUM("smpte240m",   AVCOL_SPC_SMPTE240M,  "csp"),
    ENUM("ycgco",       AVCOL_SPC_YCGCO,      "csp"),
    ENUM("gbr",         AVCOL_SPC_RGB,        "csp"),
    ENUM("bt2020nc",    AVCOL_SPC_BT2020_NCL, "csp"),
    ENUM("bt2020ncl",   AVCOL_SPC_BT2020_NCL, "csp"),

    { "range",      "Output color range",
      OFFSET(user_rng),   AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
      AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
    ENUM("tv",   AVCOL_RANGE_MPEG, "rng"),
    ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
    ENUM("pc",   AVCOL_RANGE_JPEG, "rng"),
    ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),

    { "primaries",  "Output color primaries",
      OFFSET(user_prm),   AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
      AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
    ENUM("bt709",     AVCOL_PRI_BT709,     "prm"),
    ENUM("bt470m",    AVCOL_PRI_BT470M,    "prm"),
    ENUM("bt470bg",   AVCOL_PRI_BT470BG,   "prm"),
    ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
    ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
    ENUM("smpte428",  AVCOL_PRI_SMPTE428,  "prm"),
    ENUM("film",      AVCOL_PRI_FILM,      "prm"),
    ENUM("smpte431",  AVCOL_PRI_SMPTE431,  "prm"),
    ENUM("smpte432",  AVCOL_PRI_SMPTE432,  "prm"),
    ENUM("bt2020",    AVCOL_PRI_BT2020,    "prm"),
    ENUM("jedec-p22", AVCOL_PRI_JEDEC_P22, "prm"),
    ENUM("ebu3213",   AVCOL_PRI_EBU3213,   "prm"),

    { "trc",        "Output transfer characteristics",
      OFFSET(user_trc),   AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
      AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
    ENUM("bt709",        AVCOL_TRC_BT709,        "trc"),
    ENUM("bt470m",       AVCOL_TRC_GAMMA22,      "trc"),
    ENUM("gamma22",      AVCOL_TRC_GAMMA22,      "trc"),
    ENUM("bt470bg",      AVCOL_TRC_GAMMA28,      "trc"),
    ENUM("gamma28",      AVCOL_TRC_GAMMA28,      "trc"),
    ENUM("smpte170m",    AVCOL_TRC_SMPTE170M,    "trc"),
    ENUM("smpte240m",    AVCOL_TRC_SMPTE240M,    "trc"),
    ENUM("linear",       AVCOL_TRC_LINEAR,       "trc"),
    ENUM("srgb",         AVCOL_TRC_IEC61966_2_1, "trc"),
    ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
    ENUM("xvycc",        AVCOL_TRC_IEC61966_2_4, "trc"),
    ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
    ENUM("bt2020-10",    AVCOL_TRC_BT2020_10,    "trc"),
    ENUM("bt2020-12",    AVCOL_TRC_BT2020_12,    "trc"),

    { "format",     "Output pixel format",
      OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
      AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, "fmt" },
    ENUM("yuv420p",   AV_PIX_FMT_YUV420P,   "fmt"),
    ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
    ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
    ENUM("yuv422p",   AV_PIX_FMT_YUV422P,   "fmt"),
    ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
    ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
    ENUM("yuv444p",   AV_PIX_FMT_YUV444P,   "fmt"),
    ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
    ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),

    { "fast",       "Ignore primary chromaticity and gamma correction",
      OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
      0, 1, FLAGS },

    { "dither",     "Dithering mode",
      OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
      DITHER_NONE, DITHER_NB - 1, FLAGS, "dither" },
    ENUM("none", DITHER_NONE, "dither"),
    ENUM("fsb",  DITHER_FSB,  "dither"),

    { "wpadapt",    "Whitepoint adaptation method",
      OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
      WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, "wpadapt" },
    ENUM("bradford", WP_ADAPT_BRADFORD,  "wpadapt"),
    ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
    ENUM("identity", WP_ADAPT_IDENTITY,  "wpadapt"),

    { "iall",       "Set all input color properties together",
      OFFSET(user_iall),  AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
      CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
    { "ispace",     "Input colorspace",
      OFFSET(user_icsp),  AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
      AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, "csp" },
    { "irange",     "Input color range",
      OFFSET(user_irng),  AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
      AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
    { "iprimaries", "Input color primaries",
      OFFSET(user_iprm),  AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
      AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
    { "itrc",       "Input transfer characteristics",
      OFFSET(user_itrc),  AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
      AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },

    { NULL }
};

AVFILTER_DEFINE_CLASS(colorspace);

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
    { NULL }
};

AVFilter ff_vf_colorspace = {
    .name          = "colorspace",
    .description   = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ColorSpaceContext),
    .priv_class    = &colorspace_class,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
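
/*
 * Typical usage, e.g. remapping BT.601-625 material to BT.709:
 *
 *     ffmpeg -i in.mkv -vf colorspace=all=bt709:format=yuv420p out.mkv
 *
 * Input properties are normally taken from the incoming frames; the iall,
 * ispace, irange, iprimaries and itrc options override them when the source
 * is untagged or tagged incorrectly.
 */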