vf_colorcorrect.c
/*
 * Copyright (c) 2021 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
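
/**
 * @file
 * colorcorrect video filter: adjust color white balance selectively for
 * blacks (shadows, rl/bl) and whites (highlights, rh/bh), with the offsets
 * either set manually or measured from each frame (average/minmax/median).
 */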

#include <float.h>

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"

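/* How the chroma offsets are obtained: taken directly from the rl/bl/rh/bh
 * options, or measured per frame by one of the analysis passes below. */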
typedef enum AnalyzeMode {
    MANUAL,
    AVERAGE,
    MINMAX,
    MEDIAN,
    NB_ANALYZE
} AnalyzeMode;

typedef struct ColorCorrectContext {
    const AVClass *class;

    float rl, bl;
    float rh, bh;
    float saturation;
    int analyze;

    int depth;
    float max, imax;

    int chroma_w, chroma_h;
    int planeheight[4];
    int planewidth[4];

    unsigned *uhistogram;
    unsigned *vhistogram;

    float (*analyzeret)[4];

    int (*do_analyze)(AVFilterContext *s, void *arg,
                      int jobnr, int nb_jobs);
    int (*do_slice)(AVFilterContext *s, void *arg,
                    int jobnr, int nb_jobs);
} ColorCorrectContext;

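/* AVERAGE analysis: compute the mean U and V of the slice, mapped to
 * [-0.5, 0.5], and store it as both the shadow and the highlight estimate. */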
static int average_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorCorrectContext *s = ctx->priv;
    AVFrame *frame = arg;
    const float imax = s->imax;
    const int width = s->planewidth[1];
    const int height = s->planeheight[1];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t ulinesize = frame->linesize[1];
    const ptrdiff_t vlinesize = frame->linesize[2];
    const uint8_t *uptr = (const uint8_t *)frame->data[1] + slice_start * ulinesize;
    const uint8_t *vptr = (const uint8_t *)frame->data[2] + slice_start * vlinesize;
    int sum_u = 0, sum_v = 0;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            sum_u += uptr[x];
            sum_v += vptr[x];
        }

        uptr += ulinesize;
        vptr += vlinesize;
    }

    s->analyzeret[jobnr][0] = s->analyzeret[jobnr][2] = imax * sum_u / (float)((slice_end - slice_start) * width) - 0.5f;
    s->analyzeret[jobnr][1] = s->analyzeret[jobnr][3] = imax * sum_v / (float)((slice_end - slice_start) * width) - 0.5f;

    return 0;
}

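/* 16-bit variant of average_slice8(); accumulates in 64 bits to avoid overflow. */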
static int average_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorCorrectContext *s = ctx->priv;
    AVFrame *frame = arg;
    const float imax = s->imax;
    const int width = s->planewidth[1];
    const int height = s->planeheight[1];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t ulinesize = frame->linesize[1] / 2;
    const ptrdiff_t vlinesize = frame->linesize[2] / 2;
    const uint16_t *uptr = (const uint16_t *)frame->data[1] + slice_start * ulinesize;
    const uint16_t *vptr = (const uint16_t *)frame->data[2] + slice_start * vlinesize;
    int64_t sum_u = 0, sum_v = 0;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            sum_u += uptr[x];
            sum_v += vptr[x];
        }

        uptr += ulinesize;
        vptr += vlinesize;
    }

    s->analyzeret[jobnr][0] = s->analyzeret[jobnr][2] = imax * sum_u / (float)((slice_end - slice_start) * width) - 0.5f;
    s->analyzeret[jobnr][1] = s->analyzeret[jobnr][3] = imax * sum_v / (float)((slice_end - slice_start) * width) - 0.5f;

    return 0;
}

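/* MINMAX analysis: track the minimum and maximum U and V of the slice; the
 * minima become the shadow estimate, the maxima the highlight estimate. */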
static int minmax_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorCorrectContext *s = ctx->priv;
    AVFrame *frame = arg;
    const float imax = s->imax;
    const int width = s->planewidth[1];
    const int height = s->planeheight[1];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t ulinesize = frame->linesize[1];
    const ptrdiff_t vlinesize = frame->linesize[2];
    const uint8_t *uptr = (const uint8_t *)frame->data[1] + slice_start * ulinesize;
    const uint8_t *vptr = (const uint8_t *)frame->data[2] + slice_start * vlinesize;
    int min_u = 255, min_v = 255;
    int max_u = 0, max_v = 0;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            min_u = FFMIN(min_u, uptr[x]);
            min_v = FFMIN(min_v, vptr[x]);
            max_u = FFMAX(max_u, uptr[x]);
            max_v = FFMAX(max_v, vptr[x]);
        }

        uptr += ulinesize;
        vptr += vlinesize;
    }

    s->analyzeret[jobnr][0] = imax * min_u - 0.5f;
    s->analyzeret[jobnr][1] = imax * min_v - 0.5f;
    s->analyzeret[jobnr][2] = imax * max_u - 0.5f;
    s->analyzeret[jobnr][3] = imax * max_v - 0.5f;

    return 0;
}

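/* 16-bit variant of minmax_slice8(). */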
static int minmax_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorCorrectContext *s = ctx->priv;
    AVFrame *frame = arg;
    const float imax = s->imax;
    const int width = s->planewidth[1];
    const int height = s->planeheight[1];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t ulinesize = frame->linesize[1] / 2;
    const ptrdiff_t vlinesize = frame->linesize[2] / 2;
    const uint16_t *uptr = (const uint16_t *)frame->data[1] + slice_start * ulinesize;
    const uint16_t *vptr = (const uint16_t *)frame->data[2] + slice_start * vlinesize;
    int min_u = INT_MAX, min_v = INT_MAX;
    int max_u = INT_MIN, max_v = INT_MIN;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            min_u = FFMIN(min_u, uptr[x]);
            min_v = FFMIN(min_v, vptr[x]);
            max_u = FFMAX(max_u, uptr[x]);
            max_v = FFMAX(max_v, vptr[x]);
        }

        uptr += ulinesize;
        vptr += vlinesize;
    }

    s->analyzeret[jobnr][0] = imax * min_u - 0.5f;
    s->analyzeret[jobnr][1] = imax * min_v - 0.5f;
    s->analyzeret[jobnr][2] = imax * max_u - 0.5f;
    s->analyzeret[jobnr][3] = imax * max_v - 0.5f;

    return 0;
}

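/* MEDIAN analysis: build full-frame U and V histograms and take the value at
 * which the cumulative count reaches half the pixels. Always runs as a single
 * job, and the median serves as both the shadow and the highlight estimate. */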
static int median_8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorCorrectContext *s = ctx->priv;
    AVFrame *frame = arg;
    const float imax = s->imax;
    const int width = s->planewidth[1];
    const int height = s->planeheight[1];
    const ptrdiff_t ulinesize = frame->linesize[1];
    const ptrdiff_t vlinesize = frame->linesize[2];
    const uint8_t *uptr = (const uint8_t *)frame->data[1];
    const uint8_t *vptr = (const uint8_t *)frame->data[2];
    unsigned *uhistogram = s->uhistogram;
    unsigned *vhistogram = s->vhistogram;
    const int half_size = width * height / 2;
    int umedian = s->max, vmedian = s->max;
    unsigned ucnt = 0, vcnt = 0;

    memset(uhistogram, 0, sizeof(*uhistogram) * (s->max + 1));
    memset(vhistogram, 0, sizeof(*vhistogram) * (s->max + 1));

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            uhistogram[uptr[x]]++;
            vhistogram[vptr[x]]++;
        }

        uptr += ulinesize;
        vptr += vlinesize;
    }

    for (int i = 0; i < s->max + 1; i++) {
        ucnt += uhistogram[i];
        if (ucnt >= half_size) {
            umedian = i;
            break;
        }
    }

    for (int i = 0; i < s->max + 1; i++) {
        vcnt += vhistogram[i];
        if (vcnt >= half_size) {
            vmedian = i;
            break;
        }
    }

    s->analyzeret[0][0] = imax * umedian - 0.5f;
    s->analyzeret[0][1] = imax * vmedian - 0.5f;
    s->analyzeret[0][2] = imax * umedian - 0.5f;
    s->analyzeret[0][3] = imax * vmedian - 0.5f;

    return 0;
}

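/* 16-bit variant of median_8(). */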
static int median_16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorCorrectContext *s = ctx->priv;
    AVFrame *frame = arg;
    const float imax = s->imax;
    const int width = s->planewidth[1];
    const int height = s->planeheight[1];
    const ptrdiff_t ulinesize = frame->linesize[1] / 2;
    const ptrdiff_t vlinesize = frame->linesize[2] / 2;
    const uint16_t *uptr = (const uint16_t *)frame->data[1];
    const uint16_t *vptr = (const uint16_t *)frame->data[2];
    unsigned *uhistogram = s->uhistogram;
    unsigned *vhistogram = s->vhistogram;
    const int half_size = width * height / 2;
    int umedian = s->max, vmedian = s->max;
    unsigned ucnt = 0, vcnt = 0;

    memset(uhistogram, 0, sizeof(*uhistogram) * (s->max + 1));
    memset(vhistogram, 0, sizeof(*vhistogram) * (s->max + 1));

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            uhistogram[uptr[x]]++;
            vhistogram[vptr[x]]++;
        }

        uptr += ulinesize;
        vptr += vlinesize;
    }

    for (int i = 0; i < s->max + 1; i++) {
        ucnt += uhistogram[i];
        if (ucnt >= half_size) {
            umedian = i;
            break;
        }
    }

    for (int i = 0; i < s->max + 1; i++) {
        vcnt += vhistogram[i];
        if (vcnt >= half_size) {
            vmedian = i;
            break;
        }
    }

    s->analyzeret[0][0] = imax * umedian - 0.5f;
    s->analyzeret[0][1] = imax * vmedian - 0.5f;
    s->analyzeret[0][2] = imax * umedian - 0.5f;
    s->analyzeret[0][3] = imax * vmedian - 0.5f;

    return 0;
}

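/*
 * Core correction, shared by the 8- and 16-bit paths: normalize Y to [0, 1]
 * and U/V to [-0.5, 0.5], shift each chroma sample by an offset interpolated
 * between the shadow value (bl/rl at Y = 0) and the highlight value (bh/rh at
 * Y = 1), then scale by the saturation factor:
 *   nu = saturation * (u + y * (bh - bl) + bl)
 *   nv = saturation * (v + y * (rh - rl) + rl)
 */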
#define PROCESS() \
    float y = yptr[x * chroma_w] * imax; \
    float u = uptr[x] * imax - .5f;      \
    float v = vptr[x] * imax - .5f;      \
    float nu, nv;                        \
                                         \
    nu = saturation * (u + y * bd + bl); \
    nv = saturation * (v + y * rd + rl);

static int colorcorrect_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorCorrectContext *s = ctx->priv;
    AVFrame *frame = arg;
    const float max = s->max;
    const float imax = s->imax;
    const int chroma_w = s->chroma_w;
    const int chroma_h = s->chroma_h;
    const int width = s->planewidth[1];
    const int height = s->planeheight[1];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t ylinesize = frame->linesize[0];
    const ptrdiff_t ulinesize = frame->linesize[1];
    const ptrdiff_t vlinesize = frame->linesize[2];
    uint8_t *yptr = frame->data[0] + slice_start * chroma_h * ylinesize;
    uint8_t *uptr = frame->data[1] + slice_start * ulinesize;
    uint8_t *vptr = frame->data[2] + slice_start * vlinesize;
    const float saturation = s->saturation;
    const float bl = s->bl;
    const float rl = s->rl;
    const float bd = s->bh - bl;
    const float rd = s->rh - rl;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            PROCESS()

            uptr[x] = av_clip_uint8((nu + 0.5f) * max);
            vptr[x] = av_clip_uint8((nv + 0.5f) * max);
        }

        yptr += ylinesize * chroma_h;
        uptr += ulinesize;
        vptr += vlinesize;
    }

    return 0;
}

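/* 16-bit variant of colorcorrect_slice8(); clips to the configured bit depth. */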
static int colorcorrect_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorCorrectContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int depth = s->depth;
    const float max = s->max;
    const float imax = s->imax;
    const int chroma_w = s->chroma_w;
    const int chroma_h = s->chroma_h;
    const int width = s->planewidth[1];
    const int height = s->planeheight[1];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const ptrdiff_t ylinesize = frame->linesize[0] / 2;
    const ptrdiff_t ulinesize = frame->linesize[1] / 2;
    const ptrdiff_t vlinesize = frame->linesize[2] / 2;
    uint16_t *yptr = (uint16_t *)frame->data[0] + slice_start * chroma_h * ylinesize;
    uint16_t *uptr = (uint16_t *)frame->data[1] + slice_start * ulinesize;
    uint16_t *vptr = (uint16_t *)frame->data[2] + slice_start * vlinesize;
    const float saturation = s->saturation;
    const float bl = s->bl;
    const float rl = s->rl;
    const float bd = s->bh - bl;
    const float rd = s->rh - rl;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            PROCESS()

            uptr[x] = av_clip_uintp2_c((nu + 0.5f) * max, depth);
            vptr[x] = av_clip_uintp2_c((nv + 0.5f) * max, depth);
        }

        yptr += ylinesize * chroma_h;
        uptr += ulinesize;
        vptr += vlinesize;
    }

    return 0;
}

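/* Per-frame entry point: if an analysis mode is selected, run the analysis
 * pass, average the per-slice results and negate them to get the correction
 * offsets, then apply the correction in place and pass the frame on. */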
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    ColorCorrectContext *s = ctx->priv;
    const int nb_threads = s->analyze == MEDIAN ? 1 : FFMIN(s->planeheight[1], ff_filter_get_nb_threads(ctx));

    if (s->analyze) {
        const int nb_athreads = s->analyze == MEDIAN ? 1 : nb_threads;
        float bl = 0.f, rl = 0.f, bh = 0.f, rh = 0.f;

        ff_filter_execute(ctx, s->do_analyze, frame, NULL, nb_athreads);

        for (int i = 0; i < nb_athreads; i++) {
            bl += s->analyzeret[i][0];
            rl += s->analyzeret[i][1];
            bh += s->analyzeret[i][2];
            rh += s->analyzeret[i][3];
        }

        bl /= nb_athreads;
        rl /= nb_athreads;
        bh /= nb_athreads;
        rh /= nb_athreads;

        s->bl = -bl;
        s->rl = -rl;
        s->bh = -bh;
        s->rh = -rh;
    }

    ff_filter_execute(ctx, s->do_slice, frame, NULL, nb_threads);

    return ff_filter_frame(ctx->outputs[0], frame);
}

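/* Planar YUV input formats (optionally with alpha), 8 to 16 bits per component. */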
static const enum AVPixelFormat pixel_fmts[] = {
    AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_NONE
};

static av_cold int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ColorCorrectContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->depth = desc->comp[0].depth;
    s->max = (1 << s->depth) - 1;
    s->imax = 1.f / s->max;
    s->do_slice = s->depth <= 8 ? colorcorrect_slice8 : colorcorrect_slice16;

    s->uhistogram = av_calloc(s->max == 255 ? 256 : 65536, sizeof(*s->uhistogram));
    if (!s->uhistogram)
        return AVERROR(ENOMEM);

    s->vhistogram = av_calloc(s->max == 255 ? 256 : 65536, sizeof(*s->vhistogram));
    if (!s->vhistogram)
        return AVERROR(ENOMEM);

    s->analyzeret = av_calloc(inlink->h, sizeof(*s->analyzeret));
    if (!s->analyzeret)
        return AVERROR(ENOMEM);

    switch (s->analyze) {
    case MANUAL:
        break;
    case AVERAGE:
        s->do_analyze = s->depth <= 8 ? average_slice8 : average_slice16;
        break;
    case MINMAX:
        s->do_analyze = s->depth <= 8 ? minmax_slice8 : minmax_slice16;
        break;
    case MEDIAN:
        s->do_analyze = s->depth <= 8 ? median_8 : median_16;
        break;
    default:
        return AVERROR_BUG;
    }

    s->chroma_w = 1 << desc->log2_chroma_w;
    s->chroma_h = 1 << desc->log2_chroma_h;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ColorCorrectContext *s = ctx->priv;

    av_freep(&s->analyzeret);
    av_freep(&s->uhistogram);
    av_freep(&s->vhistogram);
}

static const AVFilterPad colorcorrect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .flags        = AVFILTERPAD_FLAG_NEEDS_WRITABLE,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

#define OFFSET(x) offsetof(ColorCorrectContext, x)
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption colorcorrect_options[] = {
    { "rl", "set the red shadow spot",              OFFSET(rl),         AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, VF },
    { "bl", "set the blue shadow spot",             OFFSET(bl),         AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, VF },
    { "rh", "set the red highlight spot",           OFFSET(rh),         AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, VF },
    { "bh", "set the blue highlight spot",          OFFSET(bh),         AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, VF },
    { "saturation", "set the amount of saturation", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl=1}, -3, 3, VF },
    { "analyze", "set the analyze mode",            OFFSET(analyze),    AV_OPT_TYPE_INT,   {.i64=0}, 0, NB_ANALYZE-1, VF, "analyze" },
    { "manual",  "manually set options", 0, AV_OPT_TYPE_CONST, {.i64=MANUAL},  0, 0, VF, "analyze" },
    { "average", "use average pixels",   0, AV_OPT_TYPE_CONST, {.i64=AVERAGE}, 0, 0, VF, "analyze" },
    { "minmax",  "use minmax pixels",    0, AV_OPT_TYPE_CONST, {.i64=MINMAX},  0, 0, VF, "analyze" },
    { "median",  "use median pixels",    0, AV_OPT_TYPE_CONST, {.i64=MEDIAN},  0, 0, VF, "analyze" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(colorcorrect);

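/*
 * Example invocations (illustrative only; file names are placeholders):
 *   ffmpeg -i in.mp4 -vf colorcorrect=rl=0.1:bl=-0.1 out.mp4
 *   ffmpeg -i in.mp4 -vf colorcorrect=analyze=median out.mp4
 */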
const AVFilter ff_vf_colorcorrect = {
    .name          = "colorcorrect",
    .description   = NULL_IF_CONFIG_SMALL("Adjust color white balance selectively for blacks and whites."),
    .priv_size     = sizeof(ColorCorrectContext),
    .priv_class    = &colorcorrect_class,
    .uninit        = uninit,
    FILTER_INPUTS(colorcorrect_inputs),
    FILTER_OUTPUTS(ff_video_default_filterpad),
    FILTER_PIXFMTS_ARRAY(pixel_fmts),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};