FFmpeg
vf_colorbalance.c
/*
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define R 0
#define G 1
#define B 2
#define A 3

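/* Per-job payload handed to the slice-threaded workers below. */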
typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;

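/* One adjustment per tonal region; each option value lies in [-1, 1]. */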
typedef struct Range {
    float shadows;
    float midtones;
    float highlights;
} Range;

typedef struct ColorBalanceContext {
    const AVClass *class;
    Range cyan_red;
    Range magenta_green;
    Range yellow_blue;
    int preserve_lightness;

    uint8_t rgba_map[4];
    int depth;
    int max;
    int step;

    int (*color_balance)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ColorBalanceContext;

#define OFFSET(x) offsetof(ColorBalanceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption colorbalance_options[] = {
    { "rs", "set red shadows",      OFFSET(cyan_red.shadows),         AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "gs", "set green shadows",    OFFSET(magenta_green.shadows),    AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "bs", "set blue shadows",     OFFSET(yellow_blue.shadows),      AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "rm", "set red midtones",     OFFSET(cyan_red.midtones),        AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "gm", "set green midtones",   OFFSET(magenta_green.midtones),   AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "bm", "set blue midtones",    OFFSET(yellow_blue.midtones),     AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "rh", "set red highlights",   OFFSET(cyan_red.highlights),      AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "gh", "set green highlights", OFFSET(magenta_green.highlights), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "bh", "set blue highlights",  OFFSET(yellow_blue.highlights),   AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "pl", "preserve lightness",   OFFSET(preserve_lightness),       AV_OPT_TYPE_BOOL,  {.i64=0},  0, 1, FLAGS },
    { NULL }
};
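
/* Illustrative command-line use of these options (not part of this file):
 * warm up the shadows and pull blue out of the highlights with
 *   ffmpeg -i in.mp4 -vf "colorbalance=rs=0.3:bh=-0.2" out.mp4
 * Because the options carry AV_OPT_FLAG_RUNTIME_PARAM and the filter sets
 * process_command, they can also be changed mid-stream, e.g. via sendcmd. */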

AVFILTER_DEFINE_CLASS(colorbalance);

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB24,  AV_PIX_FMT_BGR24,
        AV_PIX_FMT_RGBA,   AV_PIX_FMT_BGRA,
        AV_PIX_FMT_ARGB,   AV_PIX_FMT_ABGR,
        AV_PIX_FMT_0RGB,   AV_PIX_FMT_0BGR,
        AV_PIX_FMT_RGB0,   AV_PIX_FMT_BGR0,
        AV_PIX_FMT_RGB48,  AV_PIX_FMT_BGR48,
        AV_PIX_FMT_RGBA64, AV_PIX_FMT_BGRA64,
        AV_PIX_FMT_GBRP,   AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_GBRP9,
        AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12,
        AV_PIX_FMT_GBRP14,
        AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_NONE
    };
    return ff_set_common_formats_from_list(ctx, pix_fmts);
}

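/* Apply the shadow/midtone/highlight offsets to one channel value v, given
 * the pixel lightness l (both in [0, 1]). Each offset is weighted by a
 * clipped linear ramp: shadow corrections fade out as l rises past b,
 * midtone corrections peak between the two ramps, and highlight corrections
 * fade in as l approaches 1 - b; scale = 0.7 limits the overall strength. */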
static float get_component(float v, float l,
                           float s, float m, float h)
{
    const float a = 4.f, b = 0.333f, scale = 0.7f;

    s *= av_clipf((b - l) * a + 0.5f, 0, 1) * scale;
    m *= av_clipf((l - b) * a + 0.5f, 0, 1) * av_clipf((1.0 - l - b) * a + 0.5f, 0, 1) * scale;
    h *= av_clipf((l + b - 1) * a + 0.5f, 0, 1) * scale;

    v += s;
    v += m;
    v += h;

    return av_clipf(v, 0, 1);
}

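/* Channel helper of the standard "alternative" HSL-to-RGB conversion;
 * n selects the channel phase (0 = red, 8 = green, 4 = blue). */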
static float hfun(float n, float h, float s, float l)
{
    float a = s * FFMIN(l, 1. - l);
    float k = fmodf(n + h / 30.f, 12.f);

    return av_clipf(l - a * FFMAX(FFMIN3(k - 3.f, 9.f - k, 1), -1.f), 0, 1);
}

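/* Restore the original lightness after the per-channel adjustments: derive
 * hue and saturation from the adjusted RGB, then rebuild RGB at the original
 * lightness l (passed in as max + min, hence the halving below). */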
static void preservel(float *r, float *g, float *b, float l)
{
    float max = FFMAX3(*r, *g, *b);
    float min = FFMIN3(*r, *g, *b);
    float h, s;

    l *= 0.5;

    if (*r == *g && *g == *b) {
        h = 0.;
    } else if (max == *r) {
        h = 60. * (0. + (*g - *b) / (max - min));
    } else if (max == *g) {
        h = 60. * (2. + (*b - *r) / (max - min));
    } else if (max == *b) {
        h = 60. * (4. + (*r - *g) / (max - min));
    } else {
        h = 0.;
    }
    if (h < 0.)
        h += 360.;

    if (max == 0. || min == 1.) {
        s = 0.;
    } else {
        s = (max - min) / (1. - FFABS(2. * l - 1));
    }

    *r = hfun(0, h, s, l);
    *g = hfun(8, h, s, l);
    *b = hfun(4, h, s, l);
}

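/* Slice worker for 8-bit planar formats (GBRP/GBRAP): plane 0 holds green,
 * plane 1 blue, plane 2 red, and plane 3 the optional alpha. */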
static int color_balance8_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
    const uint8_t *srcg = in->data[0] + slice_start * in->linesize[0];
    const uint8_t *srcb = in->data[1] + slice_start * in->linesize[1];
    const uint8_t *srcr = in->data[2] + slice_start * in->linesize[2];
    const uint8_t *srca = in->data[3] + slice_start * in->linesize[3];
    uint8_t *dstg = out->data[0] + slice_start * out->linesize[0];
    uint8_t *dstb = out->data[1] + slice_start * out->linesize[1];
    uint8_t *dstr = out->data[2] + slice_start * out->linesize[2];
    uint8_t *dsta = out->data[3] + slice_start * out->linesize[3];
    const float max = s->max;
    int i, j;

    for (i = slice_start; i < slice_end; i++) {
        for (j = 0; j < out->width; j++) {
            float r = srcr[j] / max;
            float g = srcg[j] / max;
            float b = srcb[j] / max;
            const float l = FFMAX3(r, g, b) + FFMIN3(r, g, b);

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dstr[j] = av_clip_uint8(lrintf(r * max));
            dstg[j] = av_clip_uint8(lrintf(g * max));
            dstb[j] = av_clip_uint8(lrintf(b * max));
            if (in != out && out->linesize[3])
                dsta[j] = srca[j];
        }

        srcg += in->linesize[0];
        srcb += in->linesize[1];
        srcr += in->linesize[2];
        srca += in->linesize[3];
        dstg += out->linesize[0];
        dstb += out->linesize[1];
        dstr += out->linesize[2];
        dsta += out->linesize[3];
    }

    return 0;
}

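/* Same as color_balance8_p for 9- to 16-bit planar formats; linesize is in
 * bytes, so it is halved when used as a uint16_t element stride. */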
static int color_balance16_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
    const uint16_t *srcg = (const uint16_t *)in->data[0] + slice_start * in->linesize[0] / 2;
    const uint16_t *srcb = (const uint16_t *)in->data[1] + slice_start * in->linesize[1] / 2;
    const uint16_t *srcr = (const uint16_t *)in->data[2] + slice_start * in->linesize[2] / 2;
    const uint16_t *srca = (const uint16_t *)in->data[3] + slice_start * in->linesize[3] / 2;
    uint16_t *dstg = (uint16_t *)out->data[0] + slice_start * out->linesize[0] / 2;
    uint16_t *dstb = (uint16_t *)out->data[1] + slice_start * out->linesize[1] / 2;
    uint16_t *dstr = (uint16_t *)out->data[2] + slice_start * out->linesize[2] / 2;
    uint16_t *dsta = (uint16_t *)out->data[3] + slice_start * out->linesize[3] / 2;
    const int depth = s->depth;
    const float max = s->max;
    int i, j;

    for (i = slice_start; i < slice_end; i++) {
        for (j = 0; j < out->width; j++) {
            float r = srcr[j] / max;
            float g = srcg[j] / max;
            float b = srcb[j] / max;
            const float l = (FFMAX3(r, g, b) + FFMIN3(r, g, b));

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dstr[j] = av_clip_uintp2_c(lrintf(r * max), depth);
            dstg[j] = av_clip_uintp2_c(lrintf(g * max), depth);
            dstb[j] = av_clip_uintp2_c(lrintf(b * max), depth);
            if (in != out && out->linesize[3])
                dsta[j] = srca[j];
        }

        srcg += in->linesize[0] / 2;
        srcb += in->linesize[1] / 2;
        srcr += in->linesize[2] / 2;
        srca += in->linesize[3] / 2;
        dstg += out->linesize[0] / 2;
        dstb += out->linesize[1] / 2;
        dstr += out->linesize[2] / 2;
        dsta += out->linesize[3] / 2;
    }

    return 0;
}

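/* Slice worker for packed 8-bit formats: rgba_map gives each component's
 * position within a pixel (equal to its byte offset at 8 bits per component)
 * and step is the pixel size in bytes. */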
static int color_balance8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    AVFilterLink *outlink = ctx->outputs[0];
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
    const uint8_t *srcrow = in->data[0] + slice_start * in->linesize[0];
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const float max = s->max;
    const int step = s->step;
    uint8_t *dstrow;
    int i, j;

    dstrow = out->data[0] + slice_start * out->linesize[0];
    for (i = slice_start; i < slice_end; i++) {
        const uint8_t *src = srcrow;
        uint8_t *dst = dstrow;

        for (j = 0; j < outlink->w * step; j += step) {
            float r = src[j + roffset] / max;
            float g = src[j + goffset] / max;
            float b = src[j + boffset] / max;
            const float l = (FFMAX3(r, g, b) + FFMIN3(r, g, b));

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dst[j + roffset] = av_clip_uint8(lrintf(r * max));
            dst[j + goffset] = av_clip_uint8(lrintf(g * max));
            dst[j + boffset] = av_clip_uint8(lrintf(b * max));
            if (in != out && step == 4)
                dst[j + aoffset] = src[j + aoffset];
        }

        srcrow += in->linesize[0];
        dstrow += out->linesize[0];
    }

    return 0;
}

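/* Packed 16-bit variant: step is converted from bytes to uint16_t units,
 * while the rgba_map offsets are component indices and apply unchanged. */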
static int color_balance16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    AVFilterLink *outlink = ctx->outputs[0];
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
    const uint16_t *srcrow = (const uint16_t *)in->data[0] + slice_start * in->linesize[0] / 2;
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const int step = s->step / 2;
    const int depth = s->depth;
    const float max = s->max;
    uint16_t *dstrow;
    int i, j;

    dstrow = (uint16_t *)out->data[0] + slice_start * out->linesize[0] / 2;
    for (i = slice_start; i < slice_end; i++) {
        const uint16_t *src = srcrow;
        uint16_t *dst = dstrow;

        for (j = 0; j < outlink->w * step; j += step) {
            float r = src[j + roffset] / max;
            float g = src[j + goffset] / max;
            float b = src[j + boffset] / max;
            const float l = (FFMAX3(r, g, b) + FFMIN3(r, g, b));

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dst[j + roffset] = av_clip_uintp2_c(lrintf(r * max), depth);
            dst[j + goffset] = av_clip_uintp2_c(lrintf(g * max), depth);
            dst[j + boffset] = av_clip_uintp2_c(lrintf(b * max), depth);
            if (in != out && step == 4)
                dst[j + aoffset] = src[j + aoffset];
        }

        srcrow += in->linesize[0] / 2;
        dstrow += out->linesize[0] / 2;
    }

    return 0;
}

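/* Select the worker that matches the negotiated format (8-bit vs. higher
 * depth, planar vs. packed) and cache the per-format constants. */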
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ColorBalanceContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    const int depth = desc->comp[0].depth;
    const int max = (1 << depth) - 1;
    const int planar = av_pix_fmt_count_planes(outlink->format) > 1;

    s->depth = depth;
    s->max = max;

    if (max == 255 && planar) {
        s->color_balance = color_balance8_p;
    } else if (planar) {
        s->color_balance = color_balance16_p;
    } else if (max == 255) {
        s->color_balance = color_balance8;
    } else {
        s->color_balance = color_balance16;
    }

    ff_fill_rgba_map(s->rgba_map, outlink->format);
    s->step = av_get_padded_bits_per_pixel(desc) >> 3;

    return 0;
}

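/* Process in place when the input frame is writable; otherwise allocate an
 * output buffer and copy the frame properties, then fan the work out over
 * the available threads, one slice per job. */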
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ColorBalanceContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.in = in;
    td.out = out;
    ff_filter_execute(ctx, s->color_balance, &td, NULL,
                      FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));

    if (in != out)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static const AVFilterPad colorbalance_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
};

static const AVFilterPad colorbalance_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

const AVFilter ff_vf_colorbalance = {
    .name          = "colorbalance",
    .description   = NULL_IF_CONFIG_SMALL("Adjust the color balance."),
    .priv_size     = sizeof(ColorBalanceContext),
    .priv_class    = &colorbalance_class,
    FILTER_INPUTS(colorbalance_inputs),
    FILTER_OUTPUTS(colorbalance_outputs),
    FILTER_QUERY_FUNC(query_formats),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC |
                     AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};