FFmpeg
vf_colorbalance.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/opt.h"
22 #include "libavutil/pixdesc.h"
23 #include "avfilter.h"
24 #include "drawutils.h"
25 #include "internal.h"
26 #include "video.h"
27 
/* Channel indices used to look up per-format component offsets in
 * ColorBalanceContext.rgba_map (filled by ff_fill_rgba_map()). */
#define R 0
#define G 1
#define B 2
#define A 3
32 
/* Frame pair handed to the slice workers through ff_filter_execute().
 * in and out refer to the same frame when the input was writable in place. */
typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;
36 
/* Per-channel balance adjustments for the three tonal ranges.
 * Each value is a user option in [-1, 1] (see colorbalance_options). */
typedef struct Range {
    float shadows;
    float midtones;
    float highlights;
} Range;
42 
43 typedef struct ColorBalanceContext {
44  const AVClass *class;
49 
50  uint8_t rgba_map[4];
51  int depth;
52  int max;
53  int step;
54 
55  int (*color_balance)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
57 
#define OFFSET(x) offsetof(ColorBalanceContext, x)
/* All options carry AV_OPT_FLAG_RUNTIME_PARAM, so they can be changed at
 * runtime via the generic process_command path (ff_filter_process_command). */
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
/* Option table: {r,g,b} x {shadows,midtones,highlights}, each in [-1, 1],
 * plus "pl" to restore the original lightness after adjustment. */
static const AVOption colorbalance_options[] = {
    { "rs", "set red shadows", OFFSET(cyan_red.shadows), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "gs", "set green shadows", OFFSET(magenta_green.shadows), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "bs", "set blue shadows", OFFSET(yellow_blue.shadows), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "rm", "set red midtones", OFFSET(cyan_red.midtones), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "gm", "set green midtones", OFFSET(magenta_green.midtones), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "bm", "set blue midtones", OFFSET(yellow_blue.midtones), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "rh", "set red highlights", OFFSET(cyan_red.highlights), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "gh", "set green highlights", OFFSET(magenta_green.highlights), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "bh", "set blue highlights", OFFSET(yellow_blue.highlights), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS },
    { "pl", "preserve lightness", OFFSET(preserve_lightness), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { NULL }
};
73 
74 AVFILTER_DEFINE_CLASS(colorbalance);
75 
76 static const enum AVPixelFormat pix_fmts[] = {
91 };
92 
/**
 * Apply the shadows/midtones/highlights adjustments to one color component.
 *
 * @param v normalized component value in [0, 1]
 * @param l lightness proxy of the pixel (max + min of the RGB triple,
 *          i.e. twice the HSL lightness)
 * @param s shadows adjustment, weighted toward low lightness
 * @param m midtones adjustment, weighted toward mid lightness
 * @param h highlights adjustment, weighted toward high lightness
 * @return adjusted component, clipped back into [0, 1]
 */
static float get_component(float v, float l,
                           float s, float m, float h)
{
    const float slope = 4.f, knee = 0.333f, amount = 0.7f;
    /* Triangular lightness windows selecting each tonal range. */
    const float w_shadow   = av_clipf((knee - l) * slope + 0.5f, 0.f, 1.f);
    const float w_mid_lo   = av_clipf((l - knee) * slope + 0.5f, 0.f, 1.f);
    const float w_mid_hi   = av_clipf((1.f - l - knee) * slope + 0.5f, 0.f, 1.f);
    const float w_highlight = av_clipf((l + knee - 1) * slope + 0.5f, 0.f, 1.f);

    /* Additions are kept sequential (and multiplications grouped) exactly
     * as in the reference formulation to preserve float rounding. */
    v += s * (w_shadow * amount);
    v += m * (w_mid_lo * w_mid_hi * amount);
    v += h * (w_highlight * amount);

    return av_clipf(v, 0.f, 1.f);
}
108 
/**
 * Evaluate one RGB component from HSL using the standard "n-offset"
 * formulation (n = 0 for red, 8 for green, 4 for blue; see preservel()).
 *
 * @param n evaluation-point offset
 * @param h hue in degrees
 * @param s saturation in [0, 1]
 * @param l lightness in [0, 1]
 * @return the component value, clipped into [0, 1]
 */
static float hfun(float n, float h, float s, float l)
{
    const float amp = s * FFMIN(l, 1.f - l);
    const float k   = fmodf(n + h / 30.f, 12.f);
    /* FFMIN3(a, b, c) == FFMIN(FFMIN(a, b), c); written out explicitly. */
    const float t   = FFMAX(FFMIN(FFMIN(k - 3.f, 9.f - k), 1), -1.f);

    return av_clipf(l - amp * t, 0.f, 1.f);
}
116 
/**
 * Re-derive r, g, b so the result keeps the original lightness l while
 * preserving the hue/saturation implied by the adjusted r, g, b values.
 *
 * @param r,g,b in/out normalized components in [0, 1]
 * @param l     lightness proxy from the caller: max + min of the ORIGINAL
 *              pixel's components (halved below to get HSL lightness)
 */
static void preservel(float *r, float *g, float *b, float l)
{
    float max = FFMAX3(*r, *g, *b);
    float min = FFMIN3(*r, *g, *b);
    float h, s;

    /* callers pass max+min; HSL lightness is (max+min)/2 */
    l *= 0.5f;

    /* Hue from the dominant component (standard RGB->HSL derivation).
     * The grey case is tested first so max - min is nonzero below;
     * the final else is unreachable in practice but keeps h defined. */
    if (*r == *g && *g == *b) {
        h = 0.f;
    } else if (max == *r) {
        h = 60.f * (0.f + (*g - *b) / (max - min));
    } else if (max == *g) {
        h = 60.f * (2.f + (*b - *r) / (max - min));
    } else if (max == *b) {
        h = 60.f * (4.f + (*r - *g) / (max - min));
    } else {
        h = 0.f;
    }
    if (h < 0.f)
        h += 360.f;

    /* Saturation; forced to zero at the range extremes, where the
     * denominator 1 - |2l - 1| would approach zero. */
    if (max == 1.f || min == 0.f) {
        s = 0.f;
    } else {
        s = (max - min) / (1.f - (FFABS(2.f * l - 1.f)));
    }

    /* HSL -> RGB at the three evaluation points (red, green, blue). */
    *r = hfun(0.f, h, s, l);
    *g = hfun(8.f, h, s, l);
    *b = hfun(4.f, h, s, l);
}
149 
/**
 * Slice worker for 8-bit planar RGB(A) formats.
 * Planes are laid out G, B, R(, A): data[0]=G, data[1]=B, data[2]=R,
 * data[3]=A — matching the GBR(A)P pixel formats this path is chosen for.
 * Processes rows [slice_start, slice_end) of the frame for job jobnr.
 */
static int color_balance8_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
    const uint8_t *srcg = in->data[0] + slice_start * in->linesize[0];
    const uint8_t *srcb = in->data[1] + slice_start * in->linesize[1];
    const uint8_t *srcr = in->data[2] + slice_start * in->linesize[2];
    const uint8_t *srca = in->data[3] + slice_start * in->linesize[3];
    uint8_t *dstg = out->data[0] + slice_start * out->linesize[0];
    uint8_t *dstb = out->data[1] + slice_start * out->linesize[1];
    uint8_t *dstr = out->data[2] + slice_start * out->linesize[2];
    uint8_t *dsta = out->data[3] + slice_start * out->linesize[3];
    const float max = s->max;
    int i, j;

    for (i = slice_start; i < slice_end; i++) {
        for (j = 0; j < out->width; j++) {
            /* normalize to [0, 1] */
            float r = srcr[j] / max;
            float g = srcg[j] / max;
            float b = srcb[j] / max;
            /* lightness proxy: max+min (twice HSL lightness; preservel halves it) */
            const float l = FFMAX3(r, g, b) + FFMIN3(r, g, b);

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dstr[j] = av_clip_uint8(lrintf(r * max));
            dstg[j] = av_clip_uint8(lrintf(g * max));
            dstb[j] = av_clip_uint8(lrintf(b * max));
            /* copy alpha only when writing to a distinct frame that has an
             * alpha plane (nonzero linesize[3]) */
            if (in != out && out->linesize[3])
                dsta[j] = srca[j];
        }

        srcg += in->linesize[0];
        srcb += in->linesize[1];
        srcr += in->linesize[2];
        srca += in->linesize[3];
        dstg += out->linesize[0];
        dstb += out->linesize[1];
        dstr += out->linesize[2];
        dsta += out->linesize[3];
    }

    return 0;
}
202 
/**
 * Slice worker for planar RGB(A) formats deeper than 8 bits (up to 16).
 * Same plane order as the 8-bit path (G, B, R, A); pointers and strides
 * are in uint16_t units, hence the /2 on every linesize.
 */
static int color_balance16_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
    const uint16_t *srcg = (const uint16_t *)in->data[0] + slice_start * in->linesize[0] / 2;
    const uint16_t *srcb = (const uint16_t *)in->data[1] + slice_start * in->linesize[1] / 2;
    const uint16_t *srcr = (const uint16_t *)in->data[2] + slice_start * in->linesize[2] / 2;
    const uint16_t *srca = (const uint16_t *)in->data[3] + slice_start * in->linesize[3] / 2;
    uint16_t *dstg = (uint16_t *)out->data[0] + slice_start * out->linesize[0] / 2;
    uint16_t *dstb = (uint16_t *)out->data[1] + slice_start * out->linesize[1] / 2;
    uint16_t *dstr = (uint16_t *)out->data[2] + slice_start * out->linesize[2] / 2;
    uint16_t *dsta = (uint16_t *)out->data[3] + slice_start * out->linesize[3] / 2;
    const int depth = s->depth;
    const float max = s->max;
    int i, j;

    for (i = slice_start; i < slice_end; i++) {
        for (j = 0; j < out->width; j++) {
            /* normalize to [0, 1] using the format's max value */
            float r = srcr[j] / max;
            float g = srcg[j] / max;
            float b = srcb[j] / max;
            /* lightness proxy: max+min (twice HSL lightness) */
            const float l = (FFMAX3(r, g, b) + FFMIN3(r, g, b));

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            /* clip to the actual bit depth, not to 16 bits */
            dstr[j] = av_clip_uintp2_c(lrintf(r * max), depth);
            dstg[j] = av_clip_uintp2_c(lrintf(g * max), depth);
            dstb[j] = av_clip_uintp2_c(lrintf(b * max), depth);
            if (in != out && out->linesize[3])
                dsta[j] = srca[j];
        }

        srcg += in->linesize[0] / 2;
        srcb += in->linesize[1] / 2;
        srcr += in->linesize[2] / 2;
        srca += in->linesize[3] / 2;
        dstg += out->linesize[0] / 2;
        dstb += out->linesize[1] / 2;
        dstr += out->linesize[2] / 2;
        dsta += out->linesize[3] / 2;
    }

    return 0;
}
256 
/**
 * Slice worker for 8-bit packed RGB(A) formats.
 * Component positions within each pixel come from s->rgba_map, and
 * s->step is the pixel stride in bytes (3 for 24bpp RGB, 4 with alpha
 * or padding).
 */
static int color_balance8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    AVFilterLink *outlink = ctx->outputs[0];
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
    const uint8_t *srcrow = in->data[0] + slice_start * in->linesize[0];
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const float max = s->max;
    const int step = s->step;
    uint8_t *dstrow;
    int i, j;

    dstrow = out->data[0] + slice_start * out->linesize[0];
    for (i = slice_start; i < slice_end; i++) {
        const uint8_t *src = srcrow;
        uint8_t *dst = dstrow;

        /* j advances one pixel (step bytes) per iteration */
        for (j = 0; j < outlink->w * step; j += step) {
            float r = src[j + roffset] / max;
            float g = src[j + goffset] / max;
            float b = src[j + boffset] / max;
            /* lightness proxy: max+min (twice HSL lightness) */
            const float l = (FFMAX3(r, g, b) + FFMIN3(r, g, b));

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            dst[j + roffset] = av_clip_uint8(lrintf(r * max));
            dst[j + goffset] = av_clip_uint8(lrintf(g * max));
            dst[j + boffset] = av_clip_uint8(lrintf(b * max));
            /* 4-byte pixels carry a fourth component; copy it when the
             * output is a distinct frame */
            if (in != out && step == 4)
                dst[j + aoffset] = src[j + aoffset];
        }

        srcrow += in->linesize[0];
        dstrow += out->linesize[0];
    }

    return 0;
}
307 
/**
 * Slice worker for packed RGB(A) formats deeper than 8 bits (up to 16).
 * Pointers are in uint16_t units: linesize is halved and s->step (bytes)
 * is halved to get the per-pixel stride in 16-bit words.
 */
static int color_balance16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ColorBalanceContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    AVFilterLink *outlink = ctx->outputs[0];
    const int slice_start = (out->height * jobnr) / nb_jobs;
    const int slice_end = (out->height * (jobnr+1)) / nb_jobs;
    const uint16_t *srcrow = (const uint16_t *)in->data[0] + slice_start * in->linesize[0] / 2;
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const int step = s->step / 2;
    const int depth = s->depth;
    const float max = s->max;
    uint16_t *dstrow;
    int i, j;

    dstrow = (uint16_t *)out->data[0] + slice_start * out->linesize[0] / 2;
    for (i = slice_start; i < slice_end; i++) {
        const uint16_t *src = srcrow;
        uint16_t *dst = dstrow;

        /* j advances one pixel (step 16-bit words) per iteration */
        for (j = 0; j < outlink->w * step; j += step) {
            float r = src[j + roffset] / max;
            float g = src[j + goffset] / max;
            float b = src[j + boffset] / max;
            /* lightness proxy: max+min (twice HSL lightness) */
            const float l = (FFMAX3(r, g, b) + FFMIN3(r, g, b));

            r = get_component(r, l, s->cyan_red.shadows, s->cyan_red.midtones, s->cyan_red.highlights);
            g = get_component(g, l, s->magenta_green.shadows, s->magenta_green.midtones, s->magenta_green.highlights);
            b = get_component(b, l, s->yellow_blue.shadows, s->yellow_blue.midtones, s->yellow_blue.highlights);

            if (s->preserve_lightness)
                preservel(&r, &g, &b, l);

            /* clip to the actual bit depth */
            dst[j + roffset] = av_clip_uintp2_c(lrintf(r * max), depth);
            dst[j + goffset] = av_clip_uintp2_c(lrintf(g * max), depth);
            dst[j + boffset] = av_clip_uintp2_c(lrintf(b * max), depth);
            if (in != out && step == 4)
                dst[j + aoffset] = src[j + aoffset];
        }

        srcrow += in->linesize[0] / 2;
        dstrow += out->linesize[0] / 2;
    }

    return 0;
}
359 
/**
 * Output-link configuration: caches the format's bit depth, max value and
 * pixel step, and selects the worker matching planar-vs-packed x depth.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ColorBalanceContext *s = ctx->priv;
    /* NOTE(review): this extraction appears to be missing the declaration of
     * `desc` here — presumably
     *     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
     * (line 364 of the rendered source). Confirm against the upstream file. */
    const int depth = desc->comp[0].depth;
    const int max = (1 << depth) - 1;
    const int planar = av_pix_fmt_count_planes(outlink->format) > 1;

    s->depth = depth;
    s->max = max;

    /* max == 255 <=> 8-bit formats; more than one plane <=> planar */
    if (max == 255 && planar) {
        s->color_balance = color_balance8_p;
    } else if (planar) {
        s->color_balance = color_balance16_p;
    } else if (max == 255) {
        s->color_balance = color_balance8;
    } else {
        s->color_balance = color_balance16;
    }

    /* component offsets for the packed paths; step is bytes per pixel */
    ff_fill_rgba_map(s->rgba_map, outlink->format);
    s->step = av_get_padded_bits_per_pixel(desc) >> 3;

    return 0;
}
387 
389 {
390  AVFilterContext *ctx = inlink->dst;
391  ColorBalanceContext *s = ctx->priv;
392  AVFilterLink *outlink = ctx->outputs[0];
393  ThreadData td;
394  AVFrame *out;
395 
396  if (av_frame_is_writable(in)) {
397  out = in;
398  } else {
399  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
400  if (!out) {
401  av_frame_free(&in);
402  return AVERROR(ENOMEM);
403  }
405  }
406 
407  td.in = in;
408  td.out = out;
409  ff_filter_execute(ctx, s->color_balance, &td, NULL,
410  FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
411 
412  if (in != out)
413  av_frame_free(&in);
414  return ff_filter_frame(outlink, out);
415 }
416 
418  {
419  .name = "default",
420  .type = AVMEDIA_TYPE_VIDEO,
421  .filter_frame = filter_frame,
422  },
423 };
424 
426  {
427  .name = "default",
428  .type = AVMEDIA_TYPE_VIDEO,
429  .config_props = config_output,
430  },
431 };
432 
434  .name = "colorbalance",
435  .description = NULL_IF_CONFIG_SMALL("Adjust the color balance."),
436  .priv_size = sizeof(ColorBalanceContext),
437  .priv_class = &colorbalance_class,
442  .process_command = ff_filter_process_command,
443 };
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:108
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:491
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_colorbalance.c:388
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
colorbalance_outputs
static const AVFilterPad colorbalance_outputs[]
Definition: vf_colorbalance.c:425
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
get_component
static float get_component(float v, float l, float s, float m, float h)
Definition: vf_colorbalance.c:93
out
FILE * out
Definition: movenc.c:54
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:978
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2964
colorbalance_inputs
static const AVFilterPad colorbalance_inputs[]
Definition: vf_colorbalance.c:417
FILTER_PIXFMTS_ARRAY
#define FILTER_PIXFMTS_ARRAY(array)
Definition: internal.h:172
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
G
#define G
Definition: vf_colorbalance.c:29
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
ColorBalanceContext::cyan_red
Range cyan_red
Definition: vf_colorbalance.c:45
ColorBalanceContext::color_balance
int(* color_balance)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colorbalance.c:55
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
pixdesc.h
av_clip_uintp2_c
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
Clip a signed integer to an unsigned power of two range.
Definition: common.h:276
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
AVOption
AVOption.
Definition: opt.h:251
b
#define b
Definition: input.c:41
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:526
video.h
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:153
preservel
static void preservel(float *r, float *g, float *b, float l)
Definition: vf_colorbalance.c:117
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
Range::highlights
float highlights
Definition: vf_colorbalance.c:40
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3004
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:486
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
color_balance16_p
static int color_balance16_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colorbalance.c:203
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:484
ColorBalanceContext
Definition: vf_colorbalance.c:43
ColorBalanceContext::preserve_lightness
int preserve_lightness
Definition: vf_colorbalance.c:48
FLAGS
#define FLAGS
Definition: vf_colorbalance.c:59
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
planar
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1<< 16)) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const 
double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, 
AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? 
in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
Definition: audioconvert.c:56
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:47
ColorBalanceContext::step
int step
Definition: vf_colorbalance.c:53
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:488
s
#define s(width, name)
Definition: cbs_vp9.c:198
ColorBalanceContext::rgba_map
uint8_t rgba_map[4]
Definition: vf_colorbalance.c:50
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:489
colorbalance_options
static const AVOption colorbalance_options[]
Definition: vf_colorbalance.c:60
g
const char * g
Definition: vf_curves.c:127
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:1979
ctx
AVFormatContext * ctx
Definition: movenc.c:48
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:192
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
arg
const char * arg
Definition: jacosubdec.c:67
Range::shadows
float shadows
Definition: vf_colorbalance.c:38
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:487
ColorBalanceContext::yellow_blue
Range yellow_blue
Definition: vf_colorbalance.c:47
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:458
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:459
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:736
hfun
static float hfun(float n, float h, float s, float l)
Definition: vf_colorbalance.c:109
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(colorbalance)
A
#define A
Definition: vf_colorbalance.c:31
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:258
av_clipf
av_clipf
Definition: af_crystalizer.c:121
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:483
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
R
#define R
Definition: vf_colorbalance.c:28
Range
Definition: vf_colorbalance.c:37
ColorBalanceContext::max
int max
Definition: vf_colorbalance.c:52
f
f
Definition: af_crystalizer.c:121
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
av_get_padded_bits_per_pixel
int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel for the pixel format described by pixdesc, including any padding ...
Definition: pixdesc.c:2929
color_balance16
static int color_balance16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colorbalance.c:308
B
#define B
Definition: vf_colorbalance.c:30
AV_PIX_FMT_RGB48
#define AV_PIX_FMT_RGB48
Definition: pixfmt.h:454
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:666
color_balance8_p
static int color_balance8_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colorbalance.c:150
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:851
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:256
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: vf_colorbalance.c:76
ColorBalanceContext::depth
int depth
Definition: vf_colorbalance.c:51
ff_vf_colorbalance
const AVFilter ff_vf_colorbalance
Definition: vf_colorbalance.c:433
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:147
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:92
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:228
config_output
static int config_output(AVFilterLink *outlink)
Definition: vf_colorbalance.c:360
AV_PIX_FMT_BGRA64
#define AV_PIX_FMT_BGRA64
Definition: pixfmt.h:463
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
FFMIN3
#define FFMIN3(a, b, c)
Definition: macros.h:50
OFFSET
#define OFFSET(x)
Definition: vf_colorbalance.c:58
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:485
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:786
ThreadData
Used for passing data between threads.
Definition: dsddec.c:69
ColorBalanceContext::magenta_green
Range magenta_green
Definition: vf_colorbalance.c:46
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
color_balance8
static int color_balance8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colorbalance.c:257
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:53
AVFilter
Filter definition.
Definition: avfilter.h:166
AV_PIX_FMT_0BGR
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
Definition: pixfmt.h:257
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
avfilter.h
av_clip_uint8
#define av_clip_uint8
Definition: common.h:102
AVFilterContext
An instance of a filter.
Definition: avfilter.h:397
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
Range::midtones
float midtones
Definition: vf_colorbalance.c:39
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:193
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
ff_fill_rgba_map
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:35
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
AV_PIX_FMT_0RGB
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:255
h
h
Definition: vp9dsp_template.c:2038
drawutils.h
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:144
int
int
Definition: ffmpeg_filter.c:368
min
float min
Definition: vorbis_enc_data.h:429