FFmpeg
vf_colorcontrast.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2021 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/opt.h"
24 #include "libavutil/pixdesc.h"
25 #include "avfilter.h"
26 #include "drawutils.h"
27 #include "internal.h"
28 #include "video.h"
29 
30 #define R 0
31 #define G 1
32 #define B 2
33 
34 typedef struct ColorContrastContext {
35  const AVClass *class;
36 
37  float rc, gm, by;
38  float rcw, gmw, byw;
39  float preserve;
40 
41  int step;
42  int depth;
43  uint8_t rgba_map[4];
44 
46  int jobnr, int nb_jobs);
48 
/**
 * Linear interpolation between v0 and v1.
 *
 * @param v0 value returned when f == 0
 * @param v1 value returned when f == 1
 * @param f  interpolation factor (not clamped)
 * @return v0 + (v1 - v0) * f
 */
static inline float lerpf(float v0, float v1, float f)
{
    return v0 + (v1 - v0) * f;
}
53 
/*
 * Core per-pixel transform shared by all slice workers.
 *
 * Relies on these names being in scope at the expansion site:
 *   inputs/outputs: float r, g, b (read), nr, ng, nb (written)
 *   scratch:        g0..g2, b0..b2, r0..r2, gd, bd, rd, gb, br, rg, li, lo, lf
 *   constants:      gm, by, rc (contrast, pre-halved), gmw, byw, rcw (weights),
 *                   scale (1 / weight sum), preserve
 *
 * For each opponent axis the component's distance from the mean of the other
 * two is amplified by the axis contrast; the three candidate results are then
 * blended by the axis weights and normalized by scale.  Finally the original
 * lightness (max+min of the components) is restored by the fraction given in
 * preserve; FLT_EPSILON in lo guards against division by zero.
 */
#define PROCESS(max) \
    br = (b + r) * 0.5f; \
    gb = (g + b) * 0.5f; \
    rg = (r + g) * 0.5f; \
 \
    gd = g - br; \
    bd = b - rg; \
    rd = r - gb; \
 \
    g0 = g + gd * gm; \
    b0 = b - gd * gm; \
    r0 = r - gd * gm; \
 \
    g1 = g - bd * by; \
    b1 = b + bd * by; \
    r1 = r - bd * by; \
 \
    g2 = g - rd * rc; \
    b2 = b - rd * rc; \
    r2 = r + rd * rc; \
 \
    ng = av_clipf((g0 * gmw + g1 * byw + g2 * rcw) * scale, 0.f, max); \
    nb = av_clipf((b0 * gmw + b1 * byw + b2 * rcw) * scale, 0.f, max); \
    nr = av_clipf((r0 * gmw + r1 * byw + r2 * rcw) * scale, 0.f, max); \
 \
    li = FFMAX3(r, g, b) + FFMIN3(r, g, b); \
    lo = FFMAX3(nr, ng, nb) + FFMIN3(nr, ng, nb) + FLT_EPSILON; \
    lf = li / lo; \
 \
    r = nr * lf; \
    g = ng * lf; \
    b = nb * lf; \
 \
    nr = lerpf(nr, r, preserve); \
    ng = lerpf(ng, g, preserve); \
    nb = lerpf(nb, b, preserve);
90 
91 static int colorcontrast_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
92 {
93  ColorContrastContext *s = ctx->priv;
94  AVFrame *frame = arg;
95  const int width = frame->width;
96  const int height = frame->height;
97  const int slice_start = (height * jobnr) / nb_jobs;
98  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
99  const ptrdiff_t glinesize = frame->linesize[0];
100  const ptrdiff_t blinesize = frame->linesize[1];
101  const ptrdiff_t rlinesize = frame->linesize[2];
102  uint8_t *gptr = frame->data[0] + slice_start * glinesize;
103  uint8_t *bptr = frame->data[1] + slice_start * blinesize;
104  uint8_t *rptr = frame->data[2] + slice_start * rlinesize;
105  const float preserve = s->preserve;
106  const float gm = s->gm * 0.5f;
107  const float by = s->by * 0.5f;
108  const float rc = s->rc * 0.5f;
109  const float gmw = s->gmw;
110  const float byw = s->byw;
111  const float rcw = s->rcw;
112  const float sum = gmw + byw + rcw;
113  const float scale = 1.f / sum;
114 
115  for (int y = slice_start; y < slice_end && sum > FLT_EPSILON; y++) {
116  for (int x = 0; x < width; x++) {
117  float g = gptr[x];
118  float b = bptr[x];
119  float r = rptr[x];
120  float g0, g1, g2;
121  float b0, b1, b2;
122  float r0, r1, r2;
123  float gd, bd, rd;
124  float gb, br, rg;
125  float nr, ng, nb;
126  float li, lo, lf;
127 
128  PROCESS(255.f);
129 
130  gptr[x] = av_clip_uint8(ng);
131  bptr[x] = av_clip_uint8(nb);
132  rptr[x] = av_clip_uint8(nr);
133  }
134 
135  gptr += glinesize;
136  bptr += blinesize;
137  rptr += rlinesize;
138  }
139 
140  return 0;
141 }
142 
143 static int colorcontrast_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
144 {
145  ColorContrastContext *s = ctx->priv;
146  AVFrame *frame = arg;
147  const int depth = s->depth;
148  const float max = (1 << depth) - 1;
149  const int width = frame->width;
150  const int height = frame->height;
151  const int slice_start = (height * jobnr) / nb_jobs;
152  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
153  const ptrdiff_t glinesize = frame->linesize[0] / 2;
154  const ptrdiff_t blinesize = frame->linesize[1] / 2;
155  const ptrdiff_t rlinesize = frame->linesize[2] / 2;
156  uint16_t *gptr = (uint16_t *)frame->data[0] + slice_start * glinesize;
157  uint16_t *bptr = (uint16_t *)frame->data[1] + slice_start * blinesize;
158  uint16_t *rptr = (uint16_t *)frame->data[2] + slice_start * rlinesize;
159  const float preserve = s->preserve;
160  const float gm = s->gm * 0.5f;
161  const float by = s->by * 0.5f;
162  const float rc = s->rc * 0.5f;
163  const float gmw = s->gmw;
164  const float byw = s->byw;
165  const float rcw = s->rcw;
166  const float sum = gmw + byw + rcw;
167  const float scale = 1.f / sum;
168 
169  for (int y = slice_start; y < slice_end && sum > FLT_EPSILON; y++) {
170  for (int x = 0; x < width; x++) {
171  float g = gptr[x];
172  float b = bptr[x];
173  float r = rptr[x];
174  float g0, g1, g2;
175  float b0, b1, b2;
176  float r0, r1, r2;
177  float gd, bd, rd;
178  float gb, br, rg;
179  float nr, ng, nb;
180  float li, lo, lf;
181 
182  PROCESS(max);
183 
184  gptr[x] = av_clip_uintp2_c(ng, depth);
185  bptr[x] = av_clip_uintp2_c(nb, depth);
186  rptr[x] = av_clip_uintp2_c(nr, depth);
187  }
188 
189  gptr += glinesize;
190  bptr += blinesize;
191  rptr += rlinesize;
192  }
193 
194  return 0;
195 }
196 
197 static int colorcontrast_slice8p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
198 {
199  ColorContrastContext *s = ctx->priv;
200  AVFrame *frame = arg;
201  const int step = s->step;
202  const int width = frame->width;
203  const int height = frame->height;
204  const int slice_start = (height * jobnr) / nb_jobs;
205  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
206  const ptrdiff_t linesize = frame->linesize[0];
207  const uint8_t roffset = s->rgba_map[R];
208  const uint8_t goffset = s->rgba_map[G];
209  const uint8_t boffset = s->rgba_map[B];
210  uint8_t *ptr = frame->data[0] + slice_start * linesize;
211  const float preserve = s->preserve;
212  const float gm = s->gm * 0.5f;
213  const float by = s->by * 0.5f;
214  const float rc = s->rc * 0.5f;
215  const float gmw = s->gmw;
216  const float byw = s->byw;
217  const float rcw = s->rcw;
218  const float sum = gmw + byw + rcw;
219  const float scale = 1.f / sum;
220 
221  for (int y = slice_start; y < slice_end && sum > FLT_EPSILON; y++) {
222  for (int x = 0; x < width; x++) {
223  float g = ptr[x * step + goffset];
224  float b = ptr[x * step + boffset];
225  float r = ptr[x * step + roffset];
226  float g0, g1, g2;
227  float b0, b1, b2;
228  float r0, r1, r2;
229  float gd, bd, rd;
230  float gb, br, rg;
231  float nr, ng, nb;
232  float li, lo, lf;
233 
234  PROCESS(255.f);
235 
236  ptr[x * step + goffset] = av_clip_uint8(ng);
237  ptr[x * step + boffset] = av_clip_uint8(nb);
238  ptr[x * step + roffset] = av_clip_uint8(nr);
239  }
240 
241  ptr += linesize;
242  }
243 
244  return 0;
245 }
246 
247 static int colorcontrast_slice16p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
248 {
249  ColorContrastContext *s = ctx->priv;
250  AVFrame *frame = arg;
251  const int step = s->step;
252  const int depth = s->depth;
253  const float max = (1 << depth) - 1;
254  const int width = frame->width;
255  const int height = frame->height;
256  const int slice_start = (height * jobnr) / nb_jobs;
257  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
258  const ptrdiff_t linesize = frame->linesize[0] / 2;
259  const uint8_t roffset = s->rgba_map[R];
260  const uint8_t goffset = s->rgba_map[G];
261  const uint8_t boffset = s->rgba_map[B];
262  uint16_t *ptr = (uint16_t *)frame->data[0] + slice_start * linesize;
263  const float preserve = s->preserve;
264  const float gm = s->gm * 0.5f;
265  const float by = s->by * 0.5f;
266  const float rc = s->rc * 0.5f;
267  const float gmw = s->gmw;
268  const float byw = s->byw;
269  const float rcw = s->rcw;
270  const float sum = gmw + byw + rcw;
271  const float scale = 1.f / sum;
272 
273  for (int y = slice_start; y < slice_end && sum > FLT_EPSILON; y++) {
274  for (int x = 0; x < width; x++) {
275  float g = ptr[x * step + goffset];
276  float b = ptr[x * step + boffset];
277  float r = ptr[x * step + roffset];
278  float g0, g1, g2;
279  float b0, b1, b2;
280  float r0, r1, r2;
281  float gd, bd, rd;
282  float gb, br, rg;
283  float nr, ng, nb;
284  float li, lo, lf;
285 
286  PROCESS(max);
287 
288  ptr[x * step + goffset] = av_clip_uintp2_c(ng, depth);
289  ptr[x * step + boffset] = av_clip_uintp2_c(nb, depth);
290  ptr[x * step + roffset] = av_clip_uintp2_c(nr, depth);
291  }
292 
293  ptr += linesize;
294  }
295 
296  return 0;
297 }
298 
300 {
301  AVFilterContext *ctx = link->dst;
302  ColorContrastContext *s = ctx->priv;
303  int res;
304 
305  if (res = ff_filter_execute(ctx, s->do_slice, frame, NULL,
307  return res;
308 
309  return ff_filter_frame(ctx->outputs[0], frame);
310 }
311 
312 static const enum AVPixelFormat pixel_fmts[] = {
325 };
326 
328 {
329  AVFilterContext *ctx = inlink->dst;
330  ColorContrastContext *s = ctx->priv;
332  int planar = desc->flags & AV_PIX_FMT_FLAG_PLANAR;
333 
334  s->step = desc->nb_components;
335  if (inlink->format == AV_PIX_FMT_RGB0 ||
336  inlink->format == AV_PIX_FMT_0RGB ||
337  inlink->format == AV_PIX_FMT_BGR0 ||
338  inlink->format == AV_PIX_FMT_0BGR)
339  s->step = 4;
340 
341  s->depth = desc->comp[0].depth;
342  s->do_slice = s->depth <= 8 ? colorcontrast_slice8 : colorcontrast_slice16;
343  if (!planar)
344  s->do_slice = s->depth <= 8 ? colorcontrast_slice8p : colorcontrast_slice16p;
345 
346  ff_fill_rgba_map(s->rgba_map, inlink->format);
347 
348  return 0;
349 }
350 
352  {
353  .name = "default",
354  .type = AVMEDIA_TYPE_VIDEO,
356  .filter_frame = filter_frame,
357  .config_props = config_input,
358  },
359 };
360 
361 #define OFFSET(x) offsetof(ColorContrastContext, x)
362 #define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
363 
364 static const AVOption colorcontrast_options[] = {
365  { "rc", "set the red-cyan contrast", OFFSET(rc), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, VF },
366  { "gm", "set the green-magenta contrast", OFFSET(gm), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, VF },
367  { "by", "set the blue-yellow contrast", OFFSET(by), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, VF },
368  { "rcw", "set the red-cyan weight", OFFSET(rcw), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 1, VF },
369  { "gmw", "set the green-magenta weight", OFFSET(gmw), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 1, VF },
370  { "byw", "set the blue-yellow weight", OFFSET(byw), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 1, VF },
371  { "pl", "set the amount of preserving lightness", OFFSET(preserve), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 1, VF },
372  { NULL }
373 };
374 
375 AVFILTER_DEFINE_CLASS(colorcontrast);
376 
378  .name = "colorcontrast",
379  .description = NULL_IF_CONFIG_SMALL("Adjust color contrast between RGB components."),
380  .priv_size = sizeof(ColorContrastContext),
381  .priv_class = &colorcontrast_class,
386  .process_command = ff_filter_process_command,
387 };
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:501
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ColorContrastContext::depth
int depth
Definition: vf_colorcontrast.c:42
colorcontrast_slice8
static int colorcontrast_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colorcontrast.c:91
ColorContrastContext::preserve
float preserve
Definition: vf_colorcontrast.c:39
r
const char * r
Definition: vf_curves.c:127
opt.h
VF
#define VF
Definition: vf_colorcontrast.c:362
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1015
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
ColorContrastContext::rcw
float rcw
Definition: vf_colorcontrast.c:38
FILTER_PIXFMTS_ARRAY
#define FILTER_PIXFMTS_ARRAY(array)
Definition: internal.h:162
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
colorcontrast_slice16
static int colorcontrast_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colorcontrast.c:143
ColorContrastContext::step
int step
Definition: vf_colorcontrast.c:41
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:375
pixdesc.h
av_clip_uintp2_c
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
Clip a signed integer to an unsigned power of two range.
Definition: common.h:279
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
PROCESS
#define PROCESS(max)
Definition: vf_colorcontrast.c:54
AVOption
AVOption.
Definition: opt.h:346
b
#define b
Definition: input.c:41
ff_vf_colorcontrast
const AVFilter ff_vf_colorcontrast
Definition: vf_colorcontrast.c:377
float.h
ColorContrastContext::byw
float byw
Definition: vf_colorcontrast.c:38
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
max
#define max(a, b)
Definition: cuda_runtime.h:33
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
video.h
ColorContrastContext::do_slice
int(* do_slice)(AVFilterContext *s, void *arg, int jobnr, int nb_jobs)
Definition: vf_colorcontrast.c:45
OFFSET
#define OFFSET(x)
Definition: vf_colorcontrast.c:361
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:496
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:2035
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
v0
#define v0
Definition: regdef.h:26
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:494
ColorContrastContext
Definition: vf_colorcontrast.c:34
ColorContrastContext::gmw
float gmw
Definition: vf_colorcontrast.c:38
planar
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1<< 16)) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const 
double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, 
AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? 
in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
Definition: audioconvert.c:56
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
av_cold
#define av_cold
Definition: attributes.h:90
ff_video_default_filterpad
const AVFilterPad ff_video_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_VIDEO.
Definition: video.c:37
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:498
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:499
g
const char * g
Definition: vf_curves.c:128
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:1730
ColorContrastContext::rc
float rc
Definition: vf_colorcontrast.c:37
ctx
AVFormatContext * ctx
Definition: movenc.c:49
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(colorcontrast)
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
arg
const char * arg
Definition: jacosubdec.c:67
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:497
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:468
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:469
NULL
#define NULL
Definition: coverity.c:32
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:265
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:493
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
f
f
Definition: af_crystalizer.c:121
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ColorContrastContext::rgba_map
uint8_t rgba_map[4]
Definition: vf_colorcontrast.c:43
AV_PIX_FMT_RGB48
#define AV_PIX_FMT_RGB48
Definition: pixfmt.h:464
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:887
height
#define height
b2
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:2036
ColorContrastContext::gm
float gm
Definition: vf_colorcontrast.c:37
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:147
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:99
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:238
AV_PIX_FMT_BGRA64
#define AV_PIX_FMT_BGRA64
Definition: pixfmt.h:473
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:495
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:827
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *frame)
Definition: vf_colorcontrast.c:299
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
colorcontrast_slice16p
static int colorcontrast_slice16p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colorcontrast.c:247
R
#define R
Definition: vf_colorcontrast.c:30
AVFilter
Filter definition.
Definition: avfilter.h:166
colorcontrast_options
static const AVOption colorcontrast_options[]
Definition: vf_colorcontrast.c:364
lerpf
static float lerpf(float v0, float v1, float f)
Definition: vf_colorcontrast.c:49
AV_PIX_FMT_0BGR
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
Definition: pixfmt.h:264
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
pixel_fmts
static enum AVPixelFormat pixel_fmts[]
Definition: vf_colorcontrast.c:312
colorcontrast_slice8p
static int colorcontrast_slice8p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_colorcontrast.c:197
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
colorcontrast_inputs
static const AVFilterPad colorcontrast_inputs[]
Definition: vf_colorcontrast.c:351
avfilter.h
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: dec.c:688
av_clip_uint8
#define av_clip_uint8
Definition: common.h:105
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
G
#define G
Definition: vf_colorcontrast.c:31
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
desc
const char * desc
Definition: libsvtav1.c:75
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:183
ff_fill_rgba_map
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:35
AV_PIX_FMT_0RGB
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:262
B
#define B
Definition: vf_colorcontrast.c:32
b0
static double b0(void *priv, double x, double y)
Definition: vf_xfade.c:2034
ColorContrastContext::by
float by
Definition: vf_colorcontrast.c:37
drawutils.h
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:134
int
int
Definition: ffmpeg_filter.c:424
config_input
static av_cold int config_input(AVFilterLink *inlink)
Definition: vf_colorcontrast.c:327
AVFILTERPAD_FLAG_NEEDS_WRITABLE
#define AVFILTERPAD_FLAG_NEEDS_WRITABLE
The filter expects writable frames from its input link, duplicating data buffers if needed.
Definition: internal.h:52