FFmpeg
vf_vibrance.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/opt.h"
22 #include "libavutil/imgutils.h"
23 #include "avfilter.h"
24 #include "drawutils.h"
25 #include "formats.h"
26 #include "internal.h"
27 #include "video.h"
28 
/* Indices into rgba_map[] (filled by ff_fill_rgba_map()) giving the packed
 * per-pixel offset of each channel. */
#define R 0
#define G 1
#define B 2
32 
33 typedef struct VibranceContext {
34  const AVClass *class;
35 
36  float intensity;
37  float balance[3];
38  float lcoeffs[3];
39  int alternate;
40 
41  int step;
42  int depth;
44 
46  int jobnr, int nb_jobs);
48 
/* Linear interpolation: move v0 towards v1 by fraction f.
 * Kept in the exact a + (b-a)*f form so float rounding matches callers'
 * expectations. */
static inline float lerpf(float v0, float v1, float f)
{
    const float delta = v1 - v0;

    return delta * f + v0;
}
53 
54 static int vibrance_slice8(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
55 {
56  VibranceContext *s = avctx->priv;
57  AVFrame *frame = arg;
58  const int width = frame->width;
59  const int height = frame->height;
60  const float scale = 1.f / 255.f;
61  const float gc = s->lcoeffs[0];
62  const float bc = s->lcoeffs[1];
63  const float rc = s->lcoeffs[2];
64  const float intensity = s->intensity;
65  const float alternate = s->alternate ? 1.f : -1.f;
66  const float gintensity = intensity * s->balance[0];
67  const float bintensity = intensity * s->balance[1];
68  const float rintensity = intensity * s->balance[2];
69  const float sgintensity = alternate * FFSIGN(gintensity);
70  const float sbintensity = alternate * FFSIGN(bintensity);
71  const float srintensity = alternate * FFSIGN(rintensity);
72  const int slice_start = (height * jobnr) / nb_jobs;
73  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
74  const int glinesize = frame->linesize[0];
75  const int blinesize = frame->linesize[1];
76  const int rlinesize = frame->linesize[2];
77  uint8_t *gptr = frame->data[0] + slice_start * glinesize;
78  uint8_t *bptr = frame->data[1] + slice_start * blinesize;
79  uint8_t *rptr = frame->data[2] + slice_start * rlinesize;
80 
81  for (int y = slice_start; y < slice_end; y++) {
82  for (int x = 0; x < width; x++) {
83  float g = gptr[x] * scale;
84  float b = bptr[x] * scale;
85  float r = rptr[x] * scale;
86  float max_color = FFMAX3(r, g, b);
87  float min_color = FFMIN3(r, g, b);
88  float color_saturation = max_color - min_color;
89  float luma = g * gc + r * rc + b * bc;
90  const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
91  const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
92  const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);
93 
94  g = lerpf(luma, g, cg);
95  b = lerpf(luma, b, cb);
96  r = lerpf(luma, r, cr);
97 
98  gptr[x] = av_clip_uint8(g * 255.f);
99  bptr[x] = av_clip_uint8(b * 255.f);
100  rptr[x] = av_clip_uint8(r * 255.f);
101  }
102 
103  gptr += glinesize;
104  bptr += blinesize;
105  rptr += rlinesize;
106  }
107 
108  return 0;
109 }
110 
111 static int vibrance_slice16(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
112 {
113  VibranceContext *s = avctx->priv;
114  AVFrame *frame = arg;
115  const int depth = s->depth;
116  const float max = (1 << depth) - 1;
117  const float scale = 1.f / max;
118  const float gc = s->lcoeffs[0];
119  const float bc = s->lcoeffs[1];
120  const float rc = s->lcoeffs[2];
121  const int width = frame->width;
122  const int height = frame->height;
123  const float intensity = s->intensity;
124  const float alternate = s->alternate ? 1.f : -1.f;
125  const float gintensity = intensity * s->balance[0];
126  const float bintensity = intensity * s->balance[1];
127  const float rintensity = intensity * s->balance[2];
128  const float sgintensity = alternate * FFSIGN(gintensity);
129  const float sbintensity = alternate * FFSIGN(bintensity);
130  const float srintensity = alternate * FFSIGN(rintensity);
131  const int slice_start = (height * jobnr) / nb_jobs;
132  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
133  const int glinesize = frame->linesize[0] / 2;
134  const int blinesize = frame->linesize[1] / 2;
135  const int rlinesize = frame->linesize[2] / 2;
136  uint16_t *gptr = (uint16_t *)frame->data[0] + slice_start * glinesize;
137  uint16_t *bptr = (uint16_t *)frame->data[1] + slice_start * blinesize;
138  uint16_t *rptr = (uint16_t *)frame->data[2] + slice_start * rlinesize;
139 
140  for (int y = slice_start; y < slice_end; y++) {
141  for (int x = 0; x < width; x++) {
142  float g = gptr[x] * scale;
143  float b = bptr[x] * scale;
144  float r = rptr[x] * scale;
145  float max_color = FFMAX3(r, g, b);
146  float min_color = FFMIN3(r, g, b);
147  float color_saturation = max_color - min_color;
148  float luma = g * gc + r * rc + b * bc;
149  const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
150  const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
151  const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);
152 
153  g = lerpf(luma, g, cg);
154  b = lerpf(luma, b, cb);
155  r = lerpf(luma, r, cr);
156 
157  gptr[x] = av_clip_uintp2_c(g * max, depth);
158  bptr[x] = av_clip_uintp2_c(b * max, depth);
159  rptr[x] = av_clip_uintp2_c(r * max, depth);
160  }
161 
162  gptr += glinesize;
163  bptr += blinesize;
164  rptr += rlinesize;
165  }
166 
167  return 0;
168 }
169 
170 static int vibrance_slice8p(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
171 {
172  VibranceContext *s = avctx->priv;
173  AVFrame *frame = arg;
174  const int step = s->step;
175  const int width = frame->width;
176  const int height = frame->height;
177  const float scale = 1.f / 255.f;
178  const float gc = s->lcoeffs[0];
179  const float bc = s->lcoeffs[1];
180  const float rc = s->lcoeffs[2];
181  const uint8_t roffset = s->rgba_map[R];
182  const uint8_t goffset = s->rgba_map[G];
183  const uint8_t boffset = s->rgba_map[B];
184  const float intensity = s->intensity;
185  const float alternate = s->alternate ? 1.f : -1.f;
186  const float gintensity = intensity * s->balance[0];
187  const float bintensity = intensity * s->balance[1];
188  const float rintensity = intensity * s->balance[2];
189  const float sgintensity = alternate * FFSIGN(gintensity);
190  const float sbintensity = alternate * FFSIGN(bintensity);
191  const float srintensity = alternate * FFSIGN(rintensity);
192  const int slice_start = (height * jobnr) / nb_jobs;
193  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
194  const int linesize = frame->linesize[0];
195  uint8_t *ptr = frame->data[0] + slice_start * linesize;
196 
197  for (int y = slice_start; y < slice_end; y++) {
198  for (int x = 0; x < width; x++) {
199  float g = ptr[x * step + goffset] * scale;
200  float b = ptr[x * step + boffset] * scale;
201  float r = ptr[x * step + roffset] * scale;
202  float max_color = FFMAX3(r, g, b);
203  float min_color = FFMIN3(r, g, b);
204  float color_saturation = max_color - min_color;
205  float luma = g * gc + r * rc + b * bc;
206  const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
207  const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
208  const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);
209 
210  g = lerpf(luma, g, cg);
211  b = lerpf(luma, b, cb);
212  r = lerpf(luma, r, cr);
213 
214  ptr[x * step + goffset] = av_clip_uint8(g * 255.f);
215  ptr[x * step + boffset] = av_clip_uint8(b * 255.f);
216  ptr[x * step + roffset] = av_clip_uint8(r * 255.f);
217  }
218 
219  ptr += linesize;
220  }
221 
222  return 0;
223 }
224 
225 static int vibrance_slice16p(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
226 {
227  VibranceContext *s = avctx->priv;
228  AVFrame *frame = arg;
229  const int step = s->step;
230  const int depth = s->depth;
231  const float max = (1 << depth) - 1;
232  const float scale = 1.f / max;
233  const float gc = s->lcoeffs[0];
234  const float bc = s->lcoeffs[1];
235  const float rc = s->lcoeffs[2];
236  const uint8_t roffset = s->rgba_map[R];
237  const uint8_t goffset = s->rgba_map[G];
238  const uint8_t boffset = s->rgba_map[B];
239  const int width = frame->width;
240  const int height = frame->height;
241  const float intensity = s->intensity;
242  const float alternate = s->alternate ? 1.f : -1.f;
243  const float gintensity = intensity * s->balance[0];
244  const float bintensity = intensity * s->balance[1];
245  const float rintensity = intensity * s->balance[2];
246  const float sgintensity = alternate * FFSIGN(gintensity);
247  const float sbintensity = alternate * FFSIGN(bintensity);
248  const float srintensity = alternate * FFSIGN(rintensity);
249  const int slice_start = (height * jobnr) / nb_jobs;
250  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
251  const int linesize = frame->linesize[0] / 2;
252  uint16_t *ptr = (uint16_t *)frame->data[0] + slice_start * linesize;
253 
254  for (int y = slice_start; y < slice_end; y++) {
255  for (int x = 0; x < width; x++) {
256  float g = ptr[x * step + goffset] * scale;
257  float b = ptr[x * step + boffset] * scale;
258  float r = ptr[x * step + roffset] * scale;
259  float max_color = FFMAX3(r, g, b);
260  float min_color = FFMIN3(r, g, b);
261  float color_saturation = max_color - min_color;
262  float luma = g * gc + r * rc + b * bc;
263  const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
264  const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
265  const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);
266 
267  g = lerpf(luma, g, cg);
268  b = lerpf(luma, b, cb);
269  r = lerpf(luma, r, cr);
270 
271  ptr[x * step + goffset] = av_clip_uintp2_c(g * max, depth);
272  ptr[x * step + boffset] = av_clip_uintp2_c(b * max, depth);
273  ptr[x * step + roffset] = av_clip_uintp2_c(r * max, depth);
274  }
275 
276  ptr += linesize;
277  }
278 
279  return 0;
280 }
281 
283 {
284  AVFilterContext *avctx = link->dst;
285  VibranceContext *s = avctx->priv;
286  int res;
287 
288  if (res = avctx->internal->execute(avctx, s->do_slice, frame, NULL,
289  FFMIN(frame->height, ff_filter_get_nb_threads(avctx))))
290  return res;
291 
292  return ff_filter_frame(avctx->outputs[0], frame);
293 }
294 
296 {
297  static const enum AVPixelFormat pixel_fmts[] = {
310  };
311 
313 
314  formats = ff_make_format_list(pixel_fmts);
315  if (!formats)
316  return AVERROR(ENOMEM);
317 
318  return ff_set_common_formats(avctx, formats);
319 }
320 
322 {
323  AVFilterContext *avctx = inlink->dst;
324  VibranceContext *s = avctx->priv;
326  int planar = desc->flags & AV_PIX_FMT_FLAG_PLANAR;
327 
328  s->step = desc->nb_components;
329  if (inlink->format == AV_PIX_FMT_RGB0 ||
330  inlink->format == AV_PIX_FMT_0RGB ||
331  inlink->format == AV_PIX_FMT_BGR0 ||
332  inlink->format == AV_PIX_FMT_0BGR)
333  s->step = 4;
334 
335  s->depth = desc->comp[0].depth;
336  s->do_slice = s->depth <= 8 ? vibrance_slice8 : vibrance_slice16;
337  if (!planar)
338  s->do_slice = s->depth <= 8 ? vibrance_slice8p : vibrance_slice16p;
339 
340  ff_fill_rgba_map(s->rgba_map, inlink->format);
341 
342  return 0;
343 }
344 
/* Input pad: the filter modifies the frame in place, hence needs_writable. */
static const AVFilterPad vibrance_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_VIDEO,
        .needs_writable = 1,
        .filter_frame   = filter_frame,
        .config_props   = config_input,
    },
    { NULL }
};
355 
/* Single pass-through video output pad. */
static const AVFilterPad vibrance_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
363 
/* Offset of an option's backing field inside VibranceContext. */
#define OFFSET(x) offsetof(VibranceContext, x)
/* Standard video-filter option flags; RUNTIME_PARAM allows live updates. */
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
366 
/* Options.  Note the index order: balance[] and lcoeffs[] store G at [0],
 * B at [1] and R at [2], matching the planar GBR plane order, so "rbal"
 * maps to balance[2], "gbal" to balance[0], etc. */
static const AVOption vibrance_options[] = {
    { "intensity", "set the intensity value", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0}, -2, 2, VF },
    { "rbal", "set the red balance value", OFFSET(balance[2]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF },
    { "gbal", "set the green balance value", OFFSET(balance[0]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF },
    { "bbal", "set the blue balance value", OFFSET(balance[1]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF },
    { "rlum", "set the red luma coefficient", OFFSET(lcoeffs[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.072186}, 0, 1, VF },
    { "glum", "set the green luma coefficient", OFFSET(lcoeffs[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.715158}, 0, 1, VF },
    { "blum", "set the blue luma coefficient", OFFSET(lcoeffs[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.212656}, 0, 1, VF },
    { "alternate", "use alternate colors", OFFSET(alternate), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
    { NULL }
};
378 
379 AVFILTER_DEFINE_CLASS(vibrance);
380 
382  .name = "vibrance",
383  .description = NULL_IF_CONFIG_SMALL("Boost or alter saturation."),
384  .priv_size = sizeof(VibranceContext),
385  .priv_class = &vibrance_class,
391 };
formats
formats
Definition: signature.h:48
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:421
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:286
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:215
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1096
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
VibranceContext::alternate
int alternate
Definition: vf_vibrance.c:39
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
av_clip_uintp2_c
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
Clip a signed integer to an unsigned power of two range.
Definition: common.h:302
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VF
#define VF
Definition: vf_vibrance.c:365
AVOption
AVOption.
Definition: opt.h:248
b
#define b
Definition: input.c:41
vibrance_options
static const AVOption vibrance_options[]
Definition: vf_vibrance.c:367
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
max
#define max(a, b)
Definition: cuda_runtime.h:33
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:149
VibranceContext::balance
float balance[3]
Definition: vf_vibrance.c:37
video.h
VibranceContext::depth
int depth
Definition: vf_vibrance.c:42
VibranceContext::do_slice
int(* do_slice)(AVFilterContext *s, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:45
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:65
formats.h
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:417
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
AVFilterContext::priv
void * priv
private data for use by the filter
Definition: avfilter.h:356
v0
#define v0
Definition: regdef.h:26
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:415
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
FFMIN3
#define FFMIN3(a, b, c)
Definition: common.h:106
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
av_cold
#define av_cold
Definition: attributes.h:90
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:587
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:419
width
#define width
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:104
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:420
VibranceContext::intensity
float intensity
Definition: vf_vibrance.c:36
g
const char * g
Definition: vf_curves.c:117
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2033
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(vibrance)
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
vibrance_inputs
static const AVFilterPad vibrance_inputs[]
Definition: vf_vibrance.c:345
f
#define f(width, name)
Definition: cbs_vp9.c:255
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
arg
const char * arg
Definition: jacosubdec.c:66
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:418
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:389
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:390
NULL
#define NULL
Definition: coverity.c:32
vibrance_slice8
static int vibrance_slice8(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:54
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:240
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:414
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
VibranceContext::rgba_map
uint8_t rgba_map[4]
Definition: vf_vibrance.c:43
lerpf
static float lerpf(float v0, float v1, float f)
Definition: vf_vibrance.c:49
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
process_command
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
Definition: af_acrusher.c:336
R
#define R
Definition: vf_vibrance.c:29
AV_PIX_FMT_RGB48
#define AV_PIX_FMT_RGB48
Definition: pixfmt.h:385
query_formats
static av_cold int query_formats(AVFilterContext *avctx)
Definition: vf_vibrance.c:295
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:882
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:238
VibranceContext::lcoeffs
float lcoeffs[3]
Definition: vf_vibrance.c:38
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:126
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:92
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:228
vibrance_slice16
static int vibrance_slice16(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:111
VibranceContext::step
int step
Definition: vf_vibrance.c:41
AV_PIX_FMT_BGRA64
#define AV_PIX_FMT_BGRA64
Definition: pixfmt.h:394
OFFSET
#define OFFSET(x)
Definition: vf_vibrance.c:364
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
ff_vf_vibrance
AVFilter ff_vf_vibrance
Definition: vf_vibrance.c:381
uint8_t
uint8_t
Definition: audio_convert.c:194
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *frame)
Definition: vf_vibrance.c:282
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
AVFilter
Filter definition.
Definition: avfilter.h:145
vibrance_slice16p
static int vibrance_slice16p(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:225
AV_PIX_FMT_0BGR
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
Definition: pixfmt.h:239
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
config_input
static av_cold int config_input(AVFilterLink *inlink)
Definition: vf_vibrance.c:321
AVFilterInternal::execute
avfilter_execute_func * execute
Definition: internal.h:136
vibrance_outputs
static const AVFilterPad vibrance_outputs[]
Definition: vf_vibrance.c:356
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
avfilter.h
AVFilterContext::internal
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:381
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:144
av_clip_uint8
#define av_clip_uint8
Definition: common.h:128
AVFilterContext
An instance of a filter.
Definition: avfilter.h:341
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
G
#define G
Definition: vf_vibrance.c:30
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:216
ff_fill_rgba_map
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:35
imgutils.h
B
#define B
Definition: vf_vibrance.c:31
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AV_PIX_FMT_0RGB
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:237
VibranceContext
Definition: vf_vibrance.c:33
drawutils.h
planar
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1<< 16)) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const 
double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, 
AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const 
int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
Definition: audioconvert.c:56
int
int
Definition: ffmpeg_filter.c:170
vibrance_slice8p
static int vibrance_slice8p(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:170
AVFilterContext::outputs
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:353