FFmpeg
vf_vibrance.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/opt.h"
22 #include "libavutil/imgutils.h"
23 #include "avfilter.h"
24 #include "formats.h"
25 #include "internal.h"
26 #include "video.h"
27 
28 typedef struct VibranceContext {
29  const AVClass *class;
30 
31  float intensity;
32  float balance[3];
33  float lcoeffs[3];
34  int alternate;
35 
36  int depth;
37 
39  int jobnr, int nb_jobs);
41 
/* Linear interpolation: returns a when t == 0 and b when t == 1. */
static inline float lerpf(float a, float b, float t)
{
    const float delta = b - a;
    return a + delta * t;
}
46 
47 static int vibrance_slice8(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
48 {
49  VibranceContext *s = avctx->priv;
50  AVFrame *frame = arg;
51  const int width = frame->width;
52  const int height = frame->height;
53  const float scale = 1.f / 255.f;
54  const float gc = s->lcoeffs[0];
55  const float bc = s->lcoeffs[1];
56  const float rc = s->lcoeffs[2];
57  const float intensity = s->intensity;
58  const float alternate = s->alternate ? 1.f : -1.f;
59  const float gintensity = intensity * s->balance[0];
60  const float bintensity = intensity * s->balance[1];
61  const float rintensity = intensity * s->balance[2];
62  const float sgintensity = alternate * FFSIGN(gintensity);
63  const float sbintensity = alternate * FFSIGN(bintensity);
64  const float srintensity = alternate * FFSIGN(rintensity);
65  const int slice_start = (height * jobnr) / nb_jobs;
66  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
67  const int glinesize = frame->linesize[0];
68  const int blinesize = frame->linesize[1];
69  const int rlinesize = frame->linesize[2];
70  uint8_t *gptr = frame->data[0] + slice_start * glinesize;
71  uint8_t *bptr = frame->data[1] + slice_start * blinesize;
72  uint8_t *rptr = frame->data[2] + slice_start * rlinesize;
73 
74  for (int y = slice_start; y < slice_end; y++) {
75  for (int x = 0; x < width; x++) {
76  float g = gptr[x] * scale;
77  float b = bptr[x] * scale;
78  float r = rptr[x] * scale;
79  float max_color = FFMAX3(r, g, b);
80  float min_color = FFMIN3(r, g, b);
81  float color_saturation = max_color - min_color;
82  float luma = g * gc + r * rc + b * bc;
83  const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
84  const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
85  const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);
86 
87  g = lerpf(luma, g, cg);
88  b = lerpf(luma, b, cb);
89  r = lerpf(luma, r, cr);
90 
91  gptr[x] = av_clip_uint8(g * 255.f);
92  bptr[x] = av_clip_uint8(b * 255.f);
93  rptr[x] = av_clip_uint8(r * 255.f);
94  }
95 
96  gptr += glinesize;
97  bptr += blinesize;
98  rptr += rlinesize;
99  }
100 
101  return 0;
102 }
103 
104 static int vibrance_slice16(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
105 {
106  VibranceContext *s = avctx->priv;
107  AVFrame *frame = arg;
108  const int depth = s->depth;
109  const float max = (1 << depth) - 1;
110  const float scale = 1.f / max;
111  const float gc = s->lcoeffs[0];
112  const float bc = s->lcoeffs[1];
113  const float rc = s->lcoeffs[2];
114  const int width = frame->width;
115  const int height = frame->height;
116  const float intensity = s->intensity;
117  const float alternate = s->alternate ? 1.f : -1.f;
118  const float gintensity = intensity * s->balance[0];
119  const float bintensity = intensity * s->balance[1];
120  const float rintensity = intensity * s->balance[2];
121  const float sgintensity = alternate * FFSIGN(gintensity);
122  const float sbintensity = alternate * FFSIGN(bintensity);
123  const float srintensity = alternate * FFSIGN(rintensity);
124  const int slice_start = (height * jobnr) / nb_jobs;
125  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
126  const int glinesize = frame->linesize[0] / 2;
127  const int blinesize = frame->linesize[1] / 2;
128  const int rlinesize = frame->linesize[2] / 2;
129  uint16_t *gptr = (uint16_t *)frame->data[0] + slice_start * glinesize;
130  uint16_t *bptr = (uint16_t *)frame->data[1] + slice_start * blinesize;
131  uint16_t *rptr = (uint16_t *)frame->data[2] + slice_start * rlinesize;
132 
133  for (int y = slice_start; y < slice_end; y++) {
134  for (int x = 0; x < width; x++) {
135  float g = gptr[x] * scale;
136  float b = bptr[x] * scale;
137  float r = rptr[x] * scale;
138  float max_color = FFMAX3(r, g, b);
139  float min_color = FFMIN3(r, g, b);
140  float color_saturation = max_color - min_color;
141  float luma = g * gc + r * rc + b * bc;
142  const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
143  const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
144  const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);
145 
146  g = lerpf(luma, g, cg);
147  b = lerpf(luma, b, cb);
148  r = lerpf(luma, r, cr);
149 
150  gptr[x] = av_clip_uintp2_c(g * max, depth);
151  bptr[x] = av_clip_uintp2_c(b * max, depth);
152  rptr[x] = av_clip_uintp2_c(r * max, depth);
153  }
154 
155  gptr += glinesize;
156  bptr += blinesize;
157  rptr += rlinesize;
158  }
159 
160  return 0;
161 }
162 
164 {
165  AVFilterContext *avctx = link->dst;
166  VibranceContext *s = avctx->priv;
167  int res;
168 
169  if (res = avctx->internal->execute(avctx, s->do_slice, frame, NULL,
170  FFMIN(frame->height, ff_filter_get_nb_threads(avctx))))
171  return res;
172 
173  return ff_filter_frame(avctx->outputs[0], frame);
174 }
175 
177 {
178  static const enum AVPixelFormat pixel_fmts[] = {
184  };
185 
187 
188  formats = ff_make_format_list(pixel_fmts);
189  if (!formats)
190  return AVERROR(ENOMEM);
191 
192  return ff_set_common_formats(avctx, formats);
193 }
194 
196 {
197  AVFilterContext *avctx = inlink->dst;
198  VibranceContext *s = avctx->priv;
200 
201  s->depth = desc->comp[0].depth;
203 
204  return 0;
205 }
206 
/* Single video input; needs_writable because the slice workers modify the
 * frame planes in place. */
static const AVFilterPad vibrance_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .needs_writable = 1,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
217 
/* Single pass-through video output. */
static const AVFilterPad vibrance_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
225 
#define OFFSET(x) offsetof(VibranceContext, x)
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User options. Note the array index order: balance[] and lcoeffs[] are
 * stored {G, B, R} to match the plane order of the GBR(A) pixel formats.
 * NOTE(review): the rlum/blum defaults look swapped relative to BT.709
 * (R ~= 0.2126, B ~= 0.0722) — confirm against upstream before relying
 * on the default luma weighting. */
static const AVOption vibrance_options[] = {
    { "intensity", "set the intensity value", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0}, -2, 2, VF },
    { "rbal", "set the red balance value", OFFSET(balance[2]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF },
    { "gbal", "set the green balance value", OFFSET(balance[0]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF },
    { "bbal", "set the blue balance value", OFFSET(balance[1]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF },
    { "rlum", "set the red luma coefficient", OFFSET(lcoeffs[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.072186}, 0, 1, VF },
    { "glum", "set the green luma coefficient", OFFSET(lcoeffs[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.715158}, 0, 1, VF },
    { "blum", "set the blue luma coefficient", OFFSET(lcoeffs[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.212656}, 0, 1, VF },
    { "alternate", "use alternate colors", OFFSET(alternate), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
    { NULL }
};

AVFILTER_DEFINE_CLASS(vibrance);
242 
244  .name = "vibrance",
245  .description = NULL_IF_CONFIG_SMALL("Boost or alter saturation."),
246  .priv_size = sizeof(VibranceContext),
247  .priv_class = &vibrance_class,
249  .inputs = vibrance_inputs,
250  .outputs = vibrance_outputs,
252 };
AVFILTER_DEFINE_CLASS(vibrance)
#define NULL
Definition: coverity.c:32
static av_cold int query_formats(AVFilterContext *avctx)
Definition: vf_vibrance.c:176
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AVOption.
Definition: opt.h:246
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:407
misc image utilities
Main libavfilter public API header.
const char * g
Definition: vf_curves.c:115
const char * desc
Definition: nvenc.c:68
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
GLfloat v0
Definition: opengl_enc.c:106
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:125
const char * name
Pad name.
Definition: internal.h:60
int(* do_slice)(AVFilterContext *s, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:38
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:139
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1093
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_cold
Definition: attributes.h:82
AVOptions.
#define f(width, name)
Definition: cbs_vp9.c:255
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
#define height
float lcoeffs[3]
Definition: vf_vibrance.c:33
#define FFMIN3(a, b, c)
Definition: common.h:97
#define max(a, b)
Definition: cuda_runtime.h:33
static int vibrance_slice8(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:47
#define VF
Definition: vf_vibrance.c:227
A filter pad used for either input or output.
Definition: internal.h:54
int width
Definition: frame.h:353
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:569
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
const char * r
Definition: vf_curves.c:114
void * priv
private data for use by the filter
Definition: avfilter.h:353
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
const char * arg
Definition: jacosubdec.c:66
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:408
static const AVOption vibrance_options[]
Definition: vf_vibrance.c:229
float balance[3]
Definition: vf_vibrance.c:32
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:409
#define b
Definition: input.c:41
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
#define FFMIN(a, b)
Definition: common.h:96
static const AVFilterPad vibrance_inputs[]
Definition: vf_vibrance.c:207
#define width
#define FFSIGN(a)
Definition: common.h:73
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
static int filter_frame(AVFilterLink *link, AVFrame *frame)
Definition: vf_vibrance.c:163
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
const char * name
Filter name.
Definition: avfilter.h:148
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
static const AVFilterPad vibrance_outputs[]
Definition: vf_vibrance.c:218
static av_cold int config_input(AVFilterLink *inlink)
Definition: vf_vibrance.c:195
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
static float lerpf(float v0, float v1, float f)
Definition: vf_vibrance.c:42
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:378
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
int
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
static int vibrance_slice16(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:104
avfilter_execute_func * execute
Definition: internal.h:155
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2036
AVFilter ff_vf_vibrance
Definition: vf_vibrance.c:243
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:338
int height
Definition: frame.h:353
formats
Definition: signature.h:48
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:140
int depth
Number of bits in the component.
Definition: pixdesc.h:58
#define OFFSET(x)
Definition: vf_vibrance.c:226
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:95
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
Clip a signed integer to an unsigned power of two range.
Definition: common.h:229