FFmpeg
vf_vibrance.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/opt.h"
22 #include "libavutil/imgutils.h"
23 #include "avfilter.h"
24 #include "drawutils.h"
25 #include "formats.h"
26 #include "internal.h"
27 #include "video.h"
28 
/* Symbolic indices into rgba_map[] (used by the packed-pixel code paths
 * to locate each channel inside a pixel after ff_fill_rgba_map()). */
#define R 0
#define G 1
#define B 2
#define A 3
33 
34 typedef struct VibranceContext {
35  const AVClass *class;
36 
37  float intensity;
38  float balance[3];
39  float lcoeffs[3];
40  int alternate;
41 
42  int step;
43  int depth;
44  uint8_t rgba_map[4];
45 
47  int jobnr, int nb_jobs);
49 
/**
 * Linear interpolation between two floats.
 *
 * @param v0 value returned when f == 0
 * @param v1 value returned when f == 1
 * @param f  blend factor; not clamped, so values outside [0,1] extrapolate
 * @return v0 + (v1 - v0) * f
 */
static inline float lerpf(float v0, float v1, float f)
{
    const float delta = v1 - v0;

    return v0 + delta * f;
}
54 
/* Payload handed to the slice workers through ff_filter_execute().
 * out may alias in when the input frame was writable (in-place filtering);
 * the slice functions test "frame != in" before touching alpha. */
typedef struct ThreadData {
    AVFrame *out, *in;
} ThreadData;
58 
59 static int vibrance_slice8(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
60 {
61  VibranceContext *s = avctx->priv;
62  ThreadData *td = arg;
63  AVFrame *frame = td->out;
64  AVFrame *in = td->in;
65  const int width = frame->width;
66  const int height = frame->height;
67  const float scale = 1.f / 255.f;
68  const float gc = s->lcoeffs[0];
69  const float bc = s->lcoeffs[1];
70  const float rc = s->lcoeffs[2];
71  const float intensity = s->intensity;
72  const float alternate = s->alternate ? 1.f : -1.f;
73  const float gintensity = intensity * s->balance[0];
74  const float bintensity = intensity * s->balance[1];
75  const float rintensity = intensity * s->balance[2];
76  const float sgintensity = alternate * FFSIGN(gintensity);
77  const float sbintensity = alternate * FFSIGN(bintensity);
78  const float srintensity = alternate * FFSIGN(rintensity);
79  const int slice_start = (height * jobnr) / nb_jobs;
80  const int slice_end = (height * (jobnr + 1)) / nb_jobs;
81  const int glinesize = frame->linesize[0];
82  const int blinesize = frame->linesize[1];
83  const int rlinesize = frame->linesize[2];
84  const int alinesize = frame->linesize[3];
85  const int gslinesize = in->linesize[0];
86  const int bslinesize = in->linesize[1];
87  const int rslinesize = in->linesize[2];
88  const int aslinesize = in->linesize[3];
89  const uint8_t *gsrc = in->data[0] + slice_start * glinesize;
90  const uint8_t *bsrc = in->data[1] + slice_start * blinesize;
91  const uint8_t *rsrc = in->data[2] + slice_start * rlinesize;
92  uint8_t *gptr = frame->data[0] + slice_start * glinesize;
93  uint8_t *bptr = frame->data[1] + slice_start * blinesize;
94  uint8_t *rptr = frame->data[2] + slice_start * rlinesize;
95  const uint8_t *asrc = in->data[3];
96  uint8_t *aptr = frame->data[3];
97 
98  for (int y = slice_start; y < slice_end; y++) {
99  for (int x = 0; x < width; x++) {
100  float g = gsrc[x] * scale;
101  float b = bsrc[x] * scale;
102  float r = rsrc[x] * scale;
103  float max_color = FFMAX3(r, g, b);
104  float min_color = FFMIN3(r, g, b);
105  float color_saturation = max_color - min_color;
106  float luma = g * gc + r * rc + b * bc;
107  const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
108  const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
109  const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);
110 
111  g = lerpf(luma, g, cg);
112  b = lerpf(luma, b, cb);
113  r = lerpf(luma, r, cr);
114 
115  gptr[x] = av_clip_uint8(g * 255.f);
116  bptr[x] = av_clip_uint8(b * 255.f);
117  rptr[x] = av_clip_uint8(r * 255.f);
118  }
119 
120  if (aptr && alinesize && frame != in)
121  memcpy(aptr + alinesize * y, asrc + aslinesize * y, width);
122 
123  gsrc += gslinesize;
124  bsrc += bslinesize;
125  rsrc += rslinesize;
126  gptr += glinesize;
127  bptr += blinesize;
128  rptr += rlinesize;
129  }
130 
131  return 0;
132 }
133 
/**
 * Process one horizontal slice of a planar high-bit-depth (> 8 bit) GBR(A)
 * frame.  Same algorithm as vibrance_slice8, but samples are uint16_t and
 * are normalized by the format's actual maximum value (2^depth - 1).
 *
 * @param avctx  filter context (priv is VibranceContext)
 * @param arg    ThreadData with in/out frames
 * @param jobnr  index of this job
 * @param nb_jobs total number of parallel jobs
 * @return 0
 */
static int vibrance_slice16(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    ThreadData *td = arg;
    AVFrame *frame = td->out;
    AVFrame *in = td->in;
    const int depth = s->depth;
    const float max = (1 << depth) - 1;
    const float scale = 1.f / max;
    /* planar GBR layout: plane 0 = G, 1 = B, 2 = R, 3 = A */
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
    const int width = frame->width;
    const int height = frame->height;
    const float intensity = s->intensity;
    const float alternate = s->alternate ? 1.f : -1.f;
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const float sgintensity = alternate * FFSIGN(gintensity);
    const float sbintensity = alternate * FFSIGN(bintensity);
    const float srintensity = alternate * FFSIGN(rintensity);
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    /* linesize[] is in bytes; divide by 2 to step uint16_t pointers */
    const int gslinesize = in->linesize[0] / 2;
    const int bslinesize = in->linesize[1] / 2;
    const int rslinesize = in->linesize[2] / 2;
    const int aslinesize = in->linesize[3] / 2;
    const int glinesize = frame->linesize[0] / 2;
    const int blinesize = frame->linesize[1] / 2;
    const int rlinesize = frame->linesize[2] / 2;
    const int alinesize = frame->linesize[3] / 2;
    const uint16_t *gsrc = (const uint16_t *)in->data[0] + slice_start * gslinesize;
    const uint16_t *bsrc = (const uint16_t *)in->data[1] + slice_start * bslinesize;
    const uint16_t *rsrc = (const uint16_t *)in->data[2] + slice_start * rslinesize;
    uint16_t *gptr = (uint16_t *)frame->data[0] + slice_start * glinesize;
    uint16_t *bptr = (uint16_t *)frame->data[1] + slice_start * blinesize;
    uint16_t *rptr = (uint16_t *)frame->data[2] + slice_start * rlinesize;
    const uint16_t *asrc = (const uint16_t *)in->data[3];
    uint16_t *aptr = (uint16_t *)frame->data[3];

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            /* normalize samples to [0,1] */
            float g = gsrc[x] * scale;
            float b = bsrc[x] * scale;
            float r = rsrc[x] * scale;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            /* per-channel gain; its effect fades as saturation approaches 1 */
            const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
            const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
            const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);

            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);

            gptr[x] = av_clip_uintp2_c(g * max, depth);
            bptr[x] = av_clip_uintp2_c(b * max, depth);
            rptr[x] = av_clip_uintp2_c(r * max, depth);
        }

        /* copy the alpha plane through unchanged when writing to a new
         * buffer; memcpy length is in bytes, hence width * 2 */
        if (aptr && alinesize && frame != in)
            memcpy(aptr + alinesize * y, asrc + aslinesize * y, width * 2);

        gsrc += gslinesize;
        bsrc += bslinesize;
        rsrc += rslinesize;
        gptr += glinesize;
        bptr += blinesize;
        rptr += rlinesize;
    }

    return 0;
}
210 
/**
 * Process one horizontal slice of a packed 8-bit RGB(A) frame
 * (e.g. RGB24, BGRA).  Same algorithm as vibrance_slice8 but channels are
 * interleaved; rgba_map gives each channel's byte offset within a pixel and
 * `step` is the pixel stride in bytes.
 *
 * @param avctx  filter context (priv is VibranceContext)
 * @param arg    ThreadData with in/out frames
 * @param jobnr  index of this job
 * @param nb_jobs total number of parallel jobs
 * @return 0
 */
static int vibrance_slice8p(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    ThreadData *td = arg;
    AVFrame *frame = td->out;
    AVFrame *in = td->in;
    const int step = s->step;
    const int width = frame->width;
    const int height = frame->height;
    const float scale = 1.f / 255.f;
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
    /* per-channel byte offsets inside one packed pixel */
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const float intensity = s->intensity;
    const float alternate = s->alternate ? 1.f : -1.f;
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const float sgintensity = alternate * FFSIGN(gintensity);
    const float sbintensity = alternate * FFSIGN(bintensity);
    const float srintensity = alternate * FFSIGN(rintensity);
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const int linesize = frame->linesize[0];
    const int slinesize = in->linesize[0];
    const uint8_t *src = in->data[0] + slice_start * slinesize;
    uint8_t *ptr = frame->data[0] + slice_start * linesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            /* normalize samples to [0,1] */
            float g = src[x * step + goffset] * scale;
            float b = src[x * step + boffset] * scale;
            float r = src[x * step + roffset] * scale;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            /* per-channel gain; its effect fades as saturation approaches 1 */
            const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
            const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
            const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);

            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);

            ptr[x * step + goffset] = av_clip_uint8(g * 255.f);
            ptr[x * step + boffset] = av_clip_uint8(b * 255.f);
            ptr[x * step + roffset] = av_clip_uint8(r * 255.f);

            /* pass alpha through per pixel when writing to a new buffer */
            if (frame != in)
                ptr[x * step + aoffset] = src[x * step + aoffset];
        }

        ptr += linesize;
        src += slinesize;
    }

    return 0;
}
274 
/**
 * Process one horizontal slice of a packed high-bit-depth (> 8 bit) RGB(A)
 * frame (e.g. RGB48, RGBA64).  Same algorithm as vibrance_slice8p but
 * samples are uint16_t, normalized by 2^depth - 1.
 *
 * @param avctx  filter context (priv is VibranceContext)
 * @param arg    ThreadData with in/out frames
 * @param jobnr  index of this job
 * @param nb_jobs total number of parallel jobs
 * @return 0
 */
static int vibrance_slice16p(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
{
    VibranceContext *s = avctx->priv;
    ThreadData *td = arg;
    AVFrame *frame = td->out;
    AVFrame *in = td->in;
    const int step = s->step;
    const int depth = s->depth;
    const float max = (1 << depth) - 1;
    const float scale = 1.f / max;
    const float gc = s->lcoeffs[0];
    const float bc = s->lcoeffs[1];
    const float rc = s->lcoeffs[2];
    /* per-channel sample offsets inside one packed pixel */
    const uint8_t roffset = s->rgba_map[R];
    const uint8_t goffset = s->rgba_map[G];
    const uint8_t boffset = s->rgba_map[B];
    const uint8_t aoffset = s->rgba_map[A];
    const int width = frame->width;
    const int height = frame->height;
    const float intensity = s->intensity;
    const float alternate = s->alternate ? 1.f : -1.f;
    const float gintensity = intensity * s->balance[0];
    const float bintensity = intensity * s->balance[1];
    const float rintensity = intensity * s->balance[2];
    const float sgintensity = alternate * FFSIGN(gintensity);
    const float sbintensity = alternate * FFSIGN(bintensity);
    const float srintensity = alternate * FFSIGN(rintensity);
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    /* linesize[] is in bytes; divide by 2 to step uint16_t pointers */
    const int linesize = frame->linesize[0] / 2;
    const int slinesize = in->linesize[0] / 2;
    const uint16_t *src = (const uint16_t *)in->data[0] + slice_start * slinesize;
    uint16_t *ptr = (uint16_t *)frame->data[0] + slice_start * linesize;

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            /* normalize samples to [0,1] */
            float g = src[x * step + goffset] * scale;
            float b = src[x * step + boffset] * scale;
            float r = src[x * step + roffset] * scale;
            float max_color = FFMAX3(r, g, b);
            float min_color = FFMIN3(r, g, b);
            float color_saturation = max_color - min_color;
            float luma = g * gc + r * rc + b * bc;
            /* per-channel gain; its effect fades as saturation approaches 1 */
            const float cg = 1.f + gintensity * (1.f - sgintensity * color_saturation);
            const float cb = 1.f + bintensity * (1.f - sbintensity * color_saturation);
            const float cr = 1.f + rintensity * (1.f - srintensity * color_saturation);

            g = lerpf(luma, g, cg);
            b = lerpf(luma, b, cb);
            r = lerpf(luma, r, cr);

            ptr[x * step + goffset] = av_clip_uintp2_c(g * max, depth);
            ptr[x * step + boffset] = av_clip_uintp2_c(b * max, depth);
            ptr[x * step + roffset] = av_clip_uintp2_c(r * max, depth);
            /* pass alpha through per pixel when writing to a new buffer */
            if (frame != in)
                ptr[x * step + aoffset] = src[x * step + aoffset];
        }

        ptr += linesize;
        src += slinesize;
    }

    return 0;
}
339 
341 {
342  AVFilterContext *avctx = link->dst;
343  AVFilterLink *outlink = avctx->outputs[0];
344  VibranceContext *s = avctx->priv;
345  ThreadData td;
346  AVFrame *out;
347  int res;
348 
349  if (av_frame_is_writable(in)) {
350  out = in;
351  } else {
352  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
353  if (!out) {
354  av_frame_free(&in);
355  return AVERROR(ENOMEM);
356  }
358  }
359 
360  td.out = out;
361  td.in = in;
362  if (res = ff_filter_execute(avctx, s->do_slice, &td, NULL,
363  FFMIN(out->height, ff_filter_get_nb_threads(avctx))))
364  return res;
365 
366  if (out != in)
367  av_frame_free(&in);
368  return ff_filter_frame(outlink, out);
369 }
370 
371 static const enum AVPixelFormat pixel_fmts[] = {
384 };
385 
387 {
388  AVFilterContext *avctx = inlink->dst;
389  VibranceContext *s = avctx->priv;
391  int planar = desc->flags & AV_PIX_FMT_FLAG_PLANAR;
392 
393  s->step = desc->nb_components;
394  if (inlink->format == AV_PIX_FMT_RGB0 ||
395  inlink->format == AV_PIX_FMT_0RGB ||
396  inlink->format == AV_PIX_FMT_BGR0 ||
397  inlink->format == AV_PIX_FMT_0BGR)
398  s->step = 4;
399 
400  s->depth = desc->comp[0].depth;
401  s->do_slice = s->depth <= 8 ? vibrance_slice8 : vibrance_slice16;
402  if (!planar)
403  s->do_slice = s->depth <= 8 ? vibrance_slice8p : vibrance_slice16p;
404 
405  ff_fill_rgba_map(s->rgba_map, inlink->format);
406 
407  return 0;
408 }
409 
/* Single video input: per-frame processing in filter_frame, format-dependent
 * setup (slice function, depth, rgba_map) in config_input. */
static const AVFilterPad vibrance_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};
418 
/* Single video output; no output-side configuration needed. */
static const AVFilterPad vibrance_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};
425 
#define OFFSET(x) offsetof(VibranceContext, x)
/* video filtering option, changeable at runtime via process_command */
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/* balance[] and lcoeffs[] are stored in plane order G(0), B(1), R(2) to
 * match the planar GBR pixel formats, hence the swapped array indices.
 * The default luma coefficients correspond to the BT.709 weights
 * (R 0.0722, G 0.7152, B 0.2126 — presumably; verify against the spec). */
static const AVOption vibrance_options[] = {
    { "intensity", "set the intensity value", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0}, -2, 2, VF },
    { "rbal", "set the red balance value", OFFSET(balance[2]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF },
    { "gbal", "set the green balance value", OFFSET(balance[0]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF },
    { "bbal", "set the blue balance value", OFFSET(balance[1]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF },
    { "rlum", "set the red luma coefficient", OFFSET(lcoeffs[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.072186}, 0, 1, VF },
    { "glum", "set the green luma coefficient", OFFSET(lcoeffs[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.715158}, 0, 1, VF },
    { "blum", "set the blue luma coefficient", OFFSET(lcoeffs[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.212656}, 0, 1, VF },
    { "alternate", "use alternate colors", OFFSET(alternate), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
    { NULL }
};
440 
441 AVFILTER_DEFINE_CLASS(vibrance);
442 
444  .name = "vibrance",
445  .description = NULL_IF_CONFIG_SMALL("Boost or alter saturation."),
446  .priv_size = sizeof(VibranceContext),
447  .priv_class = &vibrance_class,
452  .process_command = ff_filter_process_command,
453 };
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:101
AV_PIX_FMT_GBRAP16
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:481
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_vibrance.c:340
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_vf_vibrance
const AVFilter ff_vf_vibrance
Definition: vf_vibrance.c:443
out
FILE * out
Definition: movenc.c:54
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:239
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:969
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2888
VibranceContext::alternate
int alternate
Definition: vf_vibrance.c:40
FILTER_PIXFMTS_ARRAY
#define FILTER_PIXFMTS_ARRAY(array)
Definition: internal.h:174
pixel_fmts
static enum AVPixelFormat pixel_fmts[]
Definition: vf_vibrance.c:371
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:99
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
av_clip_uintp2_c
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
Clip a signed integer to an unsigned power of two range.
Definition: common.h:275
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VF
#define VF
Definition: vf_vibrance.c:427
AVOption
AVOption.
Definition: opt.h:251
b
#define b
Definition: input.c:41
vibrance_options
static const AVOption vibrance_options[]
Definition: vf_vibrance.c:429
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
max
#define max(a, b)
Definition: cuda_runtime.h:33
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:165
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:473
VibranceContext::balance
float balance[3]
Definition: vf_vibrance.c:38
video.h
VibranceContext::depth
int depth
Definition: vf_vibrance.c:43
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:154
VibranceContext::do_slice
int(* do_slice)(AVFilterContext *s, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:46
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:351
formats.h
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:477
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AVFilterContext::priv
void * priv
private data for use by the filter
Definition: avfilter.h:407
v0
#define v0
Definition: regdef.h:26
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:475
FFSIGN
#define FFSIGN(a)
Definition: common.h:65
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
planar
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1<< 16)) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const 
double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, 
AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? 
in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
Definition: audioconvert.c:56
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:49
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:479
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:256
AV_PIX_FMT_GBRAP12
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:480
VibranceContext::intensity
float intensity
Definition: vf_vibrance.c:37
g
const char * g
Definition: vf_curves.c:127
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2006
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(vibrance)
vibrance_inputs
static const AVFilterPad vibrance_inputs[]
Definition: vf_vibrance.c:410
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:194
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
arg
const char * arg
Definition: jacosubdec.c:67
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:478
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:449
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:450
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:594
A
#define A
Definition: vf_vibrance.c:32
vibrance_slice8
static int vibrance_slice8(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:59
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:258
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:474
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
VibranceContext::rgba_map
uint8_t rgba_map[4]
Definition: vf_vibrance.c:44
lerpf
static float lerpf(float v0, float v1, float f)
Definition: vf_vibrance.c:50
f
f
Definition: af_crystalizer.c:122
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
R
#define R
Definition: vf_vibrance.c:29
AV_PIX_FMT_RGB48
#define AV_PIX_FMT_RGB48
Definition: pixfmt.h:445
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:524
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:842
height
#define height
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:256
VibranceContext::lcoeffs
float lcoeffs[3]
Definition: vf_vibrance.c:39
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:142
AV_PIX_FMT_ARGB
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:92
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:228
vibrance_slice16
static int vibrance_slice16(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:134
VibranceContext::step
int step
Definition: vf_vibrance.c:42
AV_PIX_FMT_BGRA64
#define AV_PIX_FMT_BGRA64
Definition: pixfmt.h:454
OFFSET
#define OFFSET(x)
Definition: vf_vibrance.c:426
FFMIN3
#define FFMIN3(a, b, c)
Definition: macros.h:50
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:476
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:777
ThreadData
Used for passing data between threads.
Definition: dsddec.c:69
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:55
AVFilter
Filter definition.
Definition: avfilter.h:161
vibrance_slice16p
static int vibrance_slice16p(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:275
AV_PIX_FMT_0BGR
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
Definition: pixfmt.h:257
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
config_input
static av_cold int config_input(AVFilterLink *inlink)
Definition: vf_vibrance.c:386
vibrance_outputs
static const AVFilterPad vibrance_outputs[]
Definition: vf_vibrance.c:419
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
avfilter.h
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
av_clip_uint8
#define av_clip_uint8
Definition: common.h:101
AVFilterContext
An instance of a filter.
Definition: avfilter.h:392
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
G
#define G
Definition: vf_vibrance.c:30
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:240
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:195
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
ff_fill_rgba_map
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:35
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
imgutils.h
B
#define B
Definition: vf_vibrance.c:31
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:375
AV_PIX_FMT_0RGB
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:255
VibranceContext
Definition: vf_vibrance.c:34
drawutils.h
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:146
int
int
Definition: ffmpeg_filter.c:156
vibrance_slice8p
static int vibrance_slice8p(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_vibrance.c:211
AVFilterContext::outputs
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:404