FFmpeg
af_adynamicequalizer.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include <float.h>
20 
21 #include "libavutil/opt.h"
22 #include "avfilter.h"
23 #include "audio.h"
24 #include "formats.h"
25 
/* Private context of the adynamicequalizer filter.
 * NOTE(review): the doc dump dropped the enclosing
 * "typedef struct AudioDynamicEqualizerContext {" opener, the
 * "AVFrame *state;" member and the closing line (see cross-reference index). */
27  const AVClass *class;   /* AVOptions class pointer; must be the first field */
28 
29  double threshold;       /* detection threshold option (ignored when auto==off, which reads state[5]) */
30  double dfrequency;      /* detection filter frequency, clamped to Nyquist in filter_channels() */
31  double dqfactor;        /* detection filter Q factor */
32  double tfrequency;      /* target (equalizing) filter frequency, clamped to Nyquist */
33  double tqfactor;        /* target filter Q factor */
34  double ratio;           /* ratio applied to the threshold overshoot when computing gain */
35  double range;           /* upper clip bound for the computed gain */
36  double makeup;          /* additive makeup term in the gain computation */
37  double attack;          /* attack duration option, turned into attack_coef via get_coef() */
38  double release;         /* release duration option, turned into release_coef via get_coef() */
39  double attack_coef;     /* one-pole smoothing coefficient derived from attack */
40  double release_coef;    /* one-pole smoothing coefficient derived from release */
41  int mode;               /* -1 listen (output detection filter), 0 cut, 1 boost */
42  int direction;          /* 0 downward (act below/above threshold per mode), 1 upward */
43  int detection;          /* auto threshold: -1 disabled (store option), 0 off (use stored), 1 on (track peak) */
44  int type;               /* target filter type: 0 bell, 1 lowshelf, 2 highshelf */
48 
/* Input link configuration: allocate per-channel filter state.
 * The state buffer holds 8 doubles per channel, used in filter_channels() as:
 *   state[0..1] detection SVF state, state[2..3] target SVF state,
 *   state[4]    smoothed gain (initialized to unity here),
 *   state[5]    stored/tracked detection threshold.
 * NOTE(review): the doc dump dropped the function signature
 * (static int config_input(AVFilterLink *inlink)) and the line fetching
 * the private context 's' from ctx->priv. */
50 {
51  AVFilterContext *ctx = inlink->dst;
53 
54  s->state = ff_get_audio_buffer(inlink, 8); /* 8 "samples" == 8 doubles of state per channel */
55  if (!s->state)
56  return AVERROR(ENOMEM);
57 
58  for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {
59  double *state = (double *)s->state->extended_data[ch];
60 
61  state[4] = 1.; /* start with unity gain so the first samples pass unchanged */
62  }
63 
64  return 0;
65 }
66 
/*
 * Run one sample through a two-pole state-variable filter
 * (trapezoidal/TPT topology, cf. Simper's SVF formulation).
 *
 * @param in sample to filter
 * @param m  output mixing coefficients: m[0]*input + m[1]*v1 + m[2]*v2
 * @param a  precomputed filter coefficients
 * @param b  two-element per-channel state, advanced in place
 * @return   the filtered sample
 */
static double get_svf(double in, double *m, double *a, double *b)
{
    const double t1 = a[0] * b[0] + a[1] * (in - b[1]);
    const double t2 = b[1] + a[1] * b[0] + a[2] * (in - b[1]);

    /* advance the integrator state */
    b[0] = 2. * t1 - b[0];
    b[1] = 2. * t2 - b[1];

    return m[0] * in + m[1] * t1 + m[2] * t2;
}
79 
/* Per-frame data handed to the slice-threaded worker (filter_channels). */
80 typedef struct ThreadData {
81  AVFrame *in, *out; /* input frame and (possibly the same) output frame */
82 } ThreadData;
83 
/* Slice-threaded worker: dynamically equalize the channels [start, end).
 * For each sample: band-pass detect around dfrequency, derive a gain from the
 * threshold overshoot, smooth it with attack/release, and apply it through a
 * target SVF (bell/lowshelf/highshelf) at tfrequency.
 * NOTE(review): the doc dump dropped the line fetching the private context
 * 's' from ctx->priv at the top of this function. */
84 static int filter_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
85 {
87  ThreadData *td = arg;
88  AVFrame *in = td->in;
89  AVFrame *out = td->out;
90  const double sample_rate = in->sample_rate;
91  const double makeup = s->makeup;
92  const double ratio = s->ratio;
93  const double range = s->range;
/* clamp both filter frequencies to Nyquist so tan() below stays in range */
94  const double dfrequency = fmin(s->dfrequency, sample_rate * 0.5);
95  const double tfrequency = fmin(s->tfrequency, sample_rate * 0.5);
96  const double release = s->release_coef;
97  const double irelease = 1. - release;
98  const double attack = s->attack_coef;
99  const double iattack = 1. - attack;
100  const double dqfactor = s->dqfactor;
101  const double tqfactor = s->tqfactor;
/* bilinear-prewarped gains for the target (fg) and detection (dg) filters */
102  const double fg = tan(M_PI * tfrequency / sample_rate);
103  const double dg = tan(M_PI * dfrequency / sample_rate);
/* this job handles the channel range [start, end) */
104  const int start = (in->ch_layout.nb_channels * jobnr) / nb_jobs;
105  const int end = (in->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;
106  const int detection = s->detection;
107  const int direction = s->direction;
108  const int mode = s->mode;
109  const int type = s->type;
110  double da[3], dm[3];
111 
/* detection filter coefficients: band-pass SVF (only dm[1] is nonzero) */
112  {
113  double k = 1. / dqfactor;
114 
115  da[0] = 1. / (1. + dg * (dg + k));
116  da[1] = dg * da[0];
117  da[2] = dg * da[1];
118 
119  dm[0] = 0.;
120  dm[1] = k;
121  dm[2] = 0.;
122  }
123 
124  for (int ch = start; ch < end; ch++) {
125  const double *src = (const double *)in->extended_data[ch];
126  double *dst = (double *)out->extended_data[ch];
127  double *state = (double *)s->state->extended_data[ch];
/* auto==off reads the previously stored threshold from state[5] */
128  const double threshold = detection == 0 ? state[5] : s->threshold;
129 
/* auto==disabled: remember the option threshold for a later auto==off run */
130  if (detection < 0)
131  state[5] = threshold;
132 
133  for (int n = 0; n < out->nb_samples; n++) {
134  double detect, gain, v, listen;
135  double fa[3], fm[3];
136  double k, g;
137 
/* band-pass detection around dfrequency; keep the raw value for listen mode */
138  detect = listen = get_svf(src[n], dm, da, state);
139  detect = fabs(detect);
140 
/* auto==on: track the running peak as the threshold */
141  if (detection > 0)
142  state[5] = fmax(state[5], detect);
143 
/* target gain from the threshold overshoot, clipped to [1, range];
 * cut mode (0) divides, boost mode (1) multiplies;
 * downward (0) reacts below threshold, upward (1) above it */
144  if (direction == 0 && mode == 0 && detect < threshold)
145  detect = 1. / av_clipd(1. + makeup + (threshold - detect) * ratio, 1., range);
146  else if (direction == 0 && mode == 1 && detect < threshold)
147  detect = av_clipd(1. + makeup + (threshold - detect) * ratio, 1., range);
148  else if (direction == 1 && mode == 0 && detect > threshold)
149  detect = 1. / av_clipd(1. + makeup + (detect - threshold) * ratio, 1., range);
150  else if (direction == 1 && mode == 1 && detect > threshold)
151  detect = av_clipd(1. + makeup + (detect - threshold) * ratio, 1., range);
152  else
153  detect = 1.;
154 
/* smooth the gain: attack when moving away from unity (direction-dependent),
 * release when recovering; state[4] holds the previous smoothed gain */
155  if (direction == 0) {
156  if (detect > state[4]) {
157  detect = iattack * detect + attack * state[4];
158  } else {
159  detect = irelease * detect + release * state[4];
160  }
161  } else {
162  if (detect < state[4]) {
163  detect = iattack * detect + attack * state[4];
164  } else {
165  detect = irelease * detect + release * state[4];
166  }
167  }
168 
/* recompute target filter coefficients only when the gain changed */
169  if (state[4] != detect || n == 0) {
170  state[4] = gain = detect;
171 
172  switch (type) {
173  case 0: /* bell */
174  k = 1. / (tqfactor * gain);
175 
176  fa[0] = 1. / (1. + fg * (fg + k));
177  fa[1] = fg * fa[0];
178  fa[2] = fg * fa[1];
179 
180  fm[0] = 1.;
181  fm[1] = k * (gain * gain - 1.);
182  fm[2] = 0.;
183  break;
184  case 1: /* lowshelf */
185  k = 1. / tqfactor;
186  g = fg / sqrt(gain);
187 
188  fa[0] = 1. / (1. + g * (g + k));
189  fa[1] = g * fa[0];
190  fa[2] = g * fa[1];
191 
192  fm[0] = 1.;
193  fm[1] = k * (gain - 1.);
194  fm[2] = gain * gain - 1.;
195  break;
196  case 2: /* highshelf */
197  k = 1. / tqfactor;
198  g = fg / sqrt(gain);
199 
200  fa[0] = 1. / (1. + g * (g + k));
201  fa[1] = g * fa[0];
202  fa[2] = g * fa[1];
203 
204  fm[0] = gain * gain;
205  fm[1] = k * (1. - gain) * gain;
206  fm[2] = 1. - gain * gain;
207  break;
208  }
209  }
210 
/* apply the target filter (state[2..3]); listen mode outputs the detector,
 * and a disabled filter instance passes the input through untouched */
211  v = get_svf(src[n], fm, fa, &state[2]);
212  v = mode == -1 ? listen : v;
213  dst[n] = ctx->is_disabled ? src[n] : v;
214  }
215  }
216 
217  return 0;
218 }
219 
220 static double get_coef(double x, double sr)
221 {
222  return exp(-1000. / (x * sr));
223 }
224 
/* Per-frame entry point: prepare an output frame, refresh the smoothing
 * coefficients for the current sample rate, and run the slice workers.
 * NOTE(review): the doc dump dropped several hyperlinked lines here —
 * the signature (static int filter_frame(AVFilterLink *inlink, AVFrame *in)),
 * the 's = ctx->priv' fetch, an av_frame_copy_props(out, in) call in the
 * allocation branch, and the ff_filter_execute(ctx, filter_channels, ...)
 * call before the final frame handoff (see the cross-reference index). */
226 {
227  AVFilterContext *ctx = inlink->dst;
228  AVFilterLink *outlink = ctx->outputs[0];
230  ThreadData td;
231  AVFrame *out;
232 
/* filter in place when possible, otherwise allocate a fresh output buffer */
233  if (av_frame_is_writable(in)) {
234  out = in;
235  } else {
236  out = ff_get_audio_buffer(outlink, in->nb_samples);
237  if (!out) {
238  av_frame_free(&in);
239  return AVERROR(ENOMEM);
240  }
242  }
243 
/* durations are options; coefficients depend on the stream's sample rate */
244  s->attack_coef = get_coef(s->attack, in->sample_rate);
245  s->release_coef = get_coef(s->release, in->sample_rate);
246 
247  td.in = in;
248  td.out = out;
251 
252  if (out != in)
253  av_frame_free(&in);
254  return ff_filter_frame(outlink, out);
255 }
256 
/* Filter teardown: release the per-channel state buffer.
 * NOTE(review): the doc dump dropped the signature
 * (static av_cold void uninit(AVFilterContext *ctx)) and the
 * 's = ctx->priv' line (see the cross-reference index). */
258 {
260 
261  av_frame_free(&s->state);
262 }
263 
264 #define OFFSET(x) offsetof(AudioDynamicEqualizerContext, x)
/* all options are runtime-adjustable via process_command */
265 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
266 
/* User-facing options of the adynamicequalizer filter.
 * NOTE(review): the doc dump dropped the array declaration line
 * (static const AVOption adynamicequalizer_options[] = {). */
268  { "threshold", "set detection threshold", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 100, FLAGS },
269  { "dfrequency", "set detection frequency", OFFSET(dfrequency), AV_OPT_TYPE_DOUBLE, {.dbl=1000}, 2, 1000000, FLAGS },
270  { "dqfactor", "set detection Q factor", OFFSET(dqfactor), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.001, 1000, FLAGS },
271  { "tfrequency", "set target frequency", OFFSET(tfrequency), AV_OPT_TYPE_DOUBLE, {.dbl=1000}, 2, 1000000, FLAGS },
272  { "tqfactor", "set target Q factor", OFFSET(tqfactor), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.001, 1000, FLAGS },
273  { "attack", "set attack duration", OFFSET(attack), AV_OPT_TYPE_DOUBLE, {.dbl=20}, 1, 2000, FLAGS },
274  { "release", "set release duration", OFFSET(release), AV_OPT_TYPE_DOUBLE, {.dbl=200}, 1, 2000, FLAGS },
275  { "ratio", "set ratio factor", OFFSET(ratio), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 30, FLAGS },
276  { "makeup", "set makeup gain", OFFSET(makeup), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 100, FLAGS },
277  { "range", "set max gain", OFFSET(range), AV_OPT_TYPE_DOUBLE, {.dbl=50}, 1, 200, FLAGS },
/* mode: -1 listen (monitor detection filter), 0 cut, 1 boost */
278  { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, -1, 1, FLAGS, "mode" },
279  { "listen", 0, 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, FLAGS, "mode" },
280  { "cut", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
281  { "boost", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
/* tftype: target filter shape used to apply the computed gain */
282  { "tftype", "set target filter type", OFFSET(type), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "type" },
283  { "bell", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "type" },
284  { "lowshelf", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "type" },
285  { "highshelf",0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "type" },
/* direction: downward acts below threshold, upward acts above it */
286  { "direction", "set direction", OFFSET(direction), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "direction" },
287  { "downward", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "direction" },
288  { "upward", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "direction" },
/* auto: automatic threshold handling, see filter_channels() */
289  { "auto", "set auto threshold", OFFSET(detection), AV_OPT_TYPE_INT, {.i64=-1}, -1, 1, FLAGS, "auto" },
290  { "disabled", 0, 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, FLAGS, "auto" },
291  { "off", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "auto" },
292  { "on", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "auto" },
293  { NULL }
294 };
295 
/* Generates adynamicequalizer_class from adynamicequalizer_options. */
296 AVFILTER_DEFINE_CLASS(adynamicequalizer);
297 
/* Single audio input pad; frames enter via filter_frame(),
 * link configuration allocates state via config_input(). */
298 static const AVFilterPad inputs[] = {
299  {
300  .name = "default",
301  .type = AVMEDIA_TYPE_AUDIO,
302  .filter_frame = filter_frame,
303  .config_props = config_input,
304  },
305 };
306 
/* Single pass-through audio output pad. */
307 static const AVFilterPad outputs[] = {
308  {
309  .name = "default",
310  .type = AVMEDIA_TYPE_AUDIO,
311  },
312 };
313 
/* Filter registration.
 * NOTE(review): the doc dump dropped the opening line
 * (const AVFilter ff_af_adynamicequalizer = {) and, per the cross-reference
 * index, the FILTER_INPUTS(inputs) / FILTER_OUTPUTS(outputs) /
 * FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBLP) and .flags lines
 * (slice threading + internal timeline support). */
315  .name = "adynamicequalizer",
316  .description = NULL_IF_CONFIG_SMALL("Apply Dynamic Equalization of input audio."),
317  .priv_size = sizeof(AudioDynamicEqualizerContext),
318  .priv_class = &adynamicequalizer_class,
319  .uninit = uninit,
325  .process_command = ff_filter_process_command,
326 };
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:100
td
#define td
Definition: regdef.h:70
AudioDynamicEqualizerContext::dfrequency
double dfrequency
Definition: af_adynamicequalizer.c:30
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AudioDynamicEqualizerContext::range
double range
Definition: af_adynamicequalizer.c:35
out
FILE * out
Definition: movenc.c:54
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:978
FILTER_SINGLE_SAMPLEFMT
#define FILTER_SINGLE_SAMPLEFMT(sample_fmt_)
Definition: internal.h:183
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
ff_af_adynamicequalizer
const AVFilter ff_af_adynamicequalizer
Definition: af_adynamicequalizer.c:314
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
AudioDynamicEqualizerContext::makeup
double makeup
Definition: af_adynamicequalizer.c:36
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
AudioDynamicEqualizerContext::type
int type
Definition: af_adynamicequalizer.c:44
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_adynamicequalizer.c:49
AVOption
AVOption.
Definition: opt.h:251
b
#define b
Definition: input.c:41
FLAGS
#define FLAGS
Definition: af_adynamicequalizer.c:265
float.h
filter_channels
static int filter_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_adynamicequalizer.c:84
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:175
AudioDynamicEqualizerContext::mode
int mode
Definition: af_adynamicequalizer.c:41
AudioDynamicEqualizerContext::state
AVFrame * state
Definition: af_adynamicequalizer.c:46
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:473
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:311
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:154
get_coef
static double get_coef(double x, double sr)
Definition: af_adynamicequalizer.c:220
sample_rate
sample_rate
Definition: ffmpeg_filter.c:156
formats.h
AudioDynamicEqualizerContext::detection
int detection
Definition: af_adynamicequalizer.c:43
v0
#define v0
Definition: regdef.h:26
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_adynamicequalizer.c:225
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:714
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_adynamicequalizer.c:257
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:49
get_svf
static double get_svf(double in, double *m, double *a, double *b)
Definition: af_adynamicequalizer.c:67
AudioDynamicEqualizerContext::release_coef
double release_coef
Definition: af_adynamicequalizer.c:40
av_cold
#define av_cold
Definition: attributes.h:90
adynamicequalizer_options
static const AVOption adynamicequalizer_options[]
Definition: af_adynamicequalizer.c:267
s
#define s(width, name)
Definition: cbs_vp9.c:256
g
const char * g
Definition: vf_curves.c:127
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:227
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ctx
AVFormatContext * ctx
Definition: movenc.c:48
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:190
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
AudioDynamicEqualizerContext::tqfactor
double tqfactor
Definition: af_adynamicequalizer.c:33
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:603
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(adynamicequalizer)
exp
int8_t exp
Definition: eval.c:72
outputs
static const AVFilterPad outputs[]
Definition: af_adynamicequalizer.c:307
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
OFFSET
#define OFFSET(x)
Definition: af_adynamicequalizer.c:264
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:507
fmin
double fmin(double, double)
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:533
AudioDynamicEqualizerContext
Definition: af_adynamicequalizer.c:26
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:858
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
M_PI
#define M_PI
Definition: mathematics.h:52
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:410
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:391
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:793
ThreadData
Used for passing data between threads.
Definition: dsddec.c:69
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:55
AudioDynamicEqualizerContext::ratio
double ratio
Definition: af_adynamicequalizer.c:34
AVFilter
Filter definition.
Definition: avfilter.h:171
AudioDynamicEqualizerContext::attack
double attack
Definition: af_adynamicequalizer.c:37
fmax
double fmax(double, double)
AudioDynamicEqualizerContext::tfrequency
double tfrequency
Definition: af_adynamicequalizer.c:32
mode
mode
Definition: ebur128.h:83
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
avfilter.h
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:67
AudioDynamicEqualizerContext::dqfactor
double dqfactor
Definition: af_adynamicequalizer.c:31
AudioDynamicEqualizerContext::release
double release
Definition: af_adynamicequalizer.c:38
AVFilterContext
An instance of a filter.
Definition: avfilter.h:402
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:127
audio.h
AudioDynamicEqualizerContext::threshold
double threshold
Definition: af_adynamicequalizer.c:29
AudioDynamicEqualizerContext::attack_coef
double attack_coef
Definition: af_adynamicequalizer.c:39
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:191
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:160
inputs
static const AVFilterPad inputs[]
Definition: af_adynamicequalizer.c:298
state
static struct @345 state
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:142
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
av_clipd
av_clipd
Definition: af_crystalizer.c:132
AudioDynamicEqualizerContext::direction
int direction
Definition: af_adynamicequalizer.c:42