#define FF_BUFQUEUE_SIZE (1024)

#define MAX_ITEMS  882000
#define MIN_PEAK (1. / 32768.)
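/* Presumably MAX_ITEMS = 20 * 44100, i.e. room for one period item per sample
 * over roughly 20 seconds of 44.1 kHz audio, and MIN_PEAK is the 16-bit LSB
 * (1/32768); the file itself does not spell this out. */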
    /* SpeechNormalizerContext member: per-format analysis callback */
    void (*analyze_channel)(AVFilterContext *ctx, ChannelContext *cc,
                            const uint8_t *srcp, int nb_samples);
#define OFFSET(x) offsetof(SpeechNormalizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
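/* A sketch of how OFFSET()/FLAGS combine in speechnorm_options; the "peak"
 * row is reconstructed from the documented option (default 0.95, range 0..1),
 * so the exact help string is an assumption: */
    { "peak", "set the expansion target peak value", OFFSET(peak_value),
      AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0, 1.0, FLAGS },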
static int get_pi_samples(PeriodItem *pi, int start, int end, int remain)
{
    int sum = remain;

    if (pi[start].type == 0)
        return remain;
    while (start != end) {
        start++;
        if (start >= MAX_ITEMS)
            start = 0;
        if (pi[start].type == 0)
            break;
        sum += pi[start].size;
    }
    return sum;
}
static int available_samples(AVFilterContext *ctx)
{
    SpeechNormalizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int min_pi_nb_samples;

    min_pi_nb_samples = get_pi_samples(s->cc[0].pi, s->cc[0].pi_start,
                                       s->cc[0].pi_end, s->cc[0].pi_size);
    for (int ch = 1; ch < inlink->ch_layout.nb_channels && min_pi_nb_samples > 0; ch++) {
        ChannelContext *cc = &s->cc[ch];

        min_pi_nb_samples = FFMIN(min_pi_nb_samples,
                                  get_pi_samples(cc->pi, cc->pi_start, cc->pi_end, cc->pi_size));
    }

    return min_pi_nb_samples;
}
static void consume_pi(ChannelContext *cc, int nb_samples)
{
    if (cc->pi_size >= nb_samples)
        cc->pi_size -= nb_samples;
}
static double next_gain(AVFilterContext *ctx, double pi_max_peak, int bypass,
                        double state, double pi_rms_sum, int pi_size)
{
    SpeechNormalizerContext *s = ctx->priv;
    const double compression = 1. / s->max_compression;
    const int type = s->invert ? pi_max_peak <= s->threshold_value
                               : pi_max_peak >= s->threshold_value;
    double expansion = FFMIN(s->max_expansion, s->peak_value / pi_max_peak);

    if (s->rms_value > DBL_EPSILON)
        expansion = FFMIN(expansion, s->rms_value / sqrt(pi_rms_sum / pi_size));

    if (bypass) {
        return 1.;
    } else if (type) {
        return FFMIN(expansion, state + s->raise_amount);
    } else {
        return FFMIN(expansion, FFMAX(compression, state - s->fall_amount));
    }
}
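/* Worked example of the expansion term (using the documented defaults
 * peak_value = 0.95 and max_expansion = 2.0): a period peaking at 0.1 gives
 * FFMIN(2.0, 0.95 / 0.1) = 2.0, while one peaking at 0.8 gives
 * FFMIN(2.0, 1.1875) = 1.1875; the gain state then moves toward that bound
 * by raise_amount or fall_amount per period rather than jumping. */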
    while (size <= max_size) {
#define ANALYZE_CHANNEL(name, ptype, zero, min_peak)                            \
static void analyze_channel_## name (AVFilterContext *ctx, ChannelContext *cc, \
                                     const uint8_t *srcp, int nb_samples)      \
{                                                                               \
    SpeechNormalizerContext *s = ctx->priv;                                     \
    const ptype *src = (const ptype *)srcp;                                     \
    const int max_period = s->max_period;                                       \
    PeriodItem *pi = (PeriodItem *)&cc->pi;                                     \
    int pi_end = cc->pi_end;                                                    \
    int n = 0;                                                                  \
                                                                                \
    if (cc->state < 0)                                                          \
        cc->state = src[0] >= zero;                                             \
                                                                                \
    while (n < nb_samples) {                                                    \
        ptype new_max_peak;                                                     \
        ptype new_rms_sum;                                                      \
        int new_size;                                                           \
                                                                                \
        if ((cc->state != (src[n] >= zero)) ||                                  \
            (pi[pi_end].size > max_period)) {                                   \
            ptype max_peak = pi[pi_end].max_peak;                               \
            ptype rms_sum = pi[pi_end].rms_sum;                                 \
            int state = cc->state;                                              \
                                                                                \
            cc->state = src[n] >= zero;                                         \
            av_assert1(pi[pi_end].size > 0);                                    \
            if (max_peak >= min_peak ||                                         \
                pi[pi_end].size > max_period) {                                 \
                pi[pi_end].type = 1;                                            \
                pi_end++;                                                       \
                if (pi_end >= MAX_ITEMS)                                        \
                    pi_end = 0;                                                 \
                if (cc->state != state) {                                       \
                    pi[pi_end].max_peak = DBL_MIN;                              \
                    pi[pi_end].rms_sum = 0.0;                                   \
                } else {                                                        \
                    pi[pi_end].max_peak = max_peak;                             \
                    pi[pi_end].rms_sum = rms_sum;                               \
                }                                                               \
                pi[pi_end].type = 0;                                            \
                pi[pi_end].size = 0;                                            \
                av_assert1(pi_end != cc->pi_start);                             \
            }                                                                   \
        }                                                                       \
                                                                                \
        new_max_peak = pi[pi_end].max_peak;                                     \
        new_rms_sum = pi[pi_end].rms_sum;                                       \
        new_size = pi[pi_end].size;                                             \
        if (cc->state) {                                                        \
            while (src[n] >= zero) {                                            \
                new_max_peak = FFMAX(new_max_peak, src[n]);                     \
                new_rms_sum += src[n] * src[n];                                 \
                new_size++;                                                     \
                n++;                                                            \
                if (n >= nb_samples)                                            \
                    break;                                                      \
            }                                                                   \
        } else {                                                                \
            while (src[n] < zero) {                                             \
                new_max_peak = FFMAX(new_max_peak, -src[n]);                    \
                new_rms_sum += src[n] * src[n];                                 \
                new_size++;                                                     \
                n++;                                                            \
                if (n >= nb_samples)                                            \
                    break;                                                      \
            }                                                                   \
        }                                                                       \
                                                                                \
        pi[pi_end].max_peak = new_max_peak;                                     \
        pi[pi_end].rms_sum = new_rms_sum;                                       \
        pi[pi_end].size = new_size;                                             \
    }                                                                           \
    cc->pi_end = pi_end;                                                        \
}
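/* The flt/dbl names assigned in config_input below suggest the macro is
 * instantiated for the two planar sample formats roughly as (sketch): */
ANALYZE_CHANNEL(flt, float, 0.f, (float)MIN_PEAK)
ANALYZE_CHANNEL(dbl, double, 0.0, MIN_PEAK)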
#define FILTER_CHANNELS(name, ptype)                                            \
static void filter_channels_## name (AVFilterContext *ctx,                      \
                                     AVFrame *in, AVFrame *out, int nb_samples) \
{                                                                               \
    SpeechNormalizerContext *s = ctx->priv;                                     \
    AVFilterLink *inlink = ctx->inputs[0];                                      \
                                                                                \
    for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {                \
        ChannelContext *cc = &s->cc[ch];                                        \
        const ptype *src = (const ptype *)in->extended_data[ch];                \
        ptype *dst = (ptype *)out->extended_data[ch];                           \
        enum AVChannel channel = av_channel_layout_channel_from_index(&inlink->ch_layout, ch); \
        const int bypass = av_channel_layout_index_from_channel(&s->ch_layout, channel) < 0; \
        int n = 0;                                                              \
                                                                                \
        while (n < nb_samples) {                                                \
            ptype gain;                                                         \
            int size;                                                           \
                                                                                \
            next_pi(ctx, cc, bypass);                                           \
            size = FFMIN(nb_samples - n, cc->pi_size);                          \
            av_assert1(size > 0);                                               \
            gain = cc->gain_state;                                              \
            consume_pi(cc, size);                                               \
            for (int i = n; !ctx->is_disabled && i < n + size; i++)             \
                dst[i] = src[i] * gain;                                         \
            n += size;                                                          \
        }                                                                       \
    }                                                                           \
}
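/* Matching per-format instantiations (a sketch, inferred from the
 * filter_channels_flt/_dbl names used in config_input): */
FILTER_CHANNELS(flt, float)
FILTER_CHANNELS(dbl, double)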
#define FILTER_LINK_CHANNELS(name, ptype, tlerp)                                \
static void filter_link_channels_## name (AVFilterContext *ctx,                 \
                                          AVFrame *in, AVFrame *out,            \
                                          int nb_samples)                       \
{                                                                               \
    SpeechNormalizerContext *s = ctx->priv;                                     \
    AVFilterLink *inlink = ctx->inputs[0];                                      \
    int n = 0;                                                                  \
                                                                                \
    while (n < nb_samples) {                                                    \
        int min_size = nb_samples - n;                                          \
        ptype gain = s->max_expansion;                                          \
                                                                                \
        for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {            \
            ChannelContext *cc = &s->cc[ch];                                    \
                                                                                \
            enum AVChannel channel = av_channel_layout_channel_from_index(&inlink->ch_layout, ch); \
            cc->bypass = av_channel_layout_index_from_channel(&s->ch_layout, channel) < 0; \
                                                                                \
            next_pi(ctx, cc, cc->bypass);                                       \
            min_size = FFMIN(min_size, cc->pi_size);                            \
        }                                                                       \
                                                                                \
        av_assert1(min_size > 0);                                               \
        for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {            \
            ChannelContext *cc = &s->cc[ch];                                    \
                                                                                \
            if (cc->bypass)                                                     \
                continue;                                                       \
            gain = FFMIN(gain, min_gain(ctx, cc, min_size));                    \
        }                                                                       \
                                                                                \
        for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {            \
            ChannelContext *cc = &s->cc[ch];                                    \
            const ptype *src = (const ptype *)in->extended_data[ch];            \
            ptype *dst = (ptype *)out->extended_data[ch];                       \
                                                                                \
            consume_pi(cc, min_size);                                           \
            if (cc->bypass)                                                     \
                continue;                                                       \
                                                                                \
            for (int i = n; !ctx->is_disabled && i < n + min_size; i++) {       \
                ptype g = tlerp(s->prev_gain, gain, (i - n) / (ptype)min_size); \
                dst[i] = src[i] * g;                                            \
            }                                                                   \
        }                                                                       \
                                                                                \
        s->prev_gain = gain;                                                    \
        n += min_size;                                                          \
    }                                                                           \
}
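/* tlerp linearly interpolates between the previous and the new linked gain
 * across the period; flerp/dlerp are presumably the usual one-liners: */
static float flerp(float min, float max, float mix)
{
    return min + (max - min) * mix;
}

static double dlerp(double min, double max, double mix)
{
    return min + (max - min) * mix;
}

/* Instantiations matching the names assigned in config_input (sketch): */
FILTER_LINK_CHANNELS(flt, float, flerp)
FILTER_LINK_CHANNELS(dbl, double, dlerp)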
    while (s->queue.available > 0) {
        int min_pi_nb_samples;

        min_pi_nb_samples = available_samples(ctx);
        if (min_pi_nb_samples < in->nb_samples && !s->eof)
            break;

        s->filter_channels[s->link](ctx, in, out, in->nb_samples);
            for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {
                ChannelContext *cc = &s->cc[ch];

                s->analyze_channel(ctx, cc, in->extended_data[ch], in->nb_samples);
            }
    if (strcmp(s->ch_layout_str, "all"))
    if (s->eof && ff_inlink_queued_samples(inlink) == 0 &&
        s->queue.available == 0) {
    if (s->queue.available > 0) {
    s->max_period = inlink->sample_rate / 10; /* 100 ms at the input rate */
    for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {
    switch (inlink->format) {
    case AV_SAMPLE_FMT_FLTP:
        s->analyze_channel = analyze_channel_flt;
        s->filter_channels[0] = filter_channels_flt;
        s->filter_channels[1] = filter_link_channels_flt;
        break;
    case AV_SAMPLE_FMT_DBLP:
        s->analyze_channel = analyze_channel_dbl;
        s->filter_channels[0] = filter_channels_dbl;
        s->filter_channels[1] = filter_link_channels_dbl;
        break;
    }
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
const AVFilter ff_af_speechnorm = {
    .name        = "speechnorm",
    .description = NULL_IF_CONFIG_SMALL("Speech Normalizer."),
    .priv_class  = &speechnorm_class,
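A usage sketch (illustrative command line, not taken from this file): raising
quiet speech with all channels linked to a single gain might look like

    ffmpeg -i voice.wav -af speechnorm=e=4:r=0.0005:l=1 normalized.wav

where e (expansion) caps the maximum gain, r (raise) sets the per-period gain
increment, and l (link) makes every channel share the smallest computed gain.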