Go to the source code of this file.
|
| AVFILTER_DEFINE_CLASS (speechnorm) |
|
static int | get_pi_samples (PeriodItem *pi, int start, int end, int remain) |
|
static int | available_samples (AVFilterContext *ctx) |
|
static void | consume_pi (ChannelContext *cc, int nb_samples) |
|
static double | next_gain (AVFilterContext *ctx, double pi_max_peak, int bypass, double state) |
|
static void | next_pi (AVFilterContext *ctx, ChannelContext *cc, int bypass) |
|
static double | min_gain (AVFilterContext *ctx, ChannelContext *cc, int max_size) |
|
static double | dlerp (double min, double max, double mix) |
|
static float | flerp (float min, float max, float mix) |
|
static int | filter_frame (AVFilterContext *ctx) |
|
static int | activate (AVFilterContext *ctx) |
|
static int | config_input (AVFilterLink *inlink) |
|
static int | process_command (AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags) |
|
static av_cold void | uninit (AVFilterContext *ctx) |
|
Speech Normalizer
Definition in file af_speechnorm.c.
◆ FF_BUFQUEUE_SIZE
#define FF_BUFQUEUE_SIZE (1024) |
◆ MAX_ITEMS
◆ MIN_PEAK
#define MIN_PEAK (1. / 32768.) |
◆ OFFSET
◆ FLAGS
◆ ANALYZE_CHANNEL
#define ANALYZE_CHANNEL |
( |
|
name, |
|
|
|
ptype, |
|
|
|
zero, |
|
|
|
min_peak |
|
) |
| |
◆ FILTER_CHANNELS
#define FILTER_CHANNELS |
( |
|
name, |
|
|
|
ptype |
|
) |
| |
Value:
{ \
SpeechNormalizerContext *
s =
ctx->priv; \
for (
int ch = 0; ch <
inlink->channels; ch++) { \
ChannelContext *cc = &
s->cc[ch]; \
const ptype *
src = (
const ptype *)in->extended_data[ch]; \
ptype *dst = (ptype *)
out->extended_data[ch]; \
int n = 0; \
\
while (n < nb_samples) { \
ptype gain; \
size =
FFMIN(nb_samples - n, cc->pi_size); \
gain = cc->gain_state; \
for (
int i = n; !
ctx->is_disabled &&
i < n +
size;
i++) \
dst[
i] =
src[
i] * gain; \
} \
} \
}
Definition at line 279 of file af_speechnorm.c.
◆ FILTER_LINK_CHANNELS
#define FILTER_LINK_CHANNELS |
( |
|
name, |
|
|
|
ptype, |
|
|
|
tlerp |
|
) |
| |
◆ AVFILTER_DEFINE_CLASS()
AVFILTER_DEFINE_CLASS |
( |
speechnorm |
| ) |
|
◆ get_pi_samples()
◆ available_samples()
◆ consume_pi()
◆ next_gain()
static double next_gain |
( |
AVFilterContext * |
ctx, |
|
|
double |
pi_max_peak, |
|
|
int |
bypass, |
|
|
double |
state |
|
) |
| |
|
static |
◆ next_pi()
◆ min_gain()
◆ dlerp()
static double dlerp |
( |
double |
min, |
|
|
double |
max, |
|
|
double |
mix |
|
) |
| |
|
static |
◆ flerp()
static float flerp |
( |
float |
min, |
|
|
float |
max, |
|
|
float |
mix |
|
) |
| |
|
static |
◆ filter_frame()
◆ activate()
◆ config_input()
◆ process_command()
static int process_command |
( |
AVFilterContext * |
ctx, |
|
|
const char * |
cmd, |
|
|
const char * |
args, |
|
|
char * |
res, |
|
|
int |
res_len, |
|
|
int |
flags |
|
) |
| |
|
static |
◆ uninit()
◆ speechnorm_options
◆ inputs
Initial value:= {
{
.name = "default",
},
}
Definition at line 542 of file af_speechnorm.c.
◆ outputs
Initial value:= {
{
.name = "default",
},
}
Definition at line 550 of file af_speechnorm.c.
◆ ff_af_speechnorm
Initial value:= {
.name = "speechnorm",
.priv_class = &speechnorm_class,
}
Definition at line 557 of file af_speechnorm.c.
static const AVFilterPad inputs[]
@ AV_SAMPLE_FMT_FLTP
float, planar
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this — just let it be. vf: default, minimum, maximum, flags; name is the option name.
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
The exact code depends on how similar the blocks are and how related they are to the rest of the code, and needs to apply these operations to the correct inlink or outlink if there are several. Macros are available to factor that when no extra processing is needed: inlink.
This structure describes decoded (raw) audio or video data.
static const AVFilterPad outputs[]
#define FILTER_INPUTS(array)
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. These lists are references to shared objects, used when the negotiation mechanism computes the intersection of the formats supported at each end of a link.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
@ AV_OPT_TYPE_CHANNEL_LAYOUT
#define i(width, name, range_min, range_max)
uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index)
Get the channel with the given index in channel_layout.
static void invert(float *h, int n)
static int activate(AVFilterContext *ctx)
static av_cold void uninit(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink)
static void next_pi(AVFilterContext *ctx, ChannelContext *cc, int bypass)
@ AV_SAMPLE_FMT_DBLP
double, planar
#define FILTER_OUTPUTS(array)
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() callback(s) called as usual even when the enable expression is false; the filter will disable filtering within the filter_frame() callback(s) itself.
#define FILTER_SAMPLEFMTS(...)