#define OFFSET(x) offsetof(ShowFreqsContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING,
      {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
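The "colors" option is a '|'-separated list of per-channel colors; elsewhere in this file the list is split with av_strtok() and each token is converted to RGBA with av_parse_color() (both appear in the cross references below). A minimal standalone sketch of that parsing pattern, assuming a hypothetical three-color list and linking against libavutil:

#include <stdio.h>
#include <stdint.h>
#include <libavutil/avstring.h>
#include <libavutil/mem.h>
#include <libavutil/parseutils.h>

int main(void)
{
    /* Hypothetical color list in the same syntax as the option default. */
    char *colors = av_strdup("red|green|blue");
    char *saveptr = NULL, *color;
    uint8_t fg[4];

    for (color = av_strtok(colors, " |", &saveptr); color;
         color = av_strtok(NULL, " |", &saveptr)) {
        /* av_parse_color() fills fg with R, G, B, A on success. */
        if (av_parse_color(fg, color, -1, NULL) >= 0)
            printf("%-6s -> R=%u G=%u B=%u A=%u\n",
                   color, fg[0], fg[1], fg[2], fg[3]);
    }
    av_free(colors);
    return 0;
}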
    /* config_output(): derive the number of frequency bins and the window
     * size from the fft_bits option */
    s->nb_freq = 1 << (s->fft_bits - 1);
    s->win_size = s->nb_freq << 1;

    /* error path taken when av_fft_init() fails */
    av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
           "The window size might be too high.\n");

    /* release per-channel buffers from any previous configuration */
    for (i = 0; i < s->nb_channels; i++) {
        /* ... */
    }

    s->nb_channels = inlink->channels;

    /* one FFT buffer and one running-average buffer per channel */
    s->fft_data = av_calloc(s->nb_channels, sizeof(*s->fft_data));
    s->avg_data = av_calloc(s->nb_channels, sizeof(*s->avg_data));
    for (i = 0; i < s->nb_channels; i++) {
        s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
        s->avg_data[i] = av_calloc(s->nb_freq, sizeof(**s->avg_data));
        if (!s->fft_data[i] || !s->avg_data[i])
            return AVERROR(ENOMEM);
    }

    /* window-function lookup table, overlap and hop size */
    s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    if (s->overlap == 1.)
        s->overlap = overlap;
    s->hop_size = (1. - s->overlap) * s->win_size;
    if (s->hop_size < 1) {
        /* ... */
    }

    /* normalization factor: sum of the squared window coefficients */
    for (s->scale = 0, i = 0; i < s->win_size; i++) {
        s->scale += s->window_func_lut[i] * s->window_func_lut[i];
    }
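For a concrete feel of the overlap/hop-size and normalization arithmetic above, here is a self-contained sketch that uses a Hann window in place of the filter's generate_window_func() table; the window size and overlap value below are assumptions for illustration, not the filter's defaults:

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int main(void)
{
    enum { WIN_SIZE = 2048 };      /* assumed: 1 << fft_bits with fft_bits = 11 */
    const double overlap = 0.5;    /* assumed value of the 'overlap' option */
    static float window[WIN_SIZE];
    double scale = 0.0;
    int hop_size, i;

    /* Hann window, standing in for the filter's window-function LUT. */
    for (i = 0; i < WIN_SIZE; i++)
        window[i] = 0.5f - 0.5f * cosf(2.0f * (float)M_PI * i / (WIN_SIZE - 1));

    /* Same arithmetic as above: hop size from the overlap fraction,
     * scale as the sum of squared window coefficients. */
    hop_size = (1. - overlap) * WIN_SIZE;
    for (i = 0; i < WIN_SIZE; i++)
        scale += window[i] * window[i];

    printf("hop_size = %d, scale = %f\n", hop_size, scale);
    return 0;
}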
    /* draw_dot(): if the destination pixel already holds a non-black color,
     * the new color is merged into it instead of overwriting it */
    if ((color & 0xffffff) != 0)

    /* get_sx(): x position of frequency bin f, for the linear, log and
     * reverse-log frequency scales respectively */
    return (s->w / (float)s->nb_freq) * f;
    return s->w - pow(s->w, (s->nb_freq - f - 1) / (s->nb_freq - 1.));
    return pow(s->w, f / (s->nb_freq - 1.));

    /* get_bsize(): width in pixels of frequency bin f, for the same scales */
    return s->w / (float)s->nb_freq;
    return pow(s->w, (s->nb_freq - f - 1) / (s->nb_freq - 1.)) -
           pow(s->w, (s->nb_freq - f - 2) / (s->nb_freq - 1.));
    return pow(s->w, (f + 1) / (s->nb_freq - 1.)) -
           pow(s->w,  f      / (s->nb_freq - 1.));
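To see what the log-scale expressions above do, this small standalone program evaluates the same formulas for a handful of bins; the output width and bin count are assumptions for illustration:

#include <math.h>
#include <stdio.h>

/* Same expressions as the log-scale branches above. */
static double log_sx(double w, int nb_freq, int f)
{
    return w - pow(w, (nb_freq - f - 1) / (nb_freq - 1.));
}

static double log_bsize(double w, int nb_freq, int f)
{
    return pow(w, (nb_freq - f - 1) / (nb_freq - 1.)) -
           pow(w, (nb_freq - f - 2) / (nb_freq - 1.));
}

int main(void)
{
    const double w = 1024;   /* assumed output width in pixels */
    const int nb_freq = 512; /* assumed number of frequency bins */

    /* Low bins start near x = 0 and get wide bars; high bins end up
     * near x = w with progressively narrower bars. */
    for (int f = 0; f < nb_freq; f += 128)
        printf("bin %3d: x = %8.2f  width = %6.2f\n",
               f, log_sx(w, nb_freq, f), log_bsize(w, nb_freq, f));
    return 0;
}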
    /* plot_freq(): draw one frequency bin f of channel ch with amplitude a */
static void plot_freq(ShowFreqsContext *s, int ch, double a, int f,
                      uint8_t fg[4], int *prev_y, AVFrame *out,
                      AVFilterLink *outlink)

    const float min = s->minamp;
    const float avg = s->avg_data[ch][f];
    int end = outlink->h;

    /* logarithmic amplitude scale: a == 1 maps to 0, a == minamp maps to 1 */
    a = log(av_clipd(a, min, 1)) / log(min);

    /* combined channel mode: use the full output height */
    y = a * outlink->h - 1;
    /* separate channel mode: each channel gets its own horizontal band */
    end = (outlink->h / s->nb_channels) * (ch + 1);
    y = (outlink->h / s->nb_channels) * ch + a * (outlink->h / s->nb_channels) - 1;

    /* optional running average of y over past frames */
    y = s->avg_data[ch][f];

    /* line mode: draw the bin and connect it vertically to the previous bin */
    for (x = sx + 1; x < sx + bsize && x < w; x++)
    for (i = y; i <= *prev_y; i++)
    for (i = *prev_y; i <= y; i++)
    for (x = sx + 1; x < sx + bsize && x < w; x++)
    /* bar mode: fill every column of the bin from y down to end */
    for (x = sx; x < sx + bsize && x < w; x++)
    /* dot mode: a single row of dots at y */
    for (x = sx; x < sx + bsize && x < w; x++)
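A standalone illustration of that logarithmic amplitude mapping: amplitudes clipped to [minamp, 1] become a fraction of the display height, with full scale near the top row and minamp at the bottom. The height and the minamp value below are assumptions for illustration:

#include <math.h>
#include <stdio.h>

/* Clip helper standing in for av_clipd(). */
static double clipd(double a, double lo, double hi)
{
    return a < lo ? lo : a > hi ? hi : a;
}

int main(void)
{
    const double min = 1e-6; /* assumed minamp floor (about -120 dB) */
    const int h = 480;       /* assumed output height */
    const double amps[] = { 0.9, 0.1, 0.01, 1e-3, 1e-6 };

    for (int i = 0; i < 5; i++) {
        /* Same mapping as above: a near 1 lands near y = 0 (top),
         * a == min lands on the bottom row. */
        double a = log(clipd(amps[i], min, 1)) / log(min);
        int y = a * h - 1;
        printf("amplitude %-8g -> y = %d\n", amps[i], y);
    }
    return 0;
}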
    /* plot_freqs(): render one video frame from win_size input samples */
    const int win_size = s->win_size;

    /* clear the RGBA output frame */
    for (n = 0; n < outlink->h; n++)
        memset(out->data[0] + out->linesize[0] * n, 0, outlink->w * 4);

    /* fill the FFT input with windowed samples, zero-padding up to win_size */
    for (ch = 0; ch < s->nb_channels; ch++) {
        const float *p = (float *)in->extended_data[ch];

        for (n = 0; n < in->nb_samples; n++) {
            s->fft_data[ch][n].re = p[n] * s->window_func_lut[n];
            s->fft_data[ch][n].im = 0;
        }
        for (; n < win_size; n++) {
            s->fft_data[ch][n].re = 0;
            s->fft_data[ch][n].im = 0;
        }
    }

    /* run the FFT on each channel's sample set */
    for (ch = 0; ch < s->nb_channels; ch++) {
        /* ... */
    }

#define RE(x, ch) s->fft_data[ch][x].re
#define IM(x, ch) s->fft_data[ch][x].im
#define M(a, b) (sqrt((a) * (a) + (b) * (b)))

    /* per channel: pick its color, then plot the normalized magnitude
     * of every frequency bin */
    for (ch = 0; ch < s->nb_channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };

        /* the DC bin has no imaginary part */
        a = av_clipd(M(RE(0, ch), 0) / s->scale, 0, 1);

        for (f = 1; f < s->nb_freq; f++) {
            a = av_clipd(M(RE(f, ch), IM(f, ch)) / s->scale, 0, 1);
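The M() macro above is the bin magnitude; dividing by s->scale (the sum of squared window coefficients computed in config_output()) normalizes the magnitude before it is clipped to [0, 1]. Below is a standalone sketch of the same normalization, using a naive DFT of one windowed test tone instead of av_fft_calc(); the window size and tone frequency are assumptions for illustration:

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define N 256  /* assumed window size (the filter uses win_size) */

int main(void)
{
    double win[N], re[N / 2], im[N / 2], scale = 0.0;
    const double freq_bin = 16.0; /* assumed test tone, exactly on bin 16 */

    /* Hann window and its sum of squares, standing in for the LUT and scale. */
    for (int n = 0; n < N; n++) {
        win[n] = 0.5 - 0.5 * cos(2.0 * M_PI * n / (N - 1));
        scale += win[n] * win[n];
    }

    /* Naive DFT of a windowed sine (the filter uses av_fft_calc() instead). */
    for (int k = 0; k < N / 2; k++) {
        re[k] = im[k] = 0.0;
        for (int n = 0; n < N; n++) {
            double x = sin(2.0 * M_PI * freq_bin * n / N) * win[n];
            re[k] += x * cos(2.0 * M_PI * k * n / N);
            im[k] -= x * sin(2.0 * M_PI * k * n / N);
        }
    }

    /* Normalized magnitude, M(re, im) / scale, as fed to plot_freq(). */
    for (int k = 14; k <= 18; k++)
        printf("bin %d: %f\n", k, sqrt(re[k] * re[k] + im[k] * im[k]) / scale);
    return 0;
}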
    /* advance the running timestamp by one hop (hop_size samples) */
    s->pts += s->hop_size;
    /* uninit(): free the per-channel FFT and average buffers */
    for (i = 0; i < s->nb_channels; i++) {
    .priv_class = &showfreqs_class,
void av_audio_fifo_free(AVAudioFifo *af)
Free an AVAudioFifo.
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
av_cold void av_fft_end(FFTContext *s)
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
@ AV_SAMPLE_FMT_FLTP
float, planar
A list of supported channel layouts.
AVPixelFormat
Pixel format.
AVFilter ff_avf_showfreqs
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static enum AVSampleFormat sample_fmts[]
enum MovChannelLayoutTag * layouts
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
AVFilterFormats * in_formats
Lists of formats and channel layouts supported by the input and output filters respectively.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static av_cold int end(AVCodecContext *avctx)
This structure describes decoded (raw) audio or video data.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
static void draw_dot(AVFrame *out, int x, int y, uint8_t fg[4])
static const AVFilterPad showfreqs_inputs[]
void av_fft_permute(FFTContext *s, FFTComplex *z)
Do the permutation needed BEFORE calling ff_fft_calc().
static int query_formats(AVFilterContext *ctx)
const char * name
Filter name.
A link between two filters.
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Context for an Audio FIFO Buffer.
int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples)
Drain data from an AVAudioFifo.
A filter pad used for either input or output.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int config_output(AVFilterLink *outlink)
int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples)
Write data to an AVAudioFifo.
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
AVRational sample_aspect_ratio
agreed upon sample aspect ratio
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static const AVFilterPad outputs[]
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0,...
static enum AVPixelFormat pix_fmts[]
static int filter_frame(AVFilterLink *inlink)
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
#define av_realloc_f(p, o, n)
Describe the class of an AVClass context structure.
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Rational number (pair of numerator and denominator).
AVAudioFifo * av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, int nb_samples)
Allocate an AVAudioFifo.
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
static av_cold int init(AVFilterContext *ctx)
static void generate_window_func(float *lut, int N, int win_func, float *overlap)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static int activate(AVFilterContext *ctx)
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
static AVRational av_make_q(int num, int den)
Create an AVRational.
#define AV_NOPTS_VALUE
Undefined timestamp value.
static int plot_freqs(AVFilterLink *inlink, AVFrame *in)
AVFilterContext * src
source filter
FF_FILTER_FORWARD_WANTED(outlink, inlink)
int av_audio_fifo_size(AVAudioFifo *af)
Get the current number of samples in the AVAudioFifo available for reading.
#define i(width, name, range_min, range_max)
int w
agreed upon image width
uint8_t ** extended_data
pointers to the data planes/channels.
AVSampleFormat
Audio sample formats.
static int get_sx(ShowFreqsContext *s, int f)
const char * name
Pad name.
static void plot_freq(ShowFreqsContext *s, int ch, double a, int f, uint8_t fg[4], int *prev_y, AVFrame *out, AVFilterLink *outlink)
static const AVFilterPad showfreqs_outputs[]
FFTContext * av_fft_init(int nbits, int inverse)
Set up a complex FFT.
static const AVOption showfreqs_options[]
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
int h
agreed upon image height
static av_cold void uninit(AVFilterContext *ctx)
static float get_bsize(ShowFreqsContext *s, int f)
char * av_strdup(const char *s)
Duplicate a string.
int64_t frame_count_in
Number of past frames sent through the link.
FF_FILTER_FORWARD_STATUS(inlink, outlink)
AVFILTER_DEFINE_CLASS(showfreqs)
int av_audio_fifo_peek(AVAudioFifo *af, void **data, int nb_samples)
Peek data from an AVAudioFifo.
void av_fft_calc(FFTContext *s, FFTComplex *z)
Do a complex FFT with the parameters defined in av_fft_init().