Go to the documentation of this file.
157 #define YAE_ATEMPO_MIN 0.5
158 #define YAE_ATEMPO_MAX 100.0
160 #define OFFSET(x) offsetof(ATempoContext, x)
163 {
"tempo",
"set tempo scale factor",
180 return &atempo->
frag[(atempo->
nfrag + 1) % 2];
257 uint32_t nlevels = 0;
258 float scale = 1.f, iscale = 1.f;
268 atempo->
window = sample_rate / 24;
275 if (pot < atempo->
window) {
328 double h = 0.5 * (1.0 - cos(2.0 *
M_PI * t));
353 #define yae_init_xdat(scalar_type, scalar_max) \
355 const uint8_t *src_end = src + \
356 frag->nsamples * atempo->channels * sizeof(scalar_type); \
358 float *xdat = frag->xdat_in; \
361 if (atempo->channels == 1) { \
362 for (; src < src_end; xdat++) { \
363 tmp = *(const scalar_type *)src; \
364 src += sizeof(scalar_type); \
366 *xdat = (float)tmp; \
369 float s, max, ti, si; \
372 for (; src < src_end; xdat++) { \
373 tmp = *(const scalar_type *)src; \
374 src += sizeof(scalar_type); \
377 s = FFMIN((float)scalar_max, \
378 (float)fabsf(max)); \
380 for (i = 1; i < atempo->channels; i++) { \
381 tmp = *(const scalar_type *)src; \
382 src += sizeof(scalar_type); \
385 si = FFMIN((float)scalar_max, \
406 const uint8_t *
src = frag->
data;
432 const uint8_t **src_ref,
433 const uint8_t *src_end,
437 const uint8_t *
src = *src_ref;
438 const int read_size = stop_here - atempo->
position[0];
440 if (stop_here <= atempo->position[0]) {
447 while (atempo->
position[0] < stop_here &&
src < src_end) {
448 int src_samples = (src_end -
src) / atempo->
stride;
451 int nsamples =
FFMIN(read_size, src_samples);
455 nsamples =
FFMIN(nsamples, atempo->
ring);
507 const uint8_t **src_ref,
508 const uint8_t *src_end)
515 const uint8_t *
a, *
b;
516 int i0, i1, n0, n1, na, nb;
519 if (src_ref &&
yae_load_data(atempo, src_ref, src_end, stop_here) != 0) {
526 stop_here - atempo->
position[0] : 0;
530 (uint32_t)(atempo->
window - missing) : 0;
543 if (zeros == nsamples) {
565 i0 = frag->
position[0] + zeros - start;
566 i1 = i0 < na ? 0 : i0 - na;
568 n0 = i0 < na ?
FFMIN(na - i0, (
int)(nsamples - zeros)) : 0;
569 n1 = nsamples - zeros - n0;
619 for (
i = 0;
i <=
window;
i++, xa++, xb++, xc++) {
620 xc->
re = (xa->re * xb->re + xa->im * xb->im);
621 xc->
im = (xa->im * xb->re - xa->re * xb->im);
625 c2r_fn(complex_to_real, xcorr, xcorr_in,
sizeof(*xc));
639 float *correlation_in,
644 int best_offset = -drift;
645 float best_metric = -FLT_MAX;
670 for (
i = i0;
i < i1;
i++, xcorr++) {
671 float metric = *xcorr;
674 float drifti = (
float)(drift +
i);
677 if (metric > best_metric) {
678 best_metric = metric;
697 const double prev_output_position =
701 const double ideal_output_position =
704 const int drift = (int)(prev_output_position - ideal_output_position);
706 const int delta_max = atempo->
window / 2;
732 #define yae_blend(scalar_type) \
734 const scalar_type *aaa = (const scalar_type *)a; \
735 const scalar_type *bbb = (const scalar_type *)b; \
737 scalar_type *out = (scalar_type *)dst; \
738 scalar_type *out_end = (scalar_type *)dst_end; \
741 for (i = 0; i < overlap && out < out_end; \
742 i++, atempo->position[1]++, wa++, wb++) { \
747 for (j = 0; j < atempo->channels; \
748 j++, aaa++, bbb++, out++) { \
749 float t0 = (float)*aaa; \
750 float t1 = (float)*bbb; \
753 frag->position[0] + i < 0 ? \
755 (scalar_type)(t0 * w0 + t1 * w1); \
758 dst = (uint8_t *)out; \
783 const int64_t overlap = stop_here - start_here;
788 const float *wa = atempo->
hann + ia;
789 const float *wb = atempo->
hann +
ib;
791 const uint8_t *
a = prev->
data + ia * atempo->
stride;
794 uint8_t *
dst = *dst_ref;
798 overlap <= frag->nsamples);
825 const uint8_t **src_ref,
826 const uint8_t *src_end,
844 if (!atempo->
nfrag) {
917 if (!atempo->
nfrag) {
951 while (atempo->
position[1] < overlap_end) {
967 av_assert0(start_here <= stop_here && frag->position[1] <= start_here);
970 dst = (uint8_t *)*dst_ref;
972 src_size = (int)(stop_here - start_here) * atempo->
stride;
973 dst_size = dst_end -
dst;
974 nbytes =
FFMIN(src_size, dst_size);
982 *dst_ref = (uint8_t *)
dst;
1022 int sample_rate = (int)
inlink->sample_rate;
1061 int n_out = (int)(0.5 + ((
double)n_in) / atempo->
tempo);
1063 const uint8_t *
src = src_buffer->
data[0];
1064 const uint8_t *src_end =
src + n_in * atempo->
stride;
1071 while (
src < src_end) {
1111 int n_max = atempo->
ring;
1115 while (err ==
AVERROR(EAGAIN)) {
1186 .priv_class = &atempo_class,
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
static int yae_update(AVFilterContext *ctx)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static int push_samples(ATempoContext *atempo, AVFilterLink *outlink, int n_out)
static int config_props(AVFilterLink *inlink)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
#define AVERROR_EOF
End of file.
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
static void yae_downmix(ATempoContext *atempo, AudioFragment *frag)
Initialize complex data buffer of a given audio fragment with down-mixed mono data of appropriate sca...
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define FILTER_INPUTS(array)
static int yae_load_data(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end, int64_t stop_here)
Populate the internal data buffer on as-needed basis.
This structure describes decoded (raw) audio or video data.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
const char * name
Filter name.
A link between two filters.
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
static av_cold int init(AVFilterContext *ctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static SDL_Window * window
AVTXContext * complex_to_real
static const AVFilterPad atempo_outputs[]
A filter pad used for either input or output.
#define AV_OPT_FLAG_AUDIO_PARAM
static void yae_apply(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end, uint8_t **dst_ref, uint8_t *dst_end)
Feed as much data to the filter as it is able to consume and receive as much processed data in the de...
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
#define yae_init_xdat(scalar_type, scalar_max)
A helper macro for initializing complex data buffer with scalar data of a given type.
@ AV_OPT_TYPE_DOUBLE
Underlying C type is double.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
const AVFilter ff_af_atempo
#define FILTER_OUTPUTS(array)
Describe the class of an AVClass context structure.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Rational number (pair of numerator and denominator).
AVFILTER_DEFINE_CLASS(atempo)
static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
static void yae_xcorr_via_rdft(float *xcorr_in, float *xcorr, AVTXContext *complex_to_real, av_tx_fn c2r_fn, const AVComplexFloat *xa, const AVComplexFloat *xb, const int window)
Calculate cross-correlation via rDFT.
static AudioFragment * yae_curr_frag(ATempoContext *atempo)
static int yae_reset(ATempoContext *atempo, enum AVSampleFormat format, int sample_rate, int channels)
Prepare filter for processing audio data of given format, sample rate and number of channels.
static int process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
static int yae_overlap_add(ATempoContext *atempo, uint8_t **dst_ref, uint8_t *dst_end)
Blend the overlap region of previous and current audio fragment and output the results to the given d...
#define AV_OPT_FLAG_FILTERING_PARAM
A generic parameter which can be set by the user for filtering.
static int yae_load_frag(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end)
Populate current audio fragment data buffer.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
static AudioFragment * yae_prev_frag(ATempoContext *atempo)
int sample_rate
Sample rate of the audio data.
#define AV_NOPTS_VALUE
Undefined timestamp value.
AVFilterContext * src
source filter
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
The reader does not expect b to be semantically here and if the code is changed by maybe adding a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was introduced: SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
it's the only field you need to keep assuming you have a context There is some magic you don't need to care about around this just let it vf offset
#define yae_blend(scalar_type)
A helper macro for blending the overlap region of previous and current audio fragment.
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
int sample_rate
samples per second
int nb_samples
number of audio samples (per channel) described by this frame
#define i(width, name, range_min, range_max)
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
@ AV_SAMPLE_FMT_U8
unsigned 8 bits
static void yae_advance_to_next_frag(ATempoContext *atempo)
Prepare for loading next audio fragment.
#define av_malloc_array(a, b)
static enum AVSampleFormat sample_fmts[]
static void yae_clear(ATempoContext *atempo)
Reset filter to initial state, do not deallocate existing local buffers.
AVSampleFormat
Audio sample formats.
@ AV_SAMPLE_FMT_S16
signed 16 bits
#define FILTER_SAMPLEFMTS_ARRAY(array)
const char * name
Pad name.
void * av_calloc(size_t nmemb, size_t size)
static const AVFilterPad atempo_inputs[]
static int yae_flush(ATempoContext *atempo, uint8_t **dst_ref, uint8_t *dst_end)
Flush any buffered data from the filter.
static const AVOption atempo_options[]
static int request_frame(AVFilterLink *outlink)
enum AVSampleFormat format
@ AV_TX_FLOAT_RDFT
Real to complex and complex to real DFTs.
AVTXContext * real_to_complex
static int yae_adjust_position(ATempoContext *atempo)
Adjust current fragment position for better alignment with previous fragment.
#define AV_OPT_FLAG_RUNTIME_PARAM
A generic parameter which can be set by the user at runtime.
FilterState
Filter state machine states.
static void correlation(int32_t *corr, int32_t *ener, const int16_t *buffer, int16_t lag, int16_t blen, int16_t srange, int16_t scale)
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
static int yae_align(AudioFragment *frag, const AudioFragment *prev, const int window, const int delta_max, const int drift, float *correlation_in, float *correlation, AVTXContext *complex_to_real, av_tx_fn c2r_fn)
Calculate alignment offset for given fragment relative to the previous fragment.
static void scale(int *out, const int *in, const int w, const int h, const int shift)
static void yae_release_buffers(ATempoContext *atempo)
Reset filter to initial state and deallocate all buffers.
static av_cold void uninit(AVFilterContext *ctx)
#define flags(name, subs,...)
@ AV_SAMPLE_FMT_DBL
double
@ AV_SAMPLE_FMT_S32
signed 32 bits
A fragment of audio waveform.