Go to the documentation of this file.
158 #define YAE_ATEMPO_MIN 0.5
159 #define YAE_ATEMPO_MAX 100.0
161 #define OFFSET(x) offsetof(ATempoContext, x)
164 {
"tempo",
"set tempo scale factor",
181 return &atempo->
frag[(atempo->
nfrag + 1) % 2];
250 #define RE_MALLOC_OR_FAIL(field, field_size, element_size) \
253 field = av_calloc(field_size, element_size); \
255 yae_release_buffers(atempo); \
256 return AVERROR(ENOMEM); \
270 uint32_t nlevels = 0;
271 float scale = 1.f, iscale = 1.f;
287 if (pot < atempo->
window) {
327 double h = 0.5 * (1.0 - cos(2.0 *
M_PI * t));
350 #define yae_init_xdat(scalar_type, scalar_max) \
352 const uint8_t *src_end = src + \
353 frag->nsamples * atempo->channels * sizeof(scalar_type); \
355 float *xdat = frag->xdat_in; \
358 if (atempo->channels == 1) { \
359 for (; src < src_end; xdat++) { \
360 tmp = *(const scalar_type *)src; \
361 src += sizeof(scalar_type); \
363 *xdat = (float)tmp; \
366 float s, max, ti, si; \
369 for (; src < src_end; xdat++) { \
370 tmp = *(const scalar_type *)src; \
371 src += sizeof(scalar_type); \
374 s = FFMIN((float)scalar_max, \
375 (float)fabsf(max)); \
377 for (i = 1; i < atempo->channels; i++) { \
378 tmp = *(const scalar_type *)src; \
379 src += sizeof(scalar_type); \
382 si = FFMIN((float)scalar_max, \
403 const uint8_t *
src = frag->
data;
429 const uint8_t **src_ref,
430 const uint8_t *src_end,
434 const uint8_t *
src = *src_ref;
435 const int read_size = stop_here - atempo->
position[0];
437 if (stop_here <= atempo->position[0]) {
444 while (atempo->
position[0] < stop_here &&
src < src_end) {
445 int src_samples = (src_end -
src) / atempo->
stride;
448 int nsamples =
FFMIN(read_size, src_samples);
452 nsamples =
FFMIN(nsamples, atempo->
ring);
504 const uint8_t **src_ref,
505 const uint8_t *src_end)
510 int64_t missing, start, zeros;
512 const uint8_t *
a, *
b;
513 int i0, i1, n0, n1, na, nb;
516 if (src_ref &&
yae_load_data(atempo, src_ref, src_end, stop_here) != 0) {
523 stop_here - atempo->
position[0] : 0;
526 missing < (int64_t)atempo->
window ?
527 (uint32_t)(atempo->
window - missing) : 0;
541 memset(dst, 0, zeros * atempo->
stride);
542 dst += zeros * atempo->
stride;
545 if (zeros == nsamples) {
562 i0 = frag->
position[0] + zeros - start;
563 i1 = i0 < na ? 0 : i0 - na;
565 n0 = i0 < na ?
FFMIN(na - i0, (
int)(nsamples - zeros)) : 0;
566 n1 = nsamples - zeros - n0;
570 dst += n0 * atempo->
stride;
616 for (
i = 0;
i <=
window;
i++, xa++, xb++, xc++) {
617 xc->
re = (xa->re * xb->re + xa->im * xb->im);
618 xc->
im = (xa->im * xb->re - xa->re * xb->im);
622 c2r_fn(complex_to_real, xcorr, xcorr_in,
sizeof(*xc));
636 float *correlation_in,
641 int best_offset = -drift;
642 float best_metric = -FLT_MAX;
667 for (
i = i0;
i < i1;
i++, xcorr++) {
668 float metric = *xcorr;
671 float drifti = (
float)(drift +
i);
674 if (metric > best_metric) {
675 best_metric = metric;
694 const double prev_output_position =
698 const double ideal_output_position =
701 const int drift = (
int)(prev_output_position - ideal_output_position);
703 const int delta_max = atempo->
window / 2;
729 #define yae_blend(scalar_type) \
731 const scalar_type *aaa = (const scalar_type *)a; \
732 const scalar_type *bbb = (const scalar_type *)b; \
734 scalar_type *out = (scalar_type *)dst; \
735 scalar_type *out_end = (scalar_type *)dst_end; \
738 for (i = 0; i < overlap && out < out_end; \
739 i++, atempo->position[1]++, wa++, wb++) { \
744 for (j = 0; j < atempo->channels; \
745 j++, aaa++, bbb++, out++) { \
746 float t0 = (float)*aaa; \
747 float t1 = (float)*bbb; \
750 frag->position[0] + i < 0 ? \
752 (scalar_type)(t0 * w0 + t1 * w1); \
755 dst = (uint8_t *)out; \
780 const int64_t overlap = stop_here - start_here;
782 const int64_t ia = start_here - prev->
position[1];
783 const int64_t
ib = start_here - frag->
position[1];
785 const float *wa = atempo->
hann + ia;
786 const float *wb = atempo->
hann +
ib;
788 const uint8_t *
a = prev->
data + ia * atempo->
stride;
791 uint8_t *dst = *dst_ref;
795 overlap <= frag->nsamples);
822 const uint8_t **src_ref,
823 const uint8_t *src_end,
841 if (!atempo->
nfrag) {
914 if (!atempo->
nfrag) {
948 while (atempo->
position[1] < overlap_end) {
964 av_assert0(start_here <= stop_here && frag->position[1] <= start_here);
967 dst = (uint8_t *)*dst_ref;
969 src_size = (
int)(stop_here - start_here) * atempo->
stride;
970 dst_size = dst_end - dst;
971 nbytes =
FFMIN(src_size, dst_size);
973 memcpy(dst,
src, nbytes);
979 *dst_ref = (uint8_t *)dst;
1058 int n_out = (
int)(0.5 + ((
double)n_in) / atempo->
tempo);
1060 const uint8_t *
src = src_buffer->
data[0];
1061 const uint8_t *src_end =
src + n_in * atempo->
stride;
1068 while (
src < src_end) {
1108 int n_max = atempo->
ring;
1112 while (err ==
AVERROR(EAGAIN)) {
1183 .priv_class = &atempo_class,
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
static int yae_update(AVFilterContext *ctx)
Filter format negotiation: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, the filter declares the list of supported formats. For video that means pixel format; for audio that means channel layout, sample format, and sample rate. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
static int push_samples(ATempoContext *atempo, AVFilterLink *outlink, int n_out)
static int config_props(AVFilterLink *inlink)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
#define AVERROR_EOF
End of file.
The exact code depends on how similar the blocks are and how related they are to the rest of the filter, and it needs to apply these operations to the correct inlink or outlink if there are several. Macros are available to factor that out when no extra processing is needed on the inlink.
static void yae_downmix(ATempoContext *atempo, AudioFragment *frag)
Initialize complex data buffer of a given audio fragment with down-mixed mono data of appropriate scale.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int yae_load_data(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end, int64_t stop_here)
Populate the internal data buffer on as-needed basis.
This structure describes decoded (raw) audio or video data.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
#define AV_OPT_FLAG_RUNTIME_PARAM
a generic parameter which can be set by the user at runtime
const char * name
Filter name.
A link between two filters.
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration; (i)MDCTs with an odd length are currently not supported.
static av_cold int init(AVFilterContext *ctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static SDL_Window * window
AVTXContext * complex_to_real
static av_always_inline float scale(float x, float s)
static const AVFilterPad atempo_outputs[]
A filter pad used for either input or output.
static void yae_apply(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end, uint8_t **dst_ref, uint8_t *dst_end)
Feed as much data to the filter as it is able to consume and receive as much processed data in the destination buffer as it is able to produce.
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Filter format negotiation: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, the filter declares the list of supported formats. For video that means pixel format; for audio that means channel layout, sample format (the sample packing is implied by the sample format), and sample rate. The lists are not just lists — they are references to shared objects.
#define yae_init_xdat(scalar_type, scalar_max)
A helper macro for initializing complex data buffer with scalar data of a given type.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
const AVFilter ff_af_atempo
#define FILTER_INPUTS(array)
#define AV_OPT_FLAG_AUDIO_PARAM
Describe the class of an AVClass context structure.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Rational number (pair of numerator and denominator).
AVFILTER_DEFINE_CLASS(atempo)
static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
static void yae_xcorr_via_rdft(float *xcorr_in, float *xcorr, AVTXContext *complex_to_real, av_tx_fn c2r_fn, const AVComplexFloat *xa, const AVComplexFloat *xb, const int window)
Calculate cross-correlation via rDFT.
static AudioFragment * yae_curr_frag(ATempoContext *atempo)
static int yae_reset(ATempoContext *atempo, enum AVSampleFormat format, int sample_rate, int channels)
Prepare filter for processing audio data of given format, sample rate and number of channels.
static int process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
static int yae_overlap_add(ATempoContext *atempo, uint8_t **dst_ref, uint8_t *dst_end)
Blend the overlap region of previous and current audio fragment and output the results to the given destination buffer.
static int yae_load_frag(ATempoContext *atempo, const uint8_t **src_ref, const uint8_t *src_end)
Populate current audio fragment data buffer.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static AudioFragment * yae_prev_frag(ATempoContext *atempo)
int sample_rate
Sample rate of the audio data.
#define AV_NOPTS_VALUE
Undefined timestamp value.
#define FILTER_SAMPLEFMTS_ARRAY(array)
AVFilterContext * src
source filter
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
The reader does not expect b to be semantically negative here, and if the code is changed — by maybe adding a division or another operation — the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int (to use the same example, SUINT a).
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this — just let it be. (vf offset)
#define yae_blend(scalar_type)
A helper macro for blending the overlap region of previous and current audio fragment.
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
int sample_rate
samples per second
int nb_samples
number of audio samples (per channel) described by this frame
#define RE_MALLOC_OR_FAIL(field, field_size, element_size)
#define i(width, name, range_min, range_max)
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
@ AV_SAMPLE_FMT_U8
unsigned 8 bits
static void yae_advance_to_next_frag(ATempoContext *atempo)
Prepare for loading next audio fragment.
static enum AVSampleFormat sample_fmts[]
static void yae_clear(ATempoContext *atempo)
Reset filter to initial state, do not deallocate existing local buffers.
AVSampleFormat
Audio sample formats.
@ AV_SAMPLE_FMT_S16
signed 16 bits
const char * name
Pad name.
static const AVFilterPad atempo_inputs[]
static int yae_flush(ATempoContext *atempo, uint8_t **dst_ref, uint8_t *dst_end)
Flush any buffered data from the filter.
static const AVOption atempo_options[]
static int request_frame(AVFilterLink *outlink)
enum AVSampleFormat format
@ AV_TX_FLOAT_RDFT
Real to complex and complex to real DFTs.
AVTXContext * real_to_complex
static int yae_adjust_position(ATempoContext *atempo)
Adjust current fragment position for better alignment with previous fragment.
FilterState
Filter state machine states.
static void correlation(int32_t *corr, int32_t *ener, const int16_t *buffer, int16_t lag, int16_t blen, int16_t srange, int16_t scale)
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
static int yae_align(AudioFragment *frag, const AudioFragment *prev, const int window, const int delta_max, const int drift, float *correlation_in, float *correlation, AVTXContext *complex_to_real, av_tx_fn c2r_fn)
Calculate alignment offset for given fragment relative to the previous fragment.
static void yae_release_buffers(ATempoContext *atempo)
Reset filter to initial state and deallocate all buffers.
#define FILTER_OUTPUTS(array)
static av_cold void uninit(AVFilterContext *ctx)
#define flags(name, subs,...)
@ AV_SAMPLE_FMT_DBL
double
@ AV_SAMPLE_FMT_S32
signed 32 bits
A fragment of audio waveform.