static void fcmul_add_c(float *sum, const float *t, const float *c,
                        ptrdiff_t len)
{
    int n;

    for (n = 0; n < len; n++) {
        const float cre = c[2 * n    ];
        const float cim = c[2 * n + 1];
        const float tre = t[2 * n    ];
        const float tim = t[2 * n + 1];

        sum[2 * n    ] += tre * cre - tim * cim;
        sum[2 * n + 1] += tre * cim + tim * cre;
    }

    /* trailing real-only coefficient stored at index len in the packed layout */
    sum[2 * n] += t[2 * n] * c[2 * n];
}
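/*
 * Hedged sketch (not part of af_afir.c): exercises the packed complex
 * multiply-accumulate that fcmul_add_c() above implements, on a two-bin
 * buffer plus the trailing real-only bin. All input values are arbitrary.
 */
#include <stdio.h>

int main(void)
{
    /* two complex bins (interleaved re/im) plus the trailing real-only bin */
    float t[6]   = { 1.0f, 2.0f,  0.5f, -1.0f,  3.0f, 0.0f };
    float c[6]   = { 0.0f, 1.0f,  2.0f,  2.0f,  0.5f, 0.0f };
    float sum[6] = { 0 };

    for (int n = 0; n < 2; n++) {
        const float tre = t[2 * n], tim = t[2 * n + 1];
        const float cre = c[2 * n], cim = c[2 * n + 1];

        sum[2 * n    ] += tre * cre - tim * cim;   /* real part of t*c      */
        sum[2 * n + 1] += tre * cim + tim * cre;   /* imaginary part of t*c */
    }
    sum[2 * 2] += t[2 * 2] * c[2 * 2];             /* real-only tail bin    */

    for (int n = 0; n < 3; n++)
        printf("bin %d: %+f %+fi\n", n, sum[2 * n], sum[2 * n + 1]);
    return 0;
}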
    /* fir_quantum(): per-sample loops over the current quantum (bodies elided) */
    for (n = 0; n < nb_samples; n++) { /* ... */ }

    memset(sum, 0, sizeof(*sum) * seg->fft_length);    /* clear the spectrum accumulator */
    memcpy(block, src, sizeof(*src) * seg->part_size); /* load the current input block   */

    for (n = 0; n < nb_samples; n++) { /* ... */ }

    /* fir_channels(): last channel (exclusive) handled by this worker job */
    const int end = (out->channels * (jobnr+1)) / nb_jobs;
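/*
 * Hedged sketch (not part of af_afir.c): the start/end arithmetic used to
 * split channels across slice-threading jobs, printed for made-up channel
 * and job counts; the `start` formula is assumed by symmetry with `end` above.
 */
#include <stdio.h>

int main(void)
{
    const int channels = 6, nb_jobs = 4;

    for (int jobnr = 0; jobnr < nb_jobs; jobnr++) {
        const int start = (channels *  jobnr)      / nb_jobs;
        const int end   = (channels * (jobnr + 1)) / nb_jobs;
        printf("job %d handles channels [%d, %d)\n", jobnr, start, end);
    }
    return 0;
}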
    /* drawtext(): walk the string and blit 8x8 CGA font glyphs, one byte per
       glyph row, testing bits from the MSB down */
    for (i = 0; txt[i]; i++) {
        for (char_y = 0; char_y < font_height; char_y++) {
            for (mask = 0x80; mask; mask >>= 1) {
                if (font[txt[i] * font_height + char_y] & mask)
                    ; /* plot one opaque pixel of the glyph (elided) */
            }
        }
    }
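/*
 * Hedged sketch (not from af_afir.c): dump one 8x8 glyph bitmap as ASCII art,
 * scanning bits from the MSB the same way the drawtext() loop above does.
 * The glyph bytes are a made-up arrow shape, not taken from avpriv_cga_font.
 */
#include <stdio.h>

int main(void)
{
    const unsigned char glyph[8] = { 0x18, 0x3C, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x00 };

    for (int row = 0; row < 8; row++) {
        for (int mask = 0x80; mask; mask >>= 1)
            putchar(glyph[row] & mask ? '#' : '.');
        putchar('\n');
    }
    return 0;
}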
    /* draw_line(): Bresenham-style setup and the loop's termination test */
    int dx = FFABS(x1-x0);
    int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
    int err = (dx>dy ? dx : -dy) / 2, e2;

    if (x0 == x1 && y0 == y1)
        break;
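/*
 * Hedged sketch (not part of af_afir.c): a minimal, self-contained
 * Bresenham-style rasterizer over a plain byte buffer, using the same
 * error-accumulator idea as draw_line() above. The buffer layout and the
 * put_pixel() helper are assumptions made for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

static void put_pixel(unsigned char *buf, int stride, int x, int y, unsigned char v)
{
    buf[y * stride + x] = v;
}

static void bresenham(unsigned char *buf, int stride,
                      int x0, int y0, int x1, int y1, unsigned char v)
{
    int dx = abs(x1 - x0), sx = x0 < x1 ? 1 : -1;
    int dy = abs(y1 - y0), sy = y0 < y1 ? 1 : -1;
    int err = (dx > dy ? dx : -dy) / 2;

    for (;;) {
        put_pixel(buf, stride, x0, y0, v);
        if (x0 == x1 && y0 == y1)
            break;
        int e2 = err;
        if (e2 > -dx) { err -= dy; x0 += sx; }
        if (e2 <  dy) { err += dx; y0 += sy; }
    }
}

int main(void)
{
    enum { W = 16, H = 8 };
    unsigned char buf[W * H] = { 0 };

    bresenham(buf, W, 1, 1, 14, 6, 1);

    for (int y = 0; y < H; y++) {
        for (int x = 0; x < W; x++)
            putchar(buf[y * W + x] ? '#' : '.');
        putchar('\n');
    }
    return 0;
}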
    /* draw_response(): compute magnitude, phase and group delay of the IR
       across s->w frequency bins */
    float *mag, *phase, *delay, min = FLT_MAX, max = FLT_MIN;
    float min_delay = FLT_MAX, max_delay = FLT_MIN;
    int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;

    /* mag/phase/delay are allocated with av_malloc_array() (elided) */
    if (!mag || !phase || !delay)
        goto end;

    for (i = 0; i < s->w; i++) {
        double w = i * M_PI / (s->w - 1);
        double div, real_num = 0., imag_num = 0., real = 0., imag = 0.;

        for (x = 0; x < s->nb_taps; x++) {
            real += cos(-x * w) * src[x];
            imag += sin(-x * w) * src[x];
            real_num += cos(-x * w) * src[x] * x;
            imag_num += sin(-x * w) * src[x] * x;
        }

        mag[i]   = hypot(real, imag);
        phase[i] = atan2(imag, real);
        div = real * real + imag * imag;
        delay[i] = (real_num * real + imag_num * imag) / div;

        min = fminf(min, mag[i]);
        max = fmaxf(max, mag[i]);
        min_delay = fminf(min_delay, delay[i]);
        max_delay = fmaxf(max_delay, delay[i]);
    }
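/*
 * Hedged sketch (not from af_afir.c): evaluate the DTFT of a short FIR at one
 * normalized frequency w and derive magnitude, phase and group delay the same
 * way as the loop above: H(w) = sum_x h[x] e^{-jwx}, and the group delay is
 * Re{ (sum_x x*h[x]e^{-jwx}) * conj(H(w)) } / |H(w)|^2. The taps[] values and
 * the frequency are made up for illustration.
 */
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int main(void)
{
    const double taps[] = { 0.25, 0.5, 0.25 };       /* arbitrary example FIR */
    const int nb_taps = 3;
    const double w = M_PI / 4;                       /* normalized frequency  */
    double real = 0., imag = 0., real_num = 0., imag_num = 0.;

    for (int x = 0; x < nb_taps; x++) {
        real     += cos(-x * w) * taps[x];
        imag     += sin(-x * w) * taps[x];
        real_num += cos(-x * w) * taps[x] * x;
        imag_num += sin(-x * w) * taps[x] * x;
    }

    printf("magnitude   = %f\n", hypot(real, imag));
    printf("phase       = %f rad\n", atan2(imag, real));
    printf("group delay = %f samples\n",
           (real_num * real + imag_num * imag) / (real * real + imag * imag));
    return 0;
}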
    /* map each measurement to a pixel row and connect neighbours with lines */
    for (i = 0; i < s->w; i++) {
        int ymag = mag[i] / max * (s->h - 1);
        int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
        int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1);

        ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
        yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
        ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);

        if (prev_yphase < 0)
            prev_yphase = yphase;
        if (prev_ydelay < 0)
            prev_ydelay = ydelay;

        /* the magnitude curve is drawn the same way (elided) */
        draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
        draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);

        prev_yphase = yphase;
        prev_ydelay = ydelay;
    }
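/*
 * Hedged sketch (not from af_afir.c): map a measurement onto a pixel row the
 * way the loop above does, i.e. scale into [0, h-1], clip, then flip so larger
 * values land nearer the top of the image. clip() is a local stand-in for
 * av_clip(); the value range is made up.
 */
#include <stdio.h>

static int clip(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    const int h = 100;
    const double value = 0.75, min = 0.0, max = 1.0;

    int y = (value - min) / (max - min) * (h - 1);
    y = h - 1 - clip(y, 0, h - 1);

    printf("value %.2f -> row %d of %d\n", value, y, h);
    return 0;
}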
    if (s->w > 400 && s->h > 100) {
        drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max);
        drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);

        drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min);
        drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);

        drawtext(out, 2, 22, "Max Delay:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max_delay);
        drawtext(out, 11 * 8 + 2, 22, text, 0xDDDDDDDD);

        drawtext(out, 2, 32, "Min Delay:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min_delay);
        drawtext(out, 11 * 8 + 2, 32, text, 0xDDDDDDDD);
    }
static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg,
                        int offset, int nb_partitions, int part_size)
    /* convert_coeffs(): split the impulse response into segments whose
       partition size grows toward max_part_size */
    int left, offset = 0, part_size, max_part_size;

    for (i = 0; left > 0; i++) {
        int step = part_size == max_part_size ? INT_MAX : 1 + (i == 0);
        int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);

        /* init_segment() is called here for this run of partitions (elided) */
        offset += nb_partitions * part_size;
        left -= nb_partitions * part_size;
        part_size *= 2;
        part_size = FFMIN(part_size, max_part_size);
    }
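/*
 * Hedged sketch (not part of af_afir.c): print the partition plan produced by
 * the loop above for a given tap count, assuming partition sizes start at
 * min_part and double until max_part. min_part, max_part and nb_taps are
 * made-up inputs.
 */
#include <limits.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    int nb_taps = 4096, min_part = 256, max_part = 1024;
    int left = nb_taps, offset = 0, part_size = min_part;

    for (int i = 0; left > 0; i++) {
        int step = part_size == max_part ? INT_MAX : 1 + (i == 0);
        int nb_partitions = MIN(step, (left + part_size - 1) / part_size);

        printf("segment %d: %d partition(s) of %d taps at offset %d\n",
               i, nb_partitions, part_size, offset);

        offset += nb_partitions * part_size;
        left   -= nb_partitions * part_size;
        part_size = MIN(part_size * 2, max_part);
    }
    return 0;
}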
    /* convert_coeffs(): accumulate IR "power" for gain normalization; the
       different auto-gain modes sum either absolute tap values or squared
       taps, and the RMS mode derives the gain with sqrtf() */
    power += FFABS(time[i]);

    power += time[i] * time[i];

    s->gain = sqrtf(ch / power);
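/*
 * Hedged sketch (not part of af_afir.c): normalize a small impulse response so
 * that the sum of squared taps becomes 1, mirroring the power accumulation and
 * sqrtf() gain above for a single channel. The tap values are arbitrary.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
    float taps[4] = { 0.5f, 1.0f, 0.25f, -0.5f };
    float power = 0.f;

    for (int i = 0; i < 4; i++)
        power += taps[i] * taps[i];

    const float gain = sqrtf(1.f / power);   /* one channel's worth of energy */
    for (int i = 0; i < 4; i++)
        taps[i] *= gain;

    printf("gain = %f\n", gain);
    return 0;
}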
    /* convert_coeffs(): prepare one partition's frequency-domain coefficients */
    const float scale = 1.f / seg->part_size;
    const int remaining = s->nb_taps - toffset;

    memset(block, 0, sizeof(*block) * seg->fft_length);
    memcpy(block, time + toffset, size * sizeof(*block));

    /* forward RDFT of the zero-padded block (elided), then repack the
       half-complex output into explicit re/im pairs */
    coeff[coffset].re = block[0] * scale;
    coeff[coffset].im = 0;
    for (n = 1; n < seg->part_size; n++) {
        coeff[coffset + n].re = block[2 * n    ] * scale;
        coeff[coffset + n].im = block[2 * n + 1] * scale;
    }
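/*
 * Hedged sketch (not from af_afir.c): run libavcodec's av_rdft API on one real
 * block and walk the result the same way the repacking loop above does. The
 * bin layout assumed here is data[0] = DC, data[1] = Nyquist, then interleaved
 * re/im pairs for the remaining bins; scaling conventions are left to the
 * library.
 */
#include <stdio.h>
#include <string.h>
#include <libavcodec/avfft.h>
#include <libavutil/mem.h>

int main(void)
{
    enum { NBITS = 4, N = 1 << NBITS };              /* 16-point transform */
    RDFTContext *rdft = av_rdft_init(NBITS, DFT_R2C);
    FFTSample *data = av_malloc(sizeof(*data) * N);  /* aligned allocation */

    if (!rdft || !data)
        return 1;

    memset(data, 0, sizeof(*data) * N);
    data[0] = 1.f;                                   /* unit impulse */

    av_rdft_calc(rdft, data);

    printf("DC = %f, Nyquist = %f\n", data[0], data[1]);
    for (int n = 1; n < N / 2; n++)
        printf("bin %2d: re = %f, im = %f\n", n, data[2 * n], data[2 * n + 1]);

    av_free(data);
    av_rdft_end(rdft);
    return 0;
}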
    /* check_ir(): reject impulse responses longer than the configured maximum */
    int nb_taps, max_nb_taps;

    if (nb_taps > max_nb_taps) {
        av_log(ctx, AV_LOG_ERROR, "Too big number of coefficients: %d > %d.\n",
               nb_taps, max_nb_taps);
        /* reject the oversized impulse response (elided) */
    }
    /* activate() locals */
    int ret, status, available, wanted;
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioFIRContext, x)

    { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },

    .description   = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in 2nd stream."),
    .priv_class    = &afir_class,