FFmpeg
af_earwax.c
/*
 * Copyright (c) 2011 Mina Nagy Zaki
 * Copyright (c) 2000 Edward Beingessner And Sundry Contributors.
 * This source code is freely redistributable and may be used for any purpose.
 * This copyright notice must be maintained. Edward Beingessner And Sundry
 * Contributors are not responsible for the consequences of using this
 * software.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Stereo Widening Effect. Adds audio cues to move stereo image in
 * front of the listener. Adapted from the libsox earwax effect.
 */

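/*
 * Editorial note (not part of the original source): a minimal usage sketch.
 * The filter takes no options; query_formats() below pins it to 44100 Hz
 * stereo signed 16-bit input, so feeding arbitrary material typically looks
 * like
 *
 *     ffmpeg -i input.flac -af "aresample=44100,earwax" output.wav
 *
 * ffmpeg's format negotiation would insert the sample-rate and format
 * conversions automatically; the explicit aresample only makes the
 * 44.1 kHz assumption visible.
 */
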
#include "libavutil/channel_layout.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"

#define NUMTAPS 32

static const int8_t filt[NUMTAPS * 2] = {
/*  30°  330° */
    4,   -6,     /* 32 tap stereo FIR filter. */
    4,  -11,     /* One side filters as if the */
   -1,   -5,     /* signal was from 30 degrees */
    3,    3,     /* from the ear, the other as */
   -2,    5,     /* if 330 degrees. */
   -5,    0,
    9,    1,
    6,    3,     /*                       Input */
   -4,   -1,     /*                 Left        Right */
   -5,   -3,     /*               __________   __________ */
   -2,   -5,     /*              |          | |          | */
   -7,    1,     /*          .---| Hh,0(f)  | | Hh,0(f)  |---. */
    6,   -7,     /*         /    |__________| |__________|    \ */
   30,  -29,     /*        /            \         /            \ */
   12,   -3,     /*       /                  X                  \ */
  -11,    4,     /*      /                 /   \                 \ */
   -3,    7,     /*  ____V_____   _________V   V_________   _____V____ */
  -20,   23,     /* |          | |          | |          | |          | */
    2,    0,     /* | Hh,30(f) | | Hh,330(f)| | Hh,330(f)| | Hh,30(f) | */
    1,   -6,     /* |__________| |__________| |__________| |__________| */
  -14,   -5,     /*       \    ___    /             \    ___    / */
   15,  -18,     /*        \  /   \  /     _____     \  /   \  / */
    6,    7,     /*        `->| + |<--'   /     \    `->| + |<--' */
   15,  -10,     /*           \___/      _/     \_      \___/ */
  -14,   22,     /*              \      /         \      / */
   -7,   -2,     /*              `--->| |         | |<---' */
   -4,    9,     /*                   \_/         \_/ */
    6,  -12,     /*                                   */
    6,   -6,     /*                     Headphones */
    0,  -11,
    0,   -5,
    4,    0};
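
/*
 * Editorial note (not part of the original source): the coefficient table
 * interleaves two 32-tap FIR responses, filt[2*j] for the 30-degree
 * direction and filt[2*j+1] for the 330-degree direction. Each output
 * sample of one convolution is produced by scalarproduct() below as,
 * roughly,
 *
 *     y[n] = av_clip_int16( (sum_{j=0..31} x[n+j] * h[j]) >> 7 )
 *
 * where the >> 7 undoes the 8-bit signed scaling of the coefficients;
 * mix() then adds one such convolution of the left input and one of the
 * right input (clipped again) to form each output channel.
 */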

typedef struct EarwaxContext {
    int16_t filter[2][NUMTAPS];
    int16_t taps[4][NUMTAPS * 2];

    AVFrame *frame[2];
} EarwaxContext;

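/* The FIR coefficients are presumably tuned for 44.1 kHz material, so
 * format negotiation is restricted to stereo, planar signed 16-bit samples
 * at 44100 Hz; conversions are inserted by the framework as needed. */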
static int query_formats(AVFilterContext *ctx)
{
    static const int sample_rates[] = { 44100, -1 };
    int ret;

    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;

    if ((ret = ff_add_format                 (&formats, AV_SAMPLE_FMT_S16P  )) < 0 ||
        (ret = ff_set_common_formats         (ctx     , formats             )) < 0 ||
        (ret = ff_add_channel_layout         (&layout , AV_CH_LAYOUT_STEREO )) < 0 ||
        (ret = ff_set_common_channel_layouts (ctx     , layout              )) < 0 ||
        (ret = ff_set_common_samplerates     (ctx     , ff_make_format_list(sample_rates) )) < 0)
        return ret;

    return 0;
}

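/* Sliding dot product: for every input position up to endin, multiply
 * NUMTAPS consecutive samples by the given coefficients, scale the sum
 * back down by 2^7 and clip to int16. Returns the advanced output pointer
 * so callers can chain calls over a split input. */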
//FIXME: replace with DSPContext.scalarproduct_int16
static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin,
                                     const int16_t *filt, int16_t *out)
{
    int32_t sample;
    int16_t j;

    while (in < endin) {
        sample = 0;
        for (j = 0; j < NUMTAPS; j++)
            sample += in[j] * filt[j];
        *out = av_clip_int16(sample >> 7);
        out++;
        in++;
    }

    return out;
}

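/* De-interleave the coefficient table into one filter per direction:
 * filter[0] holds the 30-degree taps, filter[1] the 330-degree taps. */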
static int config_input(AVFilterLink *inlink)
{
    EarwaxContext *s = inlink->dst->priv;

    for (int i = 0; i < NUMTAPS; i++) {
        s->filter[0][i] = filt[i * 2];
        s->filter[1][i] = filt[i * 2 + 1];
    }

    return 0;
}

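/* Run one input channel through one of the two filters into plane
 * output_ch of the scratch frame s->frame[input_ch]. The last NUMTAPS
 * samples of history are kept in s->taps[tap_ch] so the convolution is
 * seamless across frame boundaries. */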
static void convolve(AVFilterContext *ctx, AVFrame *in,
                     int input_ch, int output_ch,
                     int filter_ch, int tap_ch)
{
    EarwaxContext *s = ctx->priv;
    int16_t *taps, *endin, *dst, *src;
    int len;

    taps = s->taps[tap_ch];
    dst  = (int16_t *)s->frame[input_ch]->data[output_ch];
    src  = (int16_t *)in->data[input_ch];

    len = FFMIN(NUMTAPS, in->nb_samples);
    // copy part of new input and process with saved input
    memcpy(taps+NUMTAPS, src, len * sizeof(*taps));
    dst = scalarproduct(taps, taps + len, s->filter[filter_ch], dst);

    // process current input
    if (in->nb_samples >= NUMTAPS) {
        endin = src + in->nb_samples - NUMTAPS;
        scalarproduct(src, endin, s->filter[filter_ch], dst);

        // save part of input for next round
        memcpy(taps, endin, NUMTAPS * sizeof(*taps));
    } else {
        memmove(taps, taps + in->nb_samples, NUMTAPS * sizeof(*taps));
    }
}

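/* Sum two convolved planes into one channel of the output frame,
 * clipping the result to the int16 range. */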
static void mix(AVFilterContext *ctx, AVFrame *out,
                int output_ch, int f0, int f1, int i0, int i1)
{
    EarwaxContext *s = ctx->priv;
    const int16_t *srcl = (const int16_t *)s->frame[f0]->data[i0];
    const int16_t *srcr = (const int16_t *)s->frame[f1]->data[i1];
    int16_t *dst = (int16_t *)out->data[output_ch];

    for (int n = 0; n < out->nb_samples; n++)
        dst[n] = av_clip_int16(srcl[n] + srcr[n]);
}

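/* Per-frame entry point: (re)allocate the two scratch frames if needed,
 * run the four convolutions (each input channel against each coefficient
 * set), then mix them pairwise into the left and right output channels. */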
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    EarwaxContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = ff_get_audio_buffer(outlink, in->nb_samples);

    for (int ch = 0; ch < 2; ch++) {
        if (!s->frame[ch] || s->frame[ch]->nb_samples < in->nb_samples) {
            av_frame_free(&s->frame[ch]);
            s->frame[ch] = ff_get_audio_buffer(outlink, in->nb_samples);
            if (!s->frame[ch]) {
                av_frame_free(&in);
                av_frame_free(&out);
                return AVERROR(ENOMEM);
            }
        }
    }

    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

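    /* convolve(ctx, in, input_ch, output_ch, filter_ch, tap_ch): each input
     * channel is filtered with both coefficient sets, each combination
     * keeping its own tap history; mix() then sums one plane derived from
     * the left input and one derived from the right input per output
     * channel. */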
    convolve(ctx, in, 0, 0, 0, 0);
    convolve(ctx, in, 0, 1, 1, 1);
    convolve(ctx, in, 1, 0, 0, 2);
    convolve(ctx, in, 1, 1, 1, 3);

    mix(ctx, out, 0, 0, 1, 1, 0);
    mix(ctx, out, 1, 0, 1, 0, 1);

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    EarwaxContext *s = ctx->priv;

    av_frame_free(&s->frame[0]);
    av_frame_free(&s->frame[1]);
}

static const AVFilterPad earwax_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad earwax_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_earwax = {
    .name           = "earwax",
    .description    = NULL_IF_CONFIG_SMALL("Widen the stereo image."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(EarwaxContext),
    .uninit         = uninit,
    .inputs         = earwax_inputs,
    .outputs        = earwax_outputs,
};
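
/*
 * Editorial note (not part of the original source): like every libavfilter
 * filter, this one is looked up by name at runtime, so an application can
 * reference it without touching this file directly, e.g.:
 *
 *     const AVFilter *earwax = avfilter_get_by_name("earwax");
 *
 * The filter is compiled in only when it is listed in
 * libavfilter/allfilters.c and enabled in the build configuration.
 */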