FFmpeg
af_aemphasis.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen, Damien Zammit and others
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/opt.h"
22 #include "avfilter.h"
23 #include "internal.h"
24 #include "audio.h"
25 
26 typedef struct BiquadCoeffs {
/* Plain coefficient set for one biquad section: a0..a2 are the feed-forward
 * (numerator) taps, b1/b2 the feedback taps; b0 is assumed normalized to 1
 * (see the magnitude formula in freq_gain()). No filter state is kept here. */
27  double a0, a1, a2, b1, b2;
28 } BiquadCoeffs;
29 
30 typedef struct BiquadD2 {
/* Runnable biquad: same coefficient layout as BiquadCoeffs plus w1/w2,
 * the two delay-line state values used by biquad() (direct form II:
 * w holds the intermediate signal, not past inputs/outputs). */
31  double a0, a1, a2, b1, b2, w1, w2;
32 } BiquadD2;
33 
34 typedef struct RIAACurve {
38 } RIAACurve;
39 
40 typedef struct AudioEmphasisContext {
41  const AVClass *class;
42  int mode, type;
44 
47 
48 #define OFFSET(x) offsetof(AudioEmphasisContext, x)
49 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
50 
51 static const AVOption aemphasis_options[] = {
 /* linear gains applied to each sample before and after the emphasis filter */
52  { "level_in", "set input gain", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 64, FLAGS },
53  { "level_out", "set output gain", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 64, FLAGS },
 /* mode 0 = reproduction, 1 = production — the two directions of the same
  * curve (the coefficient sets are swapped in config_input()) */
54  { "mode", "set filter mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "mode" },
55  { "reproduction", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
56  { "production", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
 /* emphasis curve preset; default 4 = CD. Types 7/8 are implemented with an
  * RBJ high-shelf, all others with a bilinear-transformed biquad (config_input()) */
57  { "type", "set filter type", OFFSET(type), AV_OPT_TYPE_INT, {.i64=4}, 0, 8, FLAGS, "type" },
58  { "col", "Columbia", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "type" },
59  { "emi", "EMI", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "type" },
60  { "bsi", "BSI (78RPM)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "type" },
61  { "riaa", "RIAA", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "type" },
62  { "cd", "Compact Disc (CD)", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "type" },
63  { "50fm", "50µs (FM)", 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, FLAGS, "type" },
64  { "75fm", "75µs (FM)", 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, FLAGS, "type" },
65  { "50kf", "50µs (FM-KF)", 0, AV_OPT_TYPE_CONST, {.i64=7}, 0, 0, FLAGS, "type" },
66  { "75kf", "75µs (FM-KF)", 0, AV_OPT_TYPE_CONST, {.i64=8}, 0, 0, FLAGS, "type" },
67  { NULL }
68 };
69 
70 AVFILTER_DEFINE_CLASS(aemphasis);
71 
72 static inline double biquad(BiquadD2 *bq, double in)
73 {
74  double n = in;
75  double tmp = n - bq->w1 * bq->b1 - bq->w2 * bq->b2;
76  double out = tmp * bq->a0 + bq->w1 * bq->a1 + bq->w2 * bq->a2;
77 
78  bq->w2 = bq->w1;
79  bq->w1 = tmp;
80 
81  return out;
82 }
83 
85 {
86  AVFilterContext *ctx = inlink->dst;
87  AVFilterLink *outlink = ctx->outputs[0];
88  AudioEmphasisContext *s = ctx->priv;
89  const double *src = (const double *)in->data[0];
90  const double level_out = s->level_out;
91  const double level_in = s->level_in;
92  AVFrame *out;
93  double *dst;
94  int n, c;
95 
97  out = in;
98  } else {
99  out = ff_get_audio_buffer(outlink, in->nb_samples);
100  if (!out) {
101  av_frame_free(&in);
102  return AVERROR(ENOMEM);
103  }
105  }
106  dst = (double *)out->data[0];
107 
108  for (n = 0; n < in->nb_samples; n++) {
109  for (c = 0; c < inlink->channels; c++)
110  dst[c] = level_out * biquad(&s->rc[c].r1, s->rc[c].use_brickw ? biquad(&s->rc[c].brickw, src[c] * level_in) : src[c] * level_in);
111  dst += inlink->channels;
112  src += inlink->channels;
113  }
114 
115  if (in != out)
116  av_frame_free(&in);
117  return ff_filter_frame(outlink, out);
118 }
119 
121 {
124  static const enum AVSampleFormat sample_fmts[] = {
127  };
128  int ret;
129 
131  if (!layouts)
132  return AVERROR(ENOMEM);
134  if (ret < 0)
135  return ret;
136 
138  if (!formats)
139  return AVERROR(ENOMEM);
141  if (ret < 0)
142  return ret;
143 
145  if (!formats)
146  return AVERROR(ENOMEM);
148 }
149 
150 static inline void set_highshelf_rbj(BiquadD2 *bq, double freq, double q, double peak, double sr)
151 {
152  double A = sqrt(peak);
153  double w0 = freq * 2 * M_PI / sr;
154  double alpha = sin(w0) / (2 * q);
155  double cw0 = cos(w0);
156  double tmp = 2 * sqrt(A) * alpha;
157  double b0 = 0, ib0 = 0;
158 
159  bq->a0 = A*( (A+1) + (A-1)*cw0 + tmp);
160  bq->a1 = -2*A*( (A-1) + (A+1)*cw0);
161  bq->a2 = A*( (A+1) + (A-1)*cw0 - tmp);
162  b0 = (A+1) - (A-1)*cw0 + tmp;
163  bq->b1 = 2*( (A-1) - (A+1)*cw0);
164  bq->b2 = (A+1) - (A-1)*cw0 - tmp;
165 
166  ib0 = 1 / b0;
167  bq->b1 *= ib0;
168  bq->b2 *= ib0;
169  bq->a0 *= ib0;
170  bq->a1 *= ib0;
171  bq->a2 *= ib0;
172 }
173 
174 static inline void set_lp_rbj(BiquadD2 *bq, double fc, double q, double sr, double gain)
175 {
176  double omega = 2.0 * M_PI * fc / sr;
177  double sn = sin(omega);
178  double cs = cos(omega);
179  double alpha = sn/(2 * q);
180  double inv = 1.0/(1.0 + alpha);
181 
182  bq->a2 = bq->a0 = gain * inv * (1.0 - cs) * 0.5;
183  bq->a1 = bq->a0 + bq->a0;
184  bq->b1 = (-2.0 * cs * inv);
185  bq->b2 = ((1.0 - alpha) * inv);
186 }
187 
188 static double freq_gain(BiquadCoeffs *c, double freq, double sr)
189 {
190  double zr, zi;
191 
192  freq *= 2.0 * M_PI / sr;
193  zr = cos(freq);
194  zi = -sin(freq);
195 
196  /* |(a0 + a1*z + a2*z^2)/(1 + b1*z + b2*z^2)| */
197  return hypot(c->a0 + c->a1*zr + c->a2*(zr*zr-zi*zi), c->a1*zi + 2*c->a2*zr*zi) /
198  hypot(1 + c->b1*zr + c->b2*(zr*zr-zi*zi), c->b1*zi + 2*c->b2*zr*zi);
199 }
200 
202 {
203  double i, j, k, g, t, a0, a1, a2, b1, b2, tau1, tau2, tau3;
204  double cutfreq, gain1kHz, gc, sr = inlink->sample_rate;
205  AVFilterContext *ctx = inlink->dst;
206  AudioEmphasisContext *s = ctx->priv;
207  BiquadCoeffs coeffs;
208  int ch;
209 
210  s->rc = av_calloc(inlink->channels, sizeof(*s->rc));
211  if (!s->rc)
212  return AVERROR(ENOMEM);
213 
214  switch (s->type) {
215  case 0: //"Columbia"
216  i = 100.;
217  j = 500.;
218  k = 1590.;
219  break;
220  case 1: //"EMI"
221  i = 70.;
222  j = 500.;
223  k = 2500.;
224  break;
225  case 2: //"BSI(78rpm)"
226  i = 50.;
227  j = 353.;
228  k = 3180.;
229  break;
230  case 3: //"RIAA"
231  default:
232  tau1 = 0.003180;
233  tau2 = 0.000318;
234  tau3 = 0.000075;
235  i = 1. / (2. * M_PI * tau1);
236  j = 1. / (2. * M_PI * tau2);
237  k = 1. / (2. * M_PI * tau3);
238  break;
239  case 4: //"CD Mastering"
240  tau1 = 0.000050;
241  tau2 = 0.000015;
242  tau3 = 0.0000001;// 1.6MHz out of audible range for null impact
243  i = 1. / (2. * M_PI * tau1);
244  j = 1. / (2. * M_PI * tau2);
245  k = 1. / (2. * M_PI * tau3);
246  break;
247  case 5: //"50µs FM (Europe)"
248  tau1 = 0.000050;
249  tau2 = tau1 / 20;// not used
250  tau3 = tau1 / 50;//
251  i = 1. / (2. * M_PI * tau1);
252  j = 1. / (2. * M_PI * tau2);
253  k = 1. / (2. * M_PI * tau3);
254  break;
255  case 6: //"75µs FM (US)"
256  tau1 = 0.000075;
257  tau2 = tau1 / 20;// not used
258  tau3 = tau1 / 50;//
259  i = 1. / (2. * M_PI * tau1);
260  j = 1. / (2. * M_PI * tau2);
261  k = 1. / (2. * M_PI * tau3);
262  break;
263  }
264 
265  i *= 2 * M_PI;
266  j *= 2 * M_PI;
267  k *= 2 * M_PI;
268 
269  t = 1. / sr;
270 
271  //swap a1 b1, a2 b2
272  if (s->type == 7 || s->type == 8) {
273  double tau = (s->type == 7 ? 0.000050 : 0.000075);
274  double f = 1.0 / (2 * M_PI * tau);
275  double nyq = sr * 0.5;
276  double gain = sqrt(1.0 + nyq * nyq / (f * f)); // gain at Nyquist
277  double cfreq = sqrt((gain - 1.0) * f * f); // frequency
278  double q = 1.0;
279 
280  if (s->type == 8)
281  q = pow((sr / 3269.0) + 19.5, -0.25); // somewhat poor curve-fit
282  if (s->type == 7)
283  q = pow((sr / 4750.0) + 19.5, -0.25);
284  if (s->mode == 0)
285  set_highshelf_rbj(&s->rc[0].r1, cfreq, q, 1. / gain, sr);
286  else
287  set_highshelf_rbj(&s->rc[0].r1, cfreq, q, gain, sr);
288  s->rc[0].use_brickw = 0;
289  } else {
290  s->rc[0].use_brickw = 1;
291  if (s->mode == 0) { // Reproduction
292  g = 1. / (4.+2.*i*t+2.*k*t+i*k*t*t);
293  a0 = (2.*t+j*t*t)*g;
294  a1 = (2.*j*t*t)*g;
295  a2 = (-2.*t+j*t*t)*g;
296  b1 = (-8.+2.*i*k*t*t)*g;
297  b2 = (4.-2.*i*t-2.*k*t+i*k*t*t)*g;
298  } else { // Production
299  g = 1. / (2.*t+j*t*t);
300  a0 = (4.+2.*i*t+2.*k*t+i*k*t*t)*g;
301  a1 = (-8.+2.*i*k*t*t)*g;
302  a2 = (4.-2.*i*t-2.*k*t+i*k*t*t)*g;
303  b1 = (2.*j*t*t)*g;
304  b2 = (-2.*t+j*t*t)*g;
305  }
306 
307  coeffs.a0 = a0;
308  coeffs.a1 = a1;
309  coeffs.a2 = a2;
310  coeffs.b1 = b1;
311  coeffs.b2 = b2;
312 
313  // the coeffs above give non-normalized value, so it should be normalized to produce 0dB at 1 kHz
314  // find actual gain
315  // Note: for FM emphasis, use 100 Hz for normalization instead
316  gain1kHz = freq_gain(&coeffs, 1000.0, sr);
317  // divide one filter's x[n-m] coefficients by that value
318  gc = 1.0 / gain1kHz;
319  s->rc[0].r1.a0 = coeffs.a0 * gc;
320  s->rc[0].r1.a1 = coeffs.a1 * gc;
321  s->rc[0].r1.a2 = coeffs.a2 * gc;
322  s->rc[0].r1.b1 = coeffs.b1;
323  s->rc[0].r1.b2 = coeffs.b2;
324  }
325 
326  cutfreq = FFMIN(0.45 * sr, 21000.);
327  set_lp_rbj(&s->rc[0].brickw, cutfreq, 0.707, sr, 1.);
328 
329  for (ch = 1; ch < inlink->channels; ch++) {
330  memcpy(&s->rc[ch], &s->rc[0], sizeof(RIAACurve));
331  }
332 
333  return 0;
334 }
335 
337 {
338  AudioEmphasisContext *s = ctx->priv;
339  av_freep(&s->rc);
340 }
341 
343  {
344  .name = "default",
345  .type = AVMEDIA_TYPE_AUDIO,
346  .config_props = config_input,
347  .filter_frame = filter_frame,
348  },
349  { NULL }
350 };
351 
353  {
354  .name = "default",
355  .type = AVMEDIA_TYPE_AUDIO,
356  },
357  { NULL }
358 };
359 
361  .name = "aemphasis",
362  .description = NULL_IF_CONFIG_SMALL("Audio emphasis."),
363  .priv_size = sizeof(AudioEmphasisContext),
364  .priv_class = &aemphasis_class,
365  .uninit = uninit,
369 };
formats
formats
Definition: signature.h:48
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:86
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
biquad
static double biquad(BiquadD2 *bq, double in)
Definition: af_aemphasis.c:72
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
out
FILE * out
Definition: movenc.c:54
freq_gain
static double freq_gain(BiquadCoeffs *c, double freq, double sr)
Definition: af_aemphasis.c:188
n
int n
Definition: avisynth_c.h:760
ff_set_common_channel_layouts
int ff_set_common_channel_layouts(AVFilterContext *ctx, AVFilterChannelLayouts *layouts)
A helper for query_formats() which sets all links to the same list of channel layouts/sample rates.
Definition: formats.c:549
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:686
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
BiquadD2::w2
double w2
Definition: af_aemphasis.c:31
ch
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
OFFSET
#define OFFSET(x)
Definition: af_aemphasis.c:48
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:410
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
AVOption
AVOption.
Definition: opt.h:246
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_aemphasis.c:84
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(aemphasis)
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:555
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
BiquadCoeffs::a1
double a1
Definition: af_aemphasis.c:27
A
#define A(x)
Definition: vp56_arith.h:28
BiquadCoeffs
Definition: af_aemphasis.c:26
BiquadD2::a1
double a1
Definition: af_aemphasis.c:31
BiquadCoeffs::a2
double a2
Definition: af_aemphasis.c:27
AudioEmphasisContext::type
int type
Definition: af_aemphasis.c:42
AudioEmphasisContext::mode
int mode
Definition: af_aemphasis.c:42
BiquadCoeffs::b2
double b2
Definition: af_aemphasis.c:27
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
BiquadD2::b1
double b1
Definition: af_aemphasis.c:31
src
#define src
Definition: vp8dsp.c:254
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
a1
#define a1
Definition: regdef.h:47
av_cold
#define av_cold
Definition: attributes.h:84
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
aemphasis_options
static const AVOption aemphasis_options[]
Definition: af_aemphasis.c:51
RIAACurve::brickw
BiquadD2 brickw
Definition: af_aemphasis.c:36
s
#define s(width, name)
Definition: cbs_vp9.c:257
g
const char * g
Definition: vf_curves.c:115
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:225
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
avfilter_af_aemphasis_inputs
static const AVFilterPad avfilter_af_aemphasis_inputs[]
Definition: af_aemphasis.c:342
ctx
AVFormatContext * ctx
Definition: movenc.c:48
f
#define f(width, name)
Definition: cbs_vp9.c:255
if
if(ret)
Definition: filter_design.txt:179
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
ff_af_aemphasis
AVFilter ff_af_aemphasis
Definition: af_aemphasis.c:360
BiquadD2::a2
double a2
Definition: af_aemphasis.c:31
avfilter_af_aemphasis_outputs
static const AVFilterPad avfilter_af_aemphasis_outputs[]
Definition: af_aemphasis.c:352
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_aemphasis.c:120
AudioEmphasisContext::level_in
double level_in
Definition: af_aemphasis.c:43
RIAACurve::use_brickw
int use_brickw
Definition: af_aemphasis.c:37
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:594
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AudioEmphasisContext
Definition: af_aemphasis.c:40
BiquadD2::b2
double b2
Definition: af_aemphasis.c:31
a0
#define a0
Definition: regdef.h:46
M_PI
#define M_PI
Definition: mathematics.h:52
internal.h
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
BiquadD2::w1
double w1
Definition: af_aemphasis.c:31
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
a2
#define a2
Definition: regdef.h:48
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
RIAACurve::r1
BiquadD2 r1
Definition: af_aemphasis.c:35
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
FLAGS
#define FLAGS
Definition: af_aemphasis.c:49
BiquadCoeffs::b1
double b1
Definition: af_aemphasis.c:27
AVFilter
Filter definition.
Definition: avfilter.h:144
AudioEmphasisContext::rc
RIAACurve * rc
Definition: af_aemphasis.c:45
ret
ret
Definition: filter_design.txt:187
BiquadD2
Definition: af_aemphasis.c:30
ff_all_samplerates
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:395
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:244
mode
mode
Definition: ebur128.h:83
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
avfilter.h
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
audio.h
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
RIAACurve
Definition: af_aemphasis.c:34
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_aemphasis.c:201
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_aemphasis.c:336
set_highshelf_rbj
static void set_highshelf_rbj(BiquadD2 *bq, double freq, double q, double peak, double sr)
Definition: af_aemphasis.c:150
set_lp_rbj
static void set_lp_rbj(BiquadD2 *bq, double fc, double q, double sr, double gain)
Definition: af_aemphasis.c:174
ff_set_common_samplerates
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:556
AV_SAMPLE_FMT_DBL
@ AV_SAMPLE_FMT_DBL
double
Definition: samplefmt.h:64
AudioEmphasisContext::level_out
double level_out
Definition: af_aemphasis.c:43
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
BiquadD2::a0
double a0
Definition: af_aemphasis.c:31
BiquadCoeffs::a0
double a0
Definition: af_aemphasis.c:27