/* FFmpeg — libavfilter/af_aemphasis.c */
1 /*
2  * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen, Damien Zammit and others
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/opt.h"
22 #include "avfilter.h"
23 #include "internal.h"
24 #include "audio.h"
25 
/* Raw biquad transfer-function coefficients:
 * H(z) = (a0 + a1*z^-1 + a2*z^-2) / (1 + b1*z^-1 + b2*z^-2).
 * No state; used only while deriving/normalizing coefficients. */
typedef struct BiquadCoeffs {
    double a0, a1, a2, b1, b2;
} BiquadCoeffs;
29 
/* Biquad coefficients plus the two Direct Form II delay-line state
 * variables (w1, w2) updated on every processed sample. */
typedef struct BiquadD2 {
    double a0, a1, a2, b1, b2, w1, w2;
} BiquadD2;
33 
34 typedef struct RIAACurve {
38 } RIAACurve;
39 
40 typedef struct AudioEmphasisContext {
41  const AVClass *class;
42  int mode, type;
43  double level_in, level_out;
44 
47 
/* Shorthand used by the AVOption table below. */
#define OFFSET(x) offsetof(AudioEmphasisContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
50 
/* User-visible options; "mode" selects de-emphasis (reproduction) or
 * emphasis (production), "type" selects the curve (default: CD). */
static const AVOption aemphasis_options[] = {
    { "level_in",      "set input gain",    OFFSET(level_in),  AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 64, FLAGS },
    { "level_out",     "set output gain",   OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 64, FLAGS },
    { "mode",          "set filter mode",   OFFSET(mode),      AV_OPT_TYPE_INT,    {.i64=0}, 0,  1, FLAGS, "mode" },
    { "reproduction",  NULL,                0,                 AV_OPT_TYPE_CONST,  {.i64=0}, 0,  0, FLAGS, "mode" },
    { "production",    NULL,                0,                 AV_OPT_TYPE_CONST,  {.i64=1}, 0,  0, FLAGS, "mode" },
    { "type",          "set filter type",   OFFSET(type),      AV_OPT_TYPE_INT,    {.i64=4}, 0,  8, FLAGS, "type" },
    { "col",           "Columbia",          0,                 AV_OPT_TYPE_CONST,  {.i64=0}, 0,  0, FLAGS, "type" },
    { "emi",           "EMI",               0,                 AV_OPT_TYPE_CONST,  {.i64=1}, 0,  0, FLAGS, "type" },
    { "bsi",           "BSI (78RPM)",       0,                 AV_OPT_TYPE_CONST,  {.i64=2}, 0,  0, FLAGS, "type" },
    { "riaa",          "RIAA",              0,                 AV_OPT_TYPE_CONST,  {.i64=3}, 0,  0, FLAGS, "type" },
    { "cd",            "Compact Disc (CD)", 0,                 AV_OPT_TYPE_CONST,  {.i64=4}, 0,  0, FLAGS, "type" },
    { "50fm",          "50µs (FM)",         0,                 AV_OPT_TYPE_CONST,  {.i64=5}, 0,  0, FLAGS, "type" },
    { "75fm",          "75µs (FM)",         0,                 AV_OPT_TYPE_CONST,  {.i64=6}, 0,  0, FLAGS, "type" },
    { "50kf",          "50µs (FM-KF)",      0,                 AV_OPT_TYPE_CONST,  {.i64=7}, 0,  0, FLAGS, "type" },
    { "75kf",          "75µs (FM-KF)",      0,                 AV_OPT_TYPE_CONST,  {.i64=8}, 0,  0, FLAGS, "type" },
    { NULL }
};
69 
/* Expands to the aemphasis_class AVClass backed by aemphasis_options. */
AVFILTER_DEFINE_CLASS(aemphasis);
71 
72 static inline double biquad(BiquadD2 *bq, double in)
73 {
74  double n = in;
75  double tmp = n - bq->w1 * bq->b1 - bq->w2 * bq->b2;
76  double out = tmp * bq->a0 + bq->w1 * bq->a1 + bq->w2 * bq->a2;
77 
78  bq->w2 = bq->w1;
79  bq->w1 = tmp;
80 
81  return out;
82 }
83 
85 {
86  AVFilterContext *ctx = inlink->dst;
87  AVFilterLink *outlink = ctx->outputs[0];
88  AudioEmphasisContext *s = ctx->priv;
89  const double *src = (const double *)in->data[0];
90  const double level_out = s->level_out;
91  const double level_in = s->level_in;
92  AVFrame *out;
93  double *dst;
94  int n, c;
95 
96  if (av_frame_is_writable(in)) {
97  out = in;
98  } else {
99  out = ff_get_audio_buffer(outlink, in->nb_samples);
100  if (!out) {
101  av_frame_free(&in);
102  return AVERROR(ENOMEM);
103  }
105  }
106  dst = (double *)out->data[0];
107 
108  for (n = 0; n < in->nb_samples; n++) {
109  for (c = 0; c < inlink->channels; c++)
110  dst[c] = level_out * biquad(&s->rc[c].r1, s->rc[c].use_brickw ? biquad(&s->rc[c].brickw, src[c] * level_in) : src[c] * level_in);
111  dst += inlink->channels;
112  src += inlink->channels;
113  }
114 
115  if (in != out)
116  av_frame_free(&in);
117  return ff_filter_frame(outlink, out);
118 }
119 
121 {
124  static const enum AVSampleFormat sample_fmts[] = {
127  };
128  int ret;
129 
130  layouts = ff_all_channel_counts();
131  if (!layouts)
132  return AVERROR(ENOMEM);
133  ret = ff_set_common_channel_layouts(ctx, layouts);
134  if (ret < 0)
135  return ret;
136 
137  formats = ff_make_format_list(sample_fmts);
138  if (!formats)
139  return AVERROR(ENOMEM);
140  ret = ff_set_common_formats(ctx, formats);
141  if (ret < 0)
142  return ret;
143 
144  formats = ff_all_samplerates();
145  if (!formats)
146  return AVERROR(ENOMEM);
147  return ff_set_common_samplerates(ctx, formats);
148 }
149 
150 static inline void set_highshelf_rbj(BiquadD2 *bq, double freq, double q, double peak, double sr)
151 {
152  double A = sqrt(peak);
153  double w0 = freq * 2 * M_PI / sr;
154  double alpha = sin(w0) / (2 * q);
155  double cw0 = cos(w0);
156  double tmp = 2 * sqrt(A) * alpha;
157  double b0 = 0, ib0 = 0;
158 
159  bq->a0 = A*( (A+1) + (A-1)*cw0 + tmp);
160  bq->a1 = -2*A*( (A-1) + (A+1)*cw0);
161  bq->a2 = A*( (A+1) + (A-1)*cw0 - tmp);
162  b0 = (A+1) - (A-1)*cw0 + tmp;
163  bq->b1 = 2*( (A-1) - (A+1)*cw0);
164  bq->b2 = (A+1) - (A-1)*cw0 - tmp;
165 
166  ib0 = 1 / b0;
167  bq->b1 *= ib0;
168  bq->b2 *= ib0;
169  bq->a0 *= ib0;
170  bq->a1 *= ib0;
171  bq->a2 *= ib0;
172 }
173 
174 static inline void set_lp_rbj(BiquadD2 *bq, double fc, double q, double sr, double gain)
175 {
176  double omega = 2.0 * M_PI * fc / sr;
177  double sn = sin(omega);
178  double cs = cos(omega);
179  double alpha = sn/(2 * q);
180  double inv = 1.0/(1.0 + alpha);
181 
182  bq->a2 = bq->a0 = gain * inv * (1.0 - cs) * 0.5;
183  bq->a1 = bq->a0 + bq->a0;
184  bq->b1 = (-2.0 * cs * inv);
185  bq->b2 = ((1.0 - alpha) * inv);
186 }
187 
188 static double freq_gain(BiquadCoeffs *c, double freq, double sr)
189 {
190  double zr, zi;
191 
192  freq *= 2.0 * M_PI / sr;
193  zr = cos(freq);
194  zi = -sin(freq);
195 
196  /* |(a0 + a1*z + a2*z^2)/(1 + b1*z + b2*z^2)| */
197  return hypot(c->a0 + c->a1*zr + c->a2*(zr*zr-zi*zi), c->a1*zi + 2*c->a2*zr*zi) /
198  hypot(1 + c->b1*zr + c->b2*(zr*zr-zi*zi), c->b1*zi + 2*c->b2*zr*zi);
199 }
200 
202 {
203  double i, j, k, g, t, a0, a1, a2, b1, b2, tau1, tau2, tau3;
204  double cutfreq, gain1kHz, gc, sr = inlink->sample_rate;
205  AVFilterContext *ctx = inlink->dst;
206  AudioEmphasisContext *s = ctx->priv;
207  BiquadCoeffs coeffs;
208  int ch;
209 
210  s->rc = av_calloc(inlink->channels, sizeof(*s->rc));
211  if (!s->rc)
212  return AVERROR(ENOMEM);
213 
214  switch (s->type) {
215  case 0: //"Columbia"
216  i = 100.;
217  j = 500.;
218  k = 1590.;
219  break;
220  case 1: //"EMI"
221  i = 70.;
222  j = 500.;
223  k = 2500.;
224  break;
225  case 2: //"BSI(78rpm)"
226  i = 50.;
227  j = 353.;
228  k = 3180.;
229  break;
230  case 3: //"RIAA"
231  default:
232  tau1 = 0.003180;
233  tau2 = 0.000318;
234  tau3 = 0.000075;
235  i = 1. / (2. * M_PI * tau1);
236  j = 1. / (2. * M_PI * tau2);
237  k = 1. / (2. * M_PI * tau3);
238  break;
239  case 4: //"CD Mastering"
240  tau1 = 0.000050;
241  tau2 = 0.000015;
242  tau3 = 0.0000001;// 1.6MHz out of audible range for null impact
243  i = 1. / (2. * M_PI * tau1);
244  j = 1. / (2. * M_PI * tau2);
245  k = 1. / (2. * M_PI * tau3);
246  break;
247  case 5: //"50µs FM (Europe)"
248  tau1 = 0.000050;
249  tau2 = tau1 / 20;// not used
250  tau3 = tau1 / 50;//
251  i = 1. / (2. * M_PI * tau1);
252  j = 1. / (2. * M_PI * tau2);
253  k = 1. / (2. * M_PI * tau3);
254  break;
255  case 6: //"75µs FM (US)"
256  tau1 = 0.000075;
257  tau2 = tau1 / 20;// not used
258  tau3 = tau1 / 50;//
259  i = 1. / (2. * M_PI * tau1);
260  j = 1. / (2. * M_PI * tau2);
261  k = 1. / (2. * M_PI * tau3);
262  break;
263  }
264 
265  i *= 2 * M_PI;
266  j *= 2 * M_PI;
267  k *= 2 * M_PI;
268 
269  t = 1. / sr;
270 
271  //swap a1 b1, a2 b2
272  if (s->type == 7 || s->type == 8) {
273  double tau = (s->type == 7 ? 0.000050 : 0.000075);
274  double f = 1.0 / (2 * M_PI * tau);
275  double nyq = sr * 0.5;
276  double gain = sqrt(1.0 + nyq * nyq / (f * f)); // gain at Nyquist
277  double cfreq = sqrt((gain - 1.0) * f * f); // frequency
278  double q = 1.0;
279 
280  if (s->type == 8)
281  q = pow((sr / 3269.0) + 19.5, -0.25); // somewhat poor curve-fit
282  if (s->type == 7)
283  q = pow((sr / 4750.0) + 19.5, -0.25);
284  if (s->mode == 0)
285  set_highshelf_rbj(&s->rc[0].r1, cfreq, q, 1. / gain, sr);
286  else
287  set_highshelf_rbj(&s->rc[0].r1, cfreq, q, gain, sr);
288  s->rc[0].use_brickw = 0;
289  } else {
290  s->rc[0].use_brickw = 1;
291  if (s->mode == 0) { // Reproduction
292  g = 1. / (4.+2.*i*t+2.*k*t+i*k*t*t);
293  a0 = (2.*t+j*t*t)*g;
294  a1 = (2.*j*t*t)*g;
295  a2 = (-2.*t+j*t*t)*g;
296  b1 = (-8.+2.*i*k*t*t)*g;
297  b2 = (4.-2.*i*t-2.*k*t+i*k*t*t)*g;
298  } else { // Production
299  g = 1. / (2.*t+j*t*t);
300  a0 = (4.+2.*i*t+2.*k*t+i*k*t*t)*g;
301  a1 = (-8.+2.*i*k*t*t)*g;
302  a2 = (4.-2.*i*t-2.*k*t+i*k*t*t)*g;
303  b1 = (2.*j*t*t)*g;
304  b2 = (-2.*t+j*t*t)*g;
305  }
306 
307  coeffs.a0 = a0;
308  coeffs.a1 = a1;
309  coeffs.a2 = a2;
310  coeffs.b1 = b1;
311  coeffs.b2 = b2;
312 
313  // the coeffs above give non-normalized value, so it should be normalized to produce 0dB at 1 kHz
314  // find actual gain
315  // Note: for FM emphasis, use 100 Hz for normalization instead
316  gain1kHz = freq_gain(&coeffs, 1000.0, sr);
317  // divide one filter's x[n-m] coefficients by that value
318  gc = 1.0 / gain1kHz;
319  s->rc[0].r1.a0 = coeffs.a0 * gc;
320  s->rc[0].r1.a1 = coeffs.a1 * gc;
321  s->rc[0].r1.a2 = coeffs.a2 * gc;
322  s->rc[0].r1.b1 = coeffs.b1;
323  s->rc[0].r1.b2 = coeffs.b2;
324  }
325 
326  cutfreq = FFMIN(0.45 * sr, 21000.);
327  set_lp_rbj(&s->rc[0].brickw, cutfreq, 0.707, sr, 1.);
328 
329  for (ch = 1; ch < inlink->channels; ch++) {
330  memcpy(&s->rc[ch], &s->rc[0], sizeof(RIAACurve));
331  }
332 
333  return 0;
334 }
335 
337 {
338  AudioEmphasisContext *s = ctx->priv;
339  av_freep(&s->rc);
340 }
341 
343  {
344  .name = "default",
345  .type = AVMEDIA_TYPE_AUDIO,
346  .config_props = config_input,
347  .filter_frame = filter_frame,
348  },
349  { NULL }
350 };
351 
353  {
354  .name = "default",
355  .type = AVMEDIA_TYPE_AUDIO,
356  },
357  { NULL }
358 };
359 
361  .name = "aemphasis",
362  .description = NULL_IF_CONFIG_SMALL("Audio emphasis."),
363  .priv_size = sizeof(AudioEmphasisContext),
364  .priv_class = &aemphasis_class,
365  .uninit = uninit,
367  .inputs = avfilter_af_aemphasis_inputs,
368  .outputs = avfilter_af_aemphasis_outputs,
369 };
#define NULL
Definition: coverity.c:32
int ff_set_common_channel_layouts(AVFilterContext *ctx, AVFilterChannelLayouts *layouts)
A helper for query_formats() which sets all links to the same list of channel layouts/sample rates...
Definition: formats.c:549
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
double b1
Definition: af_aemphasis.c:31
AVOption.
Definition: opt.h:246
Main libavfilter public API header.
const char * g
Definition: vf_curves.c:115
int use_brickw
Definition: af_aemphasis.c:37
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
GLint GLenum type
Definition: opengl_enc.c:104
#define src
Definition: vp8dsp.c:254
static const AVOption aemphasis_options[]
Definition: af_aemphasis.c:51
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:244
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
static double freq_gain(BiquadCoeffs *c, double freq, double sr)
Definition: af_aemphasis.c:188
const char * name
Pad name.
Definition: internal.h:60
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
#define av_cold
Definition: attributes.h:82
AVOptions.
#define OFFSET(x)
Definition: af_aemphasis.c:48
#define f(width, name)
Definition: cbs_vp9.c:255
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
BiquadD2 r1
Definition: af_aemphasis.c:35
AVFILTER_DEFINE_CLASS(aemphasis)
AVFilter ff_af_aemphasis
Definition: af_aemphasis.c:360
#define A(x)
Definition: vp56_arith.h:28
A filter pad used for either input or output.
Definition: internal.h:54
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:551
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:86
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
double w1
Definition: af_aemphasis.c:31
static av_const double hypot(double x, double y)
Definition: libm.h:366
#define FFMIN(a, b)
Definition: common.h:96
AVFormatContext * ctx
Definition: movenc.c:48
static const AVFilterPad avfilter_af_aemphasis_inputs[]
Definition: af_aemphasis.c:342
#define s(width, name)
Definition: cbs_vp9.c:257
int n
Definition: avisynth_c.h:760
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
static int query_formats(AVFilterContext *ctx)
Definition: af_aemphasis.c:120
A list of supported channel layouts.
Definition: formats.h:85
if(ret)
double b2
Definition: af_aemphasis.c:31
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:594
static const int16_t alpha[]
Definition: ilbcdata.h:55
static const AVFilterPad avfilter_af_aemphasis_outputs[]
Definition: af_aemphasis.c:352
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
const char * name
Filter name.
Definition: avfilter.h:148
double w2
Definition: af_aemphasis.c:31
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:395
double a1
Definition: af_aemphasis.c:31
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
static void set_highshelf_rbj(BiquadD2 *bq, double freq, double q, double peak, double sr)
Definition: af_aemphasis.c:150
#define FLAGS
Definition: af_aemphasis.c:49
BiquadD2 brickw
Definition: af_aemphasis.c:36
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:338
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
FILE * out
Definition: movenc.c:54
double a2
Definition: af_aemphasis.c:31
#define av_freep(p)
#define M_PI
Definition: mathematics.h:52
formats
Definition: signature.h:48
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition...
Definition: formats.c:410
static int config_input(AVFilterLink *inlink)
Definition: af_aemphasis.c:201
double a0
Definition: af_aemphasis.c:31
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_aemphasis.c:336
static void set_lp_rbj(BiquadD2 *bq, double fc, double q, double sr, double gain)
Definition: af_aemphasis.c:174
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
static double biquad(BiquadD2 *bq, double in)
Definition: af_aemphasis.c:72
for(j=16;j >0;--j)
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:556
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_aemphasis.c:84
static uint8_t tmp[11]
Definition: aes_ctr.c:26