FFmpeg
af_anequalizer.c
/*
 * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "audio.h"
#include "video.h"

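/* [Editor's note] FILTER_ORDER is the design order N passed to the
 * *_bp_filter() routines below; each equalizer band is realized as
 * FILTER_ORDER/2 cascaded fourth-order IIR sections (see FoSection and
 * process_sample()). */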
#define FILTER_ORDER 4

enum FilterType {
    BUTTERWORTH,
    CHEBYSHEV1,
    CHEBYSHEV2,
    NB_TYPES
};

typedef struct FoSection {
    double a0, a1, a2, a3, a4;
    double b0, b1, b2, b3, b4;

    double num[4];
    double denum[4];
} FoSection;
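
/* [Editor's note] num[] and denum[] are the per-section delay lines: the last
 * four inputs and the last four outputs of the section, which
 * section_process() below uses to run a direct-form difference equation. */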

typedef struct EqualizatorFilter {
    int ignore;
    int channel;
    int type;

    double freq;
    double gain;
    double width;

    FoSection section[2];
} EqualizatorFilter;

typedef struct AudioNEqualizerContext {
    const AVClass *class;
    char *args;
    char *colors;
    int draw_curves;
    int w, h;

    double mag;
    int fscale;
    int nb_filters;
    int nb_allocated;
    EqualizatorFilter *filters;
    AVFrame *video;
} AudioNEqualizerContext;

#define OFFSET(x) offsetof(AudioNEqualizerContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

static const AVOption anequalizer_options[] = {
    { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
    { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
    { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
    { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, .unit = "fscale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, .unit = "fscale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, .unit = "fscale" },
    { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
    { NULL }
};
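
/* [Editor's note] The "params" option is a '|'-separated list of bands; each
 * band is parsed by config_input() with the pattern
 * "c<channel> f=<center frequency> w=<width> g=<gain> t=<filter type>",
 * where "t=" is optional and selects one of the FilterType values above. */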

AVFILTER_DEFINE_CLASS(anequalizer);

static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
{
    AudioNEqualizerContext *s = ctx->priv;
    char *colors, *color, *saveptr = NULL;
    int ch, i, n;

    colors = av_strdup(s->colors);
    if (!colors)
        return;

    memset(out->data[0], 0, s->h * out->linesize[0]);

    for (ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
        int prev_v = -1;
        double f;

        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);

        for (f = 0; f < s->w; f++) {
            double zr, zi, zr2, zi2;
            double Hr, Hi;
            double Hmag = 1;
            double w;
            int v, y, x;

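            /* [Editor's note] Map the column f to a normalized angular
             * frequency w between 0 and pi: linearly for fscale=lin, with
             * logarithmic spacing for fscale=log.  zr and zi are cos(w) and
             * -sin(w), i.e. the point exp(-j*w) on the unit circle at which
             * each section's response is evaluated below. */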
            w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
            zr = cos(w);
            zr2 = zr * zr;
            zi = -sin(w);
            zi2 = zi * zi;

            for (n = 0; n < s->nb_filters; n++) {
                if (s->filters[n].channel != ch ||
                    s->filters[n].ignore)
                    continue;

                for (i = 0; i < FILTER_ORDER / 2; i++) {
                    FoSection *S = &s->filters[n].section[i];

                    /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
                             ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */

                    Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
                    Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
                    Hmag *= hypot(Hr, Hi);
                    Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
                    Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
                    Hmag /= hypot(Hr, Hi);
                }
            }

            v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
            x = lrint(f);
            if (prev_v == -1)
                prev_v = v;
            if (v <= prev_v) {
                for (y = v; y <= prev_v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            } else {
                for (y = prev_v; y <= v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            }

            prev_v = v;
        }
    }

    av_free(colors);
}

static int config_video(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *out;

    outlink->w = s->w;
    outlink->h = s->h;

    av_frame_free(&s->video);
    s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    outlink->sample_aspect_ratio = (AVRational){1,1};

    draw_curves(ctx, inlink, out);

    return 0;
}
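
/* [Editor's note] init() creates the output pads at run time: one audio pad
 * is always appended, and a second video pad is added when the "curves"
 * option is enabled.  This is why the filter declaration at the bottom of the
 * file sets AVFILTER_FLAG_DYNAMIC_OUTPUTS and leaves .outputs NULL. */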
static av_cold int init(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterPad pad, vpad;
    int ret;

    pad = (AVFilterPad){
        .name = "out0",
        .type = AVMEDIA_TYPE_AUDIO,
    };

    ret = ff_append_outpad(ctx, &pad);
    if (ret < 0)
        return ret;

    if (s->draw_curves) {
        vpad = (AVFilterPad){
            .name = "out1",
            .type = AVMEDIA_TYPE_VIDEO,
            .config_props = config_video,
        };
        ret = ff_append_outpad(ctx, &vpad);
        if (ret < 0)
            return ret;
    }

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    if (s->draw_curves) {
        AVFilterLink *videolink = ctx->outputs[1];
        formats = ff_make_format_list(pix_fmts);
        if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &outlink->incfg.channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->incfg.samplerates)) < 0)
        return ret;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;

    av_frame_free(&s->video);
    av_freep(&s->filters);
    s->nb_filters = 0;
    s->nb_allocated = 0;
}

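/* [Editor's note] The *_bp_filter() designers build one band: G, Gb and G0
 * arrive in dB and are converted to linear gains, w0 is the band center and
 * wb the bandwidth (both in radians per sample), and N/2 fourth-order
 * sections are filled in through the matching *_fo_section() helper.  G is
 * the peak gain, Gb the gain required at the band edges and G0 the reference
 * gain. */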
static void butterworth_fo_section(FoSection *S, double beta,
                                   double si, double g, double g0,
                                   double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
        S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(beta*beta - 1)/D;
        S->a2 = (beta*beta - 2*beta*si + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
        S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
        S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + si*beta)/D;
        S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
        S->a3 = -4*c0*(1 - si*beta)/D;
        S->a4 = (beta*beta - 2*si*beta + 1)/D;
    }
}

static void butterworth_bp_filter(EqualizatorFilter *f,
                                  int N, double w0, double wb,
                                  double G, double Gb, double G0)
{
    double g, c0, g0, beta;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
    g = pow(G, 1.0 / N);
    g0 = pow(G0, 1.0 / N);
    beta = pow(epsilon, -1.0 / N) * tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1) / N;
        double si = sin(M_PI * ui / 2.0);
        double Di = beta * beta + 2 * si * beta + 1;

        butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
    }
}

static void chebyshev1_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g0, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
        S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
        S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
        S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
        S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
        S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
        S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
    }
}

static void chebyshev1_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, g0, alfa, beta, tetta_b;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g0 = pow(G0,1.0/N);
    alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    a = 0.5 * (alfa - 1.0/alfa);
    b = 0.5 * (beta - g0*g0*(1/beta));
    tetta_b = tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0*i-1.0)/N;
        double ci = cos(M_PI*ui/2.0);
        double si = sin(M_PI*ui/2.0);
        double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;

        chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
    }
}

static void chebyshev2_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
        S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
        S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
        S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
        S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
        S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
        S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
        S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
    }
}

static void chebyshev2_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, tetta_b;
    double epsilon, g, eu, ew;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g = pow(G, 1.0 / N);
    eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
    ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
    a = (eu - 1.0/eu)/2.0;
    b = (ew - g*g/ew)/2.0;
    tetta_b = tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1.0)/N;
        double ci = cos(M_PI * ui / 2.0);
        double si = sin(M_PI * ui / 2.0);
        double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;

        chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
    }
}

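/* [Editor's note] The *_compute_bw_gain_db() helpers derive, from the
 * requested peak gain, the gain Gb (in dB) that a band should have at the
 * edges of its width for each filter family. */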
static double butterworth_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = gain + 3;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.5;
    else if (gain >= 6)
        bw_gain = gain - 3;

    return bw_gain;
}

static double chebyshev1_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = gain + 1;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.9;
    else if (gain >= 6)
        bw_gain = gain - 1;

    return bw_gain;
}

static double chebyshev2_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = -3;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.3;
    else if (gain >= 6)
        bw_gain = 3;

    return bw_gain;
}

static inline double hz_2_rad(double x, double fs)
{
    return 2 * M_PI * x / fs;
}

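/* [Editor's note] equalizer() converts one band's center frequency and width
 * from Hz to radians per sample and rebuilds its sections with the design
 * selected by f->type. */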
static void equalizer(EqualizatorFilter *f, double sample_rate)
{
    double w0 = hz_2_rad(f->freq, sample_rate);
    double wb = hz_2_rad(f->width, sample_rate);
    double bw_gain;

    switch (f->type) {
    case BUTTERWORTH:
        bw_gain = butterworth_compute_bw_gain_db(f->gain);
        butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV1:
        bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
        chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV2:
        bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
        chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    }

}

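/* [Editor's note] add_filter() designs the band that config_input() has just
 * parsed into s->filters[s->nb_filters] and doubles the filters array
 * whenever it is about to run out of space. */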
static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
{
    equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
    if (s->nb_filters >= s->nb_allocated - 1) {
        EqualizatorFilter *filters;

        filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
        if (!filters)
            return AVERROR(ENOMEM);
        memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
        av_free(s->filters);
        s->filters = filters;
        s->nb_allocated *= 2;
    }
    s->nb_filters++;

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    char *args = av_strdup(s->args);
    char *saveptr = NULL;
    int ret = 0;

    if (!args)
        return AVERROR(ENOMEM);

    s->nb_allocated = 32 * inlink->ch_layout.nb_channels;
    s->filters = av_calloc(inlink->ch_layout.nb_channels, 32 * sizeof(*s->filters));
    if (!s->filters) {
        s->nb_allocated = 0;
        av_free(args);
        return AVERROR(ENOMEM);
    }

    while (1) {
        char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);

        if (!arg)
            break;

        s->filters[s->nb_filters].type = 0;
        if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
                   &s->filters[s->nb_filters].freq,
                   &s->filters[s->nb_filters].width,
                   &s->filters[s->nb_filters].gain,
                   &s->filters[s->nb_filters].type) != 5 &&
            sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
                   &s->filters[s->nb_filters].freq,
                   &s->filters[s->nb_filters].width,
                   &s->filters[s->nb_filters].gain) != 4 ) {
            av_free(args);
            return AVERROR(EINVAL);
        }

        if (s->filters[s->nb_filters].freq < 0 ||
            s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
            s->filters[s->nb_filters].ignore = 1;

        if (s->filters[s->nb_filters].channel < 0 ||
            s->filters[s->nb_filters].channel >= inlink->ch_layout.nb_channels)
            s->filters[s->nb_filters].ignore = 1;

        s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
        ret = add_filter(s, inlink);
        if (ret < 0)
            break;
    }

    av_free(args);

    return ret;
}

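/* [Editor's note] process_command() handles a single command, "change": its
 * argument has the form "<band index>|f=<frequency>|w=<width>|g=<gain>" and
 * redesigns that band (and redraws the response curves) while the filter is
 * running. */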
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "change")) {
        double freq, width, gain;
        int filter;

        if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
            return AVERROR(EINVAL);

        if (filter < 0 || filter >= s->nb_filters)
            return AVERROR(EINVAL);

        if (freq < 0 || freq > inlink->sample_rate / 2.0)
            return AVERROR(EINVAL);

        s->filters[filter].freq = freq;
        s->filters[filter].width = width;
        s->filters[filter].gain = gain;
        equalizer(&s->filters[filter], inlink->sample_rate);
        if (s->draw_curves)
            draw_curves(ctx, inlink, s->video);

        ret = 0;
    }

    return ret;
}

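/* [Editor's note] section_process() runs one fourth-order IIR section in
 * direct form I with a0 normalized to 1: the output is the sum of b_k times
 * the last inputs minus a_k times the last outputs, after which num[] and
 * denum[] are shifted by one sample. */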
static inline double section_process(FoSection *S, double in)
{
    double out;

    out = S->b0 * in;
    out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
    out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
    out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
    out+= S->b4 * S->num[3] - S->denum[3] * S->a4;

    S->num[3] = S->num[2];
    S->num[2] = S->num[1];
    S->num[1] = S->num[0];
    S->num[0] = in;

    S->denum[3] = S->denum[2];
    S->denum[2] = S->denum[1];
    S->denum[1] = S->denum[0];
    S->denum[0] = out;

    return out;
}

static double process_sample(FoSection *s1, double in)
{
    double p0 = in, p1;
    int i;

    for (i = 0; i < FILTER_ORDER / 2; i++) {
        p1 = section_process(&s1[i], p0);
        p0 = p1;
    }

    return p1;
}
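
/* [Editor's note] filter_channels() is the slice-threading worker passed to
 * ff_filter_execute(): each job handles the contiguous channel range
 * [start, end) and applies every non-ignored band whose channel falls in that
 * range to the planar double samples in place. */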
697 
699  int jobnr, int nb_jobs)
700 {
701  AudioNEqualizerContext *s = ctx->priv;
702  AVFrame *buf = arg;
703  const int start = (buf->ch_layout.nb_channels * jobnr) / nb_jobs;
704  const int end = (buf->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;
705 
706  for (int i = 0; i < s->nb_filters; i++) {
707  EqualizatorFilter *f = &s->filters[i];
708  double *bptr;
709 
710  if (f->gain == 0. || f->ignore)
711  continue;
712  if (f->channel < start ||
713  f->channel >= end)
714  continue;
715 
716  bptr = (double *)buf->extended_data[f->channel];
717  for (int n = 0; n < buf->nb_samples; n++) {
718  double sample = bptr[n];
719 
720  sample = process_sample(f->section, sample);
721  bptr[n] = sample;
722  }
723  }
724 
725  return 0;
726 }
727 
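/* [Editor's note] When curve drawing is enabled, filter_frame() stamps the
 * persistent response image with a timestamp just past the current audio
 * buffer and pushes a clone of it on the second (video) output before
 * forwarding the audio. */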
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (!ctx->is_disabled)
        ff_filter_execute(ctx, filter_channels, buf, NULL,
                          FFMIN(inlink->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));

    if (s->draw_curves) {
        AVFrame *clone;

        const int64_t pts = buf->pts +
            av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
                         outlink->time_base);
        int ret;

        s->video->pts = pts;
        clone = av_frame_clone(s->video);
        if (!clone)
            return AVERROR(ENOMEM);
        ret = ff_filter_frame(ctx->outputs[1], clone);
        if (ret < 0)
            return ret;
    }

    return ff_filter_frame(outlink, buf);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .flags        = AVFILTERPAD_FLAG_NEEDS_WRITABLE,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

const AVFilter ff_af_anequalizer = {
    .name          = "anequalizer",
    .description   = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
    .priv_size     = sizeof(AudioNEqualizerContext),
    .priv_class    = &anequalizer_class,
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(inputs),
    .outputs       = NULL,
    FILTER_QUERY_FUNC(query_formats),
    .process_command = process_command,
    .flags         = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
                     AVFILTER_FLAG_SLICE_THREADS |
                     AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
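
/* [Editor's note] Usage sketch (illustrative values, not from this file):
 *
 *   ffmpeg -i input.wav -af \
 *     "anequalizer=c0 f=200 w=100 g=-10 t=1|c1 f=200 w=100 g=-10 t=1" output.wav
 *
 * adds one Chebyshev type 1 band per channel that cuts roughly 10 dB around
 * 200 Hz; the "change" command noted above can retune a band at run time. */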