FFmpeg
avf_showvolume.c
/*
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/eval.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/xga_font_data.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "audio.h"
#include "video.h"
#include "internal.h"

static const char *const var_names[] = {   "VOLUME",   "CHANNEL",   "PEAK",        NULL };
enum                                   { VAR_VOLUME, VAR_CHANNEL, VAR_PEAK, VAR_VARS_NB };
enum DisplayScale                      { LINEAR, LOG, NB_DISPLAY_SCALE };

typedef struct ShowVolumeContext {
    const AVClass *class;
    int w, h;
    int b;
    double f;
    AVRational frame_rate;
    char *color;
    int orientation;
    int step;
    float bgopacity;
    int mode;

    int nb_samples;
    AVFrame *out;
    AVExpr *c_expr;
    int draw_text;
    int draw_volume;
    double *values;
    uint32_t *color_lut;
    float *max;
    float rms_factor;
    int display_scale;

    double draw_persistent_duration; /* in seconds */
    uint8_t persistant_max_rgba[4];
    int persistent_max_frames;  /* number of frames to check max value */
    float *max_persistent;      /* max value for draw_persistent_max for each channel */
    int *nb_frames_max_display; /* number of frames for each channel, for displaying the max value */

    void (*meter)(float *src, int nb_samples, float *max, float factor);
} ShowVolumeContext;

#define OFFSET(x) offsetof(ShowVolumeContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

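/* Filter options. Illustrative command line (hypothetical file names, not part
 * of this file), turning an audio file into a volume-meter video:
 *   ffmpeg -i input.wav -filter_complex "showvolume=w=640:h=32:o=0" meter.mp4
 */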
static const AVOption showvolume_options[] = {
    { "rate", "set video rate",  OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
    { "r",    "set video rate",  OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
    { "b", "set border width",   OFFSET(b), AV_OPT_TYPE_INT, {.i64=1}, 0, 5, FLAGS },
    { "w", "set channel width",  OFFSET(w), AV_OPT_TYPE_INT, {.i64=400}, 80, 8192, FLAGS },
    { "h", "set channel height", OFFSET(h), AV_OPT_TYPE_INT, {.i64=20}, 1, 900, FLAGS },
    { "f", "set fade",           OFFSET(f), AV_OPT_TYPE_DOUBLE, {.dbl=0.95}, 0, 1, FLAGS },
    { "c", "set volume color expression", OFFSET(color), AV_OPT_TYPE_STRING, {.str="PEAK*255+floor((1-PEAK)*255)*256+0xff000000"}, 0, 0, FLAGS },
    { "t", "display channel names", OFFSET(draw_text), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
    { "v", "display volume value", OFFSET(draw_volume), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
    { "dm", "duration for max value display", OFFSET(draw_persistent_duration), AV_OPT_TYPE_DOUBLE, {.dbl=0.}, 0, 9000, FLAGS },
    { "dmc", "set color of the max value line", OFFSET(persistant_max_rgba), AV_OPT_TYPE_COLOR, {.str = "orange"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "o", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "orientation" },
    {   "h", "horizontal", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "orientation" },
    {   "v", "vertical",   0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "orientation" },
    { "s", "set step size", OFFSET(step), AV_OPT_TYPE_INT, {.i64=0}, 0, 5, FLAGS },
    { "p", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 1, FLAGS },
    { "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "mode" },
    {   "p", "peak", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
    {   "r", "rms",  0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
    { "ds", "set display scale", OFFSET(display_scale), AV_OPT_TYPE_INT, {.i64=LINEAR}, LINEAR, NB_DISPLAY_SCALE - 1, FLAGS, "display_scale" },
    {   "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "display_scale" },
    {   "log", "log",    0, AV_OPT_TYPE_CONST, {.i64=LOG},    0, 0, FLAGS, "display_scale" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showvolume);

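/* Parse the per-channel color expression once; it is evaluated later in
 * config_output() to build the color look-up table. */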
static av_cold int init(AVFilterContext *ctx)
{
    ShowVolumeContext *s = ctx->priv;
    int ret;

    if (s->color) {
        ret = av_expr_parse(&s->c_expr, s->color, var_names,
                            NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0)
            return ret;
    }

    return 0;
}

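/* Negotiate formats: planar float audio with any channel count and sample
 * rate on the input, RGBA video on the output. */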
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}

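/* Metering callbacks selected by the "m" option: absolute peak over the block,
 * or a running mean-square level smoothed by the factor argument. */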
static void find_peak(float *src, int nb_samples, float *peak, float factor)
{
    int i;

    *peak = 0;
    for (i = 0; i < nb_samples; i++)
        *peak = FFMAX(*peak, FFABS(src[i]));
}

static void find_rms(float *src, int nb_samples, float *rms, float factor)
{
    int i;

    for (i = 0; i < nb_samples; i++)
        *rms += factor * (src[i] * src[i] - *rms);
}

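/* Allocate per-channel state (expression variables, color LUT, level storage)
 * and pick the metering function once the input parameters are known. */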
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowVolumeContext *s = ctx->priv;

    s->nb_samples = FFMAX(1, av_rescale(inlink->sample_rate, s->frame_rate.den, s->frame_rate.num));
    s->values = av_calloc(inlink->channels * VAR_VARS_NB, sizeof(double));
    if (!s->values)
        return AVERROR(ENOMEM);

    s->color_lut = av_calloc(s->w, sizeof(*s->color_lut) * inlink->channels);
    if (!s->color_lut)
        return AVERROR(ENOMEM);

    s->max = av_calloc(inlink->channels, sizeof(*s->max));
    if (!s->max)
        return AVERROR(ENOMEM);

    s->rms_factor = 10000. / inlink->sample_rate;

    switch (s->mode) {
    case 0: s->meter = find_peak; break;
    case 1: s->meter = find_rms; break;
    default: return AVERROR_BUG;
    }

    if (s->draw_persistent_duration > 0.) {
        s->persistent_max_frames = (int) FFMAX(av_q2d(s->frame_rate) * s->draw_persistent_duration, 1);
        s->max_persistent = av_calloc(inlink->channels * s->persistent_max_frames, sizeof(*s->max_persistent));
        s->nb_frames_max_display = av_calloc(inlink->channels * s->persistent_max_frames, sizeof(*s->nb_frames_max_display));
    }
    return 0;
}

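/* Size the output canvas from channel count, bar size and orientation, and
 * precompute one color LUT entry per position and channel from the "c"
 * expression. */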
static int config_output(AVFilterLink *outlink)
{
    ShowVolumeContext *s = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ch;

    if (s->orientation) {
        outlink->h = s->w;
        outlink->w = s->h * inlink->channels + (inlink->channels - 1) * s->b;
    } else {
        outlink->w = s->w;
        outlink->h = s->h * inlink->channels + (inlink->channels - 1) * s->b;
    }

    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->frame_rate = s->frame_rate;

    for (ch = 0; ch < inlink->channels; ch++) {
        int i;

        for (i = 0; i < s->w; i++) {
            float max = i / (float)(s->w - 1);

            s->values[ch * VAR_VARS_NB + VAR_PEAK] = max;
            s->values[ch * VAR_VARS_NB + VAR_VOLUME] = 20.0 * log10(max);
            s->values[ch * VAR_VARS_NB + VAR_CHANNEL] = ch;
            s->color_lut[ch * s->w + i] = av_expr_eval(s->c_expr, &s->values[ch * VAR_VARS_NB], NULL);
        }
    }

    return 0;
}

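/* Draw text with the built-in 8x8 CGA ROM font by inverting the RGBA pixels
 * under each set font bit; o != 0 selects vertical layout. */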
static void drawtext(AVFrame *pic, int x, int y, const char *txt, int o)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;

    for (i = 0; txt[i]; i++) {
        int char_y, mask;

        if (o) { /* vertical orientation */
            for (char_y = font_height - 1; char_y >= 0; char_y--) {
                uint8_t *p = pic->data[0] + (y + i * 10) * pic->linesize[0] + x * 4;
                for (mask = 0x80; mask; mask >>= 1) {
                    if (font[txt[i] * font_height + font_height - 1 - char_y] & mask)
                        AV_WN32(&p[char_y * 4], ~AV_RN32(&p[char_y * 4]));
                    p += pic->linesize[0];
                }
            }
        } else { /* horizontal orientation */
            uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
            for (char_y = 0; char_y < font_height; char_y++) {
                for (mask = 0x80; mask; mask >>= 1) {
                    if (font[txt[i] * font_height + char_y] & mask)
                        AV_WN32(p, ~AV_RN32(p));
                    p += 4;
                }
                p += pic->linesize[0] - 8 * 4;
            }
        }
    }
}

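/* Fill the whole canvas with black at the configured background opacity. */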
static void clear_picture(ShowVolumeContext *s, AVFilterLink *outlink)
{
    int i, j;
    const uint32_t bg = (uint32_t)(s->bgopacity * 255) << 24;

    for (i = 0; i < outlink->h; i++) {
        uint32_t *dst = (uint32_t *)(s->out->data[0] + i * s->out->linesize[0]);
        for (j = 0; j < outlink->w; j++)
            AV_WN32A(dst + j, bg);
    }
}

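/* Map a level in [0,1] to a pixel offset, either linearly or on a log scale,
 * taking the bar orientation into account. */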
static inline int calc_max_draw(ShowVolumeContext *s, AVFilterLink *outlink, float max)
{
    float max_val;
    if (s->display_scale == LINEAR) {
        max_val = max;
    } else { /* log */
        max_val = av_clipf(0.21 * log10(max) + 1, 0, 1);
    }
    if (s->orientation) { /* vertical */
        return outlink->h - outlink->h * max_val;
    } else { /* horizontal */
        return s->w * max_val;
    }
}

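/* Track the per-channel maximum shown by the "dm"/"dmc" options; the stored
 * value is refreshed when a new maximum arrives or after persistent_max_frames
 * frames have elapsed. */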
static inline void calc_persistent_max(ShowVolumeContext *s, float max, int channel)
{
    /* update max value for persistent max display */
    if ((max >= s->max_persistent[channel]) || (s->nb_frames_max_display[channel] >= s->persistent_max_frames)) { /* update max value for display */
        s->max_persistent[channel] = max;
        s->nb_frames_max_display[channel] = 0; /* reset display frame count */
    } else {
        s->nb_frames_max_display[channel] += 1; /* increment display frame count */
    }
}

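/* Paint a one-pixel line at the persistent maximum position in the "dmc"
 * color. */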
static inline void draw_max_line(ShowVolumeContext *s, int max_draw, int channel)
{
    int k;
    if (s->orientation) { /* vertical */
        uint8_t *dst = s->out->data[0] + max_draw * s->out->linesize[0] + channel * (s->b + s->h) * 4;
        for (k = 0; k < s->h; k++) {
            memcpy(dst + k * 4, s->persistant_max_rgba, sizeof(s->persistant_max_rgba));
        }
    } else { /* horizontal */
        for (k = 0; k < s->h; k++) {
            uint8_t *dst = s->out->data[0] + (channel * s->h + channel * s->b + k) * s->out->linesize[0];
            memcpy(dst + max_draw * 4, s->persistant_max_rgba, sizeof(s->persistant_max_rgba));
        }
    }
}

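/* Render one video frame from one block of input samples: fade the previous
 * picture, draw each channel's bar, optional channel names and persistent
 * maxima, then clone the canvas and print the volume values in dB. */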
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowVolumeContext *s = ctx->priv;
    const int step = s->step;
    int c, j, k, max_draw;
    AVFrame *out;

    if (!s->out || s->out->width  != outlink->w ||
        s->out->height != outlink->h) {
        av_frame_free(&s->out);
        s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->out) {
            av_frame_free(&insamples);
            return AVERROR(ENOMEM);
        }
        clear_picture(s, outlink);
    }
    s->out->pts = insamples->pts;

    if ((s->f < 1.) && (s->f > 0.)) {
        for (j = 0; j < outlink->h; j++) {
            uint8_t *dst = s->out->data[0] + j * s->out->linesize[0];
            const uint32_t alpha = s->bgopacity * 255;

            for (k = 0; k < outlink->w; k++) {
                dst[k * 4 + 0] = FFMAX(dst[k * 4 + 0] * s->f, 0);
                dst[k * 4 + 1] = FFMAX(dst[k * 4 + 1] * s->f, 0);
                dst[k * 4 + 2] = FFMAX(dst[k * 4 + 2] * s->f, 0);
                dst[k * 4 + 3] = FFMAX(dst[k * 4 + 3] * s->f, alpha);
            }
        }
    } else if (s->f == 0.) {
        clear_picture(s, outlink);
    }

    if (s->orientation) { /* vertical */
        for (c = 0; c < inlink->channels; c++) {
            float *src = (float *)insamples->extended_data[c];
            uint32_t *lut = s->color_lut + s->w * c;
            float max;

            s->meter(src, insamples->nb_samples, &s->max[c], s->rms_factor);
            max = s->max[c];

            s->values[c * VAR_VARS_NB + VAR_VOLUME] = 20.0 * log10(max);
            max = av_clipf(max, 0, 1);
            max_draw = calc_max_draw(s, outlink, max);

            for (j = max_draw; j < s->w; j++) {
                uint8_t *dst = s->out->data[0] + j * s->out->linesize[0] + c * (s->b + s->h) * 4;
                for (k = 0; k < s->h; k++) {
                    AV_WN32A(&dst[k * 4], lut[s->w - j - 1]);
                    if (j & step)
                        j += step;
                }
            }

            if (s->h >= 8 && s->draw_text) {
                const char *channel_name = av_get_channel_name(av_channel_layout_extract_channel(insamples->channel_layout, c));
                if (!channel_name)
                    continue;
                drawtext(s->out, c * (s->h + s->b) + (s->h - 10) / 2, outlink->h - 35, channel_name, 1);
            }

            if (s->draw_persistent_duration > 0.) {
                calc_persistent_max(s, max, c);
                max_draw = FFMAX(0, calc_max_draw(s, outlink, s->max_persistent[c]) - 1);
                draw_max_line(s, max_draw, c);
            }
        }
    } else { /* horizontal */
        for (c = 0; c < inlink->channels; c++) {
            float *src = (float *)insamples->extended_data[c];
            uint32_t *lut = s->color_lut + s->w * c;
            float max;

            s->meter(src, insamples->nb_samples, &s->max[c], s->rms_factor);
            max = s->max[c];

            s->values[c * VAR_VARS_NB + VAR_VOLUME] = 20.0 * log10(max);
            max = av_clipf(max, 0, 1);
            max_draw = calc_max_draw(s, outlink, max);

            for (j = 0; j < s->h; j++) {
                uint8_t *dst = s->out->data[0] + (c * s->h + c * s->b + j) * s->out->linesize[0];

                for (k = 0; k < max_draw; k++) {
                    AV_WN32A(dst + k * 4, lut[k]);
                    if (k & step)
                        k += step;
                }
            }

            if (s->h >= 8 && s->draw_text) {
                const char *channel_name = av_get_channel_name(av_channel_layout_extract_channel(insamples->channel_layout, c));
                if (!channel_name)
                    continue;
                drawtext(s->out, 2, c * (s->h + s->b) + (s->h - 8) / 2, channel_name, 0);
            }

            if (s->draw_persistent_duration > 0.) {
                calc_persistent_max(s, max, c);
                max_draw = FFMAX(0, calc_max_draw(s, outlink, s->max_persistent[c]) - 1);
                draw_max_line(s, max_draw, c);
            }
        }
    }

    av_frame_free(&insamples);
    out = av_frame_clone(s->out);
    if (!out)
        return AVERROR(ENOMEM);
    av_frame_make_writable(out);

    /* draw volume level */
    for (c = 0; c < inlink->channels && s->h >= 8 && s->draw_volume; c++) {
        char buf[16];

        if (s->orientation) { /* vertical */
            snprintf(buf, sizeof(buf), "%.2f", s->values[c * VAR_VARS_NB + VAR_VOLUME]);
            drawtext(out, c * (s->h + s->b) + (s->h - 8) / 2, 2, buf, 1);
        } else { /* horizontal */
            snprintf(buf, sizeof(buf), "%.2f", s->values[c * VAR_VARS_NB + VAR_VOLUME]);
            drawtext(out, FFMAX(0, s->w - 8 * (int)strlen(buf)), c * (s->h + s->b) + (s->h - 8) / 2, buf, 0);
        }
    }

    return ff_filter_frame(outlink, out);
}

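/* Consume fixed-size blocks of nb_samples from the input FIFO and emit one
 * output video frame per block. */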
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    ShowVolumeContext *s = ctx->priv;
    AVFrame *in = NULL;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

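/* Release the canvas, the parsed color expression and per-channel buffers. */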
static av_cold void uninit(AVFilterContext *ctx)
{
    ShowVolumeContext *s = ctx->priv;

    av_frame_free(&s->out);
    av_expr_free(s->c_expr);
    av_freep(&s->values);
    av_freep(&s->color_lut);
    av_freep(&s->max);
}

static const AVFilterPad showvolume_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad showvolume_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_showvolume = {
    .name          = "showvolume",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio volume to video output."),
    .init          = init,
    .activate      = activate,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowVolumeContext),
    .inputs        = showvolume_inputs,
    .outputs       = showvolume_outputs,
    .priv_class    = &showvolume_class,
};