FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
avf_showwaves.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2012 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * audio to video multimedia filter
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/avstring.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/parseutils.h"
31 #include "avfilter.h"
32 #include "formats.h"
33 #include "audio.h"
34 #include "video.h"
35 #include "internal.h"
36 
43 };
44 
49 };
50 
51 struct frame_node {
53  struct frame_node *next;
54 };
55 
56 typedef struct {
57  const AVClass *class;
58  int w, h;
60  char *colors;
61  int buf_idx;
62  int16_t *buf_idy; /* y coordinate of previous sample for each channel */
64  int n;
65  int pixstep;
67  int mode; ///< ShowWavesMode
68  int scale; ///< ShowWavesScale
71 
72  int (*get_h)(int16_t sample, int height);
73  void (*draw_sample)(uint8_t *buf, int height, int linesize,
74  int16_t *prev_y, const uint8_t color[4], int h);
75 
76  /* single picture */
80  int64_t total_samples;
81  int64_t *sum; /* abs sum of the samples per channel */
83 
84 #define OFFSET(x) offsetof(ShowWavesContext, x)
85 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
86 
/* AVOption table for the showwaves filter: output size, drawing mode,
 * samples-per-column (n), frame rate, per-channel layout/colors and
 * amplitude scale. */
static const AVOption showwaves_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
        { "point", "draw a point for each sample",         0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT},         .flags=FLAGS, .unit="mode"},
        { "line",  "draw a line for each sample",          0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},          .flags=FLAGS, .unit="mode"},
        { "p2p",   "draw a line between samples",          0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},           .flags=FLAGS, .unit="mode"},
        { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
        { "lin", "linear",      0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
        { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
    { NULL }
};
105 
106 AVFILTER_DEFINE_CLASS(showwaves);
107 
/* Release everything the filter instance allocated: the pending output
 * frame, the per-channel previous-y array and color table and, in
 * single-picture mode, the queued audio frames and per-channel sums. */
{
    ShowWavesContext *showwaves = ctx->priv;

    av_frame_free(&showwaves->outpicref);
    av_freep(&showwaves->buf_idy);
    av_freep(&showwaves->fg);

    if (showwaves->single_pic) {
        /* walk the queued-audio list, freeing each node and its frame */
        struct frame_node *node = showwaves->audio_frames;
        while (node) {
            struct frame_node *tmp = node;

            node = node->next;
            av_frame_free(&tmp->frame);
            av_freep(&tmp);
        }
        av_freep(&showwaves->sum);
        showwaves->last_frame = NULL;
    }
}
129 
/* Format negotiation: accept the audio sample formats listed in the
 * sample_fmts table (with any channel layout / sample rate) on the input,
 * and offer the pix_fmts table on the video output. */
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}
160 
/* Map a signed 16-bit sample to a y coordinate on a linear scale,
 * centered: sample 0 lands on height/2, positive samples go up. */
static int get_lin_h(int16_t sample, int height)
{
    const int mid = height / 2;

    return mid - av_rescale(sample, mid, INT16_MAX);
}
165 
/* Map a sample's magnitude to a bar length on a linear scale
 * (used by the centered-line mode, which draws from the middle out). */
static int get_lin_h2(int16_t sample, int height)
{
    const int magnitude = FFABS(sample);

    return av_rescale(magnitude, height, INT16_MAX);
}
170 
/* Map a signed 16-bit sample to a y coordinate on a logarithmic scale,
 * centered around height/2; the sign of the sample picks the direction. */
static int get_log_h(int16_t sample, int height)
{
    const int mid = height / 2;
    const double scaled = log10(1 + FFABS(sample)) * mid / log10(1 + INT16_MAX);

    return mid - FFSIGN(sample) * scaled;
}
175 
/* Map a sample's magnitude to a bar length on a logarithmic scale
 * (centered-line mode companion of get_log_h). */
static int get_log_h2(int16_t sample, int height)
{
    const double magnitude = FFABS(sample);

    return log10(1 + magnitude) * height / log10(1 + INT16_MAX);
}
180 
/* Additively plot one RGBA pixel at row h of the column pointed to by buf.
 * Out-of-range rows are silently ignored; prev_y is unused in point mode. */
static void draw_sample_point_rgba(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    int c;

    if (h < 0 || h >= height)
        return;
    for (c = 0; c < 4; c++)
        buf[h * linesize + c] += color[c];
}
192 
193 static void draw_sample_line_rgba(uint8_t *buf, int height, int linesize,
194  int16_t *prev_y,
195  const uint8_t color[4], int h)
196 {
197  int k;
198  int start = height/2;
199  int end = av_clip(h, 0, height-1);
200  if (start > end)
201  FFSWAP(int16_t, start, end);
202  for (k = start; k < end; k++) {
203  buf[k * linesize + 0] += color[0];
204  buf[k * linesize + 1] += color[1];
205  buf[k * linesize + 2] += color[2];
206  buf[k * linesize + 3] += color[3];
207  }
208 }
209 
210 static void draw_sample_p2p_rgba(uint8_t *buf, int height, int linesize,
211  int16_t *prev_y,
212  const uint8_t color[4], int h)
213 {
214  int k;
215  if (h >= 0 && h < height) {
216  buf[h * linesize + 0] += color[0];
217  buf[h * linesize + 1] += color[1];
218  buf[h * linesize + 2] += color[2];
219  buf[h * linesize + 3] += color[3];
220  if (*prev_y && h != *prev_y) {
221  int start = *prev_y;
222  int end = av_clip(h, 0, height-1);
223  if (start > end)
224  FFSWAP(int16_t, start, end);
225  for (k = start + 1; k < end; k++) {
226  buf[k * linesize + 0] += color[0];
227  buf[k * linesize + 1] += color[1];
228  buf[k * linesize + 2] += color[2];
229  buf[k * linesize + 3] += color[3];
230  }
231  }
232  }
233  *prev_y = h;
234 }
235 
/* Draw an RGBA bar of length h centered vertically in the strip:
 * rows [(height-h)/2, (height-h)/2 + h). prev_y is unused. */
static void draw_sample_cline_rgba(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    int y, c;
    const int top    = (height - h) / 2;
    const int bottom = top + h;

    for (y = top; y < bottom; y++)
        for (c = 0; c < 4; c++)
            buf[y * linesize + c] += color[c];
}
250 
/* Additively plot one gray pixel at row h; out-of-range rows are ignored.
 * Only color[0] is used; prev_y is unused in point mode. */
static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    if (h < 0 || h >= height)
        return;
    buf[h * linesize] += color[0];
}
258 
259 static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
260  int16_t *prev_y,
261  const uint8_t color[4], int h)
262 {
263  int k;
264  int start = height/2;
265  int end = av_clip(h, 0, height-1);
266  if (start > end)
267  FFSWAP(int16_t, start, end);
268  for (k = start; k < end; k++)
269  buf[k * linesize] += color[0];
270 }
271 
272 static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
273  int16_t *prev_y,
274  const uint8_t color[4], int h)
275 {
276  int k;
277  if (h >= 0 && h < height) {
278  buf[h * linesize] += color[0];
279  if (*prev_y && h != *prev_y) {
280  int start = *prev_y;
281  int end = av_clip(h, 0, height-1);
282  if (start > end)
283  FFSWAP(int16_t, start, end);
284  for (k = start + 1; k < end; k++)
285  buf[k * linesize] += color[0];
286  }
287  }
288  *prev_y = h;
289 }
290 
/* Draw a gray bar of length h centered vertically in the strip:
 * rows [(height-h)/2, (height-h)/2 + h). prev_y is unused. */
static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    int y;
    const int top    = (height - h) / 2;
    const int bottom = top + h;

    for (y = top; y < bottom; y++)
        buf[y * linesize] += color[0];
}
301 
/**
 * Configure the video output link: derive samples-per-column, allocate
 * per-channel state, set the output frame rate, and select the drawing and
 * scaling callbacks matched to the negotiated pixel format and mode.
 * Also pre-computes the per-channel foreground colors.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int nb_channels = inlink->channels;
    char *colors, *saveptr = NULL;
    uint8_t x;
    int ch;

    /* single-picture mode accumulates per column itself, so one sample
     * per drawing step */
    if (showwaves->single_pic)
        showwaves->n = 1;

    /* if the user did not set n, derive it from sample rate, width and
     * requested frame rate (rounded to nearest, at least 1) */
    if (!showwaves->n)
        showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);

    showwaves->buf_idx = 0;
    if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
        av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
        return AVERROR(ENOMEM);
    }
    outlink->w = showwaves->w;
    outlink->h = showwaves->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    /* output frame rate = sample_rate / (n * w): one frame per w columns */
    outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
                                   (AVRational){showwaves->w,1});

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
           showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);

    /* pick the pixel-format-specific drawing routine for the chosen mode */
    switch (outlink->format) {
    case AV_PIX_FMT_GRAY8:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_gray; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_gray;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_gray;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 1;
        break;
    case AV_PIX_FMT_RGBA:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_rgba; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_rgba;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_rgba;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_rgba; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 4;
        break;
    }

    /* pick the amplitude-to-coordinate mapping; centered-line uses the
     * magnitude variants (h2), the other modes the signed ones */
    switch (showwaves->scale) {
    case SCALE_LIN:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_lin_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_LOG:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_log_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    }

    showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
    if (!showwaves->fg)
        return AVERROR(ENOMEM);

    colors = av_strdup(showwaves->colors);
    if (!colors)
        return AVERROR(ENOMEM);

    /* multiplication factor, pre-computed to avoid in-loop divisions;
     * NOTE(review): x becomes 0 when (channels-or-1 * n) > 255, which would
     * draw nothing — confirm this is acceptable for extreme settings */
    x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
    if (outlink->format == AV_PIX_FMT_RGBA) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };

        for (ch = 0; ch < nb_channels; ch++) {
            char *color;

            /* parse the '|'- or space-separated color list; on parse failure
             * the previous fg value is reused (av_parse_color result ignored) */
            color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
            if (color)
                av_parse_color(fg, color, -1, ctx);
            showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
            showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
            showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
            showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
        }
    } else {
        /* gray output ignores the color names and only uses the intensity */
        for (ch = 0; ch < nb_channels; ch++)
            showwaves->fg[4 * ch + 0] = x;
    }
    av_free(colors);

    return 0;
}
413 
414 inline static int push_frame(AVFilterLink *outlink)
415 {
416  AVFilterContext *ctx = outlink->src;
417  AVFilterLink *inlink = ctx->inputs[0];
418  ShowWavesContext *showwaves = outlink->src->priv;
419  int nb_channels = inlink->channels;
420  int ret, i;
421 
422  ret = ff_filter_frame(outlink, showwaves->outpicref);
423  showwaves->outpicref = NULL;
424  showwaves->buf_idx = 0;
425  for (i = 0; i < nb_channels; i++)
426  showwaves->buf_idy[i] = 0;
427  return ret;
428 }
429 
430 static int push_single_pic(AVFilterLink *outlink)
431 {
432  AVFilterContext *ctx = outlink->src;
433  AVFilterLink *inlink = ctx->inputs[0];
434  ShowWavesContext *showwaves = ctx->priv;
435  int64_t n = 0, max_samples = showwaves->total_samples / outlink->w;
436  AVFrame *out = showwaves->outpicref;
437  struct frame_node *node;
438  const int nb_channels = inlink->channels;
439  const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
440  const int linesize = out->linesize[0];
441  const int pixstep = showwaves->pixstep;
442  int col = 0;
443  int64_t *sum = showwaves->sum;
444 
445  if (max_samples == 0) {
446  av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
447  return AVERROR(EINVAL);
448  }
449 
450  av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples);
451 
452  memset(sum, 0, nb_channels);
453 
454  for (node = showwaves->audio_frames; node; node = node->next) {
455  int i;
456  const AVFrame *frame = node->frame;
457  const int16_t *p = (const int16_t *)frame->data[0];
458 
459  for (i = 0; i < frame->nb_samples; i++) {
460  int ch;
461 
462  for (ch = 0; ch < nb_channels; ch++)
463  sum[ch] += abs(p[ch + i*nb_channels]) << 1;
464  if (n++ == max_samples) {
465  for (ch = 0; ch < nb_channels; ch++) {
466  int16_t sample = sum[ch] / max_samples;
467  uint8_t *buf = out->data[0] + col * pixstep;
468  int h;
469 
470  if (showwaves->split_channels)
471  buf += ch*ch_height*linesize;
472  av_assert0(col < outlink->w);
473  h = showwaves->get_h(sample, ch_height);
474  showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
475  sum[ch] = 0;
476  }
477  col++;
478  n = 0;
479  }
480  }
481  }
482 
483  return push_frame(outlink);
484 }
485 
486 
487 static int request_frame(AVFilterLink *outlink)
488 {
489  ShowWavesContext *showwaves = outlink->src->priv;
490  AVFilterLink *inlink = outlink->src->inputs[0];
491  int ret;
492 
493  ret = ff_request_frame(inlink);
494  if (ret == AVERROR_EOF && showwaves->outpicref) {
495  if (showwaves->single_pic)
496  push_single_pic(outlink);
497  else
498  push_frame(outlink);
499  }
500 
501  return ret;
502 }
503 
504 static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
505  const AVFilterLink *inlink, AVFilterLink *outlink,
506  const AVFrame *in)
507 {
508  if (!showwaves->outpicref) {
509  int j;
510  AVFrame *out = showwaves->outpicref =
511  ff_get_video_buffer(outlink, outlink->w, outlink->h);
512  if (!out)
513  return AVERROR(ENOMEM);
514  out->width = outlink->w;
515  out->height = outlink->h;
516  out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
517  av_make_q(1, inlink->sample_rate),
518  outlink->time_base);
519  for (j = 0; j < outlink->h; j++)
520  memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
521  }
522  return 0;
523 }
524 
/* Instance init shared by showwaves and showwavespic: the pic variant is
 * detected by filter name and forced into single-picture mode with
 * centered-line drawing. */
{
    ShowWavesContext *showwaves = ctx->priv;

    if (!strcmp(ctx->filter->name, "showwavespic")) {
        showwaves->single_pic = 1;
        showwaves->mode = MODE_CENTERED_LINE;
    }

    return 0;
}
536 
537 #if CONFIG_SHOWWAVES_FILTER
538 
/**
 * Streaming-mode input callback: draw every incoming sample into the
 * pending picture, advancing one output column every n samples, and push
 * a frame each time the picture is full (buf_idx reaches the width).
 * Consumes (frees) insamples.
 */
static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outpicref = showwaves->outpicref;
    int16_t *p = (int16_t *)insamples->data[0];   /* packed s16, interleaved */
    int nb_channels = inlink->channels;
    int i, j, ret = 0;
    const int pixstep = showwaves->pixstep;
    const int n = showwaves->n;                   /* samples per column */
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;

    /* draw data in the buffer */
    for (i = 0; i < nb_samples; i++) {

        /* make sure a zeroed output picture exists (pts derived from p) */
        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;
        outpicref = showwaves->outpicref;

        /* draw one sample of every channel into the current column */
        for (j = 0; j < nb_channels; j++) {
            uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
            const int linesize = outpicref->linesize[0];
            int h;

            if (showwaves->split_channels)
                buf += j*ch_height*linesize;
            h = showwaves->get_h(*p++, ch_height);
            showwaves->draw_sample(buf, ch_height, linesize,
                                   &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
        }

        /* advance to the next column once n samples have been drawn */
        showwaves->sample_count_mod++;
        if (showwaves->sample_count_mod == n) {
            showwaves->sample_count_mod = 0;
            showwaves->buf_idx++;
        }
        /* picture full: ship it (push_frame resets buf_idx/outpicref) */
        if (showwaves->buf_idx == showwaves->w)
            if ((ret = push_frame(outlink)) < 0)
                break;
        outpicref = showwaves->outpicref;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
588 
/* Single audio input pad for the streaming showwaves filter. */
static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showwaves_filter_frame,
    },
    { NULL }
};
597 
/* Single video output pad; config_output sets up geometry and callbacks,
 * request_frame drives the pull side and flushes on EOF. */
static const AVFilterPad showwaves_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
607 
/* Filter definition: audio in, continuous waveform video stream out. */
AVFilter ff_avf_showwaves = {
    .name          = "showwaves",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwaves_inputs,
    .outputs       = showwaves_outputs,
    .priv_class    = &showwaves_class,
};
619 
620 #endif // CONFIG_SHOWWAVES_FILTER
621 
622 #if CONFIG_SHOWWAVESPIC_FILTER
623 
624 #define OFFSET(x) offsetof(ShowWavesContext, x)
625 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
626 
/* AVOption table for the single-picture variant: no mode/rate/n options,
 * since the picture geometry is fixed and the mode is forced in init(). */
static const AVOption showwavespic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
        { "lin", "linear",      0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
        { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
    { NULL }
};
637 
638 AVFILTER_DEFINE_CLASS(showwavespic);
639 
640 static int showwavespic_config_input(AVFilterLink *inlink)
641 {
642  AVFilterContext *ctx = inlink->dst;
643  ShowWavesContext *showwaves = ctx->priv;
644 
645  if (showwaves->single_pic) {
646  showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
647  if (!showwaves->sum)
648  return AVERROR(ENOMEM);
649  }
650 
651  return 0;
652 }
653 
/**
 * Single-picture input callback: do not draw yet — make sure the output
 * picture exists, then append the audio frame to the queue (taking
 * ownership of insamples) so push_single_pic() can render everything at
 * EOF. Frames are only freed here on the error/non-single-pic paths.
 */
static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int16_t *p = (int16_t *)insamples->data[0];
    int ret = 0;

    if (showwaves->single_pic) {
        struct frame_node *f;

        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;

        /* queue the audio frame */
        f = av_malloc(sizeof(*f));
        if (!f) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        f->frame = insamples;   /* ownership transferred to the queue */
        f->next  = NULL;
        if (!showwaves->last_frame) {
            showwaves->audio_frames =
            showwaves->last_frame   = f;
        } else {
            showwaves->last_frame->next = f;
            showwaves->last_frame = f;
        }
        showwaves->total_samples += insamples->nb_samples;

        /* skip the free below: insamples now lives in the queue */
        return 0;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
693 
/* Single audio input pad for the single-picture filter variant. */
static const AVFilterPad showwavespic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = showwavespic_config_input,
        .filter_frame = showwavespic_filter_frame,
    },
    { NULL }
};
703 
/* Single video output pad; shares config_output/request_frame with the
 * streaming variant (request_frame triggers the one-shot render at EOF). */
static const AVFilterPad showwavespic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
713 
/* Filter definition: audio in, one waveform picture out at EOF. */
AVFilter ff_avf_showwavespic = {
    .name          = "showwavespic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwavespic_inputs,
    .outputs       = showwavespic_outputs,
    .priv_class    = &showwavespic_class,
};
725 
726 #endif // CONFIG_SHOWWAVESPIC_FILTER
ShowWavesMode
Definition: avf_showwaves.c:37
#define NULL
Definition: coverity.c:32
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
AVOption.
Definition: opt.h:245
static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p, const AVFilterLink *inlink, AVFilterLink *outlink, const AVFrame *in)
int16_t * buf_idy
Definition: avf_showwaves.c:62
#define OFFSET(x)
Definition: avf_showwaves.c:84
Main libavfilter public API header.
ShowWavesScale
Definition: avf_showwaves.c:45
static void draw_sample_line_gray(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
static enum AVSampleFormat formats[]
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:76
static AVRational av_make_q(int num, int den)
Create a rational.
Definition: rational.h:53
#define sample
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
static void draw_sample_cline_rgba(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
const char * name
Pad name.
Definition: internal.h:59
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:313
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:435
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1180
uint8_t
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
mode
Definition: f_perms.c:27
AVOptions.
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:94
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:268
static av_cold void uninit(AVFilterContext *ctx)
static AVFrame * frame
#define height
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define av_log(a,...)
int mode
ShowWavesMode.
Definition: avf_showwaves.c:67
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
Definition: parseutils.c:349
A filter pad used for either input or output.
Definition: internal.h:53
int64_t total_samples
Definition: avf_showwaves.c:80
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVRational rate
Definition: avf_showwaves.c:59
int width
width and height of the video frame
Definition: frame.h:236
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
struct frame_node * last_frame
Definition: avf_showwaves.c:79
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:153
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:176
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
void * priv
private data for use by the filter
Definition: avfilter.h:320
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
simple assert() macros that are a bit more flexible than ISO C assert().
#define FFMAX(a, b)
Definition: common.h:94
static int get_lin_h2(int16_t sample, int height)
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:94
AVFrame * outpicref
Definition: avf_showwaves.c:63
audio channel layout utility functions
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
struct frame_node * audio_frames
Definition: avf_showwaves.c:78
static void draw_sample_p2p_rgba(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:440
static int push_single_pic(AVFilterLink *outlink)
#define FFSIGN(a)
Definition: common.h:73
typedef void(APIENTRY *FF_PFNGLACTIVETEXTUREPROC)(GLenum texture)
AVFormatContext * ctx
Definition: movenc.c:48
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
int n
Definition: avisynth_c.h:547
static const AVFilterPad outputs[]
Definition: af_afftfilt.c:386
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (w...
Definition: formats.c:401
A list of supported channel layouts.
Definition: formats.h:85
struct frame_node * next
Definition: avf_showwaves.c:53
int scale
ShowWavesScale.
Definition: avf_showwaves.c:68
static const AVFilterPad inputs[]
Definition: af_afftfilt.c:376
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
static int config_output(AVFilterLink *outlink)
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:267
static int push_frame(AVFilterLink *outlink)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
static int request_frame(AVFilterLink *outlink)
static int get_log_h2(int16_t sample, int height)
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
static void draw_sample_line_rgba(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
void * buf
Definition: avisynth_c.h:553
static int get_log_h(int16_t sample, int height)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
AVFILTER_DEFINE_CLASS(showwaves)
int(* get_h)(int16_t sample, int height)
Definition: avf_showwaves.c:72
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:142
rational number numerator/denominator
Definition: rational.h:43
static int query_formats(AVFilterContext *ctx)
offset must point to AVRational
Definition: opt.h:235
const char * name
Filter name.
Definition: avfilter.h:146
static void draw_sample_point_gray(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
offset must point to two consecutive integers
Definition: opt.h:232
misc parsing utilities
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:317
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:395
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok()...
Definition: avstring.c:184
static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
Y , 8bpp.
Definition: pixfmt.h:70
void(* draw_sample)(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
Definition: avf_showwaves.c:73
if(ret< 0)
Definition: vf_mcdeint.c:282
signed 16 bits
Definition: samplefmt.h:61
static int get_lin_h(int16_t sample, int height)
#define FLAGS
Definition: avf_showwaves.c:85
#define av_free(p)
static uint8_t tmp[8]
Definition: des.c:38
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:305
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:229
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int height
Definition: frame.h:236
FILE * out
Definition: movenc.c:54
#define av_freep(p)
void INT64 start
Definition: avisynth_c.h:553
#define av_malloc_array(a, b)
static void draw_sample_point_rgba(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:369
#define FFSWAP(type, a, b)
Definition: common.h:99
int nb_channels
internal API functions
static const AVOption showwaves_options[]
Definition: avf_showwaves.c:87
static av_cold int init(AVFilterContext *ctx)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:241
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:308
static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize, int16_t *prev_y, const uint8_t color[4], int h)
for(j=16;j >0;--j)
AVFrame * frame
Definition: avf_showwaves.c:52