FFmpeg
filter_audio.c
/*
 * copyright (c) 2013 Andrew Kelley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * libavfilter API usage example.
 *
 * @example filter_audio.c
 * This example generates sine wave audio, passes it through a simple filter
 * chain, and then computes the MD5 checksum of the output data.
 *
 * The filter chain it uses is:
 * (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
 *
 * abuffer: This provides the endpoint where you can feed the decoded samples.
 * volume: In this example we hardcode it to 0.90.
 * aformat: This converts the samples to the sample rate, channel layout,
 *          and sample format required by the audio device.
 * abuffersink: This provides the endpoint where you can read the samples after
 *              they have passed through the filter chain.
 */
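
/*
 * A possible way to build and run this example (the pkg-config module names
 * below are an assumption and may vary between installations; -lm is usually
 * needed for sin()):
 *
 *   gcc -o filter_audio filter_audio.c \
 *       $(pkg-config --cflags --libs libavfilter libavutil) -lm
 *   ./filter_audio 1.0
 *
 * For every filtered frame it prints the MD5 checksum of each output plane.
 */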

#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include "libavutil/channel_layout.h"
#include "libavutil/md5.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

#define INPUT_SAMPLERATE 48000
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0

#define VOLUME_VAL 0.90

static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
                             AVFilterContext **sink)
{
    AVFilterGraph *filter_graph;
    AVFilterContext *abuffer_ctx;
    const AVFilter *abuffer;
    AVFilterContext *volume_ctx;
    const AVFilter *volume;
    AVFilterContext *aformat_ctx;
    const AVFilter *aformat;
    AVFilterContext *abuffersink_ctx;
    const AVFilter *abuffersink;

    AVDictionary *options_dict = NULL;
    char options_str[1024];
    char ch_layout[64];

    int err;

    /* Create a new filtergraph, which will contain all the filters. */
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        fprintf(stderr, "Unable to create filter graph.\n");
        return AVERROR(ENOMEM);
    }

    /* Create the abuffer filter;
     * it will be used for feeding the data into the graph. */
    abuffer = avfilter_get_by_name("abuffer");
    if (!abuffer) {
        fprintf(stderr, "Could not find the abuffer filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
    if (!abuffer_ctx) {
        fprintf(stderr, "Could not allocate the abuffer instance.\n");
        return AVERROR(ENOMEM);
    }

    /* Set the filter options through the AVOptions API. */
    av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
    av_opt_set    (abuffer_ctx, "channel_layout", ch_layout,                            AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (abuffer_ctx, "sample_fmt",     av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
    av_opt_set_q  (abuffer_ctx, "time_base",      (AVRational){ 1, INPUT_SAMPLERATE },  AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_ctx, "sample_rate",    INPUT_SAMPLERATE,                     AV_OPT_SEARCH_CHILDREN);

    /* Now initialize the filter; we pass NULL options, since we have already
     * set all the options above. */
    err = avfilter_init_str(abuffer_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffer filter.\n");
        return err;
    }
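    /* Note: equivalently, all of the abuffer options above could have been
     * passed as a single args string to avfilter_init_str(); an illustrative
     * (not used here) form would be
     * "sample_rate=48000:sample_fmt=fltp:channel_layout=5.0(side):time_base=1/48000". */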

    /* Create volume filter. */
    volume = avfilter_get_by_name("volume");
    if (!volume) {
        fprintf(stderr, "Could not find the volume filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
    if (!volume_ctx) {
        fprintf(stderr, "Could not allocate the volume instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A different way of passing the options is as key/value pairs in a
     * dictionary. */
    av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
    err = avfilter_init_dict(volume_ctx, &options_dict);
    av_dict_free(&options_dict);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the volume filter.\n");
        return err;
    }

    /* Create the aformat filter;
     * it ensures that the output is of the format we want. */
    aformat = avfilter_get_by_name("aformat");
    if (!aformat) {
        fprintf(stderr, "Could not find the aformat filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
    if (!aformat_ctx) {
        fprintf(stderr, "Could not allocate the aformat instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A third way of passing the options is in a string of the form
     * key1=value1:key2=value2.... */
    snprintf(options_str, sizeof(options_str),
             "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
             av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
             (uint64_t)AV_CH_LAYOUT_STEREO);
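    /* With the values used here the string expands to
     * "sample_fmts=s16:sample_rates=44100:channel_layouts=0x3"
     * (AV_CH_LAYOUT_STEREO is the bitmask 0x3). */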
    err = avfilter_init_str(aformat_ctx, options_str);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
        return err;
    }

    /* Finally create the abuffersink filter;
     * it will be used to get the filtered data out of the graph. */
    abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffersink) {
        fprintf(stderr, "Could not find the abuffersink filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
    if (!abuffersink_ctx) {
        fprintf(stderr, "Could not allocate the abuffersink instance.\n");
        return AVERROR(ENOMEM);
    }

    /* This filter takes no options. */
    err = avfilter_init_str(abuffersink_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffersink instance.\n");
        return err;
    }

    /* Connect the filters;
     * in this simple case the filters just form a linear chain. */
    err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
    if (err >= 0)
        err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
    if (err >= 0)
        err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
    if (err < 0) {
        fprintf(stderr, "Error connecting filters\n");
        return err;
    }

    /* Configure the graph. */
    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
        return err;
    }

    *graph = filter_graph;
    *src   = abuffer_ctx;
    *sink  = abuffersink_ctx;

    return 0;
}

/* Do something useful with the filtered data: this simple
 * example just prints the MD5 checksum of each plane to stdout. */
static int process_output(struct AVMD5 *md5, AVFrame *frame)
{
    int planar = av_sample_fmt_is_planar(frame->format);
    int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
    int planes = planar ? channels : 1;
    int bps = av_get_bytes_per_sample(frame->format);
    int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
    int i, j;

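    /* For planar sample formats every channel is stored in its own plane in
     * extended_data[]; for packed formats all channels share one interleaved
     * plane, so only extended_data[0] is hashed. */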
    for (i = 0; i < planes; i++) {
        uint8_t checksum[16];

        av_md5_init(md5);
        av_md5_sum(checksum, frame->extended_data[i], plane_size);

        fprintf(stdout, "plane %d: 0x", i);
        for (j = 0; j < sizeof(checksum); j++)
            fprintf(stdout, "%02X", checksum[j]);
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n");

    return 0;
}

/* Construct a frame of audio data to be filtered;
 * this simple example just synthesizes a sine wave. */
static int get_input(AVFrame *frame, int frame_num)
{
    int err, i, j;

#define FRAME_SIZE 1024

    /* Set up the frame properties and allocate the buffer for the data. */
    frame->sample_rate = INPUT_SAMPLERATE;
    frame->format = INPUT_FORMAT;
    frame->channel_layout = INPUT_CHANNEL_LAYOUT;
    frame->nb_samples = FRAME_SIZE;
    frame->pts = frame_num * FRAME_SIZE;

    err = av_frame_get_buffer(frame, 0);
    if (err < 0)
        return err;

    /* Fill the data for each channel. */
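    /* Each channel i carries a tone of (i + 1) cycles per FRAME_SIZE samples,
     * i.e. roughly 47 * (i + 1) Hz at the 48 kHz input sample rate. */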
    for (i = 0; i < 5; i++) {
        float *data = (float*)frame->extended_data[i];

        for (j = 0; j < frame->nb_samples; j++)
            data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
    }

    return 0;
}

int main(int argc, char *argv[])
{
    struct AVMD5 *md5;
    AVFilterGraph *graph;
    AVFilterContext *src, *sink;
    AVFrame *frame;
    char errstr[1024];
    float duration;
    int err, nb_frames, i;

    if (argc < 2) {
        fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
        return 1;
    }

    duration  = atof(argv[1]);
    nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
    if (nb_frames <= 0) {
        fprintf(stderr, "Invalid duration: %s\n", argv[1]);
        return 1;
    }

    /* Allocate the frame we will be using to store the data. */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Error allocating the frame\n");
        return 1;
    }

    md5 = av_md5_alloc();
    if (!md5) {
        fprintf(stderr, "Error allocating the MD5 context\n");
        return 1;
    }

    /* Set up the filtergraph. */
    err = init_filter_graph(&graph, &src, &sink);
    if (err < 0) {
        fprintf(stderr, "Unable to init filter graph:");
        goto fail;
    }

    /* the main filtering loop */
    for (i = 0; i < nb_frames; i++) {
        /* get an input frame to be filtered */
        err = get_input(frame, i);
        if (err < 0) {
            fprintf(stderr, "Error generating input frame:");
            goto fail;
        }

        /* Send the frame to the input of the filtergraph. */
        err = av_buffersrc_add_frame(src, frame);
        if (err < 0) {
            av_frame_unref(frame);
            fprintf(stderr, "Error submitting the frame to the filtergraph:");
            goto fail;
        }

        /* Get all the filtered output that is available. */
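        /* av_buffersink_get_frame() returns AVERROR(EAGAIN) when the sink
         * needs more input and AVERROR_EOF once the stream has ended; both
         * cases are checked right after this loop. */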
        while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
            /* now do something with our filtered frame */
            err = process_output(md5, frame);
            if (err < 0) {
                fprintf(stderr, "Error processing the filtered frame:");
                goto fail;
            }
            av_frame_unref(frame);
        }

        if (err == AVERROR(EAGAIN)) {
            /* Need to feed more frames in. */
            continue;
        } else if (err == AVERROR_EOF) {
            /* Nothing more to do, finish. */
            break;
        } else if (err < 0) {
            /* An error occurred. */
            fprintf(stderr, "Error filtering the data:");
            goto fail;
        }
    }

    avfilter_graph_free(&graph);
    av_frame_free(&frame);
    av_freep(&md5);

    return 0;

fail:
    av_strerror(err, errstr, sizeof(errstr));
    fprintf(stderr, "%s\n", errstr);
    return 1;
}