/*
 * copyright (c) 2013 Andrew Kelley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavfilter audio filtering API usage example
 * @example filter_audio.c
 *
 * This example generates sine wave audio, passes it through a simple filter
 * chain, and then computes the MD5 checksum of the output data.
 *
 * The filter chain it uses is:
 * (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
 *
 * abuffer: This provides the endpoint where you can feed the decoded samples.
 * volume: In this example we hardcode it to 0.90.
 * aformat: This converts the samples to the sample rate, channel layout,
 *          and sample format required by the audio device.
 * abuffersink: This provides the endpoint where you can read the samples after
 *              they have passed through the filter chain.
 */
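
/*
 * Build/run sketch (the exact pkg-config package names are an assumption;
 * adjust them to your FFmpeg installation):
 *
 *   gcc filter_audio.c -o filter_audio \
 *       $(pkg-config --cflags --libs libavfilter libavutil) -lm
 *   ./filter_audio 1.5
 *
 * The program prints the MD5 checksum of each plane of every filtered frame
 * to stdout.
 */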

#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include <libavutil/channel_layout.h>
#include <libavutil/md5.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>

#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>

#define INPUT_SAMPLERATE     48000
#define INPUT_FORMAT         AV_SAMPLE_FMT_FLTP
#define INPUT_CHANNEL_LAYOUT (AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT0

#define VOLUME_VAL 0.90

static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
                             AVFilterContext **sink)
{
    AVFilterGraph *filter_graph;
    AVFilterContext *abuffer_ctx;
    const AVFilter  *abuffer;
    AVFilterContext *volume_ctx;
    const AVFilter  *volume;
    AVFilterContext *aformat_ctx;
    const AVFilter  *aformat;
    AVFilterContext *abuffersink_ctx;
    const AVFilter  *abuffersink;

    AVDictionary *options_dict = NULL;
    char options_str[1024];
    char ch_layout[64];

    int err;

    /* Create a new filtergraph, which will contain all the filters. */
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        fprintf(stderr, "Unable to create filter graph.\n");
        return AVERROR(ENOMEM);
    }

    /* Create the abuffer filter;
     * it will be used for feeding the data into the graph. */
    abuffer = avfilter_get_by_name("abuffer");
    if (!abuffer) {
        fprintf(stderr, "Could not find the abuffer filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
    if (!abuffer_ctx) {
        fprintf(stderr, "Could not allocate the abuffer instance.\n");
        return AVERROR(ENOMEM);
    }

    /* Set the filter options through the AVOptions API. */
    av_channel_layout_describe(&INPUT_CHANNEL_LAYOUT, ch_layout, sizeof(ch_layout));
    av_opt_set    (abuffer_ctx, "channel_layout", ch_layout,                            AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (abuffer_ctx, "sample_fmt",     av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
    av_opt_set_q  (abuffer_ctx, "time_base",      (AVRational){ 1, INPUT_SAMPLERATE },  AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_ctx, "sample_rate",    INPUT_SAMPLERATE,                     AV_OPT_SEARCH_CHILDREN);
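    /* Note that each setter above matches the option's type: strings for
     * channel_layout and sample_fmt, an AVRational for time_base and an
     * integer for sample_rate. */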

    /* Now initialize the filter; we pass NULL options, since we have already
     * set all the options above. */
    err = avfilter_init_str(abuffer_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffer filter.\n");
        return err;
    }

    /* Create the volume filter. */
    volume = avfilter_get_by_name("volume");
    if (!volume) {
        fprintf(stderr, "Could not find the volume filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
    if (!volume_ctx) {
        fprintf(stderr, "Could not allocate the volume instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A different way of passing the options is as key/value pairs in a
     * dictionary. */
    av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
    err = avfilter_init_dict(volume_ctx, &options_dict);
    av_dict_free(&options_dict);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the volume filter.\n");
        return err;
    }

    /* Create the aformat filter;
     * it ensures that the output is of the format we want. */
    aformat = avfilter_get_by_name("aformat");
    if (!aformat) {
        fprintf(stderr, "Could not find the aformat filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
    if (!aformat_ctx) {
        fprintf(stderr, "Could not allocate the aformat instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A third way of passing the options is in a string of the form
     * key1=value1:key2=value2.... */
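    /* With the values used below the string expands to
     * "sample_fmts=s16:sample_rates=44100:channel_layouts=stereo". */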
    snprintf(options_str, sizeof(options_str),
             "sample_fmts=%s:sample_rates=%d:channel_layouts=stereo",
             av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100);
    err = avfilter_init_str(aformat_ctx, options_str);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
        return err;
    }

    /* Finally create the abuffersink filter;
     * it will be used to get the filtered data out of the graph. */
    abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffersink) {
        fprintf(stderr, "Could not find the abuffersink filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
    if (!abuffersink_ctx) {
        fprintf(stderr, "Could not allocate the abuffersink instance.\n");
        return AVERROR(ENOMEM);
    }

    /* This filter takes no options. */
    err = avfilter_init_str(abuffersink_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffersink instance.\n");
        return err;
    }

    /* Connect the filters;
     * in this simple case the filters just form a linear chain. */
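    /* Each of these filters has a single input and a single output pad,
     * so pad index 0 is used on both sides of every link. */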
    err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
    if (err >= 0)
        err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
    if (err >= 0)
        err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
    if (err < 0) {
        fprintf(stderr, "Error connecting filters\n");
        return err;
    }

    /* Configure the graph. */
    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
        return err;
    }

    *graph = filter_graph;
    *src   = abuffer_ctx;
    *sink  = abuffersink_ctx;

    return 0;
}

/* Do something useful with the filtered data: this simple
 * example just prints the MD5 checksum of each plane to stdout. */
static int process_output(struct AVMD5 *md5, AVFrame *frame)
{
    int planar     = av_sample_fmt_is_planar(frame->format);
    int channels   = frame->ch_layout.nb_channels;
    int planes     = planar ? channels : 1;
    int bps        = av_get_bytes_per_sample(frame->format);
    int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
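    /* For planar formats each extended_data[] pointer holds the samples of a
     * single channel; for packed formats extended_data[0] holds all channels
     * interleaved, so the single "plane" covers every channel. */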
    int i, j;

    for (i = 0; i < planes; i++) {
        uint8_t checksum[16];

        av_md5_init(md5);
        av_md5_sum(checksum, frame->extended_data[i], plane_size);

        fprintf(stdout, "plane %d: 0x", i);
        for (j = 0; j < sizeof(checksum); j++)
            fprintf(stdout, "%02X", checksum[j]);
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n");

    return 0;
}

/* Construct a frame of audio data to be filtered;
 * this simple example just synthesizes a sine wave. */
static int get_input(AVFrame *frame, int frame_num)
{
    int err, i, j;

#define FRAME_SIZE 1024

    /* Set up the frame properties and allocate the buffer for the data. */
    frame->sample_rate = INPUT_SAMPLERATE;
    frame->format      = INPUT_FORMAT;
    av_channel_layout_copy(&frame->ch_layout, &INPUT_CHANNEL_LAYOUT);
    frame->nb_samples  = FRAME_SIZE;
    frame->pts         = frame_num * FRAME_SIZE;

    err = av_frame_get_buffer(frame, 0);
    if (err < 0)
        return err;

    /* Fill the data for each channel. */
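    /* The 5 planes match the 5.0 input layout; channel i carries a sine at
     * (i + 1) * INPUT_SAMPLERATE / FRAME_SIZE Hz, i.e. harmonics of roughly
     * 46.9 Hz. */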
    for (i = 0; i < 5; i++) {
        float *data = (float*)frame->extended_data[i];

        for (j = 0; j < frame->nb_samples; j++)
            data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
    }

    return 0;
}

int main(int argc, char *argv[])
{
    struct AVMD5 *md5;
    AVFilterGraph *graph;
    AVFilterContext *src, *sink;
    AVFrame *frame;
    char errstr[1024];
    float duration;
    int err, nb_frames, i;

    if (argc < 2) {
        fprintf(stderr, "Usage: %s <duration in seconds>\n", argv[0]);
        return 1;
    }

    duration  = atof(argv[1]);
    nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
    if (nb_frames <= 0) {
        fprintf(stderr, "Invalid duration: %s\n", argv[1]);
        return 1;
    }

    /* Allocate the frame we will be using to store the data. */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Error allocating the frame\n");
        return 1;
    }

    md5 = av_md5_alloc();
    if (!md5) {
        fprintf(stderr, "Error allocating the MD5 context\n");
        return 1;
    }

    /* Set up the filtergraph. */
    err = init_filter_graph(&graph, &src, &sink);
    if (err < 0) {
        fprintf(stderr, "Unable to init filter graph:");
        goto fail;
    }

    /* the main filtering loop */
    for (i = 0; i < nb_frames; i++) {
        /* get an input frame to be filtered */
        err = get_input(frame, i);
        if (err < 0) {
            fprintf(stderr, "Error generating input frame:");
            goto fail;
        }

        /* Send the frame to the input of the filtergraph. */
        err = av_buffersrc_add_frame(src, frame);
        if (err < 0) {
            av_frame_unref(frame);
            fprintf(stderr, "Error submitting the frame to the filtergraph:");
            goto fail;
        }

        /* Get all the filtered output that is available. */
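        /* One input frame can produce zero or more output frames, so keep
         * reading from the sink until it reports EAGAIN or EOF. */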
        while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
            /* now do something with our filtered frame */
            err = process_output(md5, frame);
            if (err < 0) {
                fprintf(stderr, "Error processing the filtered frame:");
                goto fail;
            }
            av_frame_unref(frame);
        }

        if (err == AVERROR(EAGAIN)) {
            /* Need to feed more frames in. */
            continue;
        } else if (err == AVERROR_EOF) {
            /* Nothing more to do, finish. */
            break;
        } else if (err < 0) {
            /* An error occurred. */
            fprintf(stderr, "Error filtering the data:");
            goto fail;
        }
    }

    avfilter_graph_free(&graph);
    av_frame_free(&frame);
    av_freep(&md5);

    return 0;

fail:
    av_strerror(err, errstr, sizeof(errstr));
    fprintf(stderr, "%s\n", errstr);
    return 1;
}