FFmpeg
ffwavesynth.c
/*
 * Wavesynth pseudo-codec
 * Copyright (c) 2011 Nicolas George
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "avcodec.h"
#include "internal.h"


#define SIN_BITS 14
#define WS_MAX_CHANNELS 32
#define INF_TS 0x7FFFFFFFFFFFFFFF

#define PINK_UNIT 128

/*
   Format of the extradata and packets

   THIS INFORMATION IS NOT PART OF THE PUBLIC API OR ABI.
   IT CAN CHANGE WITHOUT NOTIFICATION.

   All numbers are in little endian.

   The codec extradata defines a set of intervals with uniform content.
   Overlapping intervals are added together.

   extradata:
       uint32      number of intervals
       ...         intervals

   interval:
       int64       start timestamp; time_base must be 1/sample_rate;
                   start timestamps must be in ascending order
       int64       end timestamp
       uint32      type
       uint32      channels mask
       ...         additional information, depends on type

   sine interval (type fourcc "SINE"):
       int32       start frequency, in 1/(1<<16) Hz
       int32       end frequency
       int32       start amplitude, 1<<16 is the full amplitude
       int32       end amplitude
       uint32      start phase, 0 is sin(0), 0x20000000 is sin(pi/2), etc.;
                   n | (1<<31) means to match the phase of previous channel #n

   pink noise interval (type fourcc "NOIS"):
       int32       start amplitude
       int32       end amplitude

   The input packets encode the time and duration of the requested segment.

   packet:
       int64       start timestamp
       int32       duration

*/

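/* Illustrative sketch, not part of the original file: one way a test
 * program or demuxer might serialize the byte layout documented above,
 * assuming hypothetical helpers example_write_sine_extradata() and
 * example_write_packet().  The timestamps, frequency and channel mask
 * below are arbitrary example values; buf must hold the 48 extradata
 * bytes (4 + 24 + 20) written here.  Kept disabled. */
#if 0
static void example_write_sine_extradata(uint8_t *buf, int sample_rate)
{
    AV_WL32(buf +  0, 1);                      /* number of intervals */
    AV_WL64(buf +  4, 0);                      /* start ts, in 1/sample_rate units */
    AV_WL64(buf + 12, sample_rate);            /* end ts: one second later */
    AV_WL32(buf + 20, MKTAG('S','I','N','E')); /* interval type */
    AV_WL32(buf + 24, 1);                      /* channels mask: channel 0 only */
    AV_WL32(buf + 28, 440 << 16);              /* start frequency, in 1/(1<<16) Hz */
    AV_WL32(buf + 32, 440 << 16);              /* end frequency */
    AV_WL32(buf + 36, 1 << 16);                /* start amplitude: full scale */
    AV_WL32(buf + 40, 1 << 16);                /* end amplitude */
    AV_WL32(buf + 44, 0);                      /* start phase: sin(0) */
}

static void example_write_packet(uint8_t buf[12], int64_t start_ts, int32_t duration)
{
    AV_WL64(buf + 0, start_ts); /* start timestamp, same time base */
    AV_WL32(buf + 8, duration); /* number of samples to synthesize */
}
#endif
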
enum ws_interval_type {
    WS_SINE  = MKTAG('S','I','N','E'),
    WS_NOISE = MKTAG('N','O','I','S'),
};

struct ws_interval {
    int64_t ts_start, ts_end;
    uint64_t phi0, dphi0, ddphi;
    uint64_t amp0, damp;
    uint64_t phi, dphi, amp;
    uint32_t channels;
    enum ws_interval_type type;
    int next;
};

struct wavesynth_context {
    int64_t cur_ts;
    int64_t next_ts;
    int32_t *sin;
    struct ws_interval *inter;
    uint32_t dither_state;
    uint32_t pink_state;
    int32_t pink_pool[PINK_UNIT];
    unsigned pink_need, pink_pos;
    int nb_inter;
    int cur_inter;
    int next_inter;
};

#define LCG_A 1284865837
#define LCG_C 4150755663
#define LCG_AI 849225893 /* A*AI = 1 [mod 1<<32] */

static uint32_t lcg_next(uint32_t *s)
{
    *s = *s * LCG_A + LCG_C;
    return *s;
}

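/* Advance the LCG state by dt steps in O(log dt) time: the affine map
 * x -> a*x + c is repeatedly composed with itself (square-and-multiply,
 * implicitly modulo 1<<32), and the doubled steps selected by the set
 * bits of dt are applied to the state. */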
static void lcg_seek(uint32_t *s, uint32_t dt)
{
    uint32_t a, c, t = *s;

    a = LCG_A;
    c = LCG_C;
    while (dt) {
        if (dt & 1)
            t = a * t + c;
        c *= a + 1; /* coefficients for a double step */
        a *= a;
        dt >>= 1;
    }
    *s = t;
}

/* Emulate pink noise by summing white noise at the sampling frequency,
 * white noise at half the sampling frequency (each value taken twice),
 * etc., with a total of 8 octaves.
 * This is known as the Voss-McCartney algorithm. */

static void pink_fill(struct wavesynth_context *ws)
{
    int32_t vt[7] = { 0 }, v = 0;
    int i, j;

    ws->pink_pos = 0;
    if (!ws->pink_need)
        return;
    for (i = 0; i < PINK_UNIT; i++) {
        for (j = 0; j < 7; j++) {
            if ((i >> j) & 1)
                break;
            v -= vt[j];
            vt[j] = (int32_t)lcg_next(&ws->pink_state) >> 3;
            v += vt[j];
        }
        ws->pink_pool[i] = v + ((int32_t)lcg_next(&ws->pink_state) >> 3);
    }
    lcg_next(&ws->pink_state); /* so we use exactly 256 steps */
}

/**
 * @return (1<<64) * a / b, without overflow, if a < b
 */
static uint64_t frac64(uint64_t a, uint64_t b)
{
    uint64_t r = 0;
    int i;

    if (b < (uint64_t)1 << 32) { /* b small, use two 32-bits steps */
        a <<= 32;
        return ((a / b) << 32) | ((a % b) << 32) / b;
    }
    if (b < (uint64_t)1 << 48) { /* b medium, use four 16-bits steps */
        for (i = 0; i < 4; i++) {
            a <<= 16;
            r = (r << 16) | (a / b);
            a %= b;
        }
        return r;
    }
    for (i = 63; i >= 0; i--) {
        if (a >= (uint64_t)1 << 63 || a << 1 >= b) {
            r |= (uint64_t)1 << i;
            a = (a << 1) - b;
        } else {
            a <<= 1;
        }
    }
    return r;
}

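/* Return the phase accumulator of interval in at absolute timestamp ts:
 * phi0 + dt*dphi0 + dt*(dt-1)/2 * ddphi, i.e. the running sum of a
 * linearly changing phase increment, computed without 64-bit overflow. */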
static uint64_t phi_at(struct ws_interval *in, int64_t ts)
{
    uint64_t dt = ts - (uint64_t)in->ts_start;
    uint64_t dt2 = dt & 1 ? /* dt * (dt - 1) / 2 without overflow */
                   dt * ((dt - 1) >> 1) : (dt >> 1) * (dt - 1);
    return in->phi0 + dt * in->dphi0 + dt2 * in->ddphi;
}

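/* Reposition the synthesizer at timestamp ts: rebuild the linked list of
 * intervals active at ts, recompute their running phase, phase increment
 * and amplitude, and reseed the dither and pink-noise generators to the
 * states they would have after decoding sequentially up to ts. */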
static void wavesynth_seek(struct wavesynth_context *ws, int64_t ts)
{
    int *last, i;
    struct ws_interval *in;

    last = &ws->cur_inter;
    for (i = 0; i < ws->nb_inter; i++) {
        in = &ws->inter[i];
        if (ts < in->ts_start)
            break;
        if (ts >= in->ts_end)
            continue;
        *last = i;
        last = &in->next;
        in->phi  = phi_at(in, ts);
        in->dphi = in->dphi0 + (ts - in->ts_start) * in->ddphi;
        in->amp  = in->amp0  + (ts - in->ts_start) * in->damp;
    }
    ws->next_inter = i;
    ws->next_ts = i < ws->nb_inter ? ws->inter[i].ts_start : INF_TS;
    *last = -1;
    lcg_seek(&ws->dither_state, (uint32_t)ts - (uint32_t)ws->cur_ts);
    if (ws->pink_need) {
        uint64_t pink_ts_cur  = (ws->cur_ts + (uint64_t)PINK_UNIT - 1) & ~(PINK_UNIT - 1);
        uint64_t pink_ts_next = ts & ~(PINK_UNIT - 1);
        int pos = ts & (PINK_UNIT - 1);
        lcg_seek(&ws->pink_state, (uint32_t)(pink_ts_next - pink_ts_cur) * 2);
        if (pos) {
            pink_fill(ws);
            ws->pink_pos = pos;
        } else {
            ws->pink_pos = PINK_UNIT;
        }
    }
    ws->cur_ts = ts;
}

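/* Parse the codec extradata into the ws->inter array, validating interval
 * ordering and sizes and precomputing the per-interval phase and amplitude
 * increments used by the synthesis loop. */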
static int wavesynth_parse_extradata(AVCodecContext *avc)
{
    struct wavesynth_context *ws = avc->priv_data;
    struct ws_interval *in;
    uint8_t *edata, *edata_end;
    int32_t f1, f2, a1, a2;
    uint32_t phi;
    int64_t dphi1, dphi2, dt, cur_ts = -0x8000000000000000;
    int i;

    if (avc->extradata_size < 4)
        return AVERROR(EINVAL);
    edata = avc->extradata;
    edata_end = edata + avc->extradata_size;
    ws->nb_inter = AV_RL32(edata);
    edata += 4;
    if (ws->nb_inter < 0 || (edata_end - edata) / 24 < ws->nb_inter)
        return AVERROR(EINVAL);
    ws->inter = av_calloc(ws->nb_inter, sizeof(*ws->inter));
    if (!ws->inter)
        return AVERROR(ENOMEM);
    for (i = 0; i < ws->nb_inter; i++) {
        in = &ws->inter[i];
        if (edata_end - edata < 24)
            return AVERROR(EINVAL);
        in->ts_start = AV_RL64(edata +  0);
        in->ts_end   = AV_RL64(edata +  8);
        in->type     = AV_RL32(edata + 16);
        in->channels = AV_RL32(edata + 20);
        edata += 24;
        if (in->ts_start < cur_ts ||
            in->ts_end <= in->ts_start ||
            (uint64_t)in->ts_end - in->ts_start > INT64_MAX
        )
            return AVERROR(EINVAL);
        cur_ts = in->ts_start;
        dt = in->ts_end - in->ts_start;
        switch (in->type) {
        case WS_SINE:
            if (edata_end - edata < 20 || avc->sample_rate <= 0)
                return AVERROR(EINVAL);
            f1  = AV_RL32(edata +  0);
            f2  = AV_RL32(edata +  4);
            a1  = AV_RL32(edata +  8);
            a2  = AV_RL32(edata + 12);
            phi = AV_RL32(edata + 16);
            edata += 20;
            dphi1 = frac64(f1, (int64_t)avc->sample_rate << 16);
            dphi2 = frac64(f2, (int64_t)avc->sample_rate << 16);
            in->dphi0 = dphi1;
            in->ddphi = (int64_t)(dphi2 - (uint64_t)dphi1) / dt;
            if (phi & 0x80000000) {
                phi &= ~0x80000000;
                if (phi >= i)
                    return AVERROR(EINVAL);
                in->phi0 = phi_at(&ws->inter[phi], in->ts_start);
            } else {
                in->phi0 = (uint64_t)phi << 33;
            }
            break;
        case WS_NOISE:
            if (edata_end - edata < 8)
                return AVERROR(EINVAL);
            a1 = AV_RL32(edata + 0);
            a2 = AV_RL32(edata + 4);
            edata += 8;
            break;
        default:
            return AVERROR(EINVAL);
        }
        in->amp0 = (uint64_t)a1 << 32;
        in->damp = (int64_t)(((uint64_t)a2 << 32) - ((uint64_t)a1 << 32)) / dt;
    }
    if (edata != edata_end)
        return AVERROR(EINVAL);
    return 0;
}

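/* Standard decoder init: validate the channel count, parse the extradata,
 * build the 1<<SIN_BITS entry sine lookup table, seed the dither and
 * pink-noise generators and position the synthesizer at timestamp 0. */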
static av_cold int wavesynth_init(AVCodecContext *avc)
{
    struct wavesynth_context *ws = avc->priv_data;
    int i, r;

    if (avc->channels > WS_MAX_CHANNELS) {
        av_log(avc, AV_LOG_ERROR,
               "This implementation is limited to %d channels.\n",
               WS_MAX_CHANNELS);
        return AVERROR(EINVAL);
    }
    r = wavesynth_parse_extradata(avc);
    if (r < 0) {
        av_log(avc, AV_LOG_ERROR, "Invalid intervals definitions.\n");
        goto fail;
    }
    ws->sin = av_malloc(sizeof(*ws->sin) << SIN_BITS);
    if (!ws->sin) {
        r = AVERROR(ENOMEM);
        goto fail;
    }
    for (i = 0; i < 1 << SIN_BITS; i++)
        ws->sin[i] = floor(32767 * sin(2 * M_PI * i / (1 << SIN_BITS)));
    ws->dither_state = MKTAG('D','I','T','H');
    for (i = 0; i < ws->nb_inter; i++)
        ws->pink_need += ws->inter[i].type == WS_NOISE;
    ws->pink_state = MKTAG('P','I','N','K');
    ws->pink_pos = PINK_UNIT;
    wavesynth_seek(ws, 0);
    avc->sample_fmt = AV_SAMPLE_FMT_S16;
    return 0;

fail:
    av_freep(&ws->inter);
    av_freep(&ws->sin);
    return r;
}

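/* Synthesize one multichannel sample at timestamp ts into channels[]:
 * each active interval is accumulated on the channels selected by its
 * mask, expired intervals are unlinked from the active list, and a common
 * sub-LSB dither word is added to every touched channel before the caller
 * truncates the accumulators to 16 bits. */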
static void wavesynth_synth_sample(struct wavesynth_context *ws, int64_t ts,
                                   int32_t *channels)
{
    int32_t amp, *cv;
    unsigned val;
    struct ws_interval *in;
    int i, *last, pink;
    uint32_t c, all_ch = 0;

    i = ws->cur_inter;
    last = &ws->cur_inter;
    if (ws->pink_pos == PINK_UNIT)
        pink_fill(ws);
    pink = ws->pink_pool[ws->pink_pos++] >> 16;
    while (i >= 0) {
        in = &ws->inter[i];
        i = in->next;
        if (ts >= in->ts_end) {
            *last = i;
            continue;
        }
        last = &in->next;
        amp = in->amp >> 32;
        in->amp += in->damp;
        switch (in->type) {
        case WS_SINE:
            val = amp * (unsigned)ws->sin[in->phi >> (64 - SIN_BITS)];
            in->phi  += in->dphi;
            in->dphi += in->ddphi;
            break;
        case WS_NOISE:
            val = amp * (unsigned)pink;
            break;
        default:
            val = 0;
        }
        all_ch |= in->channels;
        for (c = in->channels, cv = channels; c; c >>= 1, cv++)
            if (c & 1)
                *cv += (unsigned)val;
    }
    val = (int32_t)lcg_next(&ws->dither_state) >> 16;
    for (c = all_ch, cv = channels; c; c >>= 1, cv++)
        if (c & 1)
            *cv += val;
}

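/* Append to the active list all intervals that start at or before ts and
 * have not yet ended, initializing their running phase, phase increment
 * and amplitude, and update next_ts to the next interval start. */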
static void wavesynth_enter_intervals(struct wavesynth_context *ws, int64_t ts)
{
    int *last, i;
    struct ws_interval *in;

    last = &ws->cur_inter;
    for (i = ws->cur_inter; i >= 0; i = ws->inter[i].next)
        last = &ws->inter[i].next;
    for (i = ws->next_inter; i < ws->nb_inter; i++) {
        in = &ws->inter[i];
        if (ts < in->ts_start)
            break;
        if (ts >= in->ts_end)
            continue;
        *last = i;
        last = &in->next;
        in->phi  = in->phi0;
        in->dphi = in->dphi0;
        in->amp  = in->amp0;
    }
    ws->next_inter = i;
    ws->next_ts = i < ws->nb_inter ? ws->inter[i].ts_start : INF_TS;
    *last = -1;
}

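/* Decode one packet: 8 bytes of little-endian start timestamp followed by
 * 4 bytes of duration.  Seek if the requested timestamp is not the current
 * one, then synthesize duration interleaved 16-bit samples. */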
static int wavesynth_decode(AVCodecContext *avc, void *rframe, int *rgot_frame,
                            AVPacket *packet)
{
    struct wavesynth_context *ws = avc->priv_data;
    AVFrame *frame = rframe;
    int64_t ts;
    int duration;
    int s, c, r;
    int16_t *pcm;
    int32_t channels[WS_MAX_CHANNELS];

    *rgot_frame = 0;
    if (packet->size != 12)
        return AVERROR_INVALIDDATA;
    ts = AV_RL64(packet->data);
    if (ts != ws->cur_ts)
        wavesynth_seek(ws, ts);
    duration = AV_RL32(packet->data + 8);
    if (duration <= 0)
        return AVERROR(EINVAL);
    frame->nb_samples = duration;
    r = ff_get_buffer(avc, frame, 0);
    if (r < 0)
        return r;
    pcm = (int16_t *)frame->data[0];
    for (s = 0; s < duration; s++, ts += (uint64_t)1) {
        memset(channels, 0, avc->channels * sizeof(*channels));
        if (ts >= ws->next_ts)
            wavesynth_enter_intervals(ws, ts);
        wavesynth_synth_sample(ws, ts, channels);
        for (c = 0; c < avc->channels; c++)
            *(pcm++) = channels[c] >> 16;
    }
    ws->cur_ts += (uint64_t)duration;
    *rgot_frame = 1;
    return packet->size;
}


static av_cold int wavesynth_close(AVCodecContext *avc)
{
    struct wavesynth_context *ws = avc->priv_data;

    av_freep(&ws->sin);
    av_freep(&ws->inter);
    return 0;
}

AVCodec ff_ffwavesynth_decoder = {
    .name           = "wavesynth",
    .long_name      = NULL_IF_CONFIG_SMALL("Wave synthesis pseudo-codec"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_FFWAVESYNTH,
    .priv_data_size = sizeof(struct wavesynth_context),
    .init           = wavesynth_init,
    .close          = wavesynth_close,
    .decode         = wavesynth_decode,
    .capabilities   = AV_CODEC_CAP_DR1,
};