/*
 * WavPack lossless audio decoder
 * Copyright (c) 2006,2011 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/channel_layout.h"

#define BITSTREAM_READER_LE
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
#include "internal.h"
#include "thread.h"
#include "unary.h"
#include "wavpack.h"

/**
 * @file
 * WavPack lossless audio decoder
 */

typedef struct SavedContext {
    int offset;
    int size;
    int bits_used;
    uint32_t crc;
} SavedContext;

typedef struct WavpackFrameContext {
    AVCodecContext *avctx;
    int frame_flags;
    int stereo, stereo_in;
    int joint;
    uint32_t CRC;
    GetBitContext gb;
    int got_extra_bits;
    uint32_t crc_extra_bits;
    GetBitContext gb_extra_bits;
    int data_size; // in bits
    int samples;
    int terms;
    Decorr decorr[MAX_TERMS];
    int zero, one, zeroes;
    int extra_bits;
    int and, or, shift;
    int post_shift;
    int hybrid, hybrid_bitrate;
    int hybrid_maxclip, hybrid_minclip;
    int float_flag;
    int float_shift;
    int float_max_exp;
    WvChannel ch[2];
    int pos;
    SavedContext sc, extra_sc;
} WavpackFrameContext;

#define WV_MAX_FRAME_DECODERS 14

typedef struct WavpackContext {
    AVCodecContext *avctx;

    WavpackFrameContext *fdec[WV_MAX_FRAME_DECODERS];
    int fdec_num;

    int block;
    int samples;
    int ch_offset;
} WavpackContext;

#define LEVEL_DECAY(a) (((a) + 0x80) >> 8)

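/*
 * Read the low-order part of a residual that has one of k possible values.
 * Values below e = 2^(p+1) - k - 1 are coded in p bits, the remaining ones
 * in p + 1 bits, which keeps the code complete for non-power-of-two ranges.
 */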
static av_always_inline unsigned get_tail(GetBitContext *gb, int k)
{
    int p, e, res;

    if (k < 1)
        return 0;
    p   = av_log2(k);
    e   = (1 << (p + 1)) - k - 1;
    res = get_bitsz(gb, p);
    if (res >= e)
        res = (res << 1) - e + get_bits1(gb);
    return res;
}

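/*
 * Update the per-channel error limits used in hybrid (lossy) mode from the
 * bitrate accumulators, rebalancing the bit budget between the two channels
 * according to their relative slow-level estimates.
 */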
static int update_error_limit(WavpackFrameContext *ctx)
{
    int i, br[2], sl[2];

    for (i = 0; i <= ctx->stereo_in; i++) {
        if (ctx->ch[i].bitrate_acc > UINT_MAX - ctx->ch[i].bitrate_delta)
            return AVERROR_INVALIDDATA;
        ctx->ch[i].bitrate_acc += ctx->ch[i].bitrate_delta;
        br[i] = ctx->ch[i].bitrate_acc >> 16;
        sl[i] = LEVEL_DECAY(ctx->ch[i].slow_level);
    }
    if (ctx->stereo_in && ctx->hybrid_bitrate) {
        int balance = (sl[1] - sl[0] + br[1] + 1) >> 1;
        if (balance > br[0]) {
            br[1] = br[0] * 2;
            br[0] = 0;
        } else if (-balance > br[0]) {
            br[0] *= 2;
            br[1]  = 0;
        } else {
            br[1] = br[0] + balance;
            br[0] = br[0] - balance;
        }
    }
    for (i = 0; i <= ctx->stereo_in; i++) {
        if (ctx->hybrid_bitrate) {
            if (sl[i] - br[i] > -0x100)
                ctx->ch[i].error_limit = wp_exp2(sl[i] - br[i] + 0x100);
            else
                ctx->ch[i].error_limit = 0;
        } else {
            ctx->ch[i].error_limit = wp_exp2(br[i]);
        }
    }

    return 0;
}

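/*
 * Decode one residual for the given channel: handle runs of zero samples,
 * read the unary-coded bucket index, update the three running medians and,
 * in hybrid mode, narrow the value down by bisection within the error limit.
 * On bitstream exhaustion *last is set and 0 is returned.
 */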
static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
                        int channel, int *last)
{
    int t, t2;
    int sign, base, add, ret;
    WvChannel *c = &ctx->ch[channel];

    *last = 0;

    if ((ctx->ch[0].median[0] < 2U) && (ctx->ch[1].median[0] < 2U) &&
        !ctx->zero && !ctx->one) {
        if (ctx->zeroes) {
            ctx->zeroes--;
            if (ctx->zeroes) {
                c->slow_level -= LEVEL_DECAY(c->slow_level);
                return 0;
            }
        } else {
            t = get_unary_0_33(gb);
            if (t >= 2) {
                if (t >= 32 || get_bits_left(gb) < t - 1)
                    goto error;
                t = get_bits_long(gb, t - 1) | (1 << (t - 1));
            } else {
                if (get_bits_left(gb) < 0)
                    goto error;
            }
            ctx->zeroes = t;
            if (ctx->zeroes) {
                memset(ctx->ch[0].median, 0, sizeof(ctx->ch[0].median));
                memset(ctx->ch[1].median, 0, sizeof(ctx->ch[1].median));
                c->slow_level -= LEVEL_DECAY(c->slow_level);
                return 0;
            }
        }
    }

    if (ctx->zero) {
        t         = 0;
        ctx->zero = 0;
    } else {
        t = get_unary_0_33(gb);
        if (get_bits_left(gb) < 0)
            goto error;
        if (t == 16) {
            t2 = get_unary_0_33(gb);
            if (t2 < 2) {
                if (get_bits_left(gb) < 0)
                    goto error;
                t += t2;
            } else {
                if (t2 >= 32 || get_bits_left(gb) < t2 - 1)
                    goto error;
                t += get_bits_long(gb, t2 - 1) | (1 << (t2 - 1));
            }
        }

        if (ctx->one) {
            ctx->one = t & 1;
            t        = (t >> 1) + 1;
        } else {
            ctx->one = t & 1;
            t      >>= 1;
        }
        ctx->zero = !ctx->one;
    }

    if (ctx->hybrid && !channel) {
        if (update_error_limit(ctx) < 0)
            goto error;
    }

    if (!t) {
        base = 0;
        add  = GET_MED(0) - 1;
        DEC_MED(0);
    } else if (t == 1) {
        base = GET_MED(0);
        add  = GET_MED(1) - 1;
        INC_MED(0);
        DEC_MED(1);
    } else if (t == 2) {
        base = GET_MED(0) + GET_MED(1);
        add  = GET_MED(2) - 1;
        INC_MED(0);
        INC_MED(1);
        DEC_MED(2);
    } else {
        base = GET_MED(0) + GET_MED(1) + GET_MED(2) * (t - 2U);
        add  = GET_MED(2) - 1;
        INC_MED(0);
        INC_MED(1);
        INC_MED(2);
    }
    if (!c->error_limit) {
        if (add >= 0x2000000U) {
            av_log(ctx->avctx, AV_LOG_ERROR, "k %d is too large\n", add);
            goto error;
        }
        ret = base + get_tail(gb, add);
        if (get_bits_left(gb) <= 0)
            goto error;
    } else {
        int mid = (base * 2U + add + 1) >> 1;
        while (add > c->error_limit) {
            if (get_bits_left(gb) <= 0)
                goto error;
            if (get_bits1(gb)) {
                add -= (mid - (unsigned)base);
                base = mid;
            } else
                add = mid - (unsigned)base - 1;
            mid = (base * 2U + add + 1) >> 1;
        }
        ret = mid;
    }
    sign = get_bits1(gb);
    if (ctx->hybrid_bitrate)
        c->slow_level += wp_log2(ret) - LEVEL_DECAY(c->slow_level);
    return sign ? ~ret : ret;

error:
    ret = get_bits_left(gb);
    if (ret <= 0) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Too few bits (%d) left\n", ret);
    }
    *last = 1;
    return 0;
}

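/*
 * Expand a decoded residual to the output integer range: append any stored
 * extra bits, undo the and/or/shift pre-scaling, clip in hybrid mode and
 * apply the final post shift.
 */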
static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc,
                                       unsigned S)
{
    unsigned bit;

    if (s->extra_bits) {
        S *= 1 << s->extra_bits;

        if (s->got_extra_bits &&
            get_bits_left(&s->gb_extra_bits) >= s->extra_bits) {
            S   |= get_bits_long(&s->gb_extra_bits, s->extra_bits);
            *crc = *crc * 9 + (S & 0xffff) * 3 + ((unsigned)S >> 16);
        }
    }

    bit = (S & s->and) | s->or;
    bit = ((S + bit) << s->shift) - bit;

    if (s->hybrid)
        bit = av_clip(bit, s->hybrid_minclip, s->hybrid_maxclip);

    return bit << s->post_shift;
}

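/*
 * Reconstruct an IEEE 754 single-precision value from the decoded integer
 * and the FLOATINFO side data, pulling discarded mantissa/exponent/sign bits
 * from the EXTRABITS stream when they were transmitted.
 */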
static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S)
{
    union {
        float    f;
        uint32_t u;
    } value;

    unsigned int sign;
    int exp = s->float_max_exp;

    if (s->got_extra_bits) {
        const int max_bits  = 1 + 23 + 8 + 1;
        const int left_bits = get_bits_left(&s->gb_extra_bits);

        if (left_bits + 8 * AV_INPUT_BUFFER_PADDING_SIZE < max_bits)
            return 0.0;
    }

    if (S) {
        S  *= 1U << s->float_shift;
        sign = S < 0;
        if (sign)
            S = -(unsigned)S;
        if (S >= 0x1000000U) {
            if (s->got_extra_bits && get_bits1(&s->gb_extra_bits))
                S = get_bits(&s->gb_extra_bits, 23);
            else
                S = 0;
            exp = 255;
        } else if (exp) {
            int shift = 23 - av_log2(S);
            exp = s->float_max_exp;
            if (exp <= shift)
                shift = --exp;
            exp -= shift;

            if (shift) {
                S <<= shift;
                if ((s->float_flag & WV_FLT_SHIFT_ONES) ||
                    (s->got_extra_bits &&
                     (s->float_flag & WV_FLT_SHIFT_SAME) &&
                     get_bits1(&s->gb_extra_bits))) {
                    S |= (1 << shift) - 1;
                } else if (s->got_extra_bits &&
                           (s->float_flag & WV_FLT_SHIFT_SENT)) {
                    S |= get_bits(&s->gb_extra_bits, shift);
                }
            }
        } else {
            exp = s->float_max_exp;
        }
        S &= 0x7fffff;
    } else {
        sign = 0;
        exp  = 0;
        if (s->got_extra_bits && (s->float_flag & WV_FLT_ZERO_SENT)) {
            if (get_bits1(&s->gb_extra_bits)) {
                S = get_bits(&s->gb_extra_bits, 23);
                if (s->float_max_exp >= 25)
                    exp = get_bits(&s->gb_extra_bits, 8);
                sign = get_bits1(&s->gb_extra_bits);
            } else {
                if (s->float_flag & WV_FLT_ZERO_SIGN)
                    sign = get_bits1(&s->gb_extra_bits);
            }
        }
    }

    *crc = *crc * 27 + S * 9 + exp * 3 + sign;

    value.u = (sign << 31) | (exp << 23) | S;
    return value.f;
}

static void wv_reset_saved_context(WavpackFrameContext *s)
{
    s->pos    = 0;
    s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF;
}

static inline int wv_check_crc(WavpackFrameContext *s, uint32_t crc,
                               uint32_t crc_extra_bits)
{
    if (crc != s->CRC) {
        av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->got_extra_bits && crc_extra_bits != s->crc_extra_bits) {
        av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

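/*
 * Decode one stereo (or false-stereo) block: apply the decorrelation passes
 * to each L/R pair, undo joint stereo, update the running CRC and write the
 * samples in the requested planar output format.
 */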
static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb,
                                   void *dst_l, void *dst_r, const int type)
{
    int i, j, count = 0;
    int last, t;
    int A, B, L, L2, R, R2;
    int pos                 = s->pos;
    uint32_t crc            = s->sc.crc;
    uint32_t crc_extra_bits = s->extra_sc.crc;
    int16_t *dst16_l        = dst_l;
    int16_t *dst16_r        = dst_r;
    int32_t *dst32_l        = dst_l;
    int32_t *dst32_r        = dst_r;
    float *dstfl_l          = dst_l;
    float *dstfl_r          = dst_r;

    s->one = s->zero = s->zeroes = 0;
    do {
        L = wv_get_value(s, gb, 0, &last);
        if (last)
            break;
        R = wv_get_value(s, gb, 1, &last);
        if (last)
            break;
        for (i = 0; i < s->terms; i++) {
            t = s->decorr[i].value;
            if (t > 0) {
                if (t > 8) {
                    if (t & 1) {
                        A = 2U * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
                        B = 2U * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1];
                    } else {
                        A = (int)(3U * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1;
                        B = (int)(3U * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]) >> 1;
                    }
                    s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0];
                    s->decorr[i].samplesB[1] = s->decorr[i].samplesB[0];
                    j = 0;
                } else {
                    A = s->decorr[i].samplesA[pos];
                    B = s->decorr[i].samplesB[pos];
                    j = (pos + t) & 7;
                }
                if (type != AV_SAMPLE_FMT_S16P) {
                    L2 = L + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10);
                    R2 = R + ((s->decorr[i].weightB * (int64_t)B + 512) >> 10);
                } else {
                    L2 = L + (unsigned)((int)(s->decorr[i].weightA * (unsigned)A + 512) >> 10);
                    R2 = R + (unsigned)((int)(s->decorr[i].weightB * (unsigned)B + 512) >> 10);
                }
                if (A && L)
                    s->decorr[i].weightA -= ((((L ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
                if (B && R)
                    s->decorr[i].weightB -= ((((R ^ B) >> 30) & 2) - 1) * s->decorr[i].delta;
                s->decorr[i].samplesA[j] = L = L2;
                s->decorr[i].samplesB[j] = R = R2;
            } else if (t == -1) {
                if (type != AV_SAMPLE_FMT_S16P)
                    L2 = L + ((s->decorr[i].weightA * (int64_t)s->decorr[i].samplesA[0] + 512) >> 10);
                else
                    L2 = L + (unsigned)((int)(s->decorr[i].weightA * (unsigned)s->decorr[i].samplesA[0] + 512) >> 10);
                UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, s->decorr[i].samplesA[0], L);
                L = L2;
                if (type != AV_SAMPLE_FMT_S16P)
                    R2 = R + ((s->decorr[i].weightB * (int64_t)L2 + 512) >> 10);
                else
                    R2 = R + (unsigned)((int)(s->decorr[i].weightB * (unsigned)L2 + 512) >> 10);
                UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, L2, R);
                R = R2;
                s->decorr[i].samplesA[0] = R;
            } else {
                if (type != AV_SAMPLE_FMT_S16P)
                    R2 = R + ((s->decorr[i].weightB * (int64_t)s->decorr[i].samplesB[0] + 512) >> 10);
                else
                    R2 = R + (unsigned)((int)(s->decorr[i].weightB * (unsigned)s->decorr[i].samplesB[0] + 512) >> 10);
                UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, s->decorr[i].samplesB[0], R);
                R = R2;

                if (t == -3) {
                    R2 = s->decorr[i].samplesA[0];
                    s->decorr[i].samplesA[0] = R;
                }

                if (type != AV_SAMPLE_FMT_S16P)
                    L2 = L + ((s->decorr[i].weightA * (int64_t)R2 + 512) >> 10);
                else
                    L2 = L + (unsigned)((int)(s->decorr[i].weightA * (unsigned)R2 + 512) >> 10);
                UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, R2, L);
                L = L2;
                s->decorr[i].samplesB[0] = L;
            }
        }

        if (type == AV_SAMPLE_FMT_S16P) {
            if (FFABS((int64_t)L) + FFABS((int64_t)R) > (1 << 19)) {
                av_log(s->avctx, AV_LOG_ERROR, "sample %d %d too large\n", L, R);
                return AVERROR_INVALIDDATA;
            }
        }

        pos = (pos + 1) & 7;
        if (s->joint)
            L += (unsigned)(R -= (unsigned)(L >> 1));
        crc = (crc * 3 + L) * 3 + R;

        if (type == AV_SAMPLE_FMT_FLTP) {
            *dstfl_l++ = wv_get_value_float(s, &crc_extra_bits, L);
            *dstfl_r++ = wv_get_value_float(s, &crc_extra_bits, R);
        } else if (type == AV_SAMPLE_FMT_S32P) {
            *dst32_l++ = wv_get_value_integer(s, &crc_extra_bits, L);
            *dst32_r++ = wv_get_value_integer(s, &crc_extra_bits, R);
        } else {
            *dst16_l++ = wv_get_value_integer(s, &crc_extra_bits, L);
            *dst16_r++ = wv_get_value_integer(s, &crc_extra_bits, R);
        }
        count++;
    } while (!last && count < s->samples);

    wv_reset_saved_context(s);

    if (last && count < s->samples) {
        int size = av_get_bytes_per_sample(type);
        memset((uint8_t*)dst_l + count*size, 0, (s->samples-count)*size);
        memset((uint8_t*)dst_r + count*size, 0, (s->samples-count)*size);
    }

    if ((s->avctx->err_recognition & AV_EF_CRCCHECK) &&
        wv_check_crc(s, crc, crc_extra_bits))
        return AVERROR_INVALIDDATA;

    return 0;
}

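/*
 * Mono counterpart of wv_unpack_stereo(): a single decorrelation chain per
 * sample, with the same CRC bookkeeping and output conversion.
 */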
static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb,
                                 void *dst, const int type)
{
    int i, j, count = 0;
    int last, t;
    int A, S, T;
    int pos                 = s->pos;
    uint32_t crc            = s->sc.crc;
    uint32_t crc_extra_bits = s->extra_sc.crc;
    int16_t *dst16          = dst;
    int32_t *dst32          = dst;
    float *dstfl            = dst;

    s->one = s->zero = s->zeroes = 0;
    do {
        T = wv_get_value(s, gb, 0, &last);
        S = 0;
        if (last)
            break;
        for (i = 0; i < s->terms; i++) {
            t = s->decorr[i].value;
            if (t > 8) {
                if (t & 1)
                    A = 2U * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
                else
                    A = (int)(3U * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1;
                s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0];
                j = 0;
            } else {
                A = s->decorr[i].samplesA[pos];
                j = (pos + t) & 7;
            }
            if (type != AV_SAMPLE_FMT_S16P)
                S = T + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10);
            else
                S = T + (unsigned)((int)(s->decorr[i].weightA * (unsigned)A + 512) >> 10);
            if (A && T)
                s->decorr[i].weightA -= ((((T ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
            s->decorr[i].samplesA[j] = T = S;
        }
        pos = (pos + 1) & 7;
        crc = crc * 3 + S;

        if (type == AV_SAMPLE_FMT_FLTP) {
            *dstfl++ = wv_get_value_float(s, &crc_extra_bits, S);
        } else if (type == AV_SAMPLE_FMT_S32P) {
            *dst32++ = wv_get_value_integer(s, &crc_extra_bits, S);
        } else {
            *dst16++ = wv_get_value_integer(s, &crc_extra_bits, S);
        }
        count++;
    } while (!last && count < s->samples);

    wv_reset_saved_context(s);

    if (last && count < s->samples) {
        int size = av_get_bytes_per_sample(type);
        memset((uint8_t*)dst + count*size, 0, (s->samples-count)*size);
    }

    if (s->avctx->err_recognition & AV_EF_CRCCHECK) {
        int ret = wv_check_crc(s, crc, crc_extra_bits);
        if (ret < 0 && s->avctx->err_recognition & AV_EF_EXPLODE)
            return ret;
    }

    return 0;
}

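/*
 * Allocate one more per-block frame decoder context, up to
 * WV_MAX_FRAME_DECODERS.
 */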
static av_cold int wv_alloc_frame_context(WavpackContext *c)
{
    if (c->fdec_num == WV_MAX_FRAME_DECODERS)
        return -1;

    c->fdec[c->fdec_num] = av_mallocz(sizeof(**c->fdec));
    if (!c->fdec[c->fdec_num])
        return -1;
    c->fdec_num++;
    c->fdec[c->fdec_num - 1]->avctx = c->avctx;
    wv_reset_saved_context(c->fdec[c->fdec_num - 1]);

    return 0;
}

#if HAVE_THREADS
static int init_thread_copy(AVCodecContext *avctx)
{
    WavpackContext *s = avctx->priv_data;
    s->avctx = avctx;
    return 0;
}
#endif

static av_cold int wavpack_decode_init(AVCodecContext *avctx)
{
    WavpackContext *s = avctx->priv_data;

    s->avctx = avctx;

    s->fdec_num = 0;

    return 0;
}

static av_cold int wavpack_decode_end(AVCodecContext *avctx)
{
    WavpackContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < s->fdec_num; i++)
        av_freep(&s->fdec[i]);
    s->fdec_num = 0;

    return 0;
}

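/*
 * Decode a single WavPack block (one or two channels of a packet): parse the
 * metadata sub-blocks, check that all mandatory ones were present, set up the
 * output buffer for the first block of a packet and unpack the samples.
 */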
static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
                                AVFrame *frame, const uint8_t *buf, int buf_size)
{
    WavpackContext *wc = avctx->priv_data;
    ThreadFrame tframe = { .f = frame };
    WavpackFrameContext *s;
    GetByteContext gb;
    void *samples_l = NULL, *samples_r = NULL;
    int ret;
    int got_terms = 0, got_weights = 0, got_samples = 0,
        got_entropy = 0, got_bs = 0, got_float = 0, got_hybrid = 0;
    int i, j, id, size, ssize, weights, t;
    int bpp, chan = 0, chmask = 0, orig_bpp, sample_rate = 0;
    int multiblock;

    if (block_no >= wc->fdec_num && wv_alloc_frame_context(wc) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error creating frame decode context\n");
        return AVERROR_INVALIDDATA;
    }

    s = wc->fdec[block_no];
    if (!s) {
        av_log(avctx, AV_LOG_ERROR, "Context for block %d is not present\n",
               block_no);
        return AVERROR_INVALIDDATA;
    }

    memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr));
    memset(s->ch, 0, sizeof(s->ch));
    s->extra_bits     = 0;
    s->and            = s->or = s->shift = 0;
    s->got_extra_bits = 0;

    bytestream2_init(&gb, buf, buf_size);

    s->samples = bytestream2_get_le32(&gb);
    if (s->samples != wc->samples) {
        av_log(avctx, AV_LOG_ERROR, "Mismatching number of samples in "
               "a sequence: %d and %d\n", wc->samples, s->samples);
        return AVERROR_INVALIDDATA;
    }
    s->frame_flags = bytestream2_get_le32(&gb);
    bpp            = av_get_bytes_per_sample(avctx->sample_fmt);
    orig_bpp       = ((s->frame_flags & 0x03) + 1) << 3;
    multiblock     = (s->frame_flags & WV_SINGLE_BLOCK) != WV_SINGLE_BLOCK;

    s->stereo         = !(s->frame_flags & WV_MONO);
    s->stereo_in      =  (s->frame_flags & WV_FALSE_STEREO) ? 0 : s->stereo;
    s->joint          =   s->frame_flags & WV_JOINT_STEREO;
    s->hybrid         =   s->frame_flags & WV_HYBRID_MODE;
    s->hybrid_bitrate =   s->frame_flags & WV_HYBRID_BITRATE;
    s->post_shift     = bpp * 8 - orig_bpp + ((s->frame_flags >> 13) & 0x1f);
    if (s->post_shift < 0 || s->post_shift > 31) {
        return AVERROR_INVALIDDATA;
    }
    s->hybrid_maxclip = ((1LL << (orig_bpp - 1)) - 1);
    s->hybrid_minclip = ((-1UL << (orig_bpp - 1)));
    s->CRC            = bytestream2_get_le32(&gb);

    // parse metadata blocks
    while (bytestream2_get_bytes_left(&gb)) {
        id   = bytestream2_get_byte(&gb);
        size = bytestream2_get_byte(&gb);
        if (id & WP_IDF_LONG) {
            size |= (bytestream2_get_byte(&gb)) << 8;
            size |= (bytestream2_get_byte(&gb)) << 16;
        }
        size <<= 1; // size is specified in words
        ssize  = size;
        if (id & WP_IDF_ODD)
            size--;
        if (size < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Got incorrect block %02X with size %i\n", id, size);
            break;
        }
        if (bytestream2_get_bytes_left(&gb) < ssize) {
            av_log(avctx, AV_LOG_ERROR,
                   "Block size %i is out of bounds\n", size);
            break;
        }
        switch (id & WP_IDF_MASK) {
        case WP_ID_DECTERMS:
            if (size > MAX_TERMS) {
                av_log(avctx, AV_LOG_ERROR, "Too many decorrelation terms\n");
                s->terms = 0;
                bytestream2_skip(&gb, ssize);
                continue;
            }
            s->terms = size;
            for (i = 0; i < s->terms; i++) {
                uint8_t val = bytestream2_get_byte(&gb);
                s->decorr[s->terms - i - 1].value = (val & 0x1F) - 5;
                s->decorr[s->terms - i - 1].delta = val >> 5;
            }
            got_terms = 1;
            break;
        case WP_ID_DECWEIGHTS:
            if (!got_terms) {
                av_log(avctx, AV_LOG_ERROR, "No decorrelation terms met\n");
                continue;
            }
            weights = size >> s->stereo_in;
            if (weights > MAX_TERMS || weights > s->terms) {
                av_log(avctx, AV_LOG_ERROR, "Too many decorrelation weights\n");
                bytestream2_skip(&gb, ssize);
                continue;
            }
            for (i = 0; i < weights; i++) {
                t = (int8_t)bytestream2_get_byte(&gb);
                s->decorr[s->terms - i - 1].weightA = t * (1 << 3);
                if (s->decorr[s->terms - i - 1].weightA > 0)
                    s->decorr[s->terms - i - 1].weightA +=
                        (s->decorr[s->terms - i - 1].weightA + 64) >> 7;
                if (s->stereo_in) {
                    t = (int8_t)bytestream2_get_byte(&gb);
                    s->decorr[s->terms - i - 1].weightB = t * (1 << 3);
                    if (s->decorr[s->terms - i - 1].weightB > 0)
                        s->decorr[s->terms - i - 1].weightB +=
                            (s->decorr[s->terms - i - 1].weightB + 64) >> 7;
                }
            }
            got_weights = 1;
            break;
        case WP_ID_DECSAMPLES:
            if (!got_terms) {
                av_log(avctx, AV_LOG_ERROR, "No decorrelation terms met\n");
                continue;
            }
            t = 0;
            for (i = s->terms - 1; (i >= 0) && (t < size); i--) {
                if (s->decorr[i].value > 8) {
                    s->decorr[i].samplesA[0] =
                        wp_exp2(bytestream2_get_le16(&gb));
                    s->decorr[i].samplesA[1] =
                        wp_exp2(bytestream2_get_le16(&gb));

                    if (s->stereo_in) {
                        s->decorr[i].samplesB[0] =
                            wp_exp2(bytestream2_get_le16(&gb));
                        s->decorr[i].samplesB[1] =
                            wp_exp2(bytestream2_get_le16(&gb));
                        t += 4;
                    }
                    t += 4;
                } else if (s->decorr[i].value < 0) {
                    s->decorr[i].samplesA[0] =
                        wp_exp2(bytestream2_get_le16(&gb));
                    s->decorr[i].samplesB[0] =
                        wp_exp2(bytestream2_get_le16(&gb));
                    t += 4;
                } else {
                    for (j = 0; j < s->decorr[i].value; j++) {
                        s->decorr[i].samplesA[j] =
                            wp_exp2(bytestream2_get_le16(&gb));
                        if (s->stereo_in) {
                            s->decorr[i].samplesB[j] =
                                wp_exp2(bytestream2_get_le16(&gb));
                        }
                    }
                    t += s->decorr[i].value * 2 * (s->stereo_in + 1);
                }
            }
            got_samples = 1;
            break;
        case WP_ID_ENTROPY:
            if (size != 6 * (s->stereo_in + 1)) {
                av_log(avctx, AV_LOG_ERROR,
                       "Entropy vars size should be %i, got %i.\n",
                       6 * (s->stereo_in + 1), size);
                bytestream2_skip(&gb, ssize);
                continue;
            }
            for (j = 0; j <= s->stereo_in; j++)
                for (i = 0; i < 3; i++) {
                    s->ch[j].median[i] = wp_exp2(bytestream2_get_le16(&gb));
                }
            got_entropy = 1;
            break;
        case WP_ID_HYBRID:
            if (s->hybrid_bitrate) {
                for (i = 0; i <= s->stereo_in; i++) {
                    s->ch[i].slow_level = wp_exp2(bytestream2_get_le16(&gb));
                    size -= 2;
                }
            }
            for (i = 0; i < (s->stereo_in + 1); i++) {
                s->ch[i].bitrate_acc = bytestream2_get_le16(&gb) << 16;
                size -= 2;
            }
            if (size > 0) {
                for (i = 0; i < (s->stereo_in + 1); i++) {
                    s->ch[i].bitrate_delta =
                        wp_exp2((int16_t)bytestream2_get_le16(&gb));
                }
            } else {
                for (i = 0; i < (s->stereo_in + 1); i++)
                    s->ch[i].bitrate_delta = 0;
            }
            got_hybrid = 1;
            break;
        case WP_ID_INT32INFO: {
            uint8_t val[4];
            if (size != 4) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid INT32INFO, size = %i\n",
                       size);
                bytestream2_skip(&gb, ssize - 4);
                continue;
            }
            bytestream2_get_buffer(&gb, val, 4);
            if (val[0] > 30) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid INT32INFO, extra_bits = %d (> 30)\n", val[0]);
                continue;
            } else if (val[0]) {
                s->extra_bits = val[0];
            } else if (val[1]) {
                s->shift = val[1];
            } else if (val[2]) {
                s->and   = s->or = 1;
                s->shift = val[2];
            } else if (val[3]) {
                s->and   = 1;
                s->shift = val[3];
            }
            if (s->shift > 31) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid INT32INFO, shift = %d (> 31)\n", s->shift);
                s->and = s->or = s->shift = 0;
                continue;
            }
            /* original WavPack decoder forces 32-bit lossy sound to be treated
             * as 24-bit one in order to have proper clipping */
            if (s->hybrid && bpp == 4 && s->post_shift < 8 && s->shift > 8) {
                s->post_shift      += 8;
                s->shift           -= 8;
                s->hybrid_maxclip >>= 8;
                s->hybrid_minclip >>= 8;
            }
            break;
        }
        case WP_ID_FLOATINFO:
            if (size != 4) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid FLOATINFO, size = %i\n", size);
                bytestream2_skip(&gb, ssize);
                continue;
            }
            s->float_flag    = bytestream2_get_byte(&gb);
            s->float_shift   = bytestream2_get_byte(&gb);
            s->float_max_exp = bytestream2_get_byte(&gb);
            if (s->float_shift > 31) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid FLOATINFO, shift = %d (> 31)\n", s->float_shift);
                s->float_shift = 0;
                continue;
            }
            got_float = 1;
            bytestream2_skip(&gb, 1);
            break;
        case WP_ID_DATA:
            s->sc.offset = bytestream2_tell(&gb);
            s->sc.size   = size * 8;
            if ((ret = init_get_bits8(&s->gb, gb.buffer, size)) < 0)
                return ret;
            s->data_size = size * 8;
            bytestream2_skip(&gb, size);
            got_bs = 1;
            break;
        case WP_ID_EXTRABITS:
            if (size <= 4) {
                av_log(avctx, AV_LOG_ERROR, "Invalid EXTRABITS, size = %i\n",
                       size);
                bytestream2_skip(&gb, size);
                continue;
            }
            s->extra_sc.offset = bytestream2_tell(&gb);
            s->extra_sc.size   = size * 8;
            if ((ret = init_get_bits8(&s->gb_extra_bits, gb.buffer, size)) < 0)
                return ret;
            s->crc_extra_bits = get_bits_long(&s->gb_extra_bits, 32);
            bytestream2_skip(&gb, size);
            s->got_extra_bits = 1;
            break;
        case WP_ID_CHANINFO:
            if (size <= 1) {
                av_log(avctx, AV_LOG_ERROR,
                       "Insufficient channel information\n");
                return AVERROR_INVALIDDATA;
            }
            chan = bytestream2_get_byte(&gb);
            switch (size - 2) {
            case 0:
                chmask = bytestream2_get_byte(&gb);
                break;
            case 1:
                chmask = bytestream2_get_le16(&gb);
                break;
            case 2:
                chmask = bytestream2_get_le24(&gb);
                break;
            case 3:
                chmask = bytestream2_get_le32(&gb);
                break;
            case 4:
                size = bytestream2_get_byte(&gb);
                chan |= (bytestream2_get_byte(&gb) & 0xF) << 8;
                chan += 1;
                if (avctx->channels != chan)
                    av_log(avctx, AV_LOG_WARNING, "%i channels signalled"
                           " instead of %i.\n", chan, avctx->channels);
                chmask = bytestream2_get_le24(&gb);
                break;
            case 5:
                size = bytestream2_get_byte(&gb);
                chan |= (bytestream2_get_byte(&gb) & 0xF) << 8;
                chan += 1;
                if (avctx->channels != chan)
                    av_log(avctx, AV_LOG_WARNING, "%i channels signalled"
                           " instead of %i.\n", chan, avctx->channels);
                chmask = bytestream2_get_le32(&gb);
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Invalid channel info size %d\n",
                       size);
                chan   = avctx->channels;
                chmask = avctx->channel_layout;
            }
            break;
        case WP_ID_SAMPLE_RATE:
            if (size != 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid custom sample rate.\n");
                return AVERROR_INVALIDDATA;
            }
            sample_rate = bytestream2_get_le24(&gb);
            break;
        default:
            bytestream2_skip(&gb, size);
        }
        if (id & WP_IDF_ODD)
            bytestream2_skip(&gb, 1);
    }

    if (!got_terms) {
        av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n");
        return AVERROR_INVALIDDATA;
    }
    if (!got_weights) {
        av_log(avctx, AV_LOG_ERROR, "No block with decorrelation weights\n");
        return AVERROR_INVALIDDATA;
    }
    if (!got_samples) {
        av_log(avctx, AV_LOG_ERROR, "No block with decorrelation samples\n");
        return AVERROR_INVALIDDATA;
    }
    if (!got_entropy) {
        av_log(avctx, AV_LOG_ERROR, "No block with entropy info\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->hybrid && !got_hybrid) {
        av_log(avctx, AV_LOG_ERROR, "Hybrid config not found\n");
        return AVERROR_INVALIDDATA;
    }
    if (!got_bs) {
        av_log(avctx, AV_LOG_ERROR, "Packed samples not found\n");
        return AVERROR_INVALIDDATA;
    }
    if (!got_float && avctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
        av_log(avctx, AV_LOG_ERROR, "Float information not found\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->got_extra_bits && avctx->sample_fmt != AV_SAMPLE_FMT_FLTP) {
        const int size   = get_bits_left(&s->gb_extra_bits);
        const int wanted = s->samples * s->extra_bits << s->stereo_in;
        if (size < wanted) {
            av_log(avctx, AV_LOG_ERROR, "Too small EXTRABITS\n");
            s->got_extra_bits = 0;
        }
    }

    if (!wc->ch_offset) {
        int sr = (s->frame_flags >> 23) & 0xf;
        if (sr == 0xf) {
            if (!sample_rate) {
                av_log(avctx, AV_LOG_ERROR, "Custom sample rate missing.\n");
                return AVERROR_INVALIDDATA;
            }
            avctx->sample_rate = sample_rate;
        } else
            avctx->sample_rate = wv_rates[sr];

        if (multiblock) {
            if (chan)
                avctx->channels = chan;
            if (chmask)
                avctx->channel_layout = chmask;
        } else {
            avctx->channels       = s->stereo ? 2 : 1;
            avctx->channel_layout = s->stereo ? AV_CH_LAYOUT_STEREO :
                                                AV_CH_LAYOUT_MONO;
        }

        /* get output buffer */
        frame->nb_samples = s->samples + 1;
        if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
            return ret;
        frame->nb_samples = s->samples;
    }

    if (wc->ch_offset + s->stereo >= avctx->channels) {
        av_log(avctx, AV_LOG_WARNING, "Too many channels coded in a packet.\n");
        return ((avctx->err_recognition & AV_EF_EXPLODE) || !wc->ch_offset) ? AVERROR_INVALIDDATA : 0;
    }

    samples_l = frame->extended_data[wc->ch_offset];
    if (s->stereo)
        samples_r = frame->extended_data[wc->ch_offset + 1];

    wc->ch_offset += 1 + s->stereo;

    if (s->stereo_in) {
        ret = wv_unpack_stereo(s, &s->gb, samples_l, samples_r, avctx->sample_fmt);
        if (ret < 0)
            return ret;
    } else {
        ret = wv_unpack_mono(s, &s->gb, samples_l, avctx->sample_fmt);
        if (ret < 0)
            return ret;

        if (s->stereo)
            memcpy(samples_r, samples_l, bpp * s->samples);
    }

    return 0;
}

static void wavpack_decode_flush(AVCodecContext *avctx)
{
    WavpackContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < s->fdec_num; i++)
        wv_reset_saved_context(s->fdec[i]);
}

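/*
 * Decode a whole packet, which may consist of several blocks for
 * multichannel audio; each block contributes up to two channels starting at
 * ch_offset.
 */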
static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
{
    WavpackContext *s  = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    AVFrame *frame     = data;
    int frame_size, ret, frame_flags;

    if (avpkt->size <= WV_HEADER_SIZE)
        return AVERROR_INVALIDDATA;

    s->block     = 0;
    s->ch_offset = 0;

    /* determine number of samples */
    s->samples  = AV_RL32(buf + 20);
    frame_flags = AV_RL32(buf + 24);
    if (s->samples <= 0 || s->samples > WV_MAX_SAMPLES) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of samples: %d\n",
               s->samples);
        return AVERROR_INVALIDDATA;
    }

    if (frame_flags & 0x80) {
        avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
    } else if ((frame_flags & 0x03) <= 1) {
        avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
    } else {
        avctx->sample_fmt          = AV_SAMPLE_FMT_S32P;
        avctx->bits_per_raw_sample = ((frame_flags & 0x03) + 1) << 3;
    }

    while (buf_size > 0) {
        if (buf_size <= WV_HEADER_SIZE)
            break;
        frame_size = AV_RL32(buf + 4) - 12;
        buf       += 20;
        buf_size  -= 20;
        if (frame_size <= 0 || frame_size > buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Block %d has invalid size (size %d vs. %d bytes left)\n",
                   s->block, frame_size, buf_size);
            wavpack_decode_flush(avctx);
            return AVERROR_INVALIDDATA;
        }
        if ((ret = wavpack_decode_block(avctx, s->block,
                                        frame, buf, frame_size)) < 0) {
            wavpack_decode_flush(avctx);
            return ret;
        }
        s->block++;
        buf      += frame_size;
        buf_size -= frame_size;
    }

    if (s->ch_offset != avctx->channels) {
        av_log(avctx, AV_LOG_ERROR, "Not enough channels coded in a packet.\n");
        return AVERROR_INVALIDDATA;
    }

    *got_frame_ptr = 1;

    return avpkt->size;
}

AVCodec ff_wavpack_decoder = {
    .name             = "wavpack",
    .long_name        = NULL_IF_CONFIG_SMALL("WavPack"),
    .type             = AVMEDIA_TYPE_AUDIO,
    .id               = AV_CODEC_ID_WAVPACK,
    .priv_data_size   = sizeof(WavpackContext),
    .init             = wavpack_decode_init,
    .close            = wavpack_decode_end,
    .decode           = wavpack_decode_frame,
    .flush            = wavpack_decode_flush,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
    .capabilities     = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};