/*
 * Copyright (c) 2001-2003 The FFmpeg project
 *
 * first version by Francois Revol (revol@free.fr)
 * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
 * by Mike Melanson (melanson@pcisys.net)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "put_bits.h"
#include "bytestream.h"
#include "adpcm.h"
#include "adpcm_data.h"
#include "internal.h"

/**
 * @file
 * ADPCM encoders
 * See ADPCM decoder reference documents for codec information.
 */

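/*
 * All of the encoders below follow the same basic ADPCM scheme: predict the
 * next sample from previous output, quantize the prediction error to a
 * 4-bit nibble with an adaptive step size, and update predictor and step
 * from the nibble just emitted. A minimal sketch of that loop, with
 * hypothetical helper names (quantize/dequantize/adapt are not functions
 * in this file):
 *
 *     int predicted = prev_sample;              // or a 2-tap filter (MS)
 *     int error     = sample - predicted;
 *     int nibble    = quantize(error, step);    // sign bit + 3 magnitude bits
 *     prev_sample   = av_clip_int16(predicted + dequantize(nibble, step));
 *     step          = adapt(step, nibble);      // table-driven
 */
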
typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;
    int path;
    int sample1;
    int sample2;
    int step;
} TrellisNode;

typedef struct ADPCMEncodeContext {
    ADPCMChannelStatus status[6];
    TrellisPath *paths;
    TrellisNode *node_buf;
    TrellisNode **nodep_buf;
    uint8_t *trellis_hash;
} ADPCMEncodeContext;

#define FREEZE_INTERVAL 128

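/*
 * FREEZE_INTERVAL bounds the trellis search: every 128 samples the best
 * surviving path is committed to the output and the path buffer is reused,
 * so memory stays at frontier * FREEZE_INTERVAL path entries instead of
 * growing with the frame. The value is an encoder tuning choice, not a
 * bitstream requirement.
 */
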
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
    uint8_t *extradata;
    int i;

    if (avctx->channels > 2) {
        av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n");
        return AVERROR(EINVAL);
    }

    if (avctx->trellis && (unsigned)avctx->trellis > 16U) {
        av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
        return AVERROR(EINVAL);
    }

    if (avctx->trellis && avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_SSI) {
        /*
         * The current trellis implementation doesn't work for extended
         * runs of samples without periodic resets. Disallow it.
         */
        av_log(avctx, AV_LOG_ERROR, "trellis not supported\n");
        return AVERROR_PATCHWELCOME;
    }

    if (avctx->trellis) {
        int frontier  = 1 << avctx->trellis;
        int max_paths =  frontier * FREEZE_INTERVAL;
        if (!FF_ALLOC_TYPED_ARRAY(s->paths,        max_paths)    ||
            !FF_ALLOC_TYPED_ARRAY(s->node_buf,     2 * frontier) ||
            !FF_ALLOC_TYPED_ARRAY(s->nodep_buf,    2 * frontier) ||
            !FF_ALLOC_TYPED_ARRAY(s->trellis_hash, 65536))
            return AVERROR(ENOMEM);
    }

    avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);

    switch (avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_WAV:
        /* each 16 bits sample gives one nibble
           and we have 4 bytes per channel overhead */
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 /
                            (4 * avctx->channels) + 1;
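        /*
         * Worked example (assuming BLKSIZE is 1024, its value in adpcm.h):
         * mono gives (1024 - 4) * 8 / 4 + 1 = 2041 samples per block, i.e.
         * 1020 nibble bytes (2040 samples) plus the uncompressed sample
         * stored in the 4-byte block header, which is the +1 above.
         */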
        /* seems frame_size isn't taken into account...
           have to buffer the samples :-( */
        avctx->block_align = BLKSIZE;
        avctx->bits_per_coded_sample = 4;
        break;
    case AV_CODEC_ID_ADPCM_IMA_QT:
        avctx->frame_size  = 64;
        avctx->block_align = 34 * avctx->channels;
        break;
    case AV_CODEC_ID_ADPCM_MS:
        /* each 16 bits sample gives one nibble
           and we have 7 bytes per channel overhead */
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
        avctx->bits_per_coded_sample = 4;
        avctx->block_align = BLKSIZE;
        if (!(avctx->extradata = av_malloc(32 + AV_INPUT_BUFFER_PADDING_SIZE)))
            return AVERROR(ENOMEM);
        avctx->extradata_size = 32;
        extradata = avctx->extradata;
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7); /* wNumCoef */
        for (i = 0; i < 7; i++) {
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
        }
        break;
    case AV_CODEC_ID_ADPCM_YAMAHA:
        avctx->frame_size  = BLKSIZE * 2 / avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    case AV_CODEC_ID_ADPCM_SWF:
        if (avctx->sample_rate != 11025 &&
            avctx->sample_rate != 22050 &&
            avctx->sample_rate != 44100) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
                   "22050 or 44100\n");
            return AVERROR(EINVAL);
        }
        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
        break;
    case AV_CODEC_ID_ADPCM_IMA_SSI:
        avctx->frame_size  = BLKSIZE * 2 / avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}

static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);

    return 0;
}


static inline uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c,
                                                int16_t sample)
{
    int delta  = sample - c->prev_sample;
    int nibble = FFMIN(7, abs(delta) * 4 /
                       ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
    c->prev_sample += ((ff_adpcm_step_table[c->step_index] *
                        ff_adpcm_yamaha_difflookup[nibble]) / 8);
    c->prev_sample  = av_clip_int16(c->prev_sample);
    c->step_index   = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
    return nibble;
}
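
/*
 * Worked example of the nibble layout (illustrative): with step_index 0 the
 * step is 7 (ff_adpcm_step_table[0]). For delta = 10, abs(10) * 4 / 7 = 5,
 * so the nibble is 5; a delta of -10 additionally sets the sign bit, giving
 * 5 | 8 = 13. The decoder reconstructs the same prev_sample because both
 * sides run the identical update from the transmitted nibble alone.
 */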

static inline uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
                                                   int16_t sample)
{
    int delta = sample - c->prev_sample;
    int diff, step = ff_adpcm_step_table[c->step_index];
    int nibble = 8 * (delta < 0);

    delta = abs(delta);
    diff  = delta + (step >> 3);

    if (delta >= step) {
        nibble |= 4;
        delta  -= step;
    }
    step >>= 1;
    if (delta >= step) {
        nibble |= 2;
        delta  -= step;
    }
    step >>= 1;
    if (delta >= step) {
        nibble |= 1;
        delta  -= step;
    }
    diff -= delta;

    if (nibble & 8)
        c->prev_sample -= diff;
    else
        c->prev_sample += diff;

    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index  = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);

    return nibble;
}
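
/*
 * Unlike the WAV variant above, which derives the nibble with one integer
 * division, this routine peels off the 4/2/1 magnitude bits exactly the way
 * the decoder will, accumulating the reconstruction delta in `diff`, so
 * encoder and decoder state stay bit-identical by construction.
 */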

static inline uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c,
                                               int16_t sample)
{
    int predictor, nibble, bias;

    predictor = (((c->sample1) * (c->coeff1)) +
                 ((c->sample2) * (c->coeff2))) / 64;

    nibble = sample - predictor;
    if (nibble >= 0)
        bias =  c->idelta / 2;
    else
        bias = -c->idelta / 2;

    nibble = (nibble + bias) / c->idelta;
    nibble = av_clip_intp2(nibble, 3) & 0x0F;

    predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);

    c->idelta = (ff_adpcm_AdaptationTable[nibble] * c->idelta) >> 8;
    if (c->idelta < 16)
        c->idelta = 16;

    return nibble;
}
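
/*
 * MS ADPCM predicts from the last two outputs with one of 7 coefficient
 * pairs; ff_adpcm_AdaptCoeff1/2 store them divided by 4, which the /64 here
 * compensates for (the original scale is /256). adpcm_encode_frame() below
 * always selects predictor 0, whose stored pair is (64, 0), so in practice
 * the prediction reduces to the previous output sample.
 */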

static inline uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
                                                   int16_t sample)
{
    int nibble, delta;

    if (!c->step) {
        c->predictor = 0;
        c->step      = 127;
    }

    delta = sample - c->predictor;

    nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;

    c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
    c->predictor  = av_clip_int16(c->predictor);
    c->step       = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
    c->step       = av_clip(c->step, 127, 24576);

    return nibble;
}
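
/*
 * Yamaha ADPCM adapts the step multiplicatively: ff_adpcm_yamaha_indexscale
 * holds 8.8 fixed-point factors, so large nibbles grow the step and small
 * ones shrink it, clamped to [127, 24576]. This replaces the 89-entry
 * step/index tables used by the IMA variants.
 */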

static void adpcm_compress_trellis(AVCodecContext *avctx,
                                   const int16_t *samples, uint8_t *dst,
                                   ADPCMChannelStatus *c, int n, int stride)
{
    //FIXME 6% faster if frontier is a compile-time constant
    ADPCMEncodeContext *s = avctx->priv_data;
    const int frontier = 1 << avctx->trellis;
    const int version  = avctx->codec->id;
    TrellisPath *paths       = s->paths, *p;
    TrellisNode *node_buf    = s->node_buf;
    TrellisNode **nodep_buf  = s->nodep_buf;
    TrellisNode **nodes      = nodep_buf; // nodes[] is always sorted by .ssd
    TrellisNode **nodes_next = nodep_buf + frontier;
    int pathn = 0, froze = -1, i, j, k, generation = 0;
    uint8_t *hash = s->trellis_hash;
    memset(hash, 0xff, 65536 * sizeof(*hash));

    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
    nodes[0]          = node_buf + frontier;
    nodes[0]->ssd     = 0;
    nodes[0]->path    = 0;
    nodes[0]->step    = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
        version == AV_CODEC_ID_ADPCM_IMA_QT  ||
        version == AV_CODEC_ID_ADPCM_SWF)
        nodes[0]->sample1 = c->prev_sample;
    if (version == AV_CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if (version == AV_CODEC_ID_ADPCM_YAMAHA) {
        if (c->step == 0) {
            nodes[0]->step    = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step    = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

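    /*
     * The search below is Viterbi-style: each TrellisNode is one candidate
     * encoder state (last reconstructed samples plus step), scored by the
     * accumulated squared error (ssd) against the input. For every input
     * sample, each surviving node tries the nibbles nearest the greedy
     * choice, and the best `frontier` successors are kept in a binary
     * min-heap ordered on ssd.
     */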
    for (i = 0; i < n; i++) {
        TrellisNode *t = node_buf + frontier * (i & 1);
        TrellisNode **u;
        int sample   = samples[i * stride];
        int heap_pos = 0;
        memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
        for (j = 0; j < frontier && nodes[j]; j++) {
            // higher j have higher ssd already, so they're likely
            // to yield a suboptimal next sample too
            const int range = (j < frontier / 2) ? 1 : 0;
            const int step  = nodes[j]->step;
            int nidx;
            if (version == AV_CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) +
                                       (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div  = (sample - predictor) / step;
                const int nmin = av_clip(div - range, -8, 6);
                const int nmax = av_clip(div + range, -7, 7);
                for (nidx = nmin; nidx <= nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample   = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    int pos;\
                    TrellisNode *u;\
                    uint8_t *h;\
                    dec_sample = av_clip_int16(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*(unsigned)d;\
                    /* Check for wraparound, skip such samples completely. \
                     * Note, changing ssd to a 64 bit variable would be \
                     * simpler, avoiding this check, but it's slower on \
                     * x86 32 bit at the moment. */\
                    if (ssd < nodes[j]->ssd)\
                        goto next_##NAME;\
                    /* Collapse any two states with the same previous sample value. \
                     * One could also distinguish states by step and by 2nd to last
                     * sample, but the effects of that are negligible.
                     * Since nodes in the previous generation are iterated
                     * through a heap, they're roughly ordered from better to
                     * worse, but not strictly ordered. Therefore, an earlier
                     * node with the same sample value is better in most cases
                     * (and thus the current is skipped), but not strictly
                     * in all cases. Only skipping samples where ssd >=
                     * ssd of the earlier node with the same sample gives
                     * slightly worse quality, though, for some reason. */ \
                    h = &hash[(uint16_t) dec_sample];\
                    if (*h == generation)\
                        goto next_##NAME;\
                    if (heap_pos < frontier) {\
                        pos = heap_pos++;\
                    } else {\
                        /* Try to replace one of the leaf nodes with the new \
                         * one, but try a different slot each time. */\
                        pos = (frontier >> 1) +\
                              (heap_pos & ((frontier >> 1) - 1));\
                        if (ssd > nodes_next[pos]->ssd)\
                            goto next_##NAME;\
                        heap_pos++;\
                    }\
                    *h = generation;\
                    u  = nodes_next[pos];\
                    if (!u) {\
                        av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
                        u = t++;\
                        nodes_next[pos] = u;\
                        u->path = pathn++;\
                    }\
                    u->ssd     = ssd;\
                    u->step    = STEP_INDEX;\
                    u->sample2 = nodes[j]->sample1;\
                    u->sample1 = dec_sample;\
                    paths[u->path].nibble = nibble;\
                    paths[u->path].prev   = nodes[j]->path;\
                    /* Sift the newly inserted node up in the heap to \
                     * restore the heap property. */\
                    while (pos > 0) {\
                        int parent = (pos - 1) >> 1;\
                        if (nodes_next[parent]->ssd <= ssd)\
                            break;\
                        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
                        pos = parent;\
                    }\
                    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16,
                               (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
                }
            } else if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
                       version == AV_CODEC_ID_ADPCM_IMA_QT  ||
                       version == AV_CODEC_ID_ADPCM_SWF) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div       = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin            = av_clip(div - range, -7, 6);\
                int nmax            = av_clip(div + range, -6, 7);\
                if (nmin <= 0)\
                    nmin--; /* distinguish -0 from +0 */\
                if (nmax < 0)\
                    nmax--;\
                for (nidx = nmin; nidx <= nmax; nidx++) {\
                    const int nibble = nidx < 0 ? 7 - nidx : nidx;\
                    int dec_sample = predictor +\
                                    (STEP_TABLE *\
                                     ff_adpcm_yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, ff_adpcm_step_table[step],
                           av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
            } else { //AV_CODEC_ID_ADPCM_YAMAHA
                LOOP_NODES(yamaha, step,
                           av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
                                   127, 24576));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u          = nodes;
        nodes      = nodes_next;
        nodes_next = u;

        generation++;
        if (generation == 255) {
            memset(hash, 0xff, 65536 * sizeof(*hash));
            generation = 0;
        }

        // prevent overflow
        if (nodes[0]->ssd > (1 << 28)) {
            for (j = 1; j < frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        // merge old paths to save memory
        if (i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for (k = i; k > froze; k--) {
                dst[k] = p->nibble;
                p      = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            // other nodes might use paths that don't coincide with the frozen one.
            // checking which nodes do so is too slow, so just kill them all.
            // this also slightly improves quality, but I don't know why.
            memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for (i = n - 1; i > froze; i--) {
        dst[i] = p->nibble;
        p      = &paths[p->prev];
    }

    c->predictor  = nodes[0]->sample1;
    c->sample1    = nodes[0]->sample1;
    c->sample2    = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step       = nodes[0]->step;
    c->idelta     = nodes[0]->step;
}
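
/*
 * Cost of the search above (rough estimate): with avctx->trellis = t there
 * are up to 2^t live nodes, so runtime grows on the order of n * 2^t nibble
 * trials and the paths array holds 2^t * FREEZE_INTERVAL entries; large
 * -trellis values get expensive quickly.
 */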

static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                              const AVFrame *frame, int *got_packet_ptr)
{
    int n, i, ch, st, pkt_size, ret;
    const int16_t *samples;
    int16_t **samples_p;
    uint8_t *dst;
    ADPCMEncodeContext *c = avctx->priv_data;
    uint8_t *buf;

    samples   = (const int16_t *)frame->data[0];
    samples_p = (int16_t **)frame->extended_data;
    st        = avctx->channels == 2;

    if (avctx->codec_id == AV_CODEC_ID_ADPCM_SWF)
        pkt_size = (2 + avctx->channels * (22 + 4 * (frame->nb_samples - 1)) + 7) / 8;
    else if (avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_SSI)
        pkt_size = (frame->nb_samples * avctx->channels) / 2;
    else
        pkt_size = avctx->block_align;
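    /*
     * SWF packets hold 2 header bits, then per channel a 16-bit initial
     * sample and a 6-bit step index (22 bits), then 4 bits for each
     * remaining sample; the "+ 7) / 8" rounds the bit total up to whole
     * bytes. IMA SSI writes raw nibbles with no header, hence exactly half
     * a byte per sample.
     */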
    if ((ret = ff_alloc_packet2(avctx, avpkt, pkt_size, 0)) < 0)
        return ret;
    dst = avpkt->data;

    switch(avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_WAV:
    {
        int blocks, j;

        blocks = (frame->nb_samples - 1) / 8;

        for (ch = 0; ch < avctx->channels; ch++) {
            ADPCMChannelStatus *status = &c->status[ch];
            status->prev_sample = samples_p[ch][0];
            /* status->step_index = 0;
               XXX: not sure how to init the state machine */
            bytestream_put_le16(&dst, status->prev_sample);
            *dst++ = status->step_index;
            *dst++ = 0; /* unknown */
        }

        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
        if (avctx->trellis > 0) {
            if (!FF_ALLOC_TYPED_ARRAY(buf, avctx->channels * blocks * 8))
                return AVERROR(ENOMEM);
            for (ch = 0; ch < avctx->channels; ch++) {
                adpcm_compress_trellis(avctx, &samples_p[ch][1],
                                       buf + ch * blocks * 8, &c->status[ch],
                                       blocks * 8, 1);
            }
            for (i = 0; i < blocks; i++) {
                for (ch = 0; ch < avctx->channels; ch++) {
                    uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
                    for (j = 0; j < 8; j += 2)
                        *dst++ = buf1[j] | (buf1[j + 1] << 4);
                }
            }
            av_free(buf);
        } else {
            for (i = 0; i < blocks; i++) {
                for (ch = 0; ch < avctx->channels; ch++) {
                    ADPCMChannelStatus *status = &c->status[ch];
                    const int16_t *smp = &samples_p[ch][1 + i * 8];
                    for (j = 0; j < 8; j += 2) {
                        uint8_t v = adpcm_ima_compress_sample(status, smp[j    ]);
                        v        |= adpcm_ima_compress_sample(status, smp[j + 1]) << 4;
                        *dst++ = v;
                    }
                }
            }
        }
        break;
    }
    case AV_CODEC_ID_ADPCM_IMA_QT:
    {
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size);

        for (ch = 0; ch < avctx->channels; ch++) {
            ADPCMChannelStatus *status = &c->status[ch];
            put_bits(&pb, 9, (status->prev_sample & 0xFFFF) >> 7);
            put_bits(&pb, 7, status->step_index);
            if (avctx->trellis > 0) {
                uint8_t buf[64];
                adpcm_compress_trellis(avctx, &samples_p[ch][0], buf, status,
                                       64, 1);
                for (i = 0; i < 64; i++)
                    put_bits(&pb, 4, buf[i ^ 1]);
                status->prev_sample = status->predictor;
            } else {
                for (i = 0; i < 64; i += 2) {
                    int t1, t2;
                    t1 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i    ]);
                    t2 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i + 1]);
                    put_bits(&pb, 4, t2);
                    put_bits(&pb, 4, t1);
                }
            }
        }

        flush_put_bits(&pb);
        break;
    }
    case AV_CODEC_ID_ADPCM_IMA_SSI:
    {
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size);

        av_assert0(avctx->trellis == 0);

        for (i = 0; i < frame->nb_samples; i++) {
            for (ch = 0; ch < avctx->channels; ch++) {
                put_bits(&pb, 4, adpcm_ima_qt_compress_sample(c->status + ch, *samples++));
            }
        }

        flush_put_bits(&pb);
        break;
    }
    case AV_CODEC_ID_ADPCM_SWF:
    {
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size);

        n = frame->nb_samples - 1;

        // store AdpcmCodeSize
        put_bits(&pb, 2, 2);    // set 4-bit flash adpcm format

        // init the encoder state
        for (i = 0; i < avctx->channels; i++) {
            // clip step so it fits 6 bits
            c->status[i].step_index = av_clip_uintp2(c->status[i].step_index, 6);
            put_sbits(&pb, 16, samples[i]);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = samples[i];
        }

        if (avctx->trellis > 0) {
            if (!(buf = av_malloc(2 * n)))
                return AVERROR(ENOMEM);
            adpcm_compress_trellis(avctx, samples + avctx->channels, buf,
                                   &c->status[0], n, avctx->channels);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples + avctx->channels + 1,
                                       buf + n, &c->status[1], n,
                                       avctx->channels);
            for (i = 0; i < n; i++) {
                put_bits(&pb, 4, buf[i]);
                if (avctx->channels == 2)
                    put_bits(&pb, 4, buf[n + i]);
            }
            av_free(buf);
        } else {
            for (i = 1; i < frame->nb_samples; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
                         samples[avctx->channels * i]));
                if (avctx->channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
                             samples[2 * i + 1]));
            }
        }
        flush_put_bits(&pb);
        break;
    }
    case AV_CODEC_ID_ADPCM_MS:
        for (i = 0; i < avctx->channels; i++) {
            int predictor = 0;
            *dst++ = predictor;
            c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
            c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
        }
        for (i = 0; i < avctx->channels; i++) {
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;
            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        for (i = 0; i < avctx->channels; i++)
            c->status[i].sample2 = *samples++;
        for (i = 0; i < avctx->channels; i++) {
            c->status[i].sample1 = *samples++;
            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for (i = 0; i < avctx->channels; i++)
            bytestream_put_le16(&dst, c->status[i].sample2);

        if (avctx->trellis > 0) {
            n = avctx->block_align - 7 * avctx->channels;
            if (!(buf = av_malloc(2 * n)))
                return AVERROR(ENOMEM);
            if (avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
                                       avctx->channels);
                for (i = 0; i < n; i += 2)
                    *dst++ = (buf[i] << 4) | buf[i + 1];
            } else {
                adpcm_compress_trellis(avctx, samples,     buf,
                                       &c->status[0], n, avctx->channels);
                adpcm_compress_trellis(avctx, samples + 1, buf + n,
                                       &c->status[1], n, avctx->channels);
                for (i = 0; i < n; i++)
                    *dst++ = (buf[i] << 4) | buf[n + i];
            }
            av_free(buf);
        } else {
            for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
                int nibble;
                nibble  = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
                nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
                *dst++  = nibble;
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_YAMAHA:
        n = frame->nb_samples / 2;
        if (avctx->trellis > 0) {
            if (!(buf = av_malloc(2 * n * 2)))
                return AVERROR(ENOMEM);
            n *= 2;
            if (avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
                                       avctx->channels);
                for (i = 0; i < n; i += 2)
                    *dst++ = buf[i] | (buf[i + 1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples,     buf,
                                       &c->status[0], n, avctx->channels);
                adpcm_compress_trellis(avctx, samples + 1, buf + n,
                                       &c->status[1], n, avctx->channels);
                for (i = 0; i < n; i++)
                    *dst++ = buf[i] | (buf[n + i] << 4);
            }
            av_free(buf);
        } else
            for (n *= avctx->channels; n > 0; n--) {
                int nibble;
                nibble  = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
                nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
                *dst++  = nibble;
            }
        break;
    default:
        return AVERROR(EINVAL);
    }

    avpkt->size     = pkt_size;
    *got_packet_ptr = 1;
    return 0;
}

static const enum AVSampleFormat sample_fmts[] = {
    AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
};

static const enum AVSampleFormat sample_fmts_p[] = {
    AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_NONE
};

#define ADPCM_ENCODER(id_, name_, sample_fmts_, capabilities_, long_name_) \
AVCodec ff_ ## name_ ## _encoder = {                                       \
    .name           = #name_,                                              \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),                    \
    .type           = AVMEDIA_TYPE_AUDIO,                                  \
    .id             = id_,                                                 \
    .priv_data_size = sizeof(ADPCMEncodeContext),                          \
    .init           = adpcm_encode_init,                                   \
    .encode2        = adpcm_encode_frame,                                  \
    .close          = adpcm_encode_close,                                  \
    .sample_fmts    = sample_fmts_,                                        \
    .capabilities   = capabilities_,                                       \
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,                           \
}

ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_QT,  adpcm_ima_qt,  sample_fmts_p, 0, "ADPCM IMA QuickTime");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_SSI, adpcm_ima_ssi, sample_fmts,   AV_CODEC_CAP_SMALL_LAST_FRAME, "ADPCM IMA Simon & Schuster Interactive");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, 0, "ADPCM IMA WAV");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_MS,      adpcm_ms,      sample_fmts,   0, "ADPCM Microsoft");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_SWF,     adpcm_swf,     sample_fmts,   0, "ADPCM Shockwave Flash");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_YAMAHA,  adpcm_yamaha,  sample_fmts,   0, "ADPCM Yamaha");
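
/*
 * Usage sketch (illustrative, not part of this file; error handling
 * omitted): these encoders are driven through the regular libavcodec API.
 * Assuming an AVFrame `frame` already holds ctx->frame_size S16 samples:
 *
 *     const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_ADPCM_MS);
 *     AVCodecContext *ctx  = avcodec_alloc_context3(codec);
 *     ctx->sample_rate     = 44100;
 *     ctx->channels        = 2;
 *     ctx->channel_layout  = AV_CH_LAYOUT_STEREO;
 *     ctx->sample_fmt      = AV_SAMPLE_FMT_S16;
 *     if (avcodec_open2(ctx, codec, NULL) == 0) {
 *         AVPacket *pkt = av_packet_alloc();
 *         if (avcodec_send_frame(ctx, frame) == 0)
 *             while (avcodec_receive_packet(ctx, pkt) == 0)
 *                 av_packet_unref(pkt); // one block_align-sized packet
 *     }
 */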