FFmpeg
g723_1enc.c
1 /*
2  * G.723.1 compatible encoder
3  * Copyright (c) Mohamed Naufal <naufal22@gmail.com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * G.723.1 compatible encoder
25  */
26 
27 #include <stdint.h>
28 #include <string.h>
29 
30 #include "libavutil/channel_layout.h"
31 #include "libavutil/common.h"
32 #include "libavutil/mem.h"
33 #include "libavutil/opt.h"
34 
35 #include "avcodec.h"
36 #include "celp_math.h"
37 #include "g723_1.h"
38 #include "internal.h"
39 
40 #define BITSTREAM_WRITER_LE
41 #include "put_bits.h"
42 
43 static av_cold int g723_1_encode_init(AVCodecContext *avctx)
44 {
45  G723_1_Context *s = avctx->priv_data;
46  G723_1_ChannelContext *p = &s->ch[0];
47 
48  if (avctx->sample_rate != 8000) {
49  av_log(avctx, AV_LOG_ERROR, "Only 8000Hz sample rate supported\n");
50  return AVERROR(EINVAL);
51  }
52 
53  if (avctx->channels != 1) {
54  av_log(avctx, AV_LOG_ERROR, "Only mono supported\n");
55  return AVERROR(EINVAL);
56  }
57 
58  if (avctx->bit_rate == 6300) {
59  p->cur_rate = RATE_6300;
60  } else if (avctx->bit_rate == 5300) {
61  av_log(avctx, AV_LOG_ERROR, "Use bitrate 6300 instead of 5300.\n");
62  avpriv_report_missing_feature(avctx, "Bitrate 5300");
63  return AVERROR_PATCHWELCOME;
64  } else {
65  av_log(avctx, AV_LOG_ERROR, "Bitrate not supported, use 6300\n");
66  return AVERROR(EINVAL);
67  }
68  avctx->frame_size = 240;
69  memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(int16_t));
70 
71  return 0;
72 }
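For context, a minimal sketch of how a caller could open this encoder with the parameters that g723_1_encode_init() accepts (8 kHz, mono, 6300 b/s, signed 16-bit samples). The helper name is made up for illustration and the snippet is not part of this file:

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>

static AVCodecContext *open_g723_1_encoder(void)
{
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_G723_1);
    AVCodecContext *ctx;

    if (!codec)
        return NULL;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;
    ctx->sample_rate    = 8000;              /* only 8000 Hz is accepted */
    ctx->channels       = 1;                 /* mono only                */
    ctx->channel_layout = AV_CH_LAYOUT_MONO;
    ctx->sample_fmt     = AV_SAMPLE_FMT_S16;
    ctx->bit_rate       = 6300;              /* 5300 is not implemented  */
    if (avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}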
73 
74 /**
75  * Remove DC component from the input signal.
76  *
77  * @param buf input signal
78  * @param fir zero memory
79  * @param iir pole memory
80  */
81 static void highpass_filter(int16_t *buf, int16_t *fir, int *iir)
82 {
83  int i;
84  for (i = 0; i < FRAME_LEN; i++) {
85  *iir = (buf[i] << 15) + ((-*fir) << 15) + MULL2(*iir, 0x7f00);
86  *fir = buf[i];
87  buf[i] = av_clipl_int32((int64_t)*iir + (1 << 15)) >> 16;
88  }
89 }
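In floating point the loop above amounts to a first-order DC-removal filter with a zero at z = 1 and a pole at 127/128 (0x7f00 / 2^15), the output being halved by the final shift. A reference sketch, illustrative only and not part of this file:

static void highpass_filter_ref(float *buf, int len, float *prev_in, float *prev_out)
{
    for (int i = 0; i < len; i++) {
        /* y[n] = (x[n] - x[n-1]) / 2 + (127/128) * y[n-1] */
        float out = 0.5f * (buf[i] - *prev_in) + (127.0f / 128.0f) * *prev_out;
        *prev_in  = buf[i];
        *prev_out = out;
        buf[i]    = out;
    }
}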
90 
91 /**
92  * Estimate autocorrelation of the input vector.
93  *
94  * @param buf input buffer
95  * @param autocorr autocorrelation coefficients vector
96  */
97 static void comp_autocorr(int16_t *buf, int16_t *autocorr)
98 {
99  int i, scale, temp;
100  int16_t vector[LPC_FRAME];
101 
102  ff_g723_1_scale_vector(vector, buf, LPC_FRAME);
103 
104  /* Apply the Hamming window */
105  for (i = 0; i < LPC_FRAME; i++)
106  vector[i] = (vector[i] * hamming_window[i] + (1 << 14)) >> 15;
107 
108  /* Compute the first autocorrelation coefficient */
109  temp = ff_dot_product(vector, vector, LPC_FRAME);
110 
111  /* Apply a white noise correlation factor of (1025/1024) */
112  temp += temp >> 10;
113 
114  /* Normalize */
115  scale = ff_g723_1_normalize_bits(temp, 31);
116  autocorr[0] = av_clipl_int32((int64_t) (temp << scale) +
117  (1 << 15)) >> 16;
118 
119  /* Compute the remaining coefficients */
120  if (!autocorr[0]) {
121  memset(autocorr + 1, 0, LPC_ORDER * sizeof(int16_t));
122  } else {
123  for (i = 1; i <= LPC_ORDER; i++) {
124  temp = ff_dot_product(vector, vector + i, LPC_FRAME - i);
125  temp = MULL2((temp << scale), binomial_window[i - 1]);
126  autocorr[i] = av_clipl_int32((int64_t) temp + (1 << 15)) >> 16;
127  }
128  }
129 }
130 
131 /**
132  * Use Levinson-Durbin recursion to compute LPC coefficients from
133  * autocorrelation values.
134  *
135  * @param lpc LPC coefficients vector
136  * @param autocorr autocorrelation coefficients vector
137  * @param error prediction error
138  */
139 static void levinson_durbin(int16_t *lpc, int16_t *autocorr, int16_t error)
140 {
141  int16_t vector[LPC_ORDER];
142  int16_t partial_corr;
143  int i, j, temp;
144 
145  memset(lpc, 0, LPC_ORDER * sizeof(int16_t));
146 
147  for (i = 0; i < LPC_ORDER; i++) {
148  /* Compute the partial correlation coefficient */
149  temp = 0;
150  for (j = 0; j < i; j++)
151  temp -= lpc[j] * autocorr[i - j - 1];
152  temp = ((autocorr[i] << 13) + temp) << 3;
153 
154  if (FFABS(temp) >= (error << 16))
155  break;
156 
157  partial_corr = temp / (error << 1);
158 
159  lpc[i] = av_clipl_int32((int64_t) (partial_corr << 14) +
160  (1 << 15)) >> 16;
161 
162  /* Update the prediction error */
163  temp = MULL2(temp, partial_corr);
164  error = av_clipl_int32((int64_t) (error << 16) - temp +
165  (1 << 15)) >> 16;
166 
167  memcpy(vector, lpc, i * sizeof(int16_t));
168  for (j = 0; j < i; j++) {
169  temp = partial_corr * vector[i - j - 1] << 1;
170  lpc[j] = av_clipl_int32((int64_t) (lpc[j] << 16) - temp +
171  (1 << 15)) >> 16;
172  }
173  }
174 }
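The fixed-point loop above implements the standard Durbin recursion. A floating-point sketch of the same recursion, illustrative only and not part of this file: as in levinson_durbin(), r[] holds the autocorrelations r(1)..r(order), err starts as r(0), and the order is assumed to be at most LPC_ORDER from g723_1.h.

static void levinson_durbin_ref(double *a, const double *r, double err, int order)
{
    double prev[LPC_ORDER];

    memset(a, 0, order * sizeof(*a));
    for (int i = 0; i < order; i++) {
        /* partial correlation (reflection) coefficient */
        double acc = r[i];
        double k;

        for (int j = 0; j < i; j++)
            acc -= a[j] * r[i - j - 1];
        if (err <= 0.0 || acc >= err || acc <= -err)  /* mirror the early exit */
            break;
        k    = acc / err;
        a[i] = k;

        /* update the lower-order coefficients and the prediction error */
        memcpy(prev, a, i * sizeof(*a));
        for (int j = 0; j < i; j++)
            a[j] = prev[j] - k * prev[i - j - 1];
        err *= 1.0 - k * k;
    }
}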
175 
176 /**
177  * Calculate LPC coefficients for the current frame.
178  *
179  * @param buf input signal: 2 trailing subframes of the previous frame
180  *            followed by the current frame
181  * @param lpc LPC coefficients vector
182  */
183 static void comp_lpc_coeff(int16_t *buf, int16_t *lpc)
184 {
185  int16_t autocorr[(LPC_ORDER + 1) * SUBFRAMES];
186  int16_t *autocorr_ptr = autocorr;
187  int16_t *lpc_ptr = lpc;
188  int i, j;
189 
190  for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
191  comp_autocorr(buf + i, autocorr_ptr);
192  levinson_durbin(lpc_ptr, autocorr_ptr + 1, autocorr_ptr[0]);
193 
194  lpc_ptr += LPC_ORDER;
195  autocorr_ptr += LPC_ORDER + 1;
196  }
197 }
198 
199 static void lpc2lsp(int16_t *lpc, int16_t *prev_lsp, int16_t *lsp)
200 {
201  int f[LPC_ORDER + 2]; ///< coefficients of the sum and difference
202  ///< polynomials (F1, F2) ordered as
203  ///< f1[0], f2[0], ...., f1[5], f2[5]
204 
205  int max, shift, cur_val, prev_val, count, p;
206  int i, j;
207  int64_t temp;
208 
209  /* Apply bandwidth expansion on the LPC coefficients */
210  for (i = 0; i < LPC_ORDER; i++)
211  lsp[i] = (lpc[i] * bandwidth_expand[i] + (1 << 14)) >> 15;
212 
213  /* Initialize f1[0] and f2[0] to 1 in Q25 */
214  f[0] = f[1] = 1 << 25;
215 
216  /* Compute the remaining coefficients */
217  for (i = 0; i < LPC_ORDER / 2; i++) {
218  /* f1 */
219  f[2 * i + 2] = -f[2 * i] - ((lsp[i] + lsp[LPC_ORDER - 1 - i]) << 12);
220  /* f2 */
221  f[2 * i + 3] = f[2 * i + 1] - ((lsp[i] - lsp[LPC_ORDER - 1 - i]) << 12);
222  }
223 
224  /* Divide f1[5] and f2[5] by 2 for use in polynomial evaluation */
225  f[LPC_ORDER] >>= 1;
226  f[LPC_ORDER + 1] >>= 1;
227 
228  /* Normalize and shorten */
229  max = FFABS(f[0]);
230  for (i = 1; i < LPC_ORDER + 2; i++)
231  max = FFMAX(max, FFABS(f[i]));
232 
233  shift = ff_g723_1_normalize_bits(max, 31);
234 
235  for (i = 0; i < LPC_ORDER + 2; i++)
236  f[i] = av_clipl_int32((int64_t) (f[i] << shift) + (1 << 15)) >> 16;
237 
238  /**
239  * Evaluate F1 and F2 at uniform intervals of pi/256 along the
240  * unit circle and check for zero crossings.
241  */
242  p = 0;
243  temp = 0;
244  for (i = 0; i <= LPC_ORDER / 2; i++)
245  temp += f[2 * i] * cos_tab[0];
246  prev_val = av_clipl_int32(temp << 1);
247  count = 0;
248  for (i = 1; i < COS_TBL_SIZE / 2; i++) {
249  /* Evaluate */
250  temp = 0;
251  for (j = 0; j <= LPC_ORDER / 2; j++)
252  temp += f[LPC_ORDER - 2 * j + p] * cos_tab[i * j % COS_TBL_SIZE];
253  cur_val = av_clipl_int32(temp << 1);
254 
255  /* Check for sign change, indicating a zero crossing */
256  if ((cur_val ^ prev_val) < 0) {
257  int abs_cur = FFABS(cur_val);
258  int abs_prev = FFABS(prev_val);
259  int sum = abs_cur + abs_prev;
260 
261  shift = ff_g723_1_normalize_bits(sum, 31);
262  sum <<= shift;
263  abs_prev = abs_prev << shift >> 8;
264  lsp[count++] = ((i - 1) << 7) + (abs_prev >> 1) / (sum >> 16);
265 
266  if (count == LPC_ORDER)
267  break;
268 
269  /* Switch between sum and difference polynomials */
270  p ^= 1;
271 
272  /* Evaluate */
273  temp = 0;
274  for (j = 0; j <= LPC_ORDER / 2; j++)
275  temp += f[LPC_ORDER - 2 * j + p] *
276  cos_tab[i * j % COS_TBL_SIZE];
277  cur_val = av_clipl_int32(temp << 1);
278  }
279  prev_val = cur_val;
280  }
281 
282  if (count != LPC_ORDER)
283  memcpy(lsp, prev_lsp, LPC_ORDER * sizeof(int16_t));
284 }
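For reference, lpc2lsp() is the usual line spectral pair computation: with p = LPC_ORDER, the sum and difference polynomials F1(z) = A(z) + z^-(p+1) * A(1/z) and F2(z) = A(z) - z^-(p+1) * A(1/z) have all their zeros on the unit circle, and the zeros of the two polynomials alternate. The loop above evaluates both, reduced to cosine polynomials in the f[] coefficients, at the grid points i*pi/256; each sign change brackets one LSP, and the division by (sum >> 16) linearly interpolates the crossing between the two neighbouring grid points. If fewer than LPC_ORDER crossings are found, the previous frame's LSPs are reused, as the final memcpy shows.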
285 
286 /**
287  * Quantize the current LSP subvector.
288  *
289  * @param num band number
290  * @param offset offset of the current subvector in an LPC_ORDER vector
291  * @param size size of the current subvector
292  */
293 #define get_index(num, offset, size) \
294 { \
295  int error, max = -1; \
296  int16_t temp[4]; \
297  int i, j; \
298  \
299  for (i = 0; i < LSP_CB_SIZE; i++) { \
300  for (j = 0; j < size; j++){ \
301  temp[j] = (weight[j + (offset)] * lsp_band##num[i][j] + \
302  (1 << 14)) >> 15; \
303  } \
304  error = ff_g723_1_dot_product(lsp + (offset), temp, size) << 1; \
305  error -= ff_g723_1_dot_product(lsp_band##num[i], temp, size); \
306  if (error > max) { \
307  max = error; \
308  lsp_index[num] = i; \
309  } \
310  } \
311 }
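The selection criterion in get_index() is a weighted nearest-neighbour search written without the constant term: minimising sum_j w_j * (t_j - c_j)^2 over the codebook is equivalent to maximising 2 * sum_j w_j * t_j * c_j - sum_j w_j * c_j^2, since sum_j w_j * t_j^2 does not depend on the codevector. With temp[j] holding (w_j * c_j) >> 15, the two dot products above compute exactly those two terms.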
312 
313 /**
314  * Vector quantize the LSP frequencies.
315  *
316  * @param lsp the current lsp vector
317  * @param prev_lsp the previous lsp vector
318  */
319 static void lsp_quantize(uint8_t *lsp_index, int16_t *lsp, int16_t *prev_lsp)
320 {
321  int16_t weight[LPC_ORDER];
322  int16_t min, max;
323  int shift, i;
324 
325  /* Calculate the VQ weighting vector */
326  weight[0] = (1 << 20) / (lsp[1] - lsp[0]);
327  weight[LPC_ORDER - 1] = (1 << 20) /
328  (lsp[LPC_ORDER - 1] - lsp[LPC_ORDER - 2]);
329 
330  for (i = 1; i < LPC_ORDER - 1; i++) {
331  min = FFMIN(lsp[i] - lsp[i - 1], lsp[i + 1] - lsp[i]);
332  if (min > 0x20)
333  weight[i] = (1 << 20) / min;
334  else
335  weight[i] = INT16_MAX;
336  }
337 
338  /* Normalize */
339  max = 0;
340  for (i = 0; i < LPC_ORDER; i++)
341  max = FFMAX(weight[i], max);
342 
343  shift = ff_g723_1_normalize_bits(max, 15);
344  for (i = 0; i < LPC_ORDER; i++) {
345  weight[i] <<= shift;
346  }
347 
348  /* Compute the VQ target vector */
349  for (i = 0; i < LPC_ORDER; i++) {
350  lsp[i] -= dc_lsp[i] +
351  (((prev_lsp[i] - dc_lsp[i]) * 12288 + (1 << 14)) >> 15);
352  }
353 
354  get_index(0, 0, 3);
355  get_index(1, 3, 3);
356  get_index(2, 6, 4);
357 }
358 
359 /**
360  * Perform IIR filtering.
361  *
362  * @param fir_coef FIR coefficients
363  * @param iir_coef IIR coefficients
364  * @param src source vector
365  * @param dest destination vector
366  */
367 static void iir_filter(int16_t *fir_coef, int16_t *iir_coef,
368  int16_t *src, int16_t *dest)
369 {
370  int m, n;
371 
372  for (m = 0; m < SUBFRAME_LEN; m++) {
373  int64_t filter = 0;
374  for (n = 1; n <= LPC_ORDER; n++) {
375  filter -= fir_coef[n - 1] * src[m - n] -
376  iir_coef[n - 1] * dest[m - n];
377  }
378 
379  dest[m] = av_clipl_int32((src[m] << 16) + (filter << 3) +
380  (1 << 15)) >> 16;
381  }
382 }
383 
384 /**
385  * Apply the formant perceptual weighting filter.
386  *
387  * @param flt_coef filter coefficients
388  * @param unq_lpc unquantized lpc vector
389  */
390 static void perceptual_filter(G723_1_ChannelContext *p, int16_t *flt_coef,
391  int16_t *unq_lpc, int16_t *buf)
392 {
393  int16_t vector[FRAME_LEN + LPC_ORDER];
394  int i, j, k, l = 0;
395 
396  memcpy(buf, p->iir_mem, sizeof(int16_t) * LPC_ORDER);
397  memcpy(vector, p->fir_mem, sizeof(int16_t) * LPC_ORDER);
398  memcpy(vector + LPC_ORDER, buf + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
399 
400  for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
401  for (k = 0; k < LPC_ORDER; k++) {
402  flt_coef[k + 2 * l] = (unq_lpc[k + l] * percept_flt_tbl[0][k] +
403  (1 << 14)) >> 15;
404  flt_coef[k + 2 * l + LPC_ORDER] = (unq_lpc[k + l] *
405  percept_flt_tbl[1][k] +
406  (1 << 14)) >> 15;
407  }
408  iir_filter(flt_coef + 2 * l, flt_coef + 2 * l + LPC_ORDER,
409  vector + i, buf + i);
410  l += LPC_ORDER;
411  }
412  memcpy(p->iir_mem, buf + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
413  memcpy(p->fir_mem, vector + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
414 }
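For context: per subframe, flt_coef holds the numerator and denominator coefficients of a formant weighting filter of the form W(z) = A(z/g1) / A(z/g2), obtained by scaling the unquantized LPC by the two geometric series in percept_flt_tbl (0.9^i and 0.5^i per the G.723.1 reference weighting filter); iir_filter() then applies that filter to the subframe.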
415 
416 /**
417  * Estimate the open loop pitch period.
418  *
419  * @param buf perceptually weighted speech
420  * @param start estimation is carried out from this position
421  */
422 static int estimate_pitch(int16_t *buf, int start)
423 {
424  int max_exp = 32;
425  int max_ccr = 0x4000;
426  int max_eng = 0x7fff;
427  int index = PITCH_MIN;
428  int offset = start - PITCH_MIN + 1;
429 
430  int ccr, eng, orig_eng, ccr_eng, exp;
431  int diff, temp;
432 
433  int i;
434 
435  orig_eng = ff_dot_product(buf + offset, buf + offset, HALF_FRAME_LEN);
436 
437  for (i = PITCH_MIN; i <= PITCH_MAX - 3; i++) {
438  offset--;
439 
440  /* Update energy and compute correlation */
441  orig_eng += buf[offset] * buf[offset] -
442  buf[offset + HALF_FRAME_LEN] * buf[offset + HALF_FRAME_LEN];
443  ccr = ff_dot_product(buf + offset, buf + start, HALF_FRAME_LEN);
444  if (ccr <= 0)
445  continue;
446 
447  /* Split into mantissa and exponent to maintain precision */
448  exp = ff_g723_1_normalize_bits(ccr, 31);
449  ccr = av_clipl_int32((int64_t) (ccr << exp) + (1 << 15)) >> 16;
450  exp <<= 1;
451  ccr *= ccr;
452  temp = ff_g723_1_normalize_bits(ccr, 31);
453  ccr = ccr << temp >> 16;
454  exp += temp;
455 
456  temp = ff_g723_1_normalize_bits(orig_eng, 31);
457  eng = av_clipl_int32((int64_t) (orig_eng << temp) + (1 << 15)) >> 16;
458  exp -= temp;
459 
460  if (ccr >= eng) {
461  exp--;
462  ccr >>= 1;
463  }
464  if (exp > max_exp)
465  continue;
466 
467  if (exp + 1 < max_exp)
468  goto update;
469 
470  /* Equalize exponents before comparison */
471  if (exp + 1 == max_exp)
472  temp = max_ccr >> 1;
473  else
474  temp = max_ccr;
475  ccr_eng = ccr * max_eng;
476  diff = ccr_eng - eng * temp;
477  if (diff > 0 && (i - index < PITCH_MIN || diff > ccr_eng >> 2)) {
478 update:
479  index = i;
480  max_exp = exp;
481  max_ccr = ccr;
482  max_eng = eng;
483  }
484  }
485  return index;
486 }
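Stripped of the mantissa/exponent bookkeeping and of the bias that makes the search keep a shorter lag unless a clearly better candidate appears, the loop above is a normalized cross-correlation maximisation. An illustrative floating-point sketch, not part of this file; PITCH_MIN, PITCH_MAX and HALF_FRAME_LEN come from g723_1.h:

static int estimate_pitch_ref(const float *buf, int start)
{
    int    best_lag   = PITCH_MIN;
    double best_score = 0.0;

    for (int lag = PITCH_MIN; lag <= PITCH_MAX - 3; lag++) {
        double ccr = 0.0, eng = 0.0;

        for (int n = 0; n < HALF_FRAME_LEN; n++) {
            ccr += buf[start + n] * buf[start - lag + n];
            eng += buf[start - lag + n] * buf[start - lag + n];
        }
        if (ccr <= 0.0 || eng <= 0.0)
            continue;
        if (ccr * ccr / eng > best_score) {   /* maximize ccr^2 / energy */
            best_score = ccr * ccr / eng;
            best_lag   = lag;
        }
    }
    return best_lag;
}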
487 
488 /**
489  * Compute harmonic noise filter parameters.
490  *
491  * @param buf perceptually weighted speech
492  * @param pitch_lag open loop pitch period
493  * @param hf harmonic filter parameters
494  */
495 static void comp_harmonic_coeff(int16_t *buf, int16_t pitch_lag, HFParam *hf)
496 {
497  int ccr, eng, max_ccr, max_eng;
498  int exp, max, diff;
499  int energy[15];
500  int i, j;
501 
502  for (i = 0, j = pitch_lag - 3; j <= pitch_lag + 3; i++, j++) {
503  /* Compute residual energy */
504  energy[i << 1] = ff_dot_product(buf - j, buf - j, SUBFRAME_LEN);
505  /* Compute correlation */
506  energy[(i << 1) + 1] = ff_dot_product(buf, buf - j, SUBFRAME_LEN);
507  }
508 
509  /* Compute target energy */
510  energy[14] = ff_dot_product(buf, buf, SUBFRAME_LEN);
511 
512  /* Normalize */
513  max = 0;
514  for (i = 0; i < 15; i++)
515  max = FFMAX(max, FFABS(energy[i]));
516 
518  for (i = 0; i < 15; i++) {
519  energy[i] = av_clipl_int32((int64_t)(energy[i] << exp) +
520  (1 << 15)) >> 16;
521  }
522 
523  hf->index = -1;
524  hf->gain = 0;
525  max_ccr = 1;
526  max_eng = 0x7fff;
527 
528  for (i = 0; i <= 6; i++) {
529  eng = energy[i << 1];
530  ccr = energy[(i << 1) + 1];
531 
532  if (ccr <= 0)
533  continue;
534 
535  ccr = (ccr * ccr + (1 << 14)) >> 15;
536  diff = ccr * max_eng - eng * max_ccr;
537  if (diff > 0) {
538  max_ccr = ccr;
539  max_eng = eng;
540  hf->index = i;
541  }
542  }
543 
544  if (hf->index == -1) {
545  hf->index = pitch_lag;
546  return;
547  }
548 
549  eng = energy[14] * max_eng;
550  eng = (eng >> 2) + (eng >> 3);
551  ccr = energy[(hf->index << 1) + 1] * energy[(hf->index << 1) + 1];
552  if (eng < ccr) {
553  eng = energy[(hf->index << 1) + 1];
554 
555  if (eng >= max_eng)
556  hf->gain = 0x2800;
557  else
558  hf->gain = ((eng << 15) / max_eng * 0x2800 + (1 << 14)) >> 15;
559  }
560  hf->index += pitch_lag - 3;
561 }
562 
563 /**
564  * Apply the harmonic noise shaping filter.
565  *
566  * @param hf filter parameters
567  */
568 static void harmonic_filter(HFParam *hf, const int16_t *src, int16_t *dest)
569 {
570  int i;
571 
572  for (i = 0; i < SUBFRAME_LEN; i++) {
573  int64_t temp = hf->gain * src[i - hf->index] << 1;
574  dest[i] = av_clipl_int32((src[i] << 16) - temp + (1 << 15)) >> 16;
575  }
576 }
577 
578 static void harmonic_noise_sub(HFParam *hf, const int16_t *src, int16_t *dest)
579 {
580  int i;
581  for (i = 0; i < SUBFRAME_LEN; i++) {
582  int64_t temp = hf->gain * src[i - hf->index] << 1;
583  dest[i] = av_clipl_int32(((dest[i] - src[i]) << 16) + temp +
584  (1 << 15)) >> 16;
585  }
586 }
587 
588 /**
589  * Combined synthesis and formant perceptual weighting filter.
590  *
591  * @param qnt_lpc quantized lpc coefficients
592  * @param perf_lpc perceptual filter coefficients
593  * @param perf_fir perceptual filter fir memory
594  * @param perf_iir perceptual filter iir memory
595  * @param scale the filter output will be scaled by 2^scale
596  */
597 static void synth_percept_filter(int16_t *qnt_lpc, int16_t *perf_lpc,
598  int16_t *perf_fir, int16_t *perf_iir,
599  const int16_t *src, int16_t *dest, int scale)
600 {
601  int i, j;
602  int16_t buf_16[SUBFRAME_LEN + LPC_ORDER];
603  int64_t buf[SUBFRAME_LEN];
604 
605  int16_t *bptr_16 = buf_16 + LPC_ORDER;
606 
607  memcpy(buf_16, perf_fir, sizeof(int16_t) * LPC_ORDER);
608  memcpy(dest - LPC_ORDER, perf_iir, sizeof(int16_t) * LPC_ORDER);
609 
610  for (i = 0; i < SUBFRAME_LEN; i++) {
611  int64_t temp = 0;
612  for (j = 1; j <= LPC_ORDER; j++)
613  temp -= qnt_lpc[j - 1] * bptr_16[i - j];
614 
615  buf[i] = (src[i] << 15) + (temp << 3);
616  bptr_16[i] = av_clipl_int32(buf[i] + (1 << 15)) >> 16;
617  }
618 
619  for (i = 0; i < SUBFRAME_LEN; i++) {
620  int64_t fir = 0, iir = 0;
621  for (j = 1; j <= LPC_ORDER; j++) {
622  fir -= perf_lpc[j - 1] * bptr_16[i - j];
623  iir += perf_lpc[j + LPC_ORDER - 1] * dest[i - j];
624  }
625  dest[i] = av_clipl_int32(((buf[i] + (fir << 3)) << scale) + (iir << 3) +
626  (1 << 15)) >> 16;
627  }
628  memcpy(perf_fir, buf_16 + SUBFRAME_LEN, sizeof(int16_t) * LPC_ORDER);
629  memcpy(perf_iir, dest + SUBFRAME_LEN - LPC_ORDER,
630  sizeof(int16_t) * LPC_ORDER);
631 }
632 
633 /**
634  * Compute the adaptive codebook contribution.
635  *
636  * @param buf input signal
637  * @param index the current subframe index
638  */
639 static void acb_search(G723_1_ChannelContext *p, int16_t *residual,
640  int16_t *impulse_resp, const int16_t *buf,
641  int index)
642 {
643  int16_t flt_buf[PITCH_ORDER][SUBFRAME_LEN];
644 
645  const int16_t *cb_tbl = adaptive_cb_gain85;
646 
647  int ccr_buf[PITCH_ORDER * SUBFRAMES << 2];
648 
649  int pitch_lag = p->pitch_lag[index >> 1];
650  int acb_lag = 1;
651  int acb_gain = 0;
652  int odd_frame = index & 1;
653  int iter = 3 + odd_frame;
654  int count = 0;
655  int tbl_size = 85;
656 
657  int i, j, k, l, max;
658  int64_t temp;
659 
660  if (!odd_frame) {
661  if (pitch_lag == PITCH_MIN)
662  pitch_lag++;
663  else
664  pitch_lag = FFMIN(pitch_lag, PITCH_MAX - 5);
665  }
666 
667  for (i = 0; i < iter; i++) {
668  ff_g723_1_get_residual(residual, p->prev_excitation, pitch_lag + i - 1);
669 
670  for (j = 0; j < SUBFRAME_LEN; j++) {
671  temp = 0;
672  for (k = 0; k <= j; k++)
673  temp += residual[PITCH_ORDER - 1 + k] * impulse_resp[j - k];
674  flt_buf[PITCH_ORDER - 1][j] = av_clipl_int32((temp << 1) +
675  (1 << 15)) >> 16;
676  }
677 
678  for (j = PITCH_ORDER - 2; j >= 0; j--) {
679  flt_buf[j][0] = ((residual[j] << 13) + (1 << 14)) >> 15;
680  for (k = 1; k < SUBFRAME_LEN; k++) {
681  temp = (flt_buf[j + 1][k - 1] << 15) +
682  residual[j] * impulse_resp[k];
683  flt_buf[j][k] = av_clipl_int32((temp << 1) + (1 << 15)) >> 16;
684  }
685  }
686 
687  /* Compute crosscorrelation with the signal */
688  for (j = 0; j < PITCH_ORDER; j++) {
689  temp = ff_dot_product(buf, flt_buf[j], SUBFRAME_LEN);
690  ccr_buf[count++] = av_clipl_int32(temp << 1);
691  }
692 
693  /* Compute energies */
694  for (j = 0; j < PITCH_ORDER; j++) {
695  ccr_buf[count++] = ff_g723_1_dot_product(flt_buf[j], flt_buf[j],
696  SUBFRAME_LEN);
697  }
698 
699  for (j = 1; j < PITCH_ORDER; j++) {
700  for (k = 0; k < j; k++) {
701  temp = ff_dot_product(flt_buf[j], flt_buf[k], SUBFRAME_LEN);
702  ccr_buf[count++] = av_clipl_int32(temp << 2);
703  }
704  }
705  }
706 
707  /* Normalize and shorten */
708  max = 0;
709  for (i = 0; i < 20 * iter; i++)
710  max = FFMAX(max, FFABS(ccr_buf[i]));
711 
713 
714  for (i = 0; i < 20 * iter; i++)
715  ccr_buf[i] = av_clipl_int32((int64_t) (ccr_buf[i] << temp) +
716  (1 << 15)) >> 16;
717 
718  max = 0;
719  for (i = 0; i < iter; i++) {
720  /* Select quantization table */
721  if (!odd_frame && pitch_lag + i - 1 >= SUBFRAME_LEN - 2 ||
722  odd_frame && pitch_lag >= SUBFRAME_LEN - 2) {
723  cb_tbl = adaptive_cb_gain170;
724  tbl_size = 170;
725  }
726 
727  for (j = 0, k = 0; j < tbl_size; j++, k += 20) {
728  temp = 0;
729  for (l = 0; l < 20; l++)
730  temp += ccr_buf[20 * i + l] * cb_tbl[k + l];
731  temp = av_clipl_int32(temp);
732 
733  if (temp > max) {
734  max = temp;
735  acb_gain = j;
736  acb_lag = i;
737  }
738  }
739  }
740 
741  if (!odd_frame) {
742  pitch_lag += acb_lag - 1;
743  acb_lag = 1;
744  }
745 
746  p->pitch_lag[index >> 1] = pitch_lag;
747  p->subframe[index].ad_cb_lag = acb_lag;
748  p->subframe[index].ad_cb_gain = acb_gain;
749 }
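In the loop above ccr_buf collects, for each candidate lag, 20 values: the PITCH_ORDER cross-correlations of the filtered delayed excitation with the target, the PITCH_ORDER energies, and the pairwise cross-terms. Each 20-element row of adaptive_cb_gain85 or adaptive_cb_gain170 then scores one quantized 5-tap gain vector as a linear combination of those values, and the lag/gain pair with the best score is kept.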
750 
751 /**
752  * Subtract the adaptive codebook contribution from the input
753  * to obtain the residual.
754  *
755  * @param buf target vector
756  */
757 static void sub_acb_contrib(const int16_t *residual, const int16_t *impulse_resp,
758  int16_t *buf)
759 {
760  int i, j;
761  /* Subtract adaptive CB contribution to obtain the residual */
762  for (i = 0; i < SUBFRAME_LEN; i++) {
763  int64_t temp = buf[i] << 14;
764  for (j = 0; j <= i; j++)
765  temp -= residual[j] * impulse_resp[i - j];
766 
767  buf[i] = av_clipl_int32((temp << 2) + (1 << 15)) >> 16;
768  }
769 }
770 
771 /**
772  * Quantize the residual signal using the fixed codebook (MP-MLQ).
773  *
774  * @param optim optimized fixed codebook parameters
775  * @param buf excitation vector
776  */
777 static void get_fcb_param(FCBParam *optim, int16_t *impulse_resp,
778  int16_t *buf, int pulse_cnt, int pitch_lag)
779 {
780  FCBParam param;
781  int16_t impulse_r[SUBFRAME_LEN];
782  int16_t temp_corr[SUBFRAME_LEN];
783  int16_t impulse_corr[SUBFRAME_LEN];
784 
785  int ccr1[SUBFRAME_LEN];
786  int ccr2[SUBFRAME_LEN];
787  int amp, err, max, max_amp_index, min, scale, i, j, k, l;
788 
789  int64_t temp;
790 
791  /* Update impulse response */
792  memcpy(impulse_r, impulse_resp, sizeof(int16_t) * SUBFRAME_LEN);
793  param.dirac_train = 0;
794  if (pitch_lag < SUBFRAME_LEN - 2) {
795  param.dirac_train = 1;
796  ff_g723_1_gen_dirac_train(impulse_r, pitch_lag);
797  }
798 
799  for (i = 0; i < SUBFRAME_LEN; i++)
800  temp_corr[i] = impulse_r[i] >> 1;
801 
802  /* Compute impulse response autocorrelation */
803  temp = ff_g723_1_dot_product(temp_corr, temp_corr, SUBFRAME_LEN);
804 
805  scale = ff_g723_1_normalize_bits(temp, 31);
806  impulse_corr[0] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;
807 
808  for (i = 1; i < SUBFRAME_LEN; i++) {
809  temp = ff_g723_1_dot_product(temp_corr + i, temp_corr,
810  SUBFRAME_LEN - i);
811  impulse_corr[i] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;
812  }
813 
814  /* Compute crosscorrelation of impulse response with residual signal */
815  scale -= 4;
816  for (i = 0; i < SUBFRAME_LEN; i++) {
817  temp = ff_g723_1_dot_product(buf + i, impulse_r, SUBFRAME_LEN - i);
818  if (scale < 0)
819  ccr1[i] = temp >> -scale;
820  else
821  ccr1[i] = av_clipl_int32(temp << scale);
822  }
823 
824  /* Search loop */
825  for (i = 0; i < GRID_SIZE; i++) {
826  /* Maximize the crosscorrelation */
827  max = 0;
828  for (j = i; j < SUBFRAME_LEN; j += GRID_SIZE) {
829  temp = FFABS(ccr1[j]);
830  if (temp >= max) {
831  max = temp;
832  param.pulse_pos[0] = j;
833  }
834  }
835 
836  /* Quantize the gain (max crosscorrelation/impulse_corr[0]) */
837  amp = max;
838  min = 1 << 30;
839  max_amp_index = GAIN_LEVELS - 2;
840  for (j = max_amp_index; j >= 2; j--) {
841  temp = av_clipl_int32((int64_t) fixed_cb_gain[j] *
842  impulse_corr[0] << 1);
843  temp = FFABS(temp - amp);
844  if (temp < min) {
845  min = temp;
846  max_amp_index = j;
847  }
848  }
849 
850  max_amp_index--;
851  /* Select additional gain values */
852  for (j = 1; j < 5; j++) {
853  for (k = i; k < SUBFRAME_LEN; k += GRID_SIZE) {
854  temp_corr[k] = 0;
855  ccr2[k] = ccr1[k];
856  }
857  param.amp_index = max_amp_index + j - 2;
858  amp = fixed_cb_gain[param.amp_index];
859 
860  param.pulse_sign[0] = (ccr2[param.pulse_pos[0]] < 0) ? -amp : amp;
861  temp_corr[param.pulse_pos[0]] = 1;
862 
863  for (k = 1; k < pulse_cnt; k++) {
864  max = INT_MIN;
865  for (l = i; l < SUBFRAME_LEN; l += GRID_SIZE) {
866  if (temp_corr[l])
867  continue;
868  temp = impulse_corr[FFABS(l - param.pulse_pos[k - 1])];
869  temp = av_clipl_int32((int64_t) temp *
870  param.pulse_sign[k - 1] << 1);
871  ccr2[l] -= temp;
872  temp = FFABS(ccr2[l]);
873  if (temp > max) {
874  max = temp;
875  param.pulse_pos[k] = l;
876  }
877  }
878 
879  param.pulse_sign[k] = (ccr2[param.pulse_pos[k]] < 0) ?
880  -amp : amp;
881  temp_corr[param.pulse_pos[k]] = 1;
882  }
883 
884  /* Create the error vector */
885  memset(temp_corr, 0, sizeof(int16_t) * SUBFRAME_LEN);
886 
887  for (k = 0; k < pulse_cnt; k++)
888  temp_corr[param.pulse_pos[k]] = param.pulse_sign[k];
889 
890  for (k = SUBFRAME_LEN - 1; k >= 0; k--) {
891  temp = 0;
892  for (l = 0; l <= k; l++) {
893  int prod = av_clipl_int32((int64_t) temp_corr[l] *
894  impulse_r[k - l] << 1);
895  temp = av_clipl_int32(temp + prod);
896  }
897  temp_corr[k] = temp << 2 >> 16;
898  }
899 
900  /* Compute square of error */
901  err = 0;
902  for (k = 0; k < SUBFRAME_LEN; k++) {
903  int64_t prod;
904  prod = av_clipl_int32((int64_t) buf[k] * temp_corr[k] << 1);
905  err = av_clipl_int32(err - prod);
906  prod = av_clipl_int32((int64_t) temp_corr[k] * temp_corr[k]);
907  err = av_clipl_int32(err + prod);
908  }
909 
910  /* Minimize */
911  if (err < optim->min_err) {
912  optim->min_err = err;
913  optim->grid_index = i;
914  optim->amp_index = param.amp_index;
915  optim->dirac_train = param.dirac_train;
916 
917  for (k = 0; k < pulse_cnt; k++) {
918  optim->pulse_sign[k] = param.pulse_sign[k];
919  optim->pulse_pos[k] = param.pulse_pos[k];
920  }
921  }
922  }
923  }
924 }
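For reference, the search above works per pulse grid (start positions i, i + GRID_SIZE, ...): it places the first pulse at the largest cross-correlation magnitude, quantizes its amplitude against fixed_cb_gain, then for a few neighbouring gain candidates places the remaining pulses greedily on the updated correlation ccr2, reconstructs the contribution through the impulse response, and keeps the grid/gain/position/sign combination with the smallest squared error in optim.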
925 
926 /**
927  * Encode the pulse position and gain of the current subframe.
928  *
929  * @param optim optimized fixed CB parameters
930  * @param buf excitation vector
931  */
932 static void pack_fcb_param(G723_1_Subframe *subfrm, FCBParam *optim,
933  int16_t *buf, int pulse_cnt)
934 {
935  int i, j;
936 
937  j = PULSE_MAX - pulse_cnt;
938 
939  subfrm->pulse_sign = 0;
940  subfrm->pulse_pos = 0;
941 
942  for (i = 0; i < SUBFRAME_LEN >> 1; i++) {
943  int val = buf[optim->grid_index + (i << 1)];
944  if (!val) {
945  subfrm->pulse_pos += combinatorial_table[j][i];
946  } else {
947  subfrm->pulse_sign <<= 1;
948  if (val < 0)
949  subfrm->pulse_sign++;
950  j++;
951 
952  if (j == PULSE_MAX)
953  break;
954  }
955  }
956  subfrm->amp_index = optim->amp_index;
957  subfrm->grid_index = optim->grid_index;
958  subfrm->dirac_train = optim->dirac_train;
959 }
960 
961 /**
962  * Compute the fixed codebook excitation.
963  *
964  * @param buf target vector
965  * @param impulse_resp impulse response of the combined filter
966  */
967 static void fcb_search(G723_1_ChannelContext *p, int16_t *impulse_resp,
968  int16_t *buf, int index)
969 {
970  FCBParam optim;
971  int pulse_cnt = pulses[index];
972  int i;
973 
974  optim.min_err = 1 << 30;
975  get_fcb_param(&optim, impulse_resp, buf, pulse_cnt, SUBFRAME_LEN);
976 
977  if (p->pitch_lag[index >> 1] < SUBFRAME_LEN - 2) {
978  get_fcb_param(&optim, impulse_resp, buf, pulse_cnt,
979  p->pitch_lag[index >> 1]);
980  }
981 
982  /* Reconstruct the excitation */
983  memset(buf, 0, sizeof(int16_t) * SUBFRAME_LEN);
984  for (i = 0; i < pulse_cnt; i++)
985  buf[optim.pulse_pos[i]] = optim.pulse_sign[i];
986 
987  pack_fcb_param(&p->subframe[index], &optim, buf, pulse_cnt);
988 
989  if (optim.dirac_train)
990  ff_g723_1_gen_dirac_train(buf, p->pitch_lag[index >> 1]);
991 }
992 
993 /**
994  * Pack the frame parameters into output bitstream.
995  *
996  * @param p     channel context of the stream being encoded
997  * @param avpkt output packet to be filled
998  */
999 static int pack_bitstream(G723_1_ChannelContext *p, AVPacket *avpkt)
1000 {
1001  PutBitContext pb;
1002  int info_bits = 0;
1003  int i, temp;
1004 
1005  init_put_bits(&pb, avpkt->data, avpkt->size);
1006 
1007  put_bits(&pb, 2, info_bits);
1008 
1009  put_bits(&pb, 8, p->lsp_index[2]);
1010  put_bits(&pb, 8, p->lsp_index[1]);
1011  put_bits(&pb, 8, p->lsp_index[0]);
1012 
1013  put_bits(&pb, 7, p->pitch_lag[0] - PITCH_MIN);
1014  put_bits(&pb, 2, p->subframe[1].ad_cb_lag);
1015  put_bits(&pb, 7, p->pitch_lag[1] - PITCH_MIN);
1016  put_bits(&pb, 2, p->subframe[3].ad_cb_lag);
1017 
1018  /* Write 12 bit combined gain */
1019  for (i = 0; i < SUBFRAMES; i++) {
1020  temp = p->subframe[i].ad_cb_gain * GAIN_LEVELS +
1021  p->subframe[i].amp_index;
1022  if (p->cur_rate == RATE_6300)
1023  temp += p->subframe[i].dirac_train << 11;
1024  put_bits(&pb, 12, temp);
1025  }
1026 
1027  put_bits(&pb, 1, p->subframe[0].grid_index);
1028  put_bits(&pb, 1, p->subframe[1].grid_index);
1029  put_bits(&pb, 1, p->subframe[2].grid_index);
1030  put_bits(&pb, 1, p->subframe[3].grid_index);
1031 
1032  if (p->cur_rate == RATE_6300) {
1033  skip_put_bits(&pb, 1); /* reserved bit */
1034 
1035  /* Write 13 bit combined position index */
1036  temp = (p->subframe[0].pulse_pos >> 16) * 810 +
1037  (p->subframe[1].pulse_pos >> 14) * 90 +
1038  (p->subframe[2].pulse_pos >> 16) * 9 +
1039  (p->subframe[3].pulse_pos >> 14);
1040  put_bits(&pb, 13, temp);
1041 
1042  put_bits(&pb, 16, p->subframe[0].pulse_pos & 0xffff);
1043  put_bits(&pb, 14, p->subframe[1].pulse_pos & 0x3fff);
1044  put_bits(&pb, 16, p->subframe[2].pulse_pos & 0xffff);
1045  put_bits(&pb, 14, p->subframe[3].pulse_pos & 0x3fff);
1046 
1047  put_bits(&pb, 6, p->subframe[0].pulse_sign);
1048  put_bits(&pb, 5, p->subframe[1].pulse_sign);
1049  put_bits(&pb, 6, p->subframe[2].pulse_sign);
1050  put_bits(&pb, 5, p->subframe[3].pulse_sign);
1051  }
1052 
1053  flush_put_bits(&pb);
1054  return frame_size[info_bits];
1055 }
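A quick check of the bit budget written above for the 6300 b/s mode: 2 info bits + 3*8 LSP bits + (7+2+7+2) pitch bits + 4*12 gain bits + 4 grid bits + 1 reserved bit + 13 combined-position bits + (16+14+16+14) position bits + (6+5+6+5) sign bits = 192 bits, i.e. exactly the 24 bytes that frame_size[0] reports for every 240-sample (30 ms) frame.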
1056 
1057 static int g723_1_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
1058  const AVFrame *frame, int *got_packet_ptr)
1059 {
1060  G723_1_Context *s = avctx->priv_data;
1061  G723_1_ChannelContext *p = &s->ch[0];
1062  int16_t unq_lpc[LPC_ORDER * SUBFRAMES];
1063  int16_t qnt_lpc[LPC_ORDER * SUBFRAMES];
1064  int16_t cur_lsp[LPC_ORDER];
1065  int16_t weighted_lpc[LPC_ORDER * SUBFRAMES << 1];
1066  int16_t vector[FRAME_LEN + PITCH_MAX];
1067  int offset, ret, i, j;
1068  int16_t *in, *start;
1069  HFParam hf[4];
1070 
1071  /* duplicate input */
1072  start = in = av_malloc(frame->nb_samples * sizeof(int16_t));
1073  if (!in)
1074  return AVERROR(ENOMEM);
1075  memcpy(in, frame->data[0], frame->nb_samples * sizeof(int16_t));
1076 
1077  highpass_filter(in, &p->hpf_fir_mem, &p->hpf_iir_mem);
1078 
1079  memcpy(vector, p->prev_data, HALF_FRAME_LEN * sizeof(int16_t));
1080  memcpy(vector + HALF_FRAME_LEN, in, FRAME_LEN * sizeof(int16_t));
1081 
1082  comp_lpc_coeff(vector, unq_lpc);
1083  lpc2lsp(&unq_lpc[LPC_ORDER * 3], p->prev_lsp, cur_lsp);
1084  lsp_quantize(p->lsp_index, cur_lsp, p->prev_lsp);
1085 
1086  /* Update memory */
1087  memcpy(vector + LPC_ORDER, p->prev_data + SUBFRAME_LEN,
1088  sizeof(int16_t) * SUBFRAME_LEN);
1089  memcpy(vector + LPC_ORDER + SUBFRAME_LEN, in,
1090  sizeof(int16_t) * (HALF_FRAME_LEN + SUBFRAME_LEN));
1091  memcpy(p->prev_data, in + HALF_FRAME_LEN,
1092  sizeof(int16_t) * HALF_FRAME_LEN);
1093  memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
1094 
1095  perceptual_filter(p, weighted_lpc, unq_lpc, vector);
1096 
1097  memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
1098  memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
1099  memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);
1100 
1101  ff_g723_1_scale_vector(vector, vector, FRAME_LEN + PITCH_MAX);
1102 
1103  p->pitch_lag[0] = estimate_pitch(vector, PITCH_MAX);
1104  p->pitch_lag[1] = estimate_pitch(vector, PITCH_MAX + HALF_FRAME_LEN);
1105 
1106  for (i = PITCH_MAX, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
1107  comp_harmonic_coeff(vector + i, p->pitch_lag[j >> 1], hf + j);
1108 
1109  memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
1110  memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);
1111  memcpy(p->prev_weight_sig, vector + FRAME_LEN, sizeof(int16_t) * PITCH_MAX);
1112 
1113  for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
1114  harmonic_filter(hf + j, vector + PITCH_MAX + i, in + i);
1115 
1116  ff_g723_1_inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, 0);
1117  ff_g723_1_lsp_interpolate(qnt_lpc, cur_lsp, p->prev_lsp);
1118 
1119  memcpy(p->prev_lsp, cur_lsp, sizeof(int16_t) * LPC_ORDER);
1120 
1121  offset = 0;
1122  for (i = 0; i < SUBFRAMES; i++) {
1123  int16_t impulse_resp[SUBFRAME_LEN];
1124  int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
1125  int16_t flt_in[SUBFRAME_LEN];
1126  int16_t zero[LPC_ORDER], fir[LPC_ORDER], iir[LPC_ORDER];
1127 
1128  /**
1129  * Compute the combined impulse response of the synthesis filter,
1130  * formant perceptual weighting filter and harmonic noise shaping filter
1131  */
1132  memset(zero, 0, sizeof(int16_t) * LPC_ORDER);
1133  memset(vector, 0, sizeof(int16_t) * PITCH_MAX);
1134  memset(flt_in, 0, sizeof(int16_t) * SUBFRAME_LEN);
1135 
1136  flt_in[0] = 1 << 13; /* Unit impulse */
1137  synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
1138  zero, zero, flt_in, vector + PITCH_MAX, 1);
1139  harmonic_filter(hf + i, vector + PITCH_MAX, impulse_resp);
1140 
1141  /* Compute the combined zero input response */
1142  flt_in[0] = 0;
1143  memcpy(fir, p->perf_fir_mem, sizeof(int16_t) * LPC_ORDER);
1144  memcpy(iir, p->perf_iir_mem, sizeof(int16_t) * LPC_ORDER);
1145 
1146  synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
1147  fir, iir, flt_in, vector + PITCH_MAX, 0);
1148  memcpy(vector, p->harmonic_mem, sizeof(int16_t) * PITCH_MAX);
1149  harmonic_noise_sub(hf + i, vector + PITCH_MAX, in);
1150 
1151  acb_search(p, residual, impulse_resp, in, i);
1152  ff_g723_1_gen_acb_excitation(residual, p->prev_excitation,
1153  p->pitch_lag[i >> 1], &p->subframe[i],
1154  p->cur_rate);
1155  sub_acb_contrib(residual, impulse_resp, in);
1156 
1157  fcb_search(p, impulse_resp, in, i);
1158 
1159  /* Reconstruct the excitation */
1160  ff_g723_1_gen_acb_excitation(impulse_resp, p->prev_excitation,
1161  p->pitch_lag[i >> 1], &p->subframe[i],
1162  RATE_6300);
1163 
1164  memmove(p->prev_excitation, p->prev_excitation + SUBFRAME_LEN,
1165  sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
1166  for (j = 0; j < SUBFRAME_LEN; j++)
1167  in[j] = av_clip_int16((in[j] << 1) + impulse_resp[j]);
1168  memcpy(p->prev_excitation + PITCH_MAX - SUBFRAME_LEN, in,
1169  sizeof(int16_t) * SUBFRAME_LEN);
1170 
1171  /* Update filter memories */
1172  synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
1173  p->perf_fir_mem, p->perf_iir_mem,
1174  in, vector + PITCH_MAX, 0);
1175  memmove(p->harmonic_mem, p->harmonic_mem + SUBFRAME_LEN,
1176  sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
1177  memcpy(p->harmonic_mem + PITCH_MAX - SUBFRAME_LEN, vector + PITCH_MAX,
1178  sizeof(int16_t) * SUBFRAME_LEN);
1179 
1180  in += SUBFRAME_LEN;
1181  offset += LPC_ORDER;
1182  }
1183 
1184  av_free(start);
1185 
1186  if ((ret = ff_alloc_packet2(avctx, avpkt, 24, 0)) < 0)
1187  return ret;
1188 
1189  *got_packet_ptr = 1;
1190  avpkt->size = pack_bitstream(p, avpkt);
1191  return 0;
1192 }
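A minimal sketch of driving this function through the public API, assuming a context opened as in the earlier snippet; the helper name is illustrative and error paths are shortened:

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>

static int encode_one_frame(AVCodecContext *ctx, const int16_t *samples,
                            AVPacket *pkt)
{
    AVFrame *frm = av_frame_alloc();
    int ret;

    if (!frm)
        return AVERROR(ENOMEM);
    frm->nb_samples     = ctx->frame_size;        /* 240 samples = 30 ms */
    frm->format         = AV_SAMPLE_FMT_S16;
    frm->channel_layout = AV_CH_LAYOUT_MONO;
    if ((ret = av_frame_get_buffer(frm, 0)) < 0)
        goto end;
    memcpy(frm->data[0], samples, frm->nb_samples * sizeof(int16_t));

    if ((ret = avcodec_send_frame(ctx, frm)) < 0)
        goto end;
    ret = avcodec_receive_packet(ctx, pkt);        /* 24 bytes on success */
end:
    av_frame_free(&frm);
    return ret;
}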
1193 
1194 static const AVCodecDefault defaults[] = {
1195  { "b", "6300" },
1196  { NULL },
1197 };
1198 
1198 
1199 AVCodec ff_g723_1_encoder = {
1200  .name = "g723_1",
1201  .long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
1202  .type = AVMEDIA_TYPE_AUDIO,
1203  .id = AV_CODEC_ID_G723_1,
1204  .priv_data_size = sizeof(G723_1_Context),
1205  .init = g723_1_encode_init,
1206  .encode2 = g723_1_encode_frame,
1207  .defaults = defaults,
1208  .sample_fmts = (const enum AVSampleFormat[]) {
1209  AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
1210  },
1211 };