FFmpeg
g723_1enc.c
1 /*
2  * G.723.1 compatible encoder
3  * Copyright (c) Mohamed Naufal <naufal22@gmail.com>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * G.723.1 compatible encoder
25  */
26 
27 #include <stdint.h>
28 #include <string.h>
29 
30 #include "libavutil/channel_layout.h"
31 #include "libavutil/common.h"
32 #include "libavutil/mem.h"
33 #include "libavutil/opt.h"
34 
35 #include "avcodec.h"
36 #include "celp_math.h"
37 #include "g723_1.h"
38 #include "internal.h"
39 
40 #define BITSTREAM_WRITER_LE
41 #include "put_bits.h"
42 
43 /**
44  * Hamming window coefficients scaled by 2^15
45  */
46 static const int16_t hamming_window[LPC_FRAME] = {
47  2621, 2631, 2659, 2705, 2770, 2853, 2955, 3074, 3212, 3367,
48  3541, 3731, 3939, 4164, 4405, 4663, 4937, 5226, 5531, 5851,
49  6186, 6534, 6897, 7273, 7661, 8062, 8475, 8899, 9334, 9780,
50  10235, 10699, 11172, 11653, 12141, 12636, 13138, 13645, 14157, 14673,
51  15193, 15716, 16242, 16769, 17298, 17827, 18356, 18884, 19411, 19935,
52  20457, 20975, 21489, 21999, 22503, 23002, 23494, 23978, 24455, 24924,
53  25384, 25834, 26274, 26704, 27122, 27529, 27924, 28306, 28675, 29031,
54  29373, 29700, 30012, 30310, 30592, 30857, 31107, 31340, 31557, 31756,
55  31938, 32102, 32249, 32377, 32488, 32580, 32654, 32710, 32747, 32766,
56  32766, 32747, 32710, 32654, 32580, 32488, 32377, 32249, 32102, 31938,
57  31756, 31557, 31340, 31107, 30857, 30592, 30310, 30012, 29700, 29373,
58  29031, 28675, 28306, 27924, 27529, 27122, 26704, 26274, 25834, 25384,
59  24924, 24455, 23978, 23494, 23002, 22503, 21999, 21489, 20975, 20457,
60  19935, 19411, 18884, 18356, 17827, 17298, 16769, 16242, 15716, 15193,
61  14673, 14157, 13645, 13138, 12636, 12141, 11653, 11172, 10699, 10235,
62  9780, 9334, 8899, 8475, 8062, 7661, 7273, 6897, 6534, 6186,
63  5851, 5531, 5226, 4937, 4663, 4405, 4164, 3939, 3731, 3541,
64  3367, 3212, 3074, 2955, 2853, 2770, 2705, 2659, 2631, 2621
65 };
66 
67 /**
68  * Binomial window coefficients scaled by 2^15
69  */
70 static const int16_t binomial_window[LPC_ORDER] = {
71  32749, 32695, 32604, 32477, 32315, 32118, 31887, 31622, 31324, 30995
72 };
73 
74 /**
75  * 0.994^i scaled by 2^15
76  */
77 static const int16_t bandwidth_expand[LPC_ORDER] = {
78  32571, 32376, 32182, 31989, 31797, 31606, 31416, 31228, 31040, 30854
79 };
80 
81 /**
82  * 0.9^(i+1) (zero part) and 0.5^(i+1) (pole part), scaled by 2^15
83  */
84 static const int16_t percept_flt_tbl[2][LPC_ORDER] = {
85  /* Zero part */
86  {29491, 26542, 23888, 21499, 19349, 17414, 15673, 14106, 12695, 11425},
87  /* Pole part */
88  {16384, 8192, 4096, 2048, 1024, 512, 256, 128, 64, 32}
89 };
90 
91 static av_cold int g723_1_encode_init(AVCodecContext *avctx)
92 {
93  G723_1_Context *s = avctx->priv_data;
94  G723_1_ChannelContext *p = &s->ch[0];
95 
96  if (avctx->sample_rate != 8000) {
97  av_log(avctx, AV_LOG_ERROR, "Only 8000Hz sample rate supported\n");
98  return AVERROR(EINVAL);
99  }
100 
101  if (avctx->channels != 1) {
102  av_log(avctx, AV_LOG_ERROR, "Only mono supported\n");
103  return AVERROR(EINVAL);
104  }
105 
106  if (avctx->bit_rate == 6300) {
107  p->cur_rate = RATE_6300;
108  } else if (avctx->bit_rate == 5300) {
109  av_log(avctx, AV_LOG_ERROR, "Use bitrate 6300 instead of 5300.\n");
110  avpriv_report_missing_feature(avctx, "Bitrate 5300");
111  return AVERROR_PATCHWELCOME;
112  } else {
113  av_log(avctx, AV_LOG_ERROR, "Bitrate not supported, use 6300\n");
114  return AVERROR(EINVAL);
115  }
116  avctx->frame_size = 240;
117  memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(int16_t));
118 
119  return 0;
120 }
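/*
 * A minimal usage sketch (not part of this file): how a caller could open
 * this encoder through the public libavcodec API. The helper name is
 * illustrative; the settings mirror the checks in g723_1_encode_init()
 * above (8000 Hz, mono, signed 16-bit samples, 6300 bit/s).
 */
static AVCodecContext *open_g723_1_encoder_sketch(void)
{
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_G723_1);
    AVCodecContext *ctx;

    if (!codec)
        return NULL;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;
    ctx->sample_rate = 8000;              /* only rate accepted above   */
    ctx->channels    = 1;                 /* mono only                  */
    ctx->sample_fmt  = AV_SAMPLE_FMT_S16; /* matches .sample_fmts below */
    ctx->bit_rate    = 6300;              /* 5300 is rejected above     */
    if (avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;                           /* ctx->frame_size is now 240 */
}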
121 
122 /**
123  * Remove DC component from the input signal.
124  *
125  * @param buf input signal
126  * @param fir zero memory
127  * @param iir pole memory
128  */
129 static void highpass_filter(int16_t *buf, int16_t *fir, int *iir)
130 {
131  int i;
132  for (i = 0; i < FRAME_LEN; i++) {
133  *iir = (buf[i] << 15) + ((-*fir) << 15) + MULL2(*iir, 0x7f00);
134  *fir = buf[i];
135  buf[i] = av_clipl_int32((int64_t)*iir + (1 << 15)) >> 16;
136  }
137 }
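/*
 * For reference, a floating-point rendering of the same DC-removal filter
 * (a sketch, not part of this file). The fixed-point loop above implements
 * out[n] = 0.5*in[n] - 0.5*in[n-1] + (127/128)*out[n-1], i.e. the transfer
 * function H(z) = 0.5 * (1 - 1/z) / (1 - 0.9921875/z), where 0.9921875 is
 * the 0x7f00/0x8000 pole hidden in the MULL2() term.
 */
static void highpass_filter_float_sketch(float *buf, int len,
                                         float *prev_in, float *prev_out)
{
    int i;
    for (i = 0; i < len; i++) {
        float out = 0.5f * (buf[i] - *prev_in) + (127.0f / 128.0f) * *prev_out;
        *prev_in  = buf[i];
        *prev_out = out;
        buf[i]    = out;
    }
}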
138 
139 /**
140  * Estimate autocorrelation of the input vector.
141  *
142  * @param buf input buffer
143  * @param autocorr autocorrelation coefficients vector
144  */
145 static void comp_autocorr(int16_t *buf, int16_t *autocorr)
146 {
147  int i, scale, temp;
148  int16_t vector[LPC_FRAME];
149 
150  ff_g723_1_scale_vector(vector, buf, LPC_FRAME);
151 
152  /* Apply the Hamming window */
153  for (i = 0; i < LPC_FRAME; i++)
154  vector[i] = (vector[i] * hamming_window[i] + (1 << 14)) >> 15;
155 
156  /* Compute the first autocorrelation coefficient */
157  temp = ff_dot_product(vector, vector, LPC_FRAME);
158 
159  /* Apply a white noise correction factor of (1025/1024) */
160  temp += temp >> 10;
161 
162  /* Normalize */
163  scale = ff_g723_1_normalize_bits(temp, 31);
164  autocorr[0] = av_clipl_int32((int64_t) (temp << scale) +
165  (1 << 15)) >> 16;
166 
167  /* Compute the remaining coefficients */
168  if (!autocorr[0]) {
169  memset(autocorr + 1, 0, LPC_ORDER * sizeof(int16_t));
170  } else {
171  for (i = 1; i <= LPC_ORDER; i++) {
172  temp = ff_dot_product(vector, vector + i, LPC_FRAME - i);
173  temp = MULL2((temp << scale), binomial_window[i - 1]);
174  autocorr[i] = av_clipl_int32((int64_t) temp + (1 << 15)) >> 16;
175  }
176  }
177 }
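/*
 * Floating-point outline of the same estimate (a sketch, not part of this
 * file): window the analysis frame, accumulate lags 0..LPC_ORDER, apply the
 * 1025/1024 white noise correction to lag 0 and the binomial lag window to
 * the remaining lags. The Q15 tables above are reused directly.
 */
static void comp_autocorr_float_sketch(const float *buf, float *autocorr)
{
    int i, j;
    for (i = 0; i <= LPC_ORDER; i++) {
        double sum = 0.0;
        for (j = 0; j < LPC_FRAME - i; j++)
            sum += (buf[j]     * hamming_window[j]     / 32768.0) *
                   (buf[j + i] * hamming_window[j + i] / 32768.0);
        if (i == 0)
            autocorr[0] = sum * (1025.0 / 1024.0);
        else
            autocorr[i] = sum * binomial_window[i - 1] / 32768.0;
    }
}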
178 
179 /**
180  * Use Levinson-Durbin recursion to compute LPC coefficients from
181  * autocorrelation values.
182  *
183  * @param lpc LPC coefficients vector
184  * @param autocorr autocorrelation coefficients vector
185  * @param error prediction error
186  */
187 static void levinson_durbin(int16_t *lpc, int16_t *autocorr, int16_t error)
188 {
189  int16_t vector[LPC_ORDER];
190  int16_t partial_corr;
191  int i, j, temp;
192 
193  memset(lpc, 0, LPC_ORDER * sizeof(int16_t));
194 
195  for (i = 0; i < LPC_ORDER; i++) {
196  /* Compute the partial correlation coefficient */
197  temp = 0;
198  for (j = 0; j < i; j++)
199  temp -= lpc[j] * autocorr[i - j - 1];
200  temp = ((autocorr[i] << 13) + temp) << 3;
201 
202  if (FFABS(temp) >= (error << 16))
203  break;
204 
205  partial_corr = temp / (error << 1);
206 
207  lpc[i] = av_clipl_int32((int64_t) (partial_corr << 14) +
208  (1 << 15)) >> 16;
209 
210  /* Update the prediction error */
211  temp = MULL2(temp, partial_corr);
212  error = av_clipl_int32((int64_t) (error << 16) - temp +
213  (1 << 15)) >> 16;
214 
215  memcpy(vector, lpc, i * sizeof(int16_t));
216  for (j = 0; j < i; j++) {
217  temp = partial_corr * vector[i - j - 1] << 1;
218  lpc[j] = av_clipl_int32((int64_t) (lpc[j] << 16) - temp +
219  (1 << 15)) >> 16;
220  }
221  }
222 }
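/*
 * Textbook floating-point counterpart of the recursion above (a sketch, not
 * part of this file). It solves the normal equations for predictor
 * coefficients a[] from autocorrelation values r[0..order]; the sign and
 * Q-format conventions differ slightly from the fixed-point code.
 */
static int levinson_durbin_float_sketch(const double *r, double *a, int order)
{
    double err = r[0];
    int i, j;

    for (i = 0; i < order; i++) {
        double acc = r[i + 1];
        double k, tmp;

        for (j = 0; j < i; j++)
            acc -= a[j] * r[i - j];
        if (err <= 0.0)
            return -1;                /* degenerate autocorrelation */
        k = acc / err;                /* reflection (PARCOR) coefficient */
        for (j = 0; j < i / 2; j++) { /* symmetric in-place update */
            tmp          = a[j]         - k * a[i - 1 - j];
            a[i - 1 - j] = a[i - 1 - j] - k * a[j];
            a[j]         = tmp;
        }
        if (i & 1)
            a[i / 2] -= k * a[i / 2]; /* middle tap when i is odd */
        a[i]  = k;
        err  *= 1.0 - k * k;          /* prediction error update */
    }
    return 0;
}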
223 
224 /**
225  * Calculate LPC coefficients for the current frame.
226  *
227  * @param buf current frame
228  * @param prev_data 2 trailing subframes of the previous frame
229  * @param lpc LPC coefficients vector
230  */
231 static void comp_lpc_coeff(int16_t *buf, int16_t *lpc)
232 {
233  int16_t autocorr[(LPC_ORDER + 1) * SUBFRAMES];
234  int16_t *autocorr_ptr = autocorr;
235  int16_t *lpc_ptr = lpc;
236  int i, j;
237 
238  for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
239  comp_autocorr(buf + i, autocorr_ptr);
240  levinson_durbin(lpc_ptr, autocorr_ptr + 1, autocorr_ptr[0]);
241 
242  lpc_ptr += LPC_ORDER;
243  autocorr_ptr += LPC_ORDER + 1;
244  }
245 }
246 
247 static void lpc2lsp(int16_t *lpc, int16_t *prev_lsp, int16_t *lsp)
248 {
249  int f[LPC_ORDER + 2]; ///< coefficients of the sum and difference
250  ///< polynomials (F1, F2) ordered as
251  ///< f1[0], f2[0], ...., f1[5], f2[5]
252 
253  int max, shift, cur_val, prev_val, count, p;
254  int i, j;
255  int64_t temp;
256 
257  /* Apply bandwidth expansion on the LPC coefficients */
258  for (i = 0; i < LPC_ORDER; i++)
259  lsp[i] = (lpc[i] * bandwidth_expand[i] + (1 << 14)) >> 15;
260 
261  /* Initialize f1[0] and f2[0] to 1 in Q25 */
262  f[0] = f[1] = 1 << 25;
263 
264  /* Compute the remaining coefficients */
265  for (i = 0; i < LPC_ORDER / 2; i++) {
266  /* f1 */
267  f[2 * i + 2] = -f[2 * i] - ((lsp[i] + lsp[LPC_ORDER - 1 - i]) << 12);
268  /* f2 */
269  f[2 * i + 3] = f[2 * i + 1] - ((lsp[i] - lsp[LPC_ORDER - 1 - i]) << 12);
270  }
271 
272  /* Divide f1[5] and f2[5] by 2 for use in polynomial evaluation */
273  f[LPC_ORDER] >>= 1;
274  f[LPC_ORDER + 1] >>= 1;
275 
276  /* Normalize and shorten */
277  max = FFABS(f[0]);
278  for (i = 1; i < LPC_ORDER + 2; i++)
279  max = FFMAX(max, FFABS(f[i]));
280 
281  shift = ff_g723_1_normalize_bits(max, 31);
282 
283  for (i = 0; i < LPC_ORDER + 2; i++)
284  f[i] = av_clipl_int32((int64_t) (f[i] << shift) + (1 << 15)) >> 16;
285 
286  /**
287  * Evaluate F1 and F2 at uniform intervals of pi/256 along the
288  * unit circle and check for zero crossings.
289  */
290  p = 0;
291  temp = 0;
292  for (i = 0; i <= LPC_ORDER / 2; i++)
293  temp += f[LPC_ORDER - 2 * i] * G723_1_COS_TAB_FIRST_ELEMENT;
294  prev_val = av_clipl_int32(temp << 1);
295  count = 0;
296  for (i = 1; i < COS_TBL_SIZE / 2; i++) {
297  /* Evaluate */
298  temp = 0;
299  for (j = 0; j <= LPC_ORDER / 2; j++)
300  temp += f[LPC_ORDER - 2 * j + p] * ff_g723_1_cos_tab[i * j % COS_TBL_SIZE];
301  cur_val = av_clipl_int32(temp << 1);
302 
303  /* Check for sign change, indicating a zero crossing */
304  if ((cur_val ^ prev_val) < 0) {
305  int abs_cur = FFABS(cur_val);
306  int abs_prev = FFABS(prev_val);
307  int sum = abs_cur + abs_prev;
308 
309  shift = ff_g723_1_normalize_bits(sum, 31);
310  sum <<= shift;
311  abs_prev = abs_prev << shift >> 8;
312  lsp[count++] = ((i - 1) << 7) + (abs_prev >> 1) / (sum >> 16);
313 
314  if (count == LPC_ORDER)
315  break;
316 
317  /* Switch between sum and difference polynomials */
318  p ^= 1;
319 
320  /* Evaluate */
321  temp = 0;
322  for (j = 0; j <= LPC_ORDER / 2; j++)
323  temp += f[LPC_ORDER - 2 * j + p] *
324  ff_g723_1_cos_tab[i * j % COS_TBL_SIZE];
325  cur_val = av_clipl_int32(temp << 1);
326  }
327  prev_val = cur_val;
328  }
329 
330  if (count != LPC_ORDER)
331  memcpy(lsp, prev_lsp, LPC_ORDER * sizeof(int16_t));
332 }
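/*
 * The zero-crossing refinement above, in floating point (a sketch, not part
 * of this file). Given polynomial values at grid points i-1 and i (grid step
 * pi/256), the root is placed by linear interpolation; one grid step maps to
 * 128 units on the LSP index scale used by lsp[] above.
 */
static double refine_lsp_crossing_sketch(double prev_val, double cur_val, int i)
{
    double ap   = prev_val < 0 ? -prev_val : prev_val;
    double ac   = cur_val  < 0 ? -cur_val  : cur_val;
    double frac = ap / (ap + ac);  /* position of the zero between samples */

    return 128.0 * (i - 1 + frac); /* comparable to lsp[count] above */
}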
333 
334 /**
335  * Quantize the current LSP subvector.
336  *
337  * @param num band number
338  * @param offset offset of the current subvector in an LPC_ORDER vector
339  * @param size size of the current subvector
340  */
341 #define get_index(num, offset, size) \
342 { \
343  int error, max = -1; \
344  int16_t temp[4]; \
345  int i, j; \
346  \
347  for (i = 0; i < LSP_CB_SIZE; i++) { \
348  for (j = 0; j < size; j++){ \
349  temp[j] = (weight[j + (offset)] * ff_g723_1_lsp_band##num[i][j] + \
350  (1 << 14)) >> 15; \
351  } \
352  error = ff_g723_1_dot_product(lsp + (offset), temp, size) << 1; \
353  error -= ff_g723_1_dot_product(ff_g723_1_lsp_band##num[i], temp, size); \
354  if (error > max) { \
355  max = error; \
356  lsp_index[num] = i; \
357  } \
358  } \
359 }
360 
361 /**
362  * Vector quantize the LSP frequencies.
363  *
364  * @param lsp the current lsp vector
365  * @param prev_lsp the previous lsp vector
366  */
367 static void lsp_quantize(uint8_t *lsp_index, int16_t *lsp, int16_t *prev_lsp)
368 {
369  int16_t weight[LPC_ORDER];
370  int16_t min, max;
371  int shift, i;
372 
373  /* Calculate the VQ weighting vector */
374  weight[0] = (1 << 20) / (lsp[1] - lsp[0]);
375  weight[LPC_ORDER - 1] = (1 << 20) /
376  (lsp[LPC_ORDER - 1] - lsp[LPC_ORDER - 2]);
377 
378  for (i = 1; i < LPC_ORDER - 1; i++) {
379  min = FFMIN(lsp[i] - lsp[i - 1], lsp[i + 1] - lsp[i]);
380  if (min > 0x20)
381  weight[i] = (1 << 20) / min;
382  else
383  weight[i] = INT16_MAX;
384  }
385 
386  /* Normalize */
387  max = 0;
388  for (i = 0; i < LPC_ORDER; i++)
389  max = FFMAX(weight[i], max);
390 
391  shift = ff_g723_1_normalize_bits(max, 31);
392  for (i = 0; i < LPC_ORDER; i++) {
393  weight[i] <<= shift;
394  }
395 
396  /* Compute the VQ target vector */
397  for (i = 0; i < LPC_ORDER; i++) {
398  lsp[i] -= dc_lsp[i] +
399  (((prev_lsp[i] - dc_lsp[i]) * 12288 + (1 << 14)) >> 15);
400  }
401 
402  get_index(0, 0, 3);
403  get_index(1, 3, 3);
404  get_index(2, 6, 4);
405 }
406 
407 /**
408  * Perform IIR filtering.
409  *
410  * @param fir_coef FIR coefficients
411  * @param iir_coef IIR coefficients
412  * @param src source vector
413  * @param dest destination vector
414  */
415 static void iir_filter(int16_t *fir_coef, int16_t *iir_coef,
416  int16_t *src, int16_t *dest)
417 {
418  int m, n;
419 
420  for (m = 0; m < SUBFRAME_LEN; m++) {
421  int64_t filter = 0;
422  for (n = 1; n <= LPC_ORDER; n++) {
423  filter -= fir_coef[n - 1] * src[m - n] -
424  iir_coef[n - 1] * dest[m - n];
425  }
426 
427  dest[m] = av_clipl_int32((src[m] << 16) + (filter << 3) +
428  (1 << 15)) >> 16;
429  }
430 }
431 
432 /**
433  * Apply the formant perceptual weighting filter.
434  *
435  * @param flt_coef filter coefficients
436  * @param unq_lpc unquantized lpc vector
437  */
438 static void perceptual_filter(G723_1_ChannelContext *p, int16_t *flt_coef,
439  int16_t *unq_lpc, int16_t *buf)
440 {
441  int16_t vector[FRAME_LEN + LPC_ORDER];
442  int i, j, k, l = 0;
443 
444  memcpy(buf, p->iir_mem, sizeof(int16_t) * LPC_ORDER);
445  memcpy(vector, p->fir_mem, sizeof(int16_t) * LPC_ORDER);
446  memcpy(vector + LPC_ORDER, buf + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
447 
448  for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
449  for (k = 0; k < LPC_ORDER; k++) {
450  flt_coef[k + 2 * l] = (unq_lpc[k + l] * percept_flt_tbl[0][k] +
451  (1 << 14)) >> 15;
452  flt_coef[k + 2 * l + LPC_ORDER] = (unq_lpc[k + l] *
453  percept_flt_tbl[1][k] +
454  (1 << 14)) >> 15;
455  }
456  iir_filter(flt_coef + 2 * l, flt_coef + 2 * l + LPC_ORDER,
457  vector + i, buf + i);
458  l += LPC_ORDER;
459  }
460  memcpy(p->iir_mem, buf + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
461  memcpy(p->fir_mem, vector + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
462 }
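/*
 * What the coefficient setup above amounts to (a sketch, not part of this
 * file): the formant weighting filter is W(z) = A(z/0.9) / A(z/0.5), so the
 * FIR ("zero") taps are a[k] * 0.9^(k+1) and the IIR ("pole") taps are
 * a[k] * 0.5^(k+1), exactly the factors stored in percept_flt_tbl[] in Q15.
 */
static void weight_lpc_float_sketch(const double *a, double *wz, double *wp)
{
    double gz = 1.0, gp = 1.0;
    int k;

    for (k = 0; k < LPC_ORDER; k++) {
        gz *= 0.9;
        gp *= 0.5;
        wz[k] = a[k] * gz;   /* numerator (zero) part   */
        wp[k] = a[k] * gp;   /* denominator (pole) part */
    }
}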
463 
464 /**
465  * Estimate the open loop pitch period.
466  *
467  * @param buf perceptually weighted speech
468  * @param start estimation is carried out from this position
469  */
470 static int estimate_pitch(int16_t *buf, int start)
471 {
472  int max_exp = 32;
473  int max_ccr = 0x4000;
474  int max_eng = 0x7fff;
475  int index = PITCH_MIN;
476  int offset = start - PITCH_MIN + 1;
477 
478  int ccr, eng, orig_eng, ccr_eng, exp;
479  int diff, temp;
480 
481  int i;
482 
483  orig_eng = ff_dot_product(buf + offset, buf + offset, HALF_FRAME_LEN);
484 
485  for (i = PITCH_MIN; i <= PITCH_MAX - 3; i++) {
486  offset--;
487 
488  /* Update energy and compute correlation */
489  orig_eng += buf[offset] * buf[offset] -
490  buf[offset + HALF_FRAME_LEN] * buf[offset + HALF_FRAME_LEN];
491  ccr = ff_dot_product(buf + start, buf + offset, HALF_FRAME_LEN);
492  if (ccr <= 0)
493  continue;
494 
495  /* Split into mantissa and exponent to maintain precision */
496  exp = ff_g723_1_normalize_bits(ccr, 31);
497  ccr = av_clipl_int32((int64_t) (ccr << exp) + (1 << 15)) >> 16;
498  exp <<= 1;
499  ccr *= ccr;
500  temp = ff_g723_1_normalize_bits(ccr, 31);
501  ccr = ccr << temp >> 16;
502  exp += temp;
503 
504  temp = ff_g723_1_normalize_bits(orig_eng, 31);
505  eng = av_clipl_int32((int64_t) (orig_eng << temp) + (1 << 15)) >> 16;
506  exp -= temp;
507 
508  if (ccr >= eng) {
509  exp--;
510  ccr >>= 1;
511  }
512  if (exp > max_exp)
513  continue;
514 
515  if (exp + 1 < max_exp)
516  goto update;
517 
518  /* Equalize exponents before comparison */
519  if (exp + 1 == max_exp)
520  temp = max_ccr >> 1;
521  else
522  temp = max_ccr;
523  ccr_eng = ccr * max_eng;
524  diff = ccr_eng - eng * temp;
525  if (diff > 0 && (i - index < PITCH_MIN || diff > ccr_eng >> 2)) {
526 update:
527  index = i;
528  max_exp = exp;
529  max_ccr = ccr;
530  max_eng = eng;
531  }
532  }
533  return index;
534 }
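/*
 * The same decision rule in floating point (a sketch, not part of this
 * file): over lags PITCH_MIN..PITCH_MAX-3 the encoder maximizes the
 * normalized forward cross-correlation ccr^2/energy; a distant lag replaces
 * the current best only when its score is at least 4/3 as large, which is
 * what the exponent/mantissa comparison above encodes.
 */
static int estimate_pitch_float_sketch(const int16_t *buf, int start)
{
    int    best_lag   = PITCH_MIN;
    double best_score = 0.0;
    int    lag, n;

    for (lag = PITCH_MIN; lag <= PITCH_MAX - 3; lag++) {
        double ccr = 0.0, eng = 0.0, score;

        for (n = 0; n < HALF_FRAME_LEN; n++) {
            ccr += (double) buf[start + n] * buf[start + n - lag];
            eng += (double) buf[start + n - lag] * buf[start + n - lag];
        }
        if (ccr <= 0.0 || eng <= 0.0)
            continue;
        score = ccr * ccr / eng;
        if (score > best_score &&
            (lag - best_lag < PITCH_MIN || score > best_score * 4.0 / 3.0)) {
            best_score = score;
            best_lag   = lag;
        }
    }
    return best_lag;
}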
535 
536 /**
537  * Compute harmonic noise filter parameters.
538  *
539  * @param buf perceptually weighted speech
540  * @param pitch_lag open loop pitch period
541  * @param hf harmonic filter parameters
542  */
543 static void comp_harmonic_coeff(int16_t *buf, int16_t pitch_lag, HFParam *hf)
544 {
545  int ccr, eng, max_ccr, max_eng;
546  int exp, max, diff;
547  int energy[15];
548  int i, j;
549 
550  for (i = 0, j = pitch_lag - 3; j <= pitch_lag + 3; i++, j++) {
551  /* Compute residual energy */
552  energy[i << 1] = ff_dot_product(buf - j, buf - j, SUBFRAME_LEN);
553  /* Compute correlation */
554  energy[(i << 1) + 1] = ff_dot_product(buf, buf - j, SUBFRAME_LEN);
555  }
556 
557  /* Compute target energy */
558  energy[14] = ff_dot_product(buf, buf, SUBFRAME_LEN);
559 
560  /* Normalize */
561  max = 0;
562  for (i = 0; i < 15; i++)
563  max = FFMAX(max, FFABS(energy[i]));
564 
565  exp = ff_g723_1_normalize_bits(max, 31);
566  for (i = 0; i < 15; i++) {
567  energy[i] = av_clipl_int32((int64_t)(energy[i] << exp) +
568  (1 << 15)) >> 16;
569  }
570 
571  hf->index = -1;
572  hf->gain = 0;
573  max_ccr = 1;
574  max_eng = 0x7fff;
575 
576  for (i = 0; i <= 6; i++) {
577  eng = energy[i << 1];
578  ccr = energy[(i << 1) + 1];
579 
580  if (ccr <= 0)
581  continue;
582 
583  ccr = (ccr * ccr + (1 << 14)) >> 15;
584  diff = ccr * max_eng - eng * max_ccr;
585  if (diff > 0) {
586  max_ccr = ccr;
587  max_eng = eng;
588  hf->index = i;
589  }
590  }
591 
592  if (hf->index == -1) {
593  hf->index = pitch_lag;
594  return;
595  }
596 
597  eng = energy[14] * max_eng;
598  eng = (eng >> 2) + (eng >> 3);
599  ccr = energy[(hf->index << 1) + 1] * energy[(hf->index << 1) + 1];
600  if (eng < ccr) {
601  eng = energy[(hf->index << 1) + 1];
602 
603  if (eng >= max_eng)
604  hf->gain = 0x2800;
605  else
606  hf->gain = ((eng << 15) / max_eng * 0x2800 + (1 << 14)) >> 15;
607  }
608  hf->index += pitch_lag - 3;
609 }
610 
611 /**
612  * Apply the harmonic noise shaping filter.
613  *
614  * @param hf filter parameters
615  */
616 static void harmonic_filter(HFParam *hf, const int16_t *src, int16_t *dest)
617 {
618  int i;
619 
620  for (i = 0; i < SUBFRAME_LEN; i++) {
621  int64_t temp = hf->gain * src[i - hf->index] << 1;
622  dest[i] = av_clipl_int32((src[i] << 16) - temp + (1 << 15)) >> 16;
623  }
624 }
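/*
 * In floating-point terms (a sketch, not part of this file) the loop above
 * is a single-tap comb filter tuned to the open-loop pitch lag:
 * dest[n] = src[n] - (gain / 2^15) * src[n - lag].
 */
static void harmonic_filter_float_sketch(double gain_q15, int lag,
                                         const double *src, double *dest,
                                         int len)
{
    int n;
    for (n = 0; n < len; n++)
        dest[n] = src[n] - (gain_q15 / 32768.0) * src[n - lag];
}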
625 
626 static void harmonic_noise_sub(HFParam *hf, const int16_t *src, int16_t *dest)
627 {
628  int i;
629  for (i = 0; i < SUBFRAME_LEN; i++) {
630  int64_t temp = hf->gain * src[i - hf->index] << 1;
631  dest[i] = av_clipl_int32(((dest[i] - src[i]) << 16) + temp +
632  (1 << 15)) >> 16;
633  }
634 }
635 
636 /**
637  * Combined synthesis and formant perceptual weighting filter.
638  *
639  * @param qnt_lpc quantized lpc coefficients
640  * @param perf_lpc perceptual filter coefficients
641  * @param perf_fir perceptual filter fir memory
642  * @param perf_iir perceptual filter iir memory
643  * @param scale the filter output will be scaled by 2^scale
644  */
645 static void synth_percept_filter(int16_t *qnt_lpc, int16_t *perf_lpc,
646  int16_t *perf_fir, int16_t *perf_iir,
647  const int16_t *src, int16_t *dest, int scale)
648 {
649  int i, j;
650  int16_t buf_16[SUBFRAME_LEN + LPC_ORDER];
651  int64_t buf[SUBFRAME_LEN];
652 
653  int16_t *bptr_16 = buf_16 + LPC_ORDER;
654 
655  memcpy(buf_16, perf_fir, sizeof(int16_t) * LPC_ORDER);
656  memcpy(dest - LPC_ORDER, perf_iir, sizeof(int16_t) * LPC_ORDER);
657 
658  for (i = 0; i < SUBFRAME_LEN; i++) {
659  int64_t temp = 0;
660  for (j = 1; j <= LPC_ORDER; j++)
661  temp -= qnt_lpc[j - 1] * bptr_16[i - j];
662 
663  buf[i] = (src[i] << 15) + (temp << 3);
664  bptr_16[i] = av_clipl_int32(buf[i] + (1 << 15)) >> 16;
665  }
666 
667  for (i = 0; i < SUBFRAME_LEN; i++) {
668  int64_t fir = 0, iir = 0;
669  for (j = 1; j <= LPC_ORDER; j++) {
670  fir -= perf_lpc[j - 1] * bptr_16[i - j];
671  iir += perf_lpc[j + LPC_ORDER - 1] * dest[i - j];
672  }
673  dest[i] = av_clipl_int32(((buf[i] + (fir << 3)) << scale) + (iir << 3) +
674  (1 << 15)) >> 16;
675  }
676  memcpy(perf_fir, buf_16 + SUBFRAME_LEN, sizeof(int16_t) * LPC_ORDER);
677  memcpy(perf_iir, dest + SUBFRAME_LEN - LPC_ORDER,
678  sizeof(int16_t) * LPC_ORDER);
679 }
680 
681 /**
682  * Compute the adaptive codebook contribution.
683  *
684  * @param buf input signal
685  * @param index the current subframe index
686  */
687 static void acb_search(G723_1_ChannelContext *p, int16_t *residual,
688  int16_t *impulse_resp, const int16_t *buf,
689  int index)
690 {
691  int16_t flt_buf[PITCH_ORDER][SUBFRAME_LEN];
692 
693  const int16_t *cb_tbl = ff_g723_1_adaptive_cb_gain85;
694 
695  int ccr_buf[PITCH_ORDER * SUBFRAMES << 2];
696 
697  int pitch_lag = p->pitch_lag[index >> 1];
698  int acb_lag = 1;
699  int acb_gain = 0;
700  int odd_frame = index & 1;
701  int iter = 3 + odd_frame;
702  int count = 0;
703  int tbl_size = 85;
704 
705  int i, j, k, l, max;
706  int64_t temp;
707 
708  if (!odd_frame) {
709  if (pitch_lag == PITCH_MIN)
710  pitch_lag++;
711  else
712  pitch_lag = FFMIN(pitch_lag, PITCH_MAX - 5);
713  }
714 
715  for (i = 0; i < iter; i++) {
716  ff_g723_1_get_residual(residual, p->prev_excitation, pitch_lag + i - 1);
717 
718  for (j = 0; j < SUBFRAME_LEN; j++) {
719  temp = 0;
720  for (k = 0; k <= j; k++)
721  temp += residual[PITCH_ORDER - 1 + k] * impulse_resp[j - k];
722  flt_buf[PITCH_ORDER - 1][j] = av_clipl_int32((temp << 1) +
723  (1 << 15)) >> 16;
724  }
725 
726  for (j = PITCH_ORDER - 2; j >= 0; j--) {
727  flt_buf[j][0] = ((residual[j] << 13) + (1 << 14)) >> 15;
728  for (k = 1; k < SUBFRAME_LEN; k++) {
729  temp = (flt_buf[j + 1][k - 1] << 15) +
730  residual[j] * impulse_resp[k];
731  flt_buf[j][k] = av_clipl_int32((temp << 1) + (1 << 15)) >> 16;
732  }
733  }
734 
735  /* Compute crosscorrelation with the signal */
736  for (j = 0; j < PITCH_ORDER; j++) {
737  temp = ff_dot_product(buf, flt_buf[j], SUBFRAME_LEN);
738  ccr_buf[count++] = av_clipl_int32(temp << 1);
739  }
740 
741  /* Compute energies */
742  for (j = 0; j < PITCH_ORDER; j++) {
743  ccr_buf[count++] = ff_g723_1_dot_product(flt_buf[j], flt_buf[j],
744  SUBFRAME_LEN);
745  }
746 
747  for (j = 1; j < PITCH_ORDER; j++) {
748  for (k = 0; k < j; k++) {
749  temp = ff_dot_product(flt_buf[j], flt_buf[k], SUBFRAME_LEN);
750  ccr_buf[count++] = av_clipl_int32(temp << 2);
751  }
752  }
753  }
754 
755  /* Normalize and shorten */
756  max = 0;
757  for (i = 0; i < 20 * iter; i++)
758  max = FFMAX(max, FFABS(ccr_buf[i]));
759 
760  temp = ff_g723_1_normalize_bits(max, 31);
761 
762  for (i = 0; i < 20 * iter; i++)
763  ccr_buf[i] = av_clipl_int32((int64_t) (ccr_buf[i] << temp) +
764  (1 << 15)) >> 16;
765 
766  max = 0;
767  for (i = 0; i < iter; i++) {
768  /* Select quantization table */
769  if (!odd_frame && pitch_lag + i - 1 >= SUBFRAME_LEN - 2 ||
770  odd_frame && pitch_lag >= SUBFRAME_LEN - 2) {
771  cb_tbl = ff_g723_1_adaptive_cb_gain170;
772  tbl_size = 170;
773  }
774 
775  for (j = 0, k = 0; j < tbl_size; j++, k += 20) {
776  temp = 0;
777  for (l = 0; l < 20; l++)
778  temp += ccr_buf[20 * i + l] * cb_tbl[k + l];
779  temp = av_clipl_int32(temp);
780 
781  if (temp > max) {
782  max = temp;
783  acb_gain = j;
784  acb_lag = i;
785  }
786  }
787  }
788 
789  if (!odd_frame) {
790  pitch_lag += acb_lag - 1;
791  acb_lag = 1;
792  }
793 
794  p->pitch_lag[index >> 1] = pitch_lag;
795  p->subframe[index].ad_cb_lag = acb_lag;
796  p->subframe[index].ad_cb_gain = acb_gain;
797 }
798 
799 /**
800  * Subtract the adaptive codebook contribution from the input
801  * to obtain the residual.
802  *
803  * @param buf target vector
804  */
805 static void sub_acb_contrib(const int16_t *residual, const int16_t *impulse_resp,
806  int16_t *buf)
807 {
808  int i, j;
809  /* Subtract adaptive CB contribution to obtain the residual */
810  for (i = 0; i < SUBFRAME_LEN; i++) {
811  int64_t temp = buf[i] << 14;
812  for (j = 0; j <= i; j++)
813  temp -= residual[j] * impulse_resp[i - j];
814 
815  buf[i] = av_clipl_int32((temp << 2) + (1 << 15)) >> 16;
816  }
817 }
818 
819 /**
820  * Quantize the residual signal using the fixed codebook (MP-MLQ).
821  *
822  * @param optim optimized fixed codebook parameters
823  * @param buf excitation vector
824  */
825 static void get_fcb_param(FCBParam *optim, int16_t *impulse_resp,
826  int16_t *buf, int pulse_cnt, int pitch_lag)
827 {
828  FCBParam param;
829  int16_t impulse_r[SUBFRAME_LEN];
830  int16_t temp_corr[SUBFRAME_LEN];
831  int16_t impulse_corr[SUBFRAME_LEN];
832 
833  int ccr1[SUBFRAME_LEN];
834  int ccr2[SUBFRAME_LEN];
835  int amp, err, max, max_amp_index, min, scale, i, j, k, l;
836 
837  int64_t temp;
838 
839  /* Update impulse response */
840  memcpy(impulse_r, impulse_resp, sizeof(int16_t) * SUBFRAME_LEN);
841  param.dirac_train = 0;
842  if (pitch_lag < SUBFRAME_LEN - 2) {
843  param.dirac_train = 1;
844  ff_g723_1_gen_dirac_train(impulse_r, pitch_lag);
845  }
846 
847  for (i = 0; i < SUBFRAME_LEN; i++)
848  temp_corr[i] = impulse_r[i] >> 1;
849 
850  /* Compute impulse response autocorrelation */
851  temp = ff_g723_1_dot_product(temp_corr, temp_corr, SUBFRAME_LEN);
852 
853  scale = ff_g723_1_normalize_bits(temp, 31);
854  impulse_corr[0] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;
855 
856  for (i = 1; i < SUBFRAME_LEN; i++) {
857  temp = ff_g723_1_dot_product(temp_corr + i, temp_corr,
858  SUBFRAME_LEN - i);
859  impulse_corr[i] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;
860  }
861 
862  /* Compute crosscorrelation of impulse response with residual signal */
863  scale -= 4;
864  for (i = 0; i < SUBFRAME_LEN; i++) {
865  temp = ff_g723_1_dot_product(buf + i, impulse_r, SUBFRAME_LEN - i);
866  if (scale < 0)
867  ccr1[i] = temp >> -scale;
868  else
869  ccr1[i] = av_clipl_int32(temp << scale);
870  }
871 
872  /* Search loop */
873  for (i = 0; i < GRID_SIZE; i++) {
874  /* Maximize the crosscorrelation */
875  max = 0;
876  for (j = i; j < SUBFRAME_LEN; j += GRID_SIZE) {
877  temp = FFABS(ccr1[j]);
878  if (temp >= max) {
879  max = temp;
880  param.pulse_pos[0] = j;
881  }
882  }
883 
884  /* Quantize the gain (max crosscorrelation/impulse_corr[0]) */
885  amp = max;
886  min = 1 << 30;
887  max_amp_index = GAIN_LEVELS - 2;
888  for (j = max_amp_index; j >= 2; j--) {
889  temp = av_clipl_int32((int64_t) ff_g723_1_fixed_cb_gain[j] *
890  impulse_corr[0] << 1);
891  temp = FFABS(temp - amp);
892  if (temp < min) {
893  min = temp;
894  max_amp_index = j;
895  }
896  }
897 
898  max_amp_index--;
899  /* Select additional gain values */
900  for (j = 1; j < 5; j++) {
901  for (k = i; k < SUBFRAME_LEN; k += GRID_SIZE) {
902  temp_corr[k] = 0;
903  ccr2[k] = ccr1[k];
904  }
905  param.amp_index = max_amp_index + j - 2;
906  amp = ff_g723_1_fixed_cb_gain[param.amp_index];
907 
908  param.pulse_sign[0] = (ccr2[param.pulse_pos[0]] < 0) ? -amp : amp;
909  temp_corr[param.pulse_pos[0]] = 1;
910 
911  for (k = 1; k < pulse_cnt; k++) {
912  max = INT_MIN;
913  for (l = i; l < SUBFRAME_LEN; l += GRID_SIZE) {
914  if (temp_corr[l])
915  continue;
916  temp = impulse_corr[FFABS(l - param.pulse_pos[k - 1])];
917  temp = av_clipl_int32((int64_t) temp *
918  param.pulse_sign[k - 1] << 1);
919  ccr2[l] -= temp;
920  temp = FFABS(ccr2[l]);
921  if (temp > max) {
922  max = temp;
923  param.pulse_pos[k] = l;
924  }
925  }
926 
927  param.pulse_sign[k] = (ccr2[param.pulse_pos[k]] < 0) ?
928  -amp : amp;
929  temp_corr[param.pulse_pos[k]] = 1;
930  }
931 
932  /* Create the error vector */
933  memset(temp_corr, 0, sizeof(int16_t) * SUBFRAME_LEN);
934 
935  for (k = 0; k < pulse_cnt; k++)
936  temp_corr[param.pulse_pos[k]] = param.pulse_sign[k];
937 
938  for (k = SUBFRAME_LEN - 1; k >= 0; k--) {
939  temp = 0;
940  for (l = 0; l <= k; l++) {
941  int prod = av_clipl_int32((int64_t) temp_corr[l] *
942  impulse_r[k - l] << 1);
943  temp = av_clipl_int32(temp + prod);
944  }
945  temp_corr[k] = temp << 2 >> 16;
946  }
947 
948  /* Compute square of error */
949  err = 0;
950  for (k = 0; k < SUBFRAME_LEN; k++) {
951  int64_t prod;
952  prod = av_clipl_int32((int64_t) buf[k] * temp_corr[k] << 1);
953  err = av_clipl_int32(err - prod);
954  prod = av_clipl_int32((int64_t) temp_corr[k] * temp_corr[k]);
955  err = av_clipl_int32(err + prod);
956  }
957 
958  /* Minimize */
959  if (err < optim->min_err) {
960  optim->min_err = err;
961  optim->grid_index = i;
962  optim->amp_index = param.amp_index;
963  optim->dirac_train = param.dirac_train;
964 
965  for (k = 0; k < pulse_cnt; k++) {
966  optim->pulse_sign[k] = param.pulse_sign[k];
967  optim->pulse_pos[k] = param.pulse_pos[k];
968  }
969  }
970  }
971  }
972 }
973 
974 /**
975  * Encode the pulse position and gain of the current subframe.
976  *
977  * @param optim optimized fixed CB parameters
978  * @param buf excitation vector
979  */
980 static void pack_fcb_param(G723_1_Subframe *subfrm, FCBParam *optim,
981  int16_t *buf, int pulse_cnt)
982 {
983  int i, j;
984 
985  j = PULSE_MAX - pulse_cnt;
986 
987  subfrm->pulse_sign = 0;
988  subfrm->pulse_pos = 0;
989 
990  for (i = 0; i < SUBFRAME_LEN >> 1; i++) {
991  int val = buf[optim->grid_index + (i << 1)];
992  if (!val) {
993  subfrm->pulse_pos += ff_g723_1_combinatorial_table[j][i];
994  } else {
995  subfrm->pulse_sign <<= 1;
996  if (val < 0)
997  subfrm->pulse_sign++;
998  j++;
999 
1000  if (j == PULSE_MAX)
1001  break;
1002  }
1003  }
1004  subfrm->amp_index = optim->amp_index;
1005  subfrm->grid_index = optim->grid_index;
1006  subfrm->dirac_train = optim->dirac_train;
1007 }
1008 
1009 /**
1010  * Compute the fixed codebook excitation.
1011  *
1012  * @param buf target vector
1013  * @param impulse_resp impulse response of the combined filter
1014  */
1015 static void fcb_search(G723_1_ChannelContext *p, int16_t *impulse_resp,
1016  int16_t *buf, int index)
1017 {
1018  FCBParam optim;
1019  int pulse_cnt = pulses[index];
1020  int i;
1021 
1022  optim.min_err = 1 << 30;
1023  get_fcb_param(&optim, impulse_resp, buf, pulse_cnt, SUBFRAME_LEN);
1024 
1025  if (p->pitch_lag[index >> 1] < SUBFRAME_LEN - 2) {
1026  get_fcb_param(&optim, impulse_resp, buf, pulse_cnt,
1027  p->pitch_lag[index >> 1]);
1028  }
1029 
1030  /* Reconstruct the excitation */
1031  memset(buf, 0, sizeof(int16_t) * SUBFRAME_LEN);
1032  for (i = 0; i < pulse_cnt; i++)
1033  buf[optim.pulse_pos[i]] = optim.pulse_sign[i];
1034 
1035  pack_fcb_param(&p->subframe[index], &optim, buf, pulse_cnt);
1036 
1037  if (optim.dirac_train)
1038  ff_g723_1_gen_dirac_train(buf, p->pitch_lag[index >> 1]);
1039 }
1040 
1041 /**
1042  * Pack the frame parameters into output bitstream.
1043  *
1044  * @param p     the channel context
1045  * @param avpkt output packet whose data buffer receives the frame
1046  */
1047 static int pack_bitstream(G723_1_ChannelContext *p, AVPacket *avpkt)
1048 {
1049  PutBitContext pb;
1050  int info_bits = 0;
1051  int i, temp;
1052 
1053  init_put_bits(&pb, avpkt->data, avpkt->size);
1054 
1055  put_bits(&pb, 2, info_bits);
1056 
1057  put_bits(&pb, 8, p->lsp_index[2]);
1058  put_bits(&pb, 8, p->lsp_index[1]);
1059  put_bits(&pb, 8, p->lsp_index[0]);
1060 
1061  put_bits(&pb, 7, p->pitch_lag[0] - PITCH_MIN);
1062  put_bits(&pb, 2, p->subframe[1].ad_cb_lag);
1063  put_bits(&pb, 7, p->pitch_lag[1] - PITCH_MIN);
1064  put_bits(&pb, 2, p->subframe[3].ad_cb_lag);
1065 
1066  /* Write 12 bit combined gain */
1067  for (i = 0; i < SUBFRAMES; i++) {
1068  temp = p->subframe[i].ad_cb_gain * GAIN_LEVELS +
1069  p->subframe[i].amp_index;
1070  if (p->cur_rate == RATE_6300)
1071  temp += p->subframe[i].dirac_train << 11;
1072  put_bits(&pb, 12, temp);
1073  }
1074 
1075  put_bits(&pb, 1, p->subframe[0].grid_index);
1076  put_bits(&pb, 1, p->subframe[1].grid_index);
1077  put_bits(&pb, 1, p->subframe[2].grid_index);
1078  put_bits(&pb, 1, p->subframe[3].grid_index);
1079 
1080  if (p->cur_rate == RATE_6300) {
1081  put_bits(&pb, 1, 0); /* reserved bit */
1082 
1083  /* Write 13 bit combined position index */
1084  temp = (p->subframe[0].pulse_pos >> 16) * 810 +
1085  (p->subframe[1].pulse_pos >> 14) * 90 +
1086  (p->subframe[2].pulse_pos >> 16) * 9 +
1087  (p->subframe[3].pulse_pos >> 14);
1088  put_bits(&pb, 13, temp);
1089 
1090  put_bits(&pb, 16, p->subframe[0].pulse_pos & 0xffff);
1091  put_bits(&pb, 14, p->subframe[1].pulse_pos & 0x3fff);
1092  put_bits(&pb, 16, p->subframe[2].pulse_pos & 0xffff);
1093  put_bits(&pb, 14, p->subframe[3].pulse_pos & 0x3fff);
1094 
1095  put_bits(&pb, 6, p->subframe[0].pulse_sign);
1096  put_bits(&pb, 5, p->subframe[1].pulse_sign);
1097  put_bits(&pb, 6, p->subframe[2].pulse_sign);
1098  put_bits(&pb, 5, p->subframe[3].pulse_sign);
1099  }
1100 
1101  flush_put_bits(&pb);
1102  return frame_size[info_bits];
1103 }
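/*
 * Back-of-the-envelope check of the 6.3 kbit/s layout written above (a
 * sketch, not part of this file): the fields sum to 192 bits, i.e. the
 * 24-byte packet requested in g723_1_encode_frame().
 */
enum {
    G7231_HI_RATE_FRAME_BITS = 2                 /* info bits              */
                             + 3 * 8             /* LSP VQ indices         */
                             + 2 * (7 + 2)       /* pitch lags + ACB lags  */
                             + 4 * 12            /* combined gain indices  */
                             + 4 * 1             /* grid indices           */
                             + 1                 /* reserved bit           */
                             + 13                /* combined position MSBs */
                             + 16 + 14 + 16 + 14 /* pulse positions        */
                             + 6 + 5 + 6 + 5     /* pulse signs            */
};                                               /* = 192 bits = 24 bytes  */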
1104 
1105 static int g723_1_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
1106  const AVFrame *frame, int *got_packet_ptr)
1107 {
1108  G723_1_Context *s = avctx->priv_data;
1109  G723_1_ChannelContext *p = &s->ch[0];
1110  int16_t unq_lpc[LPC_ORDER * SUBFRAMES];
1111  int16_t qnt_lpc[LPC_ORDER * SUBFRAMES];
1112  int16_t cur_lsp[LPC_ORDER];
1113  int16_t weighted_lpc[LPC_ORDER * SUBFRAMES << 1];
1114  int16_t vector[FRAME_LEN + PITCH_MAX];
1115  int offset, ret, i, j;
1116  int16_t *in, *start;
1117  HFParam hf[4];
1118 
1119  /* duplicate input */
1120  start = in = av_malloc(frame->nb_samples * sizeof(int16_t));
1121  if (!in)
1122  return AVERROR(ENOMEM);
1123  memcpy(in, frame->data[0], frame->nb_samples * sizeof(int16_t));
1124 
1125  highpass_filter(in, &p->hpf_fir_mem, &p->hpf_iir_mem);
1126 
1127  memcpy(vector, p->prev_data, HALF_FRAME_LEN * sizeof(int16_t));
1128  memcpy(vector + HALF_FRAME_LEN, in, FRAME_LEN * sizeof(int16_t));
1129 
1130  comp_lpc_coeff(vector, unq_lpc);
1131  lpc2lsp(&unq_lpc[LPC_ORDER * 3], p->prev_lsp, cur_lsp);
1132  lsp_quantize(p->lsp_index, cur_lsp, p->prev_lsp);
1133 
1134  /* Update memory */
1135  memcpy(vector + LPC_ORDER, p->prev_data + SUBFRAME_LEN,
1136  sizeof(int16_t) * SUBFRAME_LEN);
1137  memcpy(vector + LPC_ORDER + SUBFRAME_LEN, in,
1138  sizeof(int16_t) * (HALF_FRAME_LEN + SUBFRAME_LEN));
1139  memcpy(p->prev_data, in + HALF_FRAME_LEN,
1140  sizeof(int16_t) * HALF_FRAME_LEN);
1141  memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
1142 
1143  perceptual_filter(p, weighted_lpc, unq_lpc, vector);
1144 
1145  memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
1146  memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
1147  memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);
1148 
1149  ff_g723_1_scale_vector(vector, vector, FRAME_LEN + PITCH_MAX);
1150 
1151  p->pitch_lag[0] = estimate_pitch(vector, PITCH_MAX);
1152  p->pitch_lag[1] = estimate_pitch(vector, PITCH_MAX + HALF_FRAME_LEN);
1153 
1154  for (i = PITCH_MAX, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
1155  comp_harmonic_coeff(vector + i, p->pitch_lag[j >> 1], hf + j);
1156 
1157  memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
1158  memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);
1159  memcpy(p->prev_weight_sig, vector + FRAME_LEN, sizeof(int16_t) * PITCH_MAX);
1160 
1161  for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
1162  harmonic_filter(hf + j, vector + PITCH_MAX + i, in + i);
1163 
1164  ff_g723_1_inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, 0);
1165  ff_g723_1_lsp_interpolate(qnt_lpc, cur_lsp, p->prev_lsp);
1166 
1167  memcpy(p->prev_lsp, cur_lsp, sizeof(int16_t) * LPC_ORDER);
1168 
1169  offset = 0;
1170  for (i = 0; i < SUBFRAMES; i++) {
1171  int16_t impulse_resp[SUBFRAME_LEN];
1172  int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
1173  int16_t flt_in[SUBFRAME_LEN];
1174  int16_t zero[LPC_ORDER], fir[LPC_ORDER], iir[LPC_ORDER];
1175 
1176  /**
1177  * Compute the combined impulse response of the synthesis filter,
1178  * formant perceptual weighting filter and harmonic noise shaping filter
1179  */
1180  memset(zero, 0, sizeof(int16_t) * LPC_ORDER);
1181  memset(vector, 0, sizeof(int16_t) * PITCH_MAX);
1182  memset(flt_in, 0, sizeof(int16_t) * SUBFRAME_LEN);
1183 
1184  flt_in[0] = 1 << 13; /* Unit impulse */
1185  synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
1186  zero, zero, flt_in, vector + PITCH_MAX, 1);
1187  harmonic_filter(hf + i, vector + PITCH_MAX, impulse_resp);
1188 
1189  /* Compute the combined zero input response */
1190  flt_in[0] = 0;
1191  memcpy(fir, p->perf_fir_mem, sizeof(int16_t) * LPC_ORDER);
1192  memcpy(iir, p->perf_iir_mem, sizeof(int16_t) * LPC_ORDER);
1193 
1194  synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
1195  fir, iir, flt_in, vector + PITCH_MAX, 0);
1196  memcpy(vector, p->harmonic_mem, sizeof(int16_t) * PITCH_MAX);
1197  harmonic_noise_sub(hf + i, vector + PITCH_MAX, in);
1198 
1199  acb_search(p, residual, impulse_resp, in, i);
1200  ff_g723_1_gen_acb_excitation(residual, p->prev_excitation,
1201  p->pitch_lag[i >> 1], &p->subframe[i],
1202  p->cur_rate);
1203  sub_acb_contrib(residual, impulse_resp, in);
1204 
1205  fcb_search(p, impulse_resp, in, i);
1206 
1207  /* Reconstruct the excitation */
1208  ff_g723_1_gen_acb_excitation(impulse_resp, p->prev_excitation,
1209  p->pitch_lag[i >> 1], &p->subframe[i],
1210  RATE_6300);
1211 
1212  memmove(p->prev_excitation, p->prev_excitation + SUBFRAME_LEN,
1213  sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
1214  for (j = 0; j < SUBFRAME_LEN; j++)
1215  in[j] = av_clip_int16((in[j] << 1) + impulse_resp[j]);
1216  memcpy(p->prev_excitation + PITCH_MAX - SUBFRAME_LEN, in,
1217  sizeof(int16_t) * SUBFRAME_LEN);
1218 
1219  /* Update filter memories */
1220  synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
1221  p->perf_fir_mem, p->perf_iir_mem,
1222  in, vector + PITCH_MAX, 0);
1223  memmove(p->harmonic_mem, p->harmonic_mem + SUBFRAME_LEN,
1224  sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
1225  memcpy(p->harmonic_mem + PITCH_MAX - SUBFRAME_LEN, vector + PITCH_MAX,
1226  sizeof(int16_t) * SUBFRAME_LEN);
1227 
1228  in += SUBFRAME_LEN;
1229  offset += LPC_ORDER;
1230  }
1231 
1232  av_free(start);
1233 
1234  if ((ret = ff_alloc_packet2(avctx, avpkt, 24, 0)) < 0)
1235  return ret;
1236 
1237  *got_packet_ptr = 1;
1238  avpkt->size = pack_bitstream(p, avpkt);
1239  return 0;
1240 }
1241 
1242 static const AVCodecDefault defaults[] = {
1243  { "b", "6300" },
1244  { NULL },
1245 };
1246 
1247 AVCodec ff_g723_1_encoder = {
1248  .name = "g723_1",
1249  .long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
1250  .type = AVMEDIA_TYPE_AUDIO,
1251  .id = AV_CODEC_ID_G723_1,
1252  .priv_data_size = sizeof(G723_1_Context),
1253  .init = g723_1_encode_init,
1254  .encode2 = g723_1_encode_frame,
1255  .defaults = defaults,
1256  .sample_fmts = (const enum AVSampleFormat[]) {
1257  AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
1258  },
1259 };
float min
Definition: vorbis_enc_data.h:456