FFmpeg
g722enc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) CMU 1993 Computer Science, Speech Group
3  * Chengxiang Lu and Alex Hauptmann
4  * Copyright (c) 2005 Steve Underwood <steveu at coppice.org>
5  * Copyright (c) 2009 Kenan Gillet
6  * Copyright (c) 2010 Martin Storsjo
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * G.722 ADPCM audio encoder
28  */
29 
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mem.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "encode.h"
#include "g722.h"
38 
39 #define FREEZE_INTERVAL 128
40 
41 /* This is an arbitrary value. Allowing insanely large values leads to strange
42  problems, so we limit it to a reasonable value */
43 #define MAX_FRAME_SIZE 32768
44 
45 /* We clip the value of avctx->trellis to prevent data type overflows and
46  undefined behavior. Using larger values is insanely slow anyway. */
47 #define MIN_TRELLIS 0
48 #define MAX_TRELLIS 16
49 
51 {
52  G722Context *c = avctx->priv_data;
53  int i;
54  for (i = 0; i < 2; i++) {
55  av_freep(&c->paths[i]);
56  av_freep(&c->node_buf[i]);
57  av_freep(&c->nodep_buf[i]);
58  }
59  return 0;
60 }
61 
63 {
64  G722Context *c = avctx->priv_data;
65 
66  c->band[0].scale_factor = 8;
67  c->band[1].scale_factor = 2;
68  c->prev_samples_pos = 22;
69 
70  if (avctx->frame_size) {
71  /* validate frame size */
72  if (avctx->frame_size & 1 || avctx->frame_size > MAX_FRAME_SIZE) {
73  int new_frame_size;
74 
75  if (avctx->frame_size == 1)
76  new_frame_size = 2;
77  else if (avctx->frame_size > MAX_FRAME_SIZE)
78  new_frame_size = MAX_FRAME_SIZE;
79  else
80  new_frame_size = avctx->frame_size - 1;
81 
82  av_log(avctx, AV_LOG_WARNING, "Requested frame size is not "
83  "allowed. Using %d instead of %d\n", new_frame_size,
84  avctx->frame_size);
85  avctx->frame_size = new_frame_size;
86  }
87  } else {
88  /* This is arbitrary. We use 320 because it's 20ms @ 16kHz, which is
89  a common packet size for VoIP applications */
90  avctx->frame_size = 320;
91  }
92  avctx->initial_padding = 22;
93 
94  if (avctx->trellis) {
95  /* validate trellis */
96  if (avctx->trellis < MIN_TRELLIS || avctx->trellis > MAX_TRELLIS) {
97  int new_trellis = av_clip(avctx->trellis, MIN_TRELLIS, MAX_TRELLIS);
98  av_log(avctx, AV_LOG_WARNING, "Requested trellis value is not "
99  "allowed. Using %d instead of %d\n", new_trellis,
100  avctx->trellis);
101  avctx->trellis = new_trellis;
102  }
103  if (avctx->trellis) {
104  int frontier = 1 << avctx->trellis;
105  int max_paths = frontier * FREEZE_INTERVAL;
106 
107  for (int i = 0; i < 2; i++) {
108  c->paths[i] = av_calloc(max_paths, sizeof(**c->paths));
109  c->node_buf[i] = av_calloc(frontier, 2 * sizeof(**c->node_buf));
110  c->nodep_buf[i] = av_calloc(frontier, 2 * sizeof(**c->nodep_buf));
111  if (!c->paths[i] || !c->node_buf[i] || !c->nodep_buf[i])
112  return AVERROR(ENOMEM);
113  }
114  }
115  }
116 
117  ff_g722dsp_init(&c->dsp);
118 
119  return 0;
120 }
121 
/* Decision thresholds for the 6-bit low-subband quantizer; each entry is
 * multiplied by the adaptive scale factor at runtime (see encode_low()).
 * Only the first 29 entries are ever read; the remaining slots of the
 * 33-element array stay zero-initialized. */
static const int16_t low_quant[33] = {
      35,   72,  110,  150,  190,  233,  276,  323,
     370,  422,  473,  530,  587,  650,  714,  786,
     858,  940, 1023, 1121, 1219, 1339, 1458, 1612,
    1765, 1980, 2195, 2557, 2919
};
128 
/**
 * Append one pair of input samples to the QMF history buffer and run the
 * analysis filter over the most recent 24 samples, producing one low-band
 * and one high-band value.
 *
 * When the history buffer fills up, the newest 22 samples are moved back
 * to the start so the filter always has enough overlap.
 */
static inline void filter_samples(G722Context *c, const int16_t *samples,
                                  int *xlow, int *xhigh)
{
    int xout[2];
    c->prev_samples[c->prev_samples_pos++] = samples[0];
    c->prev_samples[c->prev_samples_pos++] = samples[1];
    c->dsp.apply_qmf(c->prev_samples + c->prev_samples_pos - 24, xout);
    /* '+'/'-' bind tighter than '>>': the sum/difference is what gets shifted */
    *xlow = xout[0] + xout[1] >> 14;
    *xhigh = xout[0] - xout[1] >> 14;
    if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
        memmove(c->prev_samples,
                c->prev_samples + c->prev_samples_pos - 22,
                22 * sizeof(c->prev_samples[0]));
        c->prev_samples_pos = 22;
    }
}
145 
146 static inline int encode_high(const struct G722Band *state, int xhigh)
147 {
148  int diff = av_clip_int16(xhigh - state->s_predictor);
149  int pred = 141 * state->scale_factor >> 8;
150  /* = diff >= 0 ? (diff < pred) + 2 : diff >= -pred */
151  return ((diff ^ (diff >> (sizeof(diff)*8-1))) < pred) + 2*(diff >= 0);
152 }
153 
154 static inline int encode_low(const struct G722Band* state, int xlow)
155 {
156  int diff = av_clip_int16(xlow - state->s_predictor);
157  /* = diff >= 0 ? diff : -(diff + 1) */
158  int limit = diff ^ (diff >> (sizeof(diff)*8-1));
159  int i = 0;
160  limit = limit + 1 << 10;
161  if (limit > low_quant[8] * state->scale_factor)
162  i = 9;
163  while (i < 29 && limit > low_quant[i] * state->scale_factor)
164  i++;
165  return (diff < 0 ? (i < 2 ? 63 : 33) : 61) - i;
166 }
167 
/**
 * Encode nb_samples input samples into nb_samples/2 G.722 bytes using a
 * Viterbi-style trellis search over the quantizer decisions.
 *
 * Two independent trellises are maintained, index 0 for the low subband
 * and index 1 for the high subband. Each keeps up to (1 << trellis)
 * candidate nodes per input pair, ranked in a min-heap by accumulated
 * squared decoding error (ssd). Every FREEZE_INTERVAL pairs the best path
 * is committed to dst and the path buffers are reset.
 *
 * Buffers in c (paths, node_buf, nodep_buf) must have been allocated by
 * g722_encode_init(); the final band states of the winning paths are
 * written back to c->band[].
 */
static void g722_encode_trellis(G722Context *c, int trellis,
                                uint8_t *dst, int nb_samples,
                                const int16_t *samples)
{
    int i, j, k;
    int frontier = 1 << trellis;
    struct TrellisNode **nodes[2];
    struct TrellisNode **nodes_next[2];
    int pathn[2] = {0, 0}, froze = -1;
    struct TrellisPath *p[2];

    /* Start each subband trellis from the encoder's current band state. */
    for (i = 0; i < 2; i++) {
        nodes[i] = c->nodep_buf[i];
        nodes_next[i] = c->nodep_buf[i] + frontier;
        memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf[i]));
        nodes[i][0] = c->node_buf[i] + frontier;
        nodes[i][0]->ssd = 0;
        nodes[i][0]->path = 0;
        nodes[i][0]->state = c->band[i];
    }

    for (i = 0; i < nb_samples >> 1; i++) {
        int xlow, xhigh;
        struct TrellisNode *next[2];
        int heap_pos[2] = {0, 0};

        /* node_buf halves are reused alternately (double buffering). */
        for (j = 0; j < 2; j++) {
            next[j] = c->node_buf[j] + frontier*(i & 1);
            memset(nodes_next[j], 0, frontier * sizeof(**nodes_next));
        }

        filter_samples(c, &samples[2*i], &xlow, &xhigh);

        /* Low subband: expand each surviving node with codes near ilow. */
        for (j = 0; j < frontier && nodes[0][j]; j++) {
            /* Only k >> 2 affects the future adaptive state, therefore testing
             * small steps that don't change k >> 2 is useless, the original
             * value from encode_low is better than them. Since we step k
             * in steps of 4, make sure range is a multiple of 4, so that
             * we don't miss the original value from encode_low. */
            int range = j < frontier/2 ? 4 : 0;
            struct TrellisNode *cur_node = nodes[0][j];

            int ilow = encode_low(&cur_node->state, xlow);

            for (k = ilow - range; k <= ilow + range && k <= 63; k += 4) {
                int decoded, dec_diff, pos;
                uint32_t ssd;
                struct TrellisNode* node;

                if (k < 0)
                    continue;

                decoded = av_clip_intp2((cur_node->state.scale_factor *
                                  ff_g722_low_inv_quant6[k] >> 10)
                                + cur_node->state.s_predictor, 14);
                dec_diff = xlow - decoded;

#define STORE_NODE(index, UPDATE, VALUE)\
            ssd = cur_node->ssd + dec_diff*dec_diff;\
            /* Check for wraparound. Using 64 bit ssd counters would \
             * be simpler, but is slower on x86 32 bit. */\
            if (ssd < cur_node->ssd)\
                continue;\
            if (heap_pos[index] < frontier) {\
                pos = heap_pos[index]++;\
                av_assert2(pathn[index] < FREEZE_INTERVAL * frontier);\
                node = nodes_next[index][pos] = next[index]++;\
                node->path = pathn[index]++;\
            } else {\
                /* Try to replace one of the leaf nodes with the new \
                 * one, but not always testing the same leaf position */\
                pos = (frontier>>1) + (heap_pos[index] & ((frontier>>1) - 1));\
                if (ssd >= nodes_next[index][pos]->ssd)\
                    continue;\
                heap_pos[index]++;\
                node = nodes_next[index][pos];\
            }\
            node->ssd = ssd;\
            node->state = cur_node->state;\
            UPDATE;\
            c->paths[index][node->path].value = VALUE;\
            c->paths[index][node->path].prev = cur_node->path;\
            /* Sift the newly inserted node up in the heap to restore \
             * the heap property */\
            while (pos > 0) {\
                int parent = (pos - 1) >> 1;\
                if (nodes_next[index][parent]->ssd <= ssd)\
                    break;\
                FFSWAP(struct TrellisNode*, nodes_next[index][parent],\
                                            nodes_next[index][pos]);\
                pos = parent;\
            }
                STORE_NODE(0, ff_g722_update_low_predictor(&node->state, k >> 2), k);
            }
        }

        /* High subband: expand each surviving node with all 4 codes. */
        for (j = 0; j < frontier && nodes[1][j]; j++) {
            int ihigh;
            struct TrellisNode *cur_node = nodes[1][j];

            /* We don't try to get any initial guess for ihigh via
             * encode_high - since there's only 4 possible values, test
             * them all. Testing all of these gives a much, much larger
             * gain than testing a larger range around ilow. */
            for (ihigh = 0; ihigh < 4; ihigh++) {
                int dhigh, decoded, dec_diff, pos;
                uint32_t ssd;
                struct TrellisNode* node;

                dhigh = cur_node->state.scale_factor *
                        ff_g722_high_inv_quant[ihigh] >> 10;
                decoded = av_clip_intp2(dhigh + cur_node->state.s_predictor, 14);
                dec_diff = xhigh - decoded;

                STORE_NODE(1, ff_g722_update_high_predictor(&node->state, dhigh, ihigh), ihigh);
            }
        }

        for (j = 0; j < 2; j++) {
            FFSWAP(struct TrellisNode**, nodes[j], nodes_next[j]);

            /* Rebase the accumulated errors to keep ssd away from the
             * 32-bit wraparound guard above. */
            if (nodes[j][0]->ssd > (1 << 16)) {
                for (k = 1; k < frontier && nodes[j][k]; k++)
                    nodes[j][k]->ssd -= nodes[j][0]->ssd;
                nodes[j][0]->ssd = 0;
            }
        }

        /* Periodically commit the best path and reset the path buffers. */
        if (i == froze + FREEZE_INTERVAL) {
            p[0] = &c->paths[0][nodes[0][0]->path];
            p[1] = &c->paths[1][nodes[1][0]->path];
            for (j = i; j > froze; j--) {
                dst[j] = p[1]->value << 6 | p[0]->value;
                p[0] = &c->paths[0][p[0]->prev];
                p[1] = &c->paths[1][p[1]->prev];
            }
            froze = i;
            pathn[0] = pathn[1] = 0;
            memset(nodes[0] + 1, 0, (frontier - 1)*sizeof(**nodes));
            memset(nodes[1] + 1, 0, (frontier - 1)*sizeof(**nodes));
        }
    }

    /* Flush the remaining (not yet frozen) part of the best path. */
    p[0] = &c->paths[0][nodes[0][0]->path];
    p[1] = &c->paths[1][nodes[1][0]->path];
    for (j = i; j > froze; j--) {
        dst[j] = p[1]->value << 6 | p[0]->value;
        p[0] = &c->paths[0][p[0]->prev];
        p[1] = &c->paths[1][p[1]->prev];
    }
    c->band[0] = nodes[0][0]->state;
    c->band[1] = nodes[1][0]->state;
}
321 
322 static av_always_inline void encode_byte(G722Context *c, uint8_t *dst,
323  const int16_t *samples)
324 {
325  int xlow, xhigh, ilow, ihigh;
326  filter_samples(c, samples, &xlow, &xhigh);
327  ihigh = encode_high(&c->band[1], xhigh);
328  ilow = encode_low (&c->band[0], xlow);
329  ff_g722_update_high_predictor(&c->band[1], c->band[1].scale_factor *
330  ff_g722_high_inv_quant[ihigh] >> 10, ihigh);
331  ff_g722_update_low_predictor(&c->band[0], ilow >> 2);
332  *dst = ihigh << 6 | ilow;
333 }
334 
336  uint8_t *dst, int nb_samples,
337  const int16_t *samples)
338 {
339  int i;
340  for (i = 0; i < nb_samples; i += 2)
342 }
343 
344 static int g722_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
345  const AVFrame *frame, int *got_packet_ptr)
346 {
347  G722Context *c = avctx->priv_data;
348  const int16_t *samples = (const int16_t *)frame->data[0];
349  int nb_samples, out_size, ret;
350 
351  out_size = (frame->nb_samples + 1) / 2;
352  if ((ret = ff_get_encode_buffer(avctx, avpkt, out_size, 0)) < 0)
353  return ret;
354 
355  nb_samples = frame->nb_samples - (frame->nb_samples & 1);
356 
357  if (avctx->trellis)
358  g722_encode_trellis(c, avctx->trellis, avpkt->data, nb_samples, samples);
359  else
360  g722_encode_no_trellis(c, avpkt->data, nb_samples, samples);
361 
362  /* handle last frame with odd frame_size */
363  if (nb_samples < frame->nb_samples) {
364  int16_t last_samples[2] = { samples[nb_samples], samples[nb_samples] };
365  encode_byte(c, &avpkt->data[nb_samples >> 1], last_samples);
366  }
367 
368  if (frame->pts != AV_NOPTS_VALUE)
369  avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->initial_padding);
370  *got_packet_ptr = 1;
371  return 0;
372 }
373 
375  .p.name = "g722",
376  CODEC_LONG_NAME("G.722 ADPCM"),
377  .p.type = AVMEDIA_TYPE_AUDIO,
378  .p.id = AV_CODEC_ID_ADPCM_G722,
379  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME |
381  .priv_data_size = sizeof(G722Context),
383  .close = g722_encode_close,
385  .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
386  .p.ch_layouts = (const AVChannelLayout[]){
388  },
389  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
390 };
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1083
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
av_clip
#define av_clip
Definition: common.h:100
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PREV_SAMPLES_BUF_SIZE
#define PREV_SAMPLES_BUF_SIZE
Definition: g722.h:32
TrellisNode::path
int path
Definition: adpcmenc.c:63
out_size
int out_size
Definition: movenc.c:56
MIN_TRELLIS
#define MIN_TRELLIS
Definition: g722enc.c:47
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
AVPacket::data
uint8_t * data
Definition: packet.h:539
encode.h
FFCodec
Definition: codec_internal.h:127
AV_CODEC_ID_ADPCM_G722
@ AV_CODEC_ID_ADPCM_G722
Definition: codec_id.h:401
ff_adpcm_g722_encoder
const FFCodec ff_adpcm_g722_encoder
Definition: g722enc.c:371
encode_high
static int encode_high(const struct G722Band *state, int xhigh)
Definition: g722enc.c:146
filter_samples
static void filter_samples(G722Context *c, const int16_t *samples, int *xlow, int *xhigh)
Definition: g722enc.c:129
g722_encode_init
static av_cold int g722_encode_init(AVCodecContext *avctx)
Definition: g722enc.c:62
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
g722_encode_no_trellis
static void g722_encode_no_trellis(G722Context *c, uint8_t *dst, int nb_samples, const int16_t *samples)
Definition: g722enc.c:332
ff_g722_low_inv_quant6
const int16_t ff_g722_low_inv_quant6[64]
Definition: g722.c:63
AVCodecContext::initial_padding
int initial_padding
Audio only.
Definition: avcodec.h:1128
TrellisNode
Definition: adpcmenc.c:61
FF_CODEC_ENCODE_CB
#define FF_CODEC_ENCODE_CB(func)
Definition: codec_internal.h:320
avassert.h
TrellisNode::ssd
uint32_t ssd
Definition: adpcmenc.c:62
av_cold
#define av_cold
Definition: attributes.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_g722_high_inv_quant
const int16_t ff_g722_high_inv_quant[4]
Definition: g722.c:51
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:159
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:296
TrellisPath
Definition: aaccoder.c:411
G722Context
Definition: g722.h:34
av_clip_int16
#define av_clip_int16
Definition: common.h:115
av_clip_intp2
#define av_clip_intp2
Definition: common.h:121
ff_g722_update_high_predictor
void ff_g722_update_high_predictor(struct G722Band *band, const int dhigh, const int ihigh)
Definition: g722.c:154
g722_encode_trellis
static void g722_encode_trellis(G722Context *c, int trellis, uint8_t *dst, int nb_samples, const int16_t *samples)
Definition: g722enc.c:168
state
static struct @466 state
ff_samples_to_time_base
static av_always_inline int64_t ff_samples_to_time_base(const AVCodecContext *avctx, int64_t samples)
Rescale from sample rate to AVCodecContext.time_base.
Definition: encode.h:90
g722_encode_close
static av_cold int g722_encode_close(AVCodecContext *avctx)
Definition: g722enc.c:50
FREEZE_INTERVAL
#define FREEZE_INTERVAL
Definition: g722enc.c:39
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1337
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:317
codec_internal.h
MAX_FRAME_SIZE
#define MAX_FRAME_SIZE
Definition: g722enc.c:43
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
low_quant
static const int16_t low_quant[33]
Definition: g722enc.c:122
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
encode_low
static int encode_low(const struct G722Band *state, int xlow)
Definition: g722enc.c:154
range
enum AVColorRange range
Definition: mediacodec_wrapper.c:2594
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
g722.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:532
common.h
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
av_always_inline
#define av_always_inline
Definition: attributes.h:49
TrellisPath::prev
int prev
Definition: aaccoder.c:413
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:142
ret
ret
Definition: filter_design.txt:187
pred
static const float pred[4]
Definition: siprdata.h:259
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
pos
unsigned int pos
Definition: spdifenc.c:414
AVCodecContext
main external API structure.
Definition: avcodec.h:451
channel_layout.h
STORE_NODE
#define STORE_NODE(index, UPDATE, VALUE)
ff_get_encode_buffer
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
Definition: encode.c:106
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
mem.h
g722_encode_frame
static int g722_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Definition: g722enc.c:341
MAX_TRELLIS
#define MAX_TRELLIS
Definition: g722enc.c:48
AV_CHANNEL_LAYOUT_MONO
#define AV_CHANNEL_LAYOUT_MONO
Definition: channel_layout.h:392
AVPacket
This structure stores compressed data.
Definition: packet.h:516
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
ff_g722_update_low_predictor
void ff_g722_update_low_predictor(struct G722Band *band, const int ilow)
Definition: g722.c:143
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_g722dsp_init
av_cold void ff_g722dsp_init(G722DSPContext *c)
Definition: g722dsp.c:68
AV_CODEC_CAP_SMALL_LAST_FRAME
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
Definition: codec.h:81
encode_byte
static av_always_inline void encode_byte(G722Context *c, uint8_t *dst, const int16_t *samples)
Definition: g722enc.c:319