libavcodec/g722enc.c

#define FREEZE_INTERVAL 128
#define MAX_FRAME_SIZE  32768
#define MAX_TRELLIS     16

/* g722_encode_close(): free the per-band trellis state */
for (i = 0; i < 2; i++) {
    av_freep(&c->paths[i]);
    av_freep(&c->node_buf[i]);
    av_freep(&c->nodep_buf[i]);
}
/* g722_encode_init(): allocate the trellis buffers and validate options */
int frontier = 1 << avctx->trellis;
int max_paths = frontier * FREEZE_INTERVAL;
for (i = 0; i < 2; i++) {
    c->paths[i]     = av_mallocz_array(max_paths, sizeof(**c->paths));
    c->node_buf[i]  = av_mallocz_array(frontier, 2 * sizeof(**c->node_buf));
    c->nodep_buf[i] = av_mallocz_array(frontier, 2 * sizeof(**c->nodep_buf));
}
...
av_log(avctx, AV_LOG_WARNING, "Requested frame size is not "
       "allowed. Using %d instead of %d\n", new_frame_size,
       avctx->frame_size);
...
av_log(avctx, AV_LOG_WARNING, "Requested trellis value is not "
       "allowed. Using %d instead of %d\n", new_trellis,
       avctx->trellis);
static const int16_t low_quant[33] = {
      35,   72,  110,  150,  190,  233,  276,  323,
     370,  422,  473,  530,  587,  650,  714,  786,
     858,  940, 1023, 1121, 1219, 1339, 1458, 1612,
    1765, 1980, 2195, 2557, 2919
};
static void filter_samples(G722Context *c, const int16_t *samples,
                           int *xlow, int *xhigh)
{
    int xout[2];
    c->prev_samples[c->prev_samples_pos++] = samples[0];
    c->prev_samples[c->prev_samples_pos++] = samples[1];
    c->dsp.apply_qmf(c->prev_samples + c->prev_samples_pos - 24, xout);
    *xlow  = xout[0] + xout[1] >> 14;   /* (sum) >> 14: + binds tighter than >> */
    *xhigh = xout[0] - xout[1] >> 14;   /* (difference) >> 14 */
    ...
}
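The QMF splits each pair of input samples into a low and a high subband via filtered sum and difference terms. As a toy illustration of that principle only (a 2-tap Haar-style split, not the 24-tap G.722 QMF; all names hypothetical):

    #include <stdio.h>

    /* Sum carries the low band, difference the high band: a DC signal
     * lands entirely in "low", a Nyquist-rate signal entirely in "high". */
    int main(void)
    {
        const int dc[2]  = { 100, 100 };    /* lowest-frequency content  */
        const int nyq[2] = { 100, -100 };   /* highest-frequency content */
        printf("dc : low=%d high=%d\n", (dc[0] + dc[1]) >> 1, (dc[0] - dc[1]) >> 1);
        printf("nyq: low=%d high=%d\n", (nyq[0] + nyq[1]) >> 1, (nyq[0] - nyq[1]) >> 1);
        return 0;   /* dc -> (100, 0), nyq -> (0, 100) */
    }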
static int encode_high(const struct G722Band *state, int xhigh)
{
    int diff = av_clip_int16(xhigh - state->s_predictor);
    int pred = 141 * state->scale_factor >> 8;
    /* = diff >= 0 ? (diff < pred) + 2 : diff >= -pred */
    return ((diff ^ (diff >> (sizeof(diff)*8-1))) < pred) + 2*(diff >= 0);
}
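Both quantizers use diff ^ (diff >> (sizeof(diff)*8-1)) as a branchless magnitude: it leaves non-negative values unchanged and maps negative values to -diff - 1 (bitwise NOT). It relies on arithmetic right shift of negative integers, which is implementation-defined in C but behaves this way on every compiler FFmpeg targets. A standalone check; near_abs() is a hypothetical helper added here for illustration:

    #include <stdio.h>

    /* Branchless near-abs: x for x >= 0, -x - 1 for x < 0.
     * The arithmetic right shift smears the sign bit into a 0 or -1 mask. */
    static int near_abs(int x)
    {
        return x ^ (x >> (sizeof(x) * 8 - 1));
    }

    int main(void)
    {
        int tests[] = { 5, 0, -1, -5, -32768 };
        for (int i = 0; i < 5; i++)
            printf("near_abs(%d) = %d\n", tests[i], near_abs(tests[i]));
        return 0;   /* prints 5, 0, 0, 4, 32767 */
    }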
static int encode_low(const struct G722Band *state, int xlow)
{
    int diff  = av_clip_int16(xlow - state->s_predictor);
    /* = diff >= 0 ? diff : -(diff + 1) */
    int limit = diff ^ (diff >> (sizeof(diff)*8-1));
    int i = 0;
    limit = limit + 1 << 10;   /* (limit + 1) << 10: + binds tighter than << */
    if (limit > low_quant[8] * state->scale_factor)
        i = 9;                 /* skip the lower half of the table */
    while (i < 29 && limit > low_quant[i] * state->scale_factor)
        i++;
    return (diff < 0 ? (i < 2 ? 63 : 33) : 61) - i;
}
static void g722_encode_trellis(G722Context *c, int trellis,
                                uint8_t *dst, int nb_samples,
                                const int16_t *samples)
{
    int i, j, k;
    int frontier = 1 << trellis;
    struct TrellisNode **nodes[2];
    struct TrellisNode **nodes_next[2];
    int pathn[2] = {0, 0}, froze = -1;
    struct TrellisPath *p[2];

    for (i = 0; i < 2; i++) {
        nodes[i]      = c->nodep_buf[i];
        nodes_next[i] = c->nodep_buf[i] + frontier;
        memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf[i]));
        nodes[i][0]        = c->node_buf[i] + frontier;
        nodes[i][0]->ssd   = 0;
        nodes[i][0]->path  = 0;
        nodes[i][0]->state = c->band[i];
    }

    for (i = 0; i < nb_samples >> 1; i++) {
        int xlow, xhigh;
        struct TrellisNode *next[2];
        int heap_pos[2] = {0, 0};

        for (j = 0; j < 2; j++) {
            next[j] = c->node_buf[j] + frontier*(i & 1);
            memset(nodes_next[j], 0, frontier * sizeof(**nodes_next));
        }

        filter_samples(c, &samples[2*i], &xlow, &xhigh);

        for (j = 0; j < frontier && nodes[0][j]; j++) {
            /* Only test a small range of codes around the verbatim
             * encode_low() output; the better half of the frontier
             * gets a wider range. */
            int range = j < frontier/2 ? 4 : 0;
            struct TrellisNode *cur_node = nodes[0][j];

            int ilow = encode_low(&cur_node->state, xlow);

            for (k = ilow - range; k <= ilow + range && k <= 63; k += 4) {
                int decoded, dec_diff, pos;
                uint32_t ssd;
                struct TrellisNode* node;

                if (k < 0)
                    continue;

                decoded = av_clip_intp2((cur_node->state.scale_factor *
                                         ff_g722_low_inv_quant6[k] >> 10)
                                        + cur_node->state.s_predictor, 14);
                dec_diff = xlow - decoded;

#define STORE_NODE(index, UPDATE, VALUE)\
                ssd = cur_node->ssd + dec_diff*dec_diff;\
                /* Check for wraparound of the cumulative error */\
                if (ssd < cur_node->ssd)\
                    continue;\
                if (heap_pos[index] < frontier) {\
                    pos = heap_pos[index]++;\
                    av_assert2(pathn[index] < FREEZE_INTERVAL * frontier);\
                    node = nodes_next[index][pos] = next[index]++;\
                    node->path = pathn[index]++;\
                } else {\
                    /* Try to replace one of the leaf nodes with the new \
                     * one, but not always testing the same leaf position */\
                    pos = (frontier>>1) + (heap_pos[index] & ((frontier>>1) - 1));\
                    if (ssd >= nodes_next[index][pos]->ssd)\
                        continue;\
                    heap_pos[index]++;\
                    node = nodes_next[index][pos];\
                }\
                node->ssd   = ssd;\
                node->state = cur_node->state;\
                UPDATE;\
                c->paths[index][node->path].value = VALUE;\
                c->paths[index][node->path].prev  = cur_node->path;\
                /* Sift the new node up the heap to restore heap order */\
                while (pos > 0) {\
                    int parent = (pos - 1) >> 1;\
                    if (nodes_next[index][parent]->ssd <= ssd)\
                        break;\
                    FFSWAP(struct TrellisNode*, nodes_next[index][parent],\
                                                nodes_next[index][pos]);\
                    pos = parent;\
                }

                STORE_NODE(0, ff_g722_update_low_predictor(&node->state, k >> 2), k);
            }
        }

        for (j = 0; j < frontier && nodes[1][j]; j++) {
            int ihigh;
            struct TrellisNode *cur_node = nodes[1][j];

            /* The high band has only 4 possible codes, so test them all */
            for (ihigh = 0; ihigh < 4; ihigh++) {
                int dhigh, decoded, dec_diff, pos;
                uint32_t ssd;
                struct TrellisNode* node;

                dhigh = cur_node->state.scale_factor *
                        ff_g722_high_inv_quant[ihigh] >> 10;
                decoded  = av_clip_intp2(dhigh + cur_node->state.s_predictor, 14);
                dec_diff = xhigh - decoded;

                STORE_NODE(1, ff_g722_update_high_predictor(&node->state, dhigh, ihigh), ihigh);
            }
        }

        for (j = 0; j < 2; j++) {
            FFSWAP(struct TrellisNode**, nodes[j], nodes_next[j]);

            /* Rescale the cumulative errors to avoid 32-bit overflow */
            if (nodes[j][0]->ssd > (1 << 16)) {
                for (k = 1; k < frontier && nodes[j][k]; k++)
                    nodes[j][k]->ssd -= nodes[j][0]->ssd;
                nodes[j][0]->ssd = 0;
            }
        }

        /* Every FREEZE_INTERVAL sample pairs, emit the best path so far
         * and restart path accounting */
        if (i == froze + FREEZE_INTERVAL) {
            p[0] = &c->paths[0][nodes[0][0]->path];
            p[1] = &c->paths[1][nodes[1][0]->path];
            for (j = i; j > froze; j--) {
                dst[j] = p[1]->value << 6 | p[0]->value;
                p[0] = &c->paths[0][p[0]->prev];
                p[1] = &c->paths[1][p[1]->prev];
            }
            froze = i;
            pathn[0] = pathn[1] = 0;
            memset(nodes[0] + 1, 0, (frontier - 1)*sizeof(**nodes));
            memset(nodes[1] + 1, 0, (frontier - 1)*sizeof(**nodes));
        }
    }

    p[0] = &c->paths[0][nodes[0][0]->path];
    p[1] = &c->paths[1][nodes[1][0]->path];
    for (j = i; j > froze; j--) {
        dst[j] = p[1]->value << 6 | p[0]->value;
        p[0] = &c->paths[0][p[0]->prev];
        p[1] = &c->paths[1][p[1]->prev];
    }
    c->band[0] = nodes[0][0]->state;
    c->band[1] = nodes[1][0]->state;
}
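Within STORE_NODE, nodes_next[] is kept as a binary min-heap keyed on the cumulative ssd: the best node sits at index 0, the worst survivors sit in the leaf half of the array (indices >= frontier/2) where eviction candidates are probed, and every insertion restores heap order by sifting up. A minimal standalone sketch of that sift-up step (hypothetical names, not the FFmpeg code):

    #include <stdio.h>

    /* Minimal min-heap sift-up, mirroring the loop at the end of STORE_NODE:
     * compare the new entry with its parent at (pos - 1) / 2 and swap upward
     * until the parent is no larger. */
    static void sift_up(unsigned *heap, int pos)
    {
        while (pos > 0) {
            int parent = (pos - 1) >> 1;
            if (heap[parent] <= heap[pos])
                break;
            unsigned tmp = heap[parent];
            heap[parent] = heap[pos];
            heap[pos]    = tmp;
            pos = parent;
        }
    }

    int main(void)
    {
        unsigned ssd[5] = { 10, 20, 30, 40, 5 };  /* 5 inserted at pos 4 */
        sift_up(ssd, 4);
        for (int i = 0; i < 5; i++)
            printf("%u ", ssd[i]);   /* prints: 5 10 30 40 20 */
        printf("\n");
        return 0;
    }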
static av_always_inline void encode_byte(G722Context *c, uint8_t *dst,
                                         const int16_t *samples)
{
    int xlow, xhigh, ilow, ihigh;
    filter_samples(c, samples, &xlow, &xhigh);
    ...
    *dst = ihigh << 6 | ilow;
}

static void g722_encode_no_trellis(G722Context *c, uint8_t *dst,
                                   int nb_samples, const int16_t *samples)
{
    int i;
    for (i = 0; i < nb_samples; i += 2)
        encode_byte(c, dst++, &samples[i]);
}
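Each output byte packs the 2-bit high-band code above the 6-bit low-band code, so every sample pair becomes exactly one byte. A decoder splits it back apart; a small sketch with a hypothetical helper, not part of the file:

    #include <stdint.h>
    #include <stdio.h>

    /* Split one G.722 byte back into its subband codes, inverting
     * "*dst = ihigh << 6 | ilow" above. */
    static void unpack_g722_byte(uint8_t byte, int *ihigh, int *ilow)
    {
        *ihigh = byte >> 6;     /* top 2 bits: high-band code */
        *ilow  = byte & 0x3f;   /* low 6 bits: low-band code  */
    }

    int main(void)
    {
        int ihigh, ilow;
        unpack_g722_byte(3 << 6 | 42, &ihigh, &ilow);
        printf("ihigh=%d ilow=%d\n", ihigh, ilow);  /* ihigh=3 ilow=42 */
        return 0;
    }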
static int g722_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    G722Context *c = avctx->priv_data;
    const int16_t *samples = (const int16_t *)frame->data[0];
    ...
    /* nb_samples is rounded down to an even count; an odd trailing sample
     * is encoded separately, duplicated to fill the sample pair */
    if (nb_samples < frame->nb_samples) {
        int16_t last_samples[2] = { samples[nb_samples], samples[nb_samples] };
        encode_byte(c, &avpkt->data[nb_samples >> 1], last_samples);
    }
    ...
}
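For context, this encoder is reached through the public libavcodec API. A minimal sketch of encoding one frame of silence, assuming the pre-5.0 channel fields (channels/channel_layout) that this version of the file uses, with error handling elided:

    /* g722_demo.c: encode one frame with ff_adpcm_g722_encoder.
     * Build sketch: cc g722_demo.c -lavcodec -lavutil */
    #include <stdio.h>
    #include <string.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/channel_layout.h>

    int main(void)
    {
        const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_ADPCM_G722);
        AVCodecContext *ctx  = avcodec_alloc_context3(codec);
        AVFrame  *frame      = av_frame_alloc();
        AVPacket *pkt        = av_packet_alloc();

        ctx->sample_rate    = 16000;               /* G.722 runs at 16 kHz */
        ctx->sample_fmt     = AV_SAMPLE_FMT_S16;   /* see sample_fmts[] below */
        ctx->channel_layout = AV_CH_LAYOUT_MONO;
        ctx->channels       = 1;
        avcodec_open2(ctx, codec, NULL);           /* calls g722_encode_init() */

        frame->nb_samples     = ctx->frame_size;   /* 320 by default */
        frame->format         = ctx->sample_fmt;
        frame->channel_layout = ctx->channel_layout;
        av_frame_get_buffer(frame, 0);
        memset(frame->data[0], 0, frame->nb_samples * sizeof(int16_t)); /* silence */

        avcodec_send_frame(ctx, frame);            /* reaches g722_encode_frame() */
        if (avcodec_receive_packet(ctx, pkt) == 0) {
            printf("%d input samples -> %d byte packet\n",
                   frame->nb_samples, pkt->size);  /* one byte per sample pair */
            av_packet_unref(pkt);
        }

        av_packet_free(&pkt);
        av_frame_free(&frame);
        avcodec_free_context(&ctx);
        return 0;
    }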
Referenced symbols:

struct G722Context::TrellisNode ** nodep_buf[2]
AVFrame
This structure describes decoded (raw) audio or video data.
#define AV_LOG_WARNING
Something somehow does not look correct.
struct G722Context::TrellisPath * paths[2]
static void filter_samples(G722Context *c, const int16_t *samples, int *xlow, int *xhigh)
static void g722_encode_no_trellis(G722Context *c, uint8_t *dst, int nb_samples, const int16_t *samples)
void(* apply_qmf)(const int16_t *prev_samples, int xout[2])
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
static int encode_high(const struct G722Band *state, int xhigh)
#define PREV_SAMPLES_BUF_SIZE
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
static av_cold int g722_encode_init(AVCodecContext *avctx)
struct G722Context::TrellisNode * node_buf[2]
const int16_t ff_g722_low_inv_quant6[64]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int initial_padding
Audio only.
int16_t prev_samples[PREV_SAMPLES_BUF_SIZE]
memory of past decoded samples
avassert.h
simple assert() macros that are a bit more flexible than ISO C assert().
AVCodec ff_adpcm_g722_encoder
const char * name
Name of the codec implementation.
struct G722Context::G722Band band[2]
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
static void g722_encode_trellis(G722Context *c, int trellis, uint8_t *dst, int nb_samples, const int16_t *samples)
static av_cold int g722_encode_close(AVCodecContext *avctx)
static int encode_low(const struct G722Band *state, int xlow)
void ff_g722_update_low_predictor(struct G722Band *band, const int ilow)
int frame_size
Number of samples per channel in an audio frame.
static const int16_t low_quant[33]
avcodec.h
Libavcodec external API header.
AVSampleFormat
Audio sample formats.
AVCodecContext
main external API structure.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
internal.h
common internal API header.
libavutil/common.h
common internal and external API header.
int prev_samples_pos
the number of values in prev_samples
int trellis
trellis RD quantization
#define STORE_NODE(index, UPDATE, VALUE)
const int16_t ff_g722_high_inv_quant[4]
av_cold void ff_g722dsp_init(G722DSPContext *c)
static av_always_inline void encode_byte(G722Context *c, uint8_t *dst, const int16_t *samples)
static enum AVSampleFormat sample_fmts[]
static int g722_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx, int64_t samples)
Rescale from sample rate to AVCodecContext.time_base.
#define FFSWAP(type, a, b)
void ff_g722_update_high_predictor(struct G722Band *band, const int dhigh, const int ihigh)
#define AV_CH_LAYOUT_MONO
AVPacket
This structure stores compressed data.
int16_t scale_factor
delayed quantizer scale factor
int nb_samples
number of audio samples (per channel) described by this frame
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
#define AV_NOPTS_VALUE
Undefined timestamp value.
void * av_mallocz_array(size_t nmemb, size_t size)