/* FFmpeg Vorbis encoder -- libavcodec/vorbisenc.c (source listing, fragmentary excerpt) */
#define BITSTREAM_WRITER_LE

#define MAX_CHANNELS     2
#define MAX_CODEBOOK_DIM 8

#define MAX_FLOOR_CLASS_DIM  4
#define NUM_FLOOR_PARTITIONS 8
#define MAX_FLOOR_VALUES (MAX_FLOOR_CLASS_DIM*NUM_FLOOR_PARTITIONS+2)

#define RESIDUE_SIZE           1600
#define RESIDUE_PART_SIZE      32
#define NUM_RESIDUE_PARTITIONS (RESIDUE_SIZE/RESIDUE_PART_SIZE)
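/* Added note (not in the original file): with the values above, a residue
 * vector of RESIDUE_SIZE = 1600 coefficients is split into
 * RESIDUE_SIZE / RESIDUE_PART_SIZE = 1600 / 32 = 50 partitions;
 * residue_encode() below classifies each of these partitions before coding it. */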
/* cb_lookup_vals() */
    return dimensions * entries;

/* ready_codebook() */
    if (!cb->dimensions || !cb->pow2)
        /* ... */
    for (i = 0; i < cb->nentries; i++) {
        /* ... */
        for (j = 0; j < cb->ndimensions; j++) {
            /* ... */
            off = (i / div) % vals;        /* lookup type 1 */
            /* ... */
            off = i * cb->ndimensions + j; /* lookup type 2 */
            /* ... */
            cb->dimensions[i * cb->ndimensions + j] =
                last + cb->min + cb->quantlist[off] * cb->delta;
            /* ... */
            last = cb->dimensions[i * cb->ndimensions + j];
            cb->pow2[i] += cb->dimensions[i * cb->ndimensions + j] *
                           cb->dimensions[i * cb->ndimensions + j];
/* ready_residue() */
        for (j = 0; j < 8; j++)
            if (rc->books[i][j] != -1)
                /* ... */
        assert(cb->ndimensions >= 2);
        /* ... */
        for (j = 0; j < cb->nentries; j++) {
            /* ... */
            a = fabs(cb->dimensions[j * cb->ndimensions]);
            /* ... */
            a = fabs(cb->dimensions[j * cb->ndimensions + 1]);
/* create_vorbis_context(): codebook setup */
    for (book = 0; book < venc->ncodebooks; book++) {
        /* ... */
        if (!cb->lens || !cb->codewords)
            /* ... */
        for (i = 0; i < vals; i++)
/* create_vorbis_context(): floor setup */
    fc->partition_to_class = av_malloc(sizeof(int) * fc->partitions);
    if (!fc->partition_to_class)
        /* ... */
    for (i = 0; i < fc->partitions; i++) {
        static const int a[] = {0, 1, 2, 2, 3, 3, 4, 4};
        fc->partition_to_class[i] = a[i];
        fc->nclasses = FFMAX(fc->nclasses, fc->partition_to_class[i]);
    }
    /* ... */
    for (i = 0; i < fc->nclasses; i++) {
        /* ... */
        books = (1 << c->subclass);
        /* ... */
        for (j = 0; j < books; j++)
            /* ... */
    }
    /* ... */
    for (i = 0; i < fc->partitions; i++)
        fc->values += fc->classes[fc->partition_to_class[i]].dim;
    /* ... */
    fc->list[1].x = 1 << fc->rangebits;
    for (i = 2; i < fc->values; i++) {
        static const int a[] = {
             93,  23, 372,   6,  46, 186, 750,  14,  33,  65,
            130, 260, 556,   3,  10,  18,  28,  39,  55,  79,
            111, 158, 220, 312, 464, 650, 850
        };
        fc->list[i].x = a[i - 2];
    }
/* create_vorbis_context(): residue setup */
    static const int8_t a[10][8] = {
        { -1, -1, -1, -1, -1, -1, -1, -1, },
        { -1, -1, 16, -1, -1, -1, -1, -1, },
        { -1, -1, 17, -1, -1, -1, -1, -1, },
        { -1, -1, 18, -1, -1, -1, -1, -1, },
        { -1, -1, 19, -1, -1, -1, -1, -1, },
        { -1, -1, 20, -1, -1, -1, -1, -1, },
        { -1, -1, 21, -1, -1, -1, -1, -1, },
        { 22, 23, -1, -1, -1, -1, -1, -1, },
        { 24, 25, -1, -1, -1, -1, -1, -1, },
        { 26, 27, 28, -1, -1, -1, -1, -1, },
    };
    memcpy(rc->books, a, sizeof a);
/* create_vorbis_context(): mapping setup */
    if (!mc->floor || !mc->residue)
        /* ... */
    for (i = 0; i < mc->submaps; i++) {
        /* ... */
    }
    mc->coupling_steps = venc->channels == 2 ? 1 : 0;
    /* ... */
    if (!mc->magnitude || !mc->angle)
        /* ... */
    if (mc->coupling_steps) {
        mc->magnitude[0] = 0;
/* put_float() */
    mant = (int)ldexp(frexp(f, &exp), 20);
    /* ... */
    res |= mant | (exp << 21);
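/*
 * Illustration, not part of the original file: put_float() above packs a
 * float into 32 bits as sign (bit 31), biased exponent (bits 21-30) and a
 * 21-bit mantissa, the float32 format used in the Vorbis I setup header.
 * A minimal decode-side sketch (hypothetical helper, following the spec's
 * float32_unpack(); assumes <stdint.h> and <math.h>):
 */
static double example_float32_unpack(uint32_t x)
{
    uint32_t mantissa =  x & 0x1fffff;            /* low 21 bits   */
    uint32_t sign     =  x & 0x80000000u;         /* bit 31        */
    int      exponent = (x & 0x7fe00000) >> 21;   /* bits 21..30   */
    double   v        = mantissa * ldexp(1.0, exponent - 788); /* bias 788 */
    return sign ? -v : v;
}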
/* put_codebook_header() */
    /* lengths never decreasing => the "ordered" codeword-length layout can be used */
    for (i = 1; i < cb->nentries; i++)
        if (cb->lens[i] < cb->lens[i-1])
            /* ... */
    if (i == cb->nentries)
        /* ... */

    /* ordered case */
        int len = cb->lens[0];
        /* ... */
        while (i < cb->nentries) {
            /* ... */
            for (j = 0; j + i < cb->nentries; j++)

    /* unordered (possibly sparse) case */
        for (i = 0; i < cb->nentries; i++)
            /* ... */
        if (i != cb->nentries)
            /* ... */
        for (i = 0; i < cb->nentries; i++) {
/* put_floor_header() */
    for (i = 0; i < fc->partitions; i++)
        /* ... */
    for (i = 0; i < fc->nclasses; i++) {
        /* ... */
        if (fc->classes[i].subclass)
            /* ... */
        books = (1 << fc->classes[i].subclass);
        /* ... */
        for (j = 0; j < books; j++)
            /* ... */
    }
    /* ... */
    for (i = 2; i < fc->values; i++)
/* put_residue_header() */
    for (j = 0; j < 8; j++)
        /* ... */
    for (j = 0; j < 8; j++)
        if (rc->books[i][j] != -1)
/* put_main_header() */
    int buffer_len = 50000;

    /* identification header */
    for (i = 0; "vorbis"[i]; i++)
        /* ... */
    buffer_len -= hlens[0];

    /* comment header */
    for (i = 0; "vorbis"[i]; i++)
        /* ... */
    buffer_len -= hlens[1];

    /* setup header */
    for (i = 0; "vorbis"[i]; i++)
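/*
 * Illustration, not part of the original file: each of the three headers
 * written above starts with a one-byte packet type (1 = identification,
 * 3 = comment, 5 = setup) followed by the six characters "vorbis"; the loops
 * above emit the "vorbis" part, the type byte is written just before each
 * loop in the full source.  A hypothetical byte-buffer sketch of that common
 * prefix (assumes <stddef.h> and <string.h>):
 */
static size_t example_vorbis_header_prefix(unsigned char *buf, int packet_type)
{
    buf[0] = (unsigned char)packet_type;  /* 1, 3 or 5 */
    memcpy(buf + 1, "vorbis", 6);         /* no terminating NUL in the stream */
    return 7;                             /* bytes written */
}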
    if (mc->coupling_steps) {
        /* ... */
        for (j = 0; j < mc->coupling_steps; j++) {
            /* ... */
        }
    }
    /* ... */
    for (j = 0; j < venc->channels; j++)
        /* ... */
    for (j = 0; j < mc->submaps; j++) {
        /* ... */
    }
    /* ... */
    len = hlens[0] + hlens[1] + hlens[2];
    /* ... */
    for (i = 0; i < 3; i++) {
        memcpy(p, buffer + buffer_len, hlens[i]);
        /* ... */
        buffer_len += hlens[i];
    }
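/*
 * Illustration, not part of the original file: the loop above concatenates
 * the three headers into the codec extradata; the sizes of the first two
 * headers are stored in front of them with Xiph "lacing" (the last length is
 * implicit), i.e. each length is written as a run of 0xFF bytes plus a
 * remainder byte, which is what av_xiphlacing() does.  A hypothetical sketch
 * of that encoding:
 */
static unsigned example_xiph_lacing(unsigned char *s, unsigned int v)
{
    unsigned n = 0;
    while (v >= 0xff) {   /* one 0xFF byte for every full 255 still to store */
        *s++ = 0xff;
        v   -= 0xff;
        n++;
    }
    *s = v;               /* final byte: remainder < 255 */
    return n + 1;         /* number of bytes written */
}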
/* get_floor_average() */
    int begin = fc->list[fc->list[FFMAX(i-1, 0)].sort].x;
    /* ... */
    for (j = begin; j < end; j++)
        average += fabs(coeffs[j]);
    return average / (end - begin);
static void floor_fit(vorbis_enc_context *venc, vorbis_enc_floor *fc,
                      float *coeffs, uint16_t *posts, int samples)
{
    int range = 255 / fc->multiplier + 1;
    /* ... */
    float tot_average = 0.0;
    /* ... */
    for (i = 0; i < fc->values; i++) {
        /* ... */
        tot_average += averages[i];
    }
    tot_average /= fc->values;
    /* ... */
    for (i = 0; i < fc->values; i++) {
        int position  = fc->list[fc->list[i].sort].x;
        float average = averages[i];
        /* ... */
        average = sqrt(tot_average * average) * pow(1.25f, position * 0.005f);
        for (j = 0; j < range - 1; j++)
            /* ... */
        posts[fc->list[i].sort] = j;
/* render_point() */
    return y0 + (x - x0) * (y1 - y0) / (x1 - x0);
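/*
 * Added note (not in the original file): render_point() linearly
 * interpolates between two known floor posts.  For example, with
 * (x0, y0) = (0, 10), (x1, y1) = (128, 50) and x = 64 the predicted value is
 * 10 + 64 * 40 / 128 = 30 (integer arithmetic, truncating); floor_encode()
 * below only codes the difference between each actual post and this
 * prediction.
 */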
/* floor_encode() */
    int range = 255 / fc->multiplier + 1;
    /* ... */
    coded[0] = coded[1] = 1;
    /* ... */
    for (i = 2; i < fc->values; i++) {
        int predicted = render_point(fc->list[fc->list[i].low].x,
                                     posts[fc->list[i].low],
                                     fc->list[fc->list[i].high].x,
                                     posts[fc->list[i].high],
                                     fc->list[i].x);
        int highroom = range - predicted;
        int lowroom  = predicted;
        int room     = FFMIN(highroom, lowroom);
        if (predicted == posts[i]) {
            /* ... */
        } else {
            if (!coded[fc->list[i].low ])
                coded[fc->list[i].low ] = -1;
            if (!coded[fc->list[i].high])
                coded[fc->list[i].high] = -1;
        }
        if (posts[i] > predicted) {
            if (posts[i] - predicted > room)
                coded[i] = posts[i] - predicted + lowroom;
            else
                coded[i] = (posts[i] - predicted) << 1;
        } else {
            if (predicted - posts[i] > room)
                coded[i] = predicted - posts[i] + highroom - 1;
            else
                coded[i] = ((predicted - posts[i]) << 1) - 1;
        }
    }
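/*
 * Illustration, not part of the original file: the if/else chain above maps
 * the signed difference posts[i] - predicted onto a non-negative value that
 * always fits the available range.  A hypothetical sketch of the inverse
 * mapping, as a Vorbis floor1 decoder reconstructs the post ('room' here is
 * the spec's 2 * min(highroom, lowroom), twice the encoder's variable):
 */
static int example_floor1_unwrap(int val, int predicted, int highroom, int lowroom)
{
    int room = 2 * (highroom < lowroom ? highroom : lowroom);
    if (!val)
        return predicted;                     /* post equals the prediction  */
    if (val >= room)                          /* wrapped: offset past 'room' */
        return highroom > lowroom ? val - lowroom + predicted
                                  : predicted - (val - highroom) - 1;
    return (val & 1) ? predicted - ((val + 1) >> 1)   /* odd  -> below */
                     : predicted + (val >> 1);        /* even -> above */
}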
    for (i = 0; i < fc->partitions; i++) {
        /* ... */
        int k, cval = 0, csub = 1 << c->subclass;
        /* ... */
        for (k = 0; k < c->dim; k++) {
            /* ... */
            for (l = 0; l < csub; l++) {
                /* ... */
                if (c->books[l] != -1)
                    /* ... */
                if (coded[counter + k] < maxval)
                    /* ... */
            }
            /* ... */
            cshift += c->subclass;
        }
        /* ... */
        for (k = 0; k < c->dim; k++) {
            int book  = c->books[cval & (csub-1)];
            int entry = coded[counter++];
            cval >>= c->subclass;
/* put_vector() */
            d -= vec[j] * num[j];
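/*
 * Added note (not in the original file): put_vector() searches for the
 * codebook entry v minimizing pow2[v] - v.x, where pow2 is built in
 * ready_codebook() above from the summed squared components.  Assuming
 * pow2[v] ends up holding |v|^2 / 2, minimizing |v|^2/2 - v.x over entries
 * is equivalent to minimizing the Euclidean distance |v - x|^2, since |x|^2
 * is the same for every candidate entry.
 */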
/* residue_encode() */
    int pass, i, j, p, k;
    /* ... */
    int partitions = (rc->end - rc->begin) / psize;
    /* ... */
    for (p = 0; p < partitions; p++) {
        float max1 = 0.0, max2 = 0.0;
        int s = rc->begin + p * psize;
        for (k = s; k < s + psize; k += 2) {
            max1 = FFMAX(max1, fabs(coeffs[          k / real_ch]));
            max2 = FFMAX(max2, fabs(coeffs[samples + k / real_ch]));
        }
        /* ... */
        if (max1 < rc->maxes[i][0] && max2 < rc->maxes[i][1])
            /* ... */
    }
    /* ... */
    while (p < partitions) {
        /* ... */
        for (i = 0; i < classwords; i++) {
            /* ... */
            entry += classes[j][p + i];
        }
        /* ... */
        for (i = 0; i < classwords && p < partitions; i++, p++) {
            /* ... */
            int nbook = rc->books[classes[j][p]][pass];
            /* ... */
            assert(rc->type == 0 || rc->type == 2);
            /* ... */
            int s = rc->begin + p * psize, a1, b1;
            /* ... */
            *pv++ = coeffs[a2 + b2];
            /* ... */
            coeffs[a1 + b1] -= *pv++;
/* apply_window_and_mdct() */
    const float *win = venc->win[1];
/* spawn_empty_frame() */
        memset(f->extended_data[ch], 0, bps * f->nb_samples);
/* move_audio() */
    for (sf = 0; sf < subframes; sf++) {
        /* ... */
        memcpy(save + sf * sf_size, input, len);
/* vorbis_encode_frame() */
    int i, ret, need_more;
    /* ... */
    need_more = frame && need_more;
    /* ... */
    for (i = 0; i < frames_needed; i++) {
        /* ... */
    }
    /* ... */
    if (mode->blockflag) {
        /* ... */
    }
    /* ... */
    *got_packet_ptr = 1;
/* vorbis_encode_init() */
    av_log(avctx, AV_LOG_ERROR,
           "Current FFmpeg Vorbis encoder only supports 2 channels.\n");