#include "config_components.h"
    for (ch = 0; ch < s->channels; ch++) {
        if (!(s->planar_samples[ch] = av_mallocz((AC3_FRAME_SIZE + AC3_BLOCK_SIZE) *
                                                 sizeof(**s->planar_samples))))
            return AVERROR(ENOMEM);
    }
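The loop above sizes each channel's planar buffer for a full frame plus one extra block, so the tail of the previous frame can sit in front of the new samples for MDCT overlap. Below is a minimal standalone sketch of that sizing using plain calloc; the names alloc_planar_buffers, BLOCK_SIZE and BLOCKS_PER_FRAME are hypothetical, not the encoder's own helpers.

#include <stdlib.h>

#define BLOCK_SIZE       256   /* samples per audio block (AC-3 uses 256)   */
#define BLOCKS_PER_FRAME 6     /* blocks per frame (AC-3 uses up to 6)      */

/* Allocate one zero-initialized planar buffer per channel, sized for a full
 * frame plus one extra block that will hold the tail of the previous frame. */
static float **alloc_planar_buffers(int num_channels)
{
    float **planar = calloc(num_channels, sizeof(*planar));
    if (!planar)
        return NULL;
    for (int ch = 0; ch < num_channels; ch++) {
        planar[ch] = calloc((BLOCKS_PER_FRAME + 1) * BLOCK_SIZE, sizeof(**planar));
        if (!planar[ch]) {
            while (ch--)
                free(planar[ch]);
            free(planar);
            return NULL;
        }
    }
    return planar;
}

The (BLOCKS_PER_FRAME + 1) factor mirrors the AC3_FRAME_SIZE + AC3_BLOCK_SIZE sizing used above.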
    for (ch = 0; ch < s->channels; ch++) {
        /* copy the last block of the previous frame to the start of the current frame */
        memcpy(&s->planar_samples[ch][0],
               &s->planar_samples[ch][AC3_BLOCK_SIZE * s->num_blocks],
               AC3_BLOCK_SIZE * sizeof(s->planar_samples[0][0]));
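The memcpy above is the first half of a simple delay-buffer scheme: before the new frame is copied in, the last block of the previous frame is moved to the front of the planar buffer, so every 512-sample analysis window spans 256 old and 256 new samples. A sketch of the full scheme, reusing BLOCK_SIZE from the previous sketch; shift_in_frame is a hypothetical helper:

#include <string.h>

/* planar[ch] points to (num_blocks + 1) * BLOCK_SIZE floats laid out as
 * [ previous block | block 0 | block 1 | ... | block N-1 ]                 */
static void shift_in_frame(float **planar, const float *const *input,
                           int num_channels, int num_blocks)
{
    for (int ch = 0; ch < num_channels; ch++) {
        /* keep the last block of the previous frame as MDCT history */
        memcpy(planar[ch], planar[ch] + (size_t)num_blocks * BLOCK_SIZE,
               BLOCK_SIZE * sizeof(*planar[ch]));
        /* append the new frame's samples behind the history block */
        memcpy(planar[ch] + BLOCK_SIZE, input[ch],
               (size_t)num_blocks * BLOCK_SIZE * sizeof(*planar[ch]));
    }
}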
    for (ch = 0; ch < s->channels; ch++) {
        for (blk = 0; blk < s->num_blocks; blk++) {
            AC3Block *block = &s->blocks[blk];
            const SampleType *input_samples = &s->planar_samples[ch][blk * AC3_BLOCK_SIZE];

            /* apply the analysis window to 512 overlapping input samples */
            s->fdsp->vector_fmul(s->windowed_samples, input_samples,
                                 s->mdct_window, AC3_WINDOW_SIZE);

            /* 512-point MDCT -> 256 coefficients for this block and channel */
            s->tx_fn(s->tx, block->mdct_coef[ch+1],
                     s->windowed_samples, sizeof(float));
        }
    }
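To show what the window-then-transform pair above computes, here is a naive, self-contained version of the step: 512 windowed samples (256 from the previous block plus 256 new ones) are reduced to 256 frequency coefficients. This is the textbook O(N^2) MDCT formula, not the FFT-based transform the encoder actually uses (whose normalization may differ); mdct_naive and window_and_mdct are hypothetical names.

#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define WINDOW_SIZE 512   /* 2 * BLOCK_SIZE: 50% overlapped analysis window */

/* Naive MDCT: out[k] = sum_n in[n] * cos(pi/N * (n + 0.5 + N/2) * (k + 0.5)),
 * with N = WINDOW_SIZE / 2 output coefficients. For illustration only. */
static void mdct_naive(float out[WINDOW_SIZE / 2], const float in[WINDOW_SIZE])
{
    const int N = WINDOW_SIZE / 2;
    for (int k = 0; k < N; k++) {
        double acc = 0.0;
        for (int n = 0; n < WINDOW_SIZE; n++)
            acc += in[n] * cos(M_PI / N * (n + 0.5 + N / 2.0) * (k + 0.5));
        out[k] = (float)acc;
    }
}

/* Window the 512 overlapping samples, then transform them into 256
 * coefficients: the same two steps as the vector_fmul + tx_fn pair above. */
static void window_and_mdct(float coef[WINDOW_SIZE / 2],
                            const float samples[WINDOW_SIZE],
                            const float window[WINDOW_SIZE])
{
    float windowed[WINDOW_SIZE];
    for (int n = 0; n < WINDOW_SIZE; n++)
        windowed[n] = samples[n] * window[n];
    mdct_naive(coef, windowed);
}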
    int cpl_start, num_cpl_coefs;

    memset(fixed_cpl_coords, 0, AC3_MAX_BLOCKS * sizeof(*cpl_coords));

    /* align the coupling region: start one bin early and round the length
       up to a multiple of 32 coefficients */
    cpl_start     = s->start_freq[CPL_CH] - 1;
    num_cpl_coefs = FFALIGN(s->num_cpl_subbands * 12 + 1, 32);
    cpl_start     = FFMIN(256, cpl_start + num_cpl_coefs) - num_cpl_coefs;

    /* calculate the coupling channel from the fbw channels */
    for (blk = 0; blk < s->num_blocks; blk++) {
        AC3Block *block    = &s->blocks[blk];
        CoefType *cpl_coef = &block->mdct_coef[CPL_CH][cpl_start];
        if (!block->cpl_in_use)
            continue;
        memset(cpl_coef, 0, num_cpl_coefs * sizeof(*cpl_coef));
        for (ch = 1; ch <= s->fbw_channels; ch++) {
            CoefType *ch_coef = &block->mdct_coef[ch][cpl_start];
            if (!block->channel_in_cpl[ch])
                continue;
            for (i = 0; i < num_cpl_coefs; i++)
                cpl_coef[i] += ch_coef[i];
        }
    }
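The loop above builds the coupling channel by summing, bin by bin, the MDCT coefficients of every full-bandwidth channel that participates in coupling. A stripped-down sketch of the same idea; build_coupling_channel and the in_coupling flags are hypothetical names:

#include <string.h>

/* Sum the coupled channels' coefficients into cpl_coef over num_coefs bins.
 * chan_coef[ch] points at each channel's coefficients in the coupling range. */
static void build_coupling_channel(float *cpl_coef,
                                   const float *const *chan_coef,
                                   const int *in_coupling,
                                   int num_channels, int num_coefs)
{
    memset(cpl_coef, 0, num_coefs * sizeof(*cpl_coef));
    for (int ch = 0; ch < num_channels; ch++) {
        if (!in_coupling[ch])
            continue;
        for (int i = 0; i < num_coefs; i++)
            cpl_coef[i] += chan_coef[ch][i];
    }
}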
        /* energy of each coupling band, per block, for CPL and coupled fbw channels */
        int band_size = s->cpl_band_sizes[bnd];
        for (ch = CPL_CH; ch <= s->fbw_channels; ch++) {
            for (blk = 0; blk < s->num_blocks; blk++) {
                AC3Block *block = &s->blocks[blk];
                if (!block->cpl_in_use || (ch > CPL_CH && !block->channel_in_cpl[ch]))
                    continue;
                for (j = 0; j < band_size; j++) {
                    CoefType v = block->mdct_coef[ch][i+j];
                    MAC_COEF(energy[blk][ch][bnd], v, v);
                }
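Coupling coordinates are derived from per-band energies, i.e. the sum of squared MDCT coefficients over each coupling band. A minimal sketch of that accumulation; band_energy is a hypothetical helper and band_sizes stands in for the encoder's coupling band layout:

/* Accumulate the energy (sum of squared coefficients) of each coupling band.
 * coef points at the first coupled bin; band_sizes[bnd] gives each band's width. */
static void band_energy(double *energy, const float *coef,
                        const int *band_sizes, int num_bands)
{
    int bin = 0;
    for (int bnd = 0; bnd < num_bands; bnd++) {
        double e = 0.0;
        for (int j = 0; j < band_sizes[bnd]; j++, bin++)
            e += (double)coef[bin] * coef[bin];
        energy[bnd] = e;
    }
}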
    /* calculate coupling coordinates for all blocks and all coupled channels */
    for (blk = 0; blk < s->num_blocks; blk++) {
        AC3Block *block = &s->blocks[blk];
        if (!block->cpl_in_use)
            continue;
        for (ch = 1; ch <= s->fbw_channels; ch++) {
            if (!block->channel_in_cpl[ch])
                continue;
            for (bnd = 0; bnd < s->num_cpl_bands; bnd++) {
                cpl_coords[blk][ch][bnd] = calc_cpl_coord(energy[blk][ch][bnd],
                                                          energy[blk][CPL_CH][bnd]);
            }
        }
    }

    /* determine which blocks send new coupling coordinates */
    for (blk = 0; blk < s->num_blocks; blk++) {
        AC3Block *block  = &s->blocks[blk];
        AC3Block *block0 = blk ? &s->blocks[blk-1] : NULL;

        memset(block->new_cpl_coords, 0, sizeof(block->new_cpl_coords));

        if (block->cpl_in_use) {
            /* send new coordinates for every channel if this is the first
               block or if the previous block did not use coupling */
            if (!blk || !block0->cpl_in_use) {
                for (ch = 1; ch <= s->fbw_channels; ch++)
                    block->new_cpl_coords[ch] = 1;
            } else {
                /* otherwise resend only if a channel just joined coupling or
                   its coordinates changed too much since the previous block */
                for (ch = 1; ch <= s->fbw_channels; ch++) {
                    if (!block->channel_in_cpl[ch])
                        continue;
                    if (!block0->channel_in_cpl[ch]) {
                        block->new_cpl_coords[ch] = 1;
                    } else {
                        CoefSumType coord_diff = 0;
                        for (bnd = 0; bnd < s->num_cpl_bands; bnd++) {
                            coord_diff += FFABS(cpl_coords[blk-1][ch][bnd] -
                                                cpl_coords[blk  ][ch][bnd]);
                        }
                        coord_diff /= s->num_cpl_bands;
                        if (coord_diff > NEW_CPL_COORD_THRESHOLD)
                            block->new_cpl_coords[ch] = 1;
                    }
                }
            }
        }
    }
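The coord_diff test above decides whether a channel must transmit fresh coupling coordinates or may reuse the previous block's: if the mean absolute change across all coupling bands exceeds a threshold, new coordinates are flagged. A self-contained sketch of that decision; needs_new_coords is a hypothetical helper and the threshold value is left to the caller rather than tied to the encoder's constant:

#include <math.h>

/* Return 1 if the average per-band change in coupling coordinates between
 * the previous and the current block warrants resending them. */
static int needs_new_coords(const double *prev, const double *cur,
                            int num_bands, double threshold)
{
    double diff = 0.0;
    for (int bnd = 0; bnd < num_bands; bnd++)
        diff += fabs(prev[bnd] - cur[bnd]);
    diff /= num_bands;
    return diff > threshold;
}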
    /* calculate final coupling coordinates, pooling the energies of blocks
       that reuse the coordinates of an earlier block */
    for (bnd = 0; bnd < s->num_cpl_bands; bnd++) {
        blk = 0;
        while (blk < s->num_blocks) {
            int blk1;
            AC3Block *block = &s->blocks[blk];

            if (!block->cpl_in_use) {
                blk++;
                continue;
            }

            for (ch = 1; ch <= s->fbw_channels; ch++) {
                CoefSumType energy_ch, energy_cpl;
                if (!block->channel_in_cpl[ch])
                    continue;
                energy_cpl = energy[blk][CPL_CH][bnd];
                energy_ch  = energy[blk][ch][bnd];
                blk1 = blk + 1;
                while (blk1 < s->num_blocks && !s->blocks[blk1].new_cpl_coords[ch]) {
                    if (s->blocks[blk1].cpl_in_use) {
                        energy_cpl += energy[blk1][CPL_CH][bnd];
                        energy_ch  += energy[blk1][ch][bnd];
                    }
                    blk1++;
                }
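When coordinates are reused across several blocks, the energies of those blocks are pooled before the coordinate is computed, so one coordinate describes the whole run. The coordinate itself is essentially the amplitude ratio sqrt(channel energy / coupling-channel energy); the encoder's calc_cpl_coord may apply additional scaling and clamping. A sketch of both steps under those assumptions, with hypothetical names (cpl_coord, pooled_cpl_coord) and a simple guard against division by zero:

#include <math.h>

/* Coupling coordinate for one band: channel-to-coupling energy ratio,
 * expressed as an amplitude scale factor. */
static double cpl_coord(double energy_ch, double energy_cpl)
{
    if (energy_cpl <= 0.0)
        return 0.0;                       /* nothing to scale against */
    return sqrt(energy_ch / energy_cpl);
}

/* Pool energies over the blocks [blk_start, blk_end) that reuse the same
 * coordinates, then derive a single coordinate for the whole run. */
static double pooled_cpl_coord(const double *energy_ch, const double *energy_cpl,
                               int blk_start, int blk_end)
{
    double e_ch = 0.0, e_cpl = 0.0;
    for (int blk = blk_start; blk < blk_end; blk++) {
        e_ch  += energy_ch[blk];
        e_cpl += energy_cpl[blk];
    }
    return cpl_coord(e_ch, e_cpl);
}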
    /* convert coupling coordinates to 24-bit fixed point and extract exponents */
    for (blk = 0; blk < s->num_blocks; blk++) {
        AC3Block *block = &s->blocks[blk];
        if (!block->cpl_in_use)
            continue;

        s->ac3dsp.float_to_fixed24(fixed_cpl_coords[blk][1],
                                   cpl_coords[blk][1],
                                   s->fbw_channels * 16);
        s->ac3dsp.extract_exponents(block->cpl_coord_exp[1],
                                    fixed_cpl_coords[blk][1],
                                    s->fbw_channels * 16);
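The two DSP calls above convert each coordinate to 24-bit fixed point and then derive an exponent from how far the value sits below full scale. A sketch of that idea for a single value; fixed24_exponent is a hypothetical helper, and the exact rounding and saturation of the encoder's DSP routines may differ:

#include <math.h>
#include <stdint.h>

/* Convert a small float to Q24 fixed point, then count how many left shifts
 * are needed before the magnitude reaches bit 23; that count, capped at 24,
 * serves as the exponent (24 meaning "effectively zero"). */
static int fixed24_exponent(float x, int32_t *fixed_out)
{
    int32_t v = (int32_t)lrintf(x * (1 << 24));   /* Q24 fixed point */
    int32_t m = v < 0 ? -v : v;
    int e = 0;

    *fixed_out = v;
    while (e < 24 && m < (1 << 23)) {   /* normalize towards bit 23 */
        m <<= 1;
        e++;
    }
    return e;
}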
        for (ch = 1; ch <= s->fbw_channels; ch++) {
            int bnd, min_exp, max_exp, master_exp;

            if (!block->new_cpl_coords[ch])
                continue;

            /* determine the master exponent from the per-band exponent range */
            min_exp = max_exp = block->cpl_coord_exp[ch][0];
            for (bnd = 1; bnd < s->num_cpl_bands; bnd++) {
                int exp = block->cpl_coord_exp[ch][bnd];
                min_exp = FFMIN(exp, min_exp);
                max_exp = FFMAX(exp, max_exp);
            }
            master_exp = ((max_exp - 15) + 2) / 3;
            master_exp = FFMAX(master_exp, 0);
            while (min_exp < master_exp * 3)
                master_exp--;
            for (bnd = 0; bnd < s->num_cpl_bands; bnd++) {
                block->cpl_coord_exp[ch][bnd] = av_clip(block->cpl_coord_exp[ch][bnd] -
                                                        master_exp * 3, 0, 15);
            }
            block->cpl_master_exp[ch] = master_exp;

            /* quantize the mantissas against the adjusted exponents */
            for (bnd = 0; bnd < s->num_cpl_bands; bnd++) {
                int cpl_exp  = block->cpl_coord_exp[ch][bnd];
                int cpl_mant = (fixed_cpl_coords[blk][ch][bnd] << (5 + cpl_exp + master_exp * 3)) >> 24;
                if (cpl_exp == 15)
                    cpl_mant >>= 1;
                else
                    cpl_mant -= 16;
                block->cpl_coord_mant[ch][bnd] = cpl_mant;
            }
        }
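Coupling coordinates are sent as a per-channel master exponent (in steps of three exponent units) plus a per-band exponent and mantissa; the code above picks the master exponent from the largest band exponent and then rescales the per-band exponents against it. A simplified sketch of that grouping, mirroring the ((max_exp - 15) + 2) / 3 rounding used above; master_exponent is a hypothetical helper and band_exp values are assumed non-negative, as exponent extraction produces:

/* Derive a master exponent so every per-band exponent fits in 0..15 after
 * master * 3 is subtracted, without pushing the smallest exponent below 0. */
static int master_exponent(const int *band_exp, int num_bands)
{
    int min_exp = band_exp[0], max_exp = band_exp[0];
    int master;

    for (int bnd = 1; bnd < num_bands; bnd++) {
        if (band_exp[bnd] < min_exp) min_exp = band_exp[bnd];
        if (band_exp[bnd] > max_exp) max_exp = band_exp[bnd];
    }
    /* smallest master exponent that brings max_exp down to 15 or less */
    master = ((max_exp - 15) + 2) / 3;
    if (master < 0)
        master = 0;
    /* ...but never so large that the smallest exponent would go negative */
    while (min_exp < master * 3)
        master--;
    return master;
}

Each band exponent is then reduced by master * 3 and clipped to the 0..15 range, exactly as in the av_clip call above.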
    for (blk = 0; blk < s->num_blocks; blk++) {
        AC3Block *block = &s->blocks[blk];

        /* new rematrixing flags are always sent in the first block */
        block->new_rematrixing_strategy = !blk;

        block->num_rematrixing_bands = 4;
        if (block->cpl_in_use) {
            block->num_rematrixing_bands -= (s->start_freq[CPL_CH] <= 61);
            block->num_rematrixing_bands -= (s->start_freq[CPL_CH] == 37);
            if (blk && block->num_rematrixing_bands != block0->num_rematrixing_bands)
                block->new_rematrixing_strategy = 1;
        }

        if (!s->rematrixing_enabled) {
            block0 = block;
            continue;
        }

        for (bnd = 0; bnd < block->num_rematrixing_bands; bnd++) {
            /* sum the squared left, right, sum and difference signals for this band */
            sum_square_butterfly(s, sum, block->mdct_coef[1] + start,
                                 block->mdct_coef[2] + start, end - start);

            /* rematrix the band if the sum/difference pair is cheaper to code */
            if (FFMIN(sum[2], sum[3]) < FFMIN(sum[0], sum[1]))
                block->rematrixing_flags[bnd] = 1;
            else
                block->rematrixing_flags[bnd] = 0;

            /* send new flags if this band's flag changed from the previous block */
            if (blk && block->rematrixing_flags[bnd] != block0->rematrixing_flags[bnd])
                block->new_rematrixing_strategy = 1;
        }
        block0 = block;
    }
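Rematrixing codes a stereo pair as sum/difference (mid/side) per band whenever that is cheaper, which the code above detects by comparing energies: if the smaller of the sum/difference energies is below the smaller of the left/right energies, the band is flagged. A compact sketch of that comparison over one band; rematrix_band_flag is a hypothetical helper that folds the sum_square_butterfly step and the FFMIN comparison into one function:

/* Decide the rematrixing flag for one band of a stereo pair.
 * Returns 1 if coding the sum/difference signals is expected to be cheaper. */
static int rematrix_band_flag(const float *left, const float *right, int len)
{
    double e_l = 0.0, e_r = 0.0, e_sum = 0.0, e_diff = 0.0;
    for (int i = 0; i < len; i++) {
        double l = left[i], r = right[i];
        e_l    += l * l;
        e_r    += r * r;
        e_sum  += (l + r) * (l + r);
        e_diff += (l - r) * (l - r);
    }
    {
        double min_lr = e_l   < e_r    ? e_l   : e_r;
        double min_sd = e_sum < e_diff ? e_sum : e_diff;
        return min_sd < min_lr;
    }
}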
    if (s->options.allow_per_frame_metadata) {
        ret = ff_ac3_validate_metadata(s);
        if (ret)
            return ret;
    }

    /* ... */

    s->cpl_on = s->cpl_enabled;
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
void ff_ac3_compute_coupling_strategy(AC3EncodeContext *s)
Set the initial coupling strategy parameters prior to coupling analysis.
static void copy_input_samples(AC3EncodeContext *s, SampleType **samples)
uint8_t channel_in_cpl[AC3_MAX_CHANNELS]
channel in coupling (chincpl)
AVFrame: This structure describes decoded (raw) audio or video data.
int AC3_NAME(encode_frame)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
int ff_ac3_validate_metadata(AC3EncodeContext *s)
Validate metadata options as set by AVOption system.
static int allocate_sample_buffers(AC3EncodeContext *s)
void ff_eac3_set_cpl_states(AC3EncodeContext *s)
Set coupling states.
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
#define LOCAL_ALIGNED_16(t, v,...)
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
AC3EncodeContext: AC-3 encoder private context.
#define AC3_MAX_CHANNELS
maximum number of channels, including coupling channel
AC3Block: Data for a single audio block.
void ff_ac3_adjust_frame_size(AC3EncodeContext *s)
Adjust the frame size to make the average bit rate match the target bit rate.
static void scale_coefficients(AC3EncodeContext *s)
static void clip_coefficients(AudioDSPContext *adsp, int32_t *coef, unsigned int len)
static void apply_channel_coupling(AC3EncodeContext *s)
static CoefType calc_cpl_coord(CoefSumType energy_ch, CoefSumType energy_cpl)
const uint8_t ff_ac3_rematrix_band_tab[5]
Table of bin locations for rematrixing bands. Reference: Section 7.5.2, Rematrixing: Frequency Band De...
#define CPL_CH
coupling channel index
uint8_t rematrixing_flags[4]
rematrixing flags
static void compute_rematrixing_strategy(AC3EncodeContext *s)
int num_blocks
number of blocks per frame
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
static void sum_square_butterfly(AC3EncodeContext *s, int64_t sum[4], const int32_t *coef0, const int32_t *coef1, int len)
int cpl_end_freq
coupling channel end frequency bin
int num_rematrixing_bands
number of rematrixing bands
AVCodecContext: Main external API structure.
static void apply_mdct(AC3EncodeContext *s)
#define MAC_COEF(d, a, b)
AVPacket: This structure stores compressed data.
AVCodecContext * avctx
parent AVCodecContext
int ff_ac3_encode_frame_common_end(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
int cpl_in_use
coupling in use for this block (cplinu)
#define NEW_CPL_COORD_THRESHOLD