FFmpeg
aptxenc.c
/*
 * Audio Processing Technology codec for Bluetooth (aptX)
 *
 * Copyright (C) 2017 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "aptx.h"
#include "encode.h"

/*
 * Half-band QMF analysis filter realized with a polyphase FIR filter.
 * Split into 2 subbands and downsample by 2.
 * So for each pair of samples that goes in, one sample goes out,
 * split into 2 separate subbands.
 */
av_always_inline
static void aptx_qmf_polyphase_analysis(FilterSignal signal[NB_FILTERS],
                                        const int32_t coeffs[NB_FILTERS][FILTER_TAPS],
                                        int shift,
                                        int32_t samples[NB_FILTERS],
                                        int32_t *low_subband_output,
                                        int32_t *high_subband_output)
{
    int32_t subbands[NB_FILTERS];
    int i;

    for (i = 0; i < NB_FILTERS; i++) {
        aptx_qmf_filter_signal_push(&signal[i], samples[NB_FILTERS-1-i]);
        subbands[i] = aptx_qmf_convolution(&signal[i], coeffs[i], shift);
    }

    *low_subband_output  = av_clip_intp2(subbands[0] + subbands[1], 23);
    *high_subband_output = av_clip_intp2(subbands[0] - subbands[1], 23);
}

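/*
 * Note: subbands[0] and subbands[1] above are the two polyphase branch
 * outputs; for a quadrature mirror filter pair their sum and difference
 * yield the low and high half-bands, each clipped back to the 24-bit
 * working range.
 */
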
/*
 * Two stage QMF analysis tree.
 * Split 4 input samples into 4 subbands and downsample by 4.
 * So for each group of 4 samples that goes in, one sample goes out,
 * split into 4 separate subbands.
 */
static void aptx_qmf_tree_analysis(QMFAnalysis *qmf,
                                   int32_t samples[4],
                                   int32_t subband_samples[4])
{
    int32_t intermediate_samples[4];
    int i;

    /* Split 4 input samples into 2 intermediate subbands downsampled to 2 samples */
    for (i = 0; i < 2; i++)
        aptx_qmf_polyphase_analysis(qmf->outer_filter_signal,
                                    aptx_qmf_outer_coeffs, 23,
                                    &samples[2*i],
                                    &intermediate_samples[0+i],
                                    &intermediate_samples[2+i]);

    /* Split 2 intermediate subband samples into 4 final subbands downsampled to 1 sample */
    for (i = 0; i < 2; i++)
        aptx_qmf_polyphase_analysis(qmf->inner_filter_signal[i],
                                    aptx_qmf_inner_coeffs, 23,
                                    &intermediate_samples[2*i],
                                    &subband_samples[2*i+0],
                                    &subband_samples[2*i+1]);
}

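/*
 * Coarse binary search over an ascending interval table: starting from
 * index 0, descend in power-of-two steps and keep the largest index whose
 * scaled interval value does not exceed value << 24.
 */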
static av_always_inline int32_t aptx_bin_search(int32_t value, int32_t factor,
                                                const int32_t *intervals, int32_t nb_intervals)
{
    int32_t idx = 0;
    int i;

    for (i = nb_intervals >> 1; i > 0; i >>= 1)
        if (MUL64(factor, intervals[idx + i]) <= ((int64_t)value << 24))
            idx += i;

    return idx;
}

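/*
 * Quantize one subband difference sample. Roughly: the magnitude is matched
 * against the scaled interval table, a dither-dependent offset decides
 * between the two neighbouring code values, and the resulting quantization
 * error is stored so aptx_insert_sync() can later pick the cheapest sample
 * to flip when the parity has to be forced. The sign is folded back in at
 * the end (x ^ -1 == ~x).
 */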
static void aptx_quantize_difference(Quantize *quantize,
                                     int32_t sample_difference,
                                     int32_t dither,
                                     int32_t quantization_factor,
                                     ConstTables *tables)
{
    const int32_t *intervals = tables->quantize_intervals;
    int32_t quantized_sample, dithered_sample, parity_change;
    int32_t d, mean, interval, inv, sample_difference_abs;
    int64_t error;

    sample_difference_abs = FFABS(sample_difference);
    sample_difference_abs = FFMIN(sample_difference_abs, (1 << 23) - 1);

    quantized_sample = aptx_bin_search(sample_difference_abs >> 4,
                                       quantization_factor,
                                       intervals, tables->tables_size);

    d = rshift32_clip24(MULH(dither, dither), 7) - (1 << 23);
    d = rshift64(MUL64(d, tables->quantize_dither_factors[quantized_sample]), 23);

    intervals += quantized_sample;
    mean = (intervals[1] + intervals[0]) / 2;
    interval = (intervals[1] - intervals[0]) * (-(sample_difference < 0) | 1);

    dithered_sample = rshift64_clip24(MUL64(dither, interval) + ((int64_t)av_clip_intp2(mean + d, 23) << 32), 32);
    error = ((int64_t)sample_difference_abs << 20) - MUL64(dithered_sample, quantization_factor);
    quantize->error = FFABS(rshift64(error, 23));

    parity_change = quantized_sample;
    if (error < 0)
        quantized_sample--;
    else
        parity_change--;

    inv = -(sample_difference < 0);
    quantize->quantized_sample = quantized_sample ^ inv;
    quantize->quantized_sample_parity_change = parity_change ^ inv;
}

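/*
 * Encode 4 consecutive samples of one channel: run the QMF analysis to get
 * one sample per subband, generate the dither words, then quantize each
 * subband's prediction residual using the table set selected by hd
 * (ff_aptx_quant_tables[0] for aptX, [1] for aptX HD).
 */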
static void aptx_encode_channel(Channel *channel, int32_t samples[4], int hd)
{
    int32_t subband_samples[4];
    int subband;
    aptx_qmf_tree_analysis(&channel->qmf, samples, subband_samples);
    ff_aptx_generate_dither(channel);
    for (subband = 0; subband < NB_SUBBANDS; subband++) {
        int32_t diff = av_clip_intp2(subband_samples[subband] - channel->prediction[subband].predicted_sample, 23);
        aptx_quantize_difference(&channel->quantize[subband], diff,
                                 channel->dither[subband],
                                 channel->invert_quantize[subband].quantization_factor,
                                 &ff_aptx_quant_tables[hd][subband]);
    }
}

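/*
 * Sync insertion: aptx_check_parity() tracks the running sync index and
 * reports when this group of codewords has to carry a forced parity. In
 * that case, the quantized sample with the smallest quantization error
 * across all channels and subbands is swapped for its parity-changing
 * alternative computed in aptx_quantize_difference().
 */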
static void aptx_insert_sync(Channel channels[NB_CHANNELS], int32_t *idx)
{
    if (aptx_check_parity(channels, idx)) {
        int i;
        Channel *c;
        static const int map[] = { 1, 2, 0, 3 };
        Quantize *min = &channels[NB_CHANNELS-1].quantize[map[0]];
        for (c = &channels[NB_CHANNELS-1]; c >= channels; c--)
            for (i = 0; i < NB_SUBBANDS; i++)
                if (c->quantize[map[i]].error < min->error)
                    min = &c->quantize[map[i]];

        /* Forcing the desired parity is done by offsetting by 1 the quantized
         * sample from the subband featuring the smallest quantization error. */
        min->quantized_sample = min->quantized_sample_parity_change;
    }
}

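/*
 * Pack one channel's 4 quantized subband codes into a codeword; the low bit
 * of the last subband's code is replaced by the parity bit derived from all
 * subbands. Field widths, as implied by the masks and shifts below:
 *   aptX    (16 bits): [15:13] subband 3 (LSB = parity), [12:11] subband 2,
 *                      [10:7]  subband 1, [6:0] subband 0
 *   aptX HD (24 bits): [23:19] subband 3 (LSB = parity), [18:15] subband 2,
 *                      [14:9]  subband 1, [8:0] subband 0
 */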
static uint16_t aptx_pack_codeword(Channel *channel)
{
    int32_t parity = aptx_quantized_parity(channel);
    return (((channel->quantize[3].quantized_sample & 0x06) | parity) << 13)
         | (((channel->quantize[2].quantized_sample & 0x03)         ) << 11)
         | (((channel->quantize[1].quantized_sample & 0x0F)         ) <<  7)
         | (((channel->quantize[0].quantized_sample & 0x7F)         ) <<  0);
}

static uint32_t aptxhd_pack_codeword(Channel *channel)
{
    int32_t parity = aptx_quantized_parity(channel);
    return (((channel->quantize[3].quantized_sample & 0x01E) | parity) << 19)
         | (((channel->quantize[2].quantized_sample & 0x00F)         ) << 15)
         | (((channel->quantize[1].quantized_sample & 0x03F)         ) <<  9)
         | (((channel->quantize[0].quantized_sample & 0x1FF)         ) <<  0);
}

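/*
 * Encode one group of 4 samples per channel and emit one codeword per
 * channel: 2 bytes (big-endian 16 bits) per channel for aptX, 3 bytes
 * (big-endian 24 bits) per channel for aptX HD. The inverse quantizer and
 * predictor are also updated here so the encoder state stays in lockstep
 * with what a decoder would reconstruct.
 */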
static void aptx_encode_samples(AptXContext *ctx,
                                int32_t samples[NB_CHANNELS][4],
                                uint8_t *output)
{
    int channel;
    for (channel = 0; channel < NB_CHANNELS; channel++)
        aptx_encode_channel(&ctx->channels[channel], samples[channel], ctx->hd);

    aptx_insert_sync(ctx->channels, &ctx->sync_idx);

    for (channel = 0; channel < NB_CHANNELS; channel++) {
        ff_aptx_invert_quantize_and_prediction(&ctx->channels[channel], ctx->hd);
        if (ctx->hd)
            AV_WB24(output + 3*channel,
                    aptxhd_pack_codeword(&ctx->channels[channel]));
        else
            AV_WB16(output + 2*channel,
                    aptx_pack_codeword(&ctx->channels[channel]));
    }
}

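/*
 * Frame encoding: input is planar signed 32-bit (AV_SAMPLE_FMT_S32P) and
 * only the top 24 bits of each sample are used (hence the >> 8 below).
 * Every 4 input samples per channel produce one block of s->block_size
 * output bytes, so output_size = block_size * nb_samples / 4.
 */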
static int aptx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    AptXContext *s = avctx->priv_data;
    int pos, ipos, channel, sample, output_size, ret;

    if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
        return ret;

    output_size = s->block_size * frame->nb_samples/4;
    if ((ret = ff_get_encode_buffer(avctx, avpkt, output_size, 0)) < 0)
        return ret;

    for (pos = 0, ipos = 0; pos < output_size; pos += s->block_size, ipos += 4) {
        int32_t samples[NB_CHANNELS][4];

        for (channel = 0; channel < NB_CHANNELS; channel++)
            for (sample = 0; sample < 4; sample++)
                samples[channel][sample] = (int32_t)AV_RN32A(&frame->data[channel][4*(ipos+sample)]) >> 8;

        aptx_encode_samples(s, samples, avpkt->data + pos);
    }

    ff_af_queue_remove(&s->afq, frame->nb_samples, &avpkt->pts, &avpkt->duration);
    *got_packet_ptr = 1;
    return 0;
}

static av_cold int aptx_close(AVCodecContext *avctx)
{
    AptXContext *s = avctx->priv_data;
    ff_af_queue_close(&s->afq);
    return 0;
}

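/*
 * Codec registration. As a rough usage sketch (not part of this file, and
 * assuming the raw aptX/aptX HD muxers are enabled in the build), the
 * encoders can be exercised from the command line with something like:
 *
 *     ffmpeg -i input.wav -ar 44100 -c:a aptx    output.aptx
 *     ffmpeg -i input.wav -ar 48000 -c:a aptx_hd output.aptxhd
 *
 * The encoders themselves only accept stereo S32P input at the sample
 * rates listed below.
 */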
#if CONFIG_APTX_ENCODER
const AVCodec ff_aptx_encoder = {
    .name                  = "aptx",
    .long_name             = NULL_IF_CONFIG_SMALL("aptX (Audio Processing Technology for Bluetooth)"),
    .type                  = AVMEDIA_TYPE_AUDIO,
    .id                    = AV_CODEC_ID_APTX,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
    .priv_data_size        = sizeof(AptXContext),
    .init                  = ff_aptx_init,
    .encode2               = aptx_encode_frame,
    .close                 = aptx_close,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
    .channel_layouts       = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
                                                             AV_SAMPLE_FMT_NONE },
    .supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
};
#endif

#if CONFIG_APTX_HD_ENCODER
const AVCodec ff_aptx_hd_encoder = {
    .name                  = "aptx_hd",
    .long_name             = NULL_IF_CONFIG_SMALL("aptX HD (Audio Processing Technology for Bluetooth)"),
    .type                  = AVMEDIA_TYPE_AUDIO,
    .id                    = AV_CODEC_ID_APTX_HD,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
    .priv_data_size        = sizeof(AptXContext),
    .init                  = ff_aptx_init,
    .encode2               = aptx_encode_frame,
    .close                 = aptx_close,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
    .channel_layouts       = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
                                                             AV_SAMPLE_FMT_NONE },
    .supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
};
#endif