FFmpeg
af_dialoguenhance.c
/*
 * Copyright (c) 2022 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/tx.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "window_func.h"

#include <float.h>

typedef struct AudioDialogueEnhancementContext {
    const AVClass *class;

    double original, enhance, voice;

    int fft_size;
    int overlap;

    float *window;
    float prev_vad;

    AVFrame *in;
    AVFrame *in_frame;
    AVFrame *out_dist_frame;
    AVFrame *windowed_frame;
    AVFrame *windowed_out;
    AVFrame *windowed_prev;
    AVFrame *center_frame;

    AVTXContext *tx_ctx[2], *itx_ctx;
    av_tx_fn tx_fn, itx_fn;
} AudioDialogueEnhanceContext;

#define OFFSET(x) offsetof(AudioDialogueEnhanceContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption dialoguenhance_options[] = {
    { "original", "set original center factor",  OFFSET(original), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0,  1, FLAGS },
    { "enhance",  "set dialogue enhance factor", OFFSET(enhance),  AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0,  3, FLAGS },
    { "voice",    "set voice detection factor",  OFFSET(voice),    AV_OPT_TYPE_DOUBLE, {.dbl=2}, 2, 32, FLAGS },
    {NULL}
};

AVFILTER_DEFINE_CLASS(dialoguenhance);

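/* The filter consumes planar-float stereo and negotiates a 3.0 (L/R/C) output
 * layout: the input pair is passed through on channels 0 and 1, while the
 * extracted, optionally enhanced dialogue is written to the center channel. */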
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *in_layout = NULL, *out_layout = NULL;
    int ret;

    if ((ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLTP)) < 0 ||
        (ret = ff_set_common_formats(ctx, formats)) < 0 ||
        (ret = ff_add_channel_layout(&in_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO)) < 0 ||
        (ret = ff_channel_layouts_ref(in_layout, &ctx->inputs[0]->outcfg.channel_layouts)) < 0 ||
        (ret = ff_add_channel_layout(&out_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_SURROUND)) < 0 ||
        (ret = ff_channel_layouts_ref(out_layout, &ctx->outputs[0]->incfg.channel_layouts)) < 0)
        return ret;

    return ff_set_common_all_samplerates(ctx);
}

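/* Pick an FFT size that keeps the analysis window roughly constant in time
 * across sample rates (2048/4096/8192 for <=50 kHz / <=100 kHz / >100 kHz),
 * use a hop of fft_size/4 (75% overlap) with a sine analysis/synthesis window,
 * and set up forward RDFTs for both input channels plus one inverse RDFT. */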
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioDialogueEnhanceContext *s = ctx->priv;
    float scale = 1.f, iscale, overlap;
    int ret;

    s->fft_size = inlink->sample_rate > 100000 ? 8192 : inlink->sample_rate > 50000 ? 4096 : 2048;
    s->overlap = s->fft_size / 4;

    s->window = av_calloc(s->fft_size, sizeof(*s->window));
    if (!s->window)
        return AVERROR(ENOMEM);

    s->in_frame       = ff_get_audio_buffer(inlink, s->fft_size * 4);
    s->center_frame   = ff_get_audio_buffer(inlink, s->fft_size * 4);
    s->out_dist_frame = ff_get_audio_buffer(inlink, s->fft_size * 4);
    s->windowed_frame = ff_get_audio_buffer(inlink, s->fft_size * 4);
    s->windowed_out   = ff_get_audio_buffer(inlink, s->fft_size * 4);
    s->windowed_prev  = ff_get_audio_buffer(inlink, s->fft_size * 4);
    if (!s->in_frame || !s->windowed_out || !s->windowed_prev ||
        !s->out_dist_frame || !s->windowed_frame || !s->center_frame)
        return AVERROR(ENOMEM);

    generate_window_func(s->window, s->fft_size, WFUNC_SINE, &overlap);

    iscale = 1.f / s->fft_size;

    ret = av_tx_init(&s->tx_ctx[0], &s->tx_fn, AV_TX_FLOAT_RDFT, 0, s->fft_size, &scale, 0);
    if (ret < 0)
        return ret;

    ret = av_tx_init(&s->tx_ctx[1], &s->tx_fn, AV_TX_FLOAT_RDFT, 0, s->fft_size, &scale, 0);
    if (ret < 0)
        return ret;

    ret = av_tx_init(&s->itx_ctx, &s->itx_fn, AV_TX_FLOAT_RDFT, 1, s->fft_size, &iscale, 0);
    if (ret < 0)
        return ret;

    return 0;
}

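/* Multiply a frame by the analysis/synthesis window; with add_to_out_frame
 * set, the windowed result is accumulated into out_frame (overlap-add). */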
static void apply_window(AudioDialogueEnhanceContext *s,
                         const float *in_frame, float *out_frame, const int add_to_out_frame)
{
    const float *window = s->window;

    if (add_to_out_frame) {
        for (int i = 0; i < s->fft_size; i++)
            out_frame[i] += in_frame[i] * window[i];
    } else {
        for (int i = 0; i < s->fft_size; i++)
            out_frame[i] = in_frame[i] * window[i];
    }
}

static float sqrf(float x)
{
    return x * x;
}

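/* Estimate the center (dialogue) spectrum from the stereo pair. Per bin,
 * C = a * (L + R) with a = 0.5 * (1 - sqrt(|L - R|^2 / (|L + R|^2 + eps))),
 * so bins where the two channels agree (center-panned content) are kept
 * while strongly decorrelated bins are attenuated. */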
static void get_centere(AVComplexFloat *left, AVComplexFloat *right,
                        AVComplexFloat *center, int N)
{
    for (int i = 0; i < N; i++) {
        const float l_re = left[i].re;
        const float l_im = left[i].im;
        const float r_re = right[i].re;
        const float r_im = right[i].im;
        const float a = 0.5f * (1.f - sqrtf((sqrf(l_re - r_re) + sqrf(l_im - r_im)) /
                                            (sqrf(l_re + r_re) + sqrf(l_im + r_im) + FLT_EPSILON)));

        center[i].re = a * (l_re + r_re);
        center[i].im = a * (l_im + r_im);
    }
}

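/* Spectral flux of the center estimate: sum of squared magnitude differences
 * between the current and previous frame, a measure of how quickly the
 * spectrum is changing. */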
static float flux(float *curf, float *prevf, int N)
{
    AVComplexFloat *cur  = (AVComplexFloat *)curf;
    AVComplexFloat *prev = (AVComplexFloat *)prevf;
    float sum = 0.f;

    for (int i = 0; i < N; i++) {
        float c_re = cur[i].re;
        float c_im = cur[i].im;
        float p_re = prev[i].re;
        float p_im = prev[i].im;

        sum += sqrf(hypotf(c_re, c_im) - hypotf(p_re, p_im));
    }

    return sum;
}

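/* The same flux measure, but for the side signal (L - R), i.e. the part of
 * the mix that is not common to both channels. */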
static float fluxlr(float *lf, float *lpf,
                    float *rf, float *rpf,
                    int N)
{
    AVComplexFloat *l  = (AVComplexFloat *)lf;
    AVComplexFloat *lp = (AVComplexFloat *)lpf;
    AVComplexFloat *r  = (AVComplexFloat *)rf;
    AVComplexFloat *rp = (AVComplexFloat *)rpf;
    float sum = 0.f;

    for (int i = 0; i < N; i++) {
        float c_re = l[i].re - r[i].re;
        float c_im = l[i].im - r[i].im;
        float p_re = lp[i].re - rp[i].re;
        float p_im = lp[i].im - rp[i].im;

        sum += sqrf(hypotf(c_re, c_im) - hypotf(p_re, p_im));
    }

    return sum;
}

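/* Map the ratio of center flux to total (center + side) flux into a [0, 1]
 * voice-activity score; `a` is the user-controlled "voice" factor that
 * steepens the mapping. */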
static float calc_vad(float fc, float flr, float a)
{
    const float vad = a * (fc / (fc + flr) - 0.5f);

    return av_clipf(vad, 0.f, 1.f);
}

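/* Scale the center spectrum: a Wiener-like gain G = |C|^2 / (|C|^2 + |L-R|^2)
 * is weighted by the voice-activity score and the "enhance" factor, on top of
 * the constant "original" contribution. */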
static void get_final(float *c, float *l,
                      float *r, float vad, int N,
                      float original, float enhance)
{
    AVComplexFloat *center = (AVComplexFloat *)c;
    AVComplexFloat *left   = (AVComplexFloat *)l;
    AVComplexFloat *right  = (AVComplexFloat *)r;

    for (int i = 0; i < N; i++) {
        float cP = sqrf(center[i].re) + sqrf(center[i].im);
        float lrP = sqrf(left[i].re - right[i].re) + sqrf(left[i].im - right[i].im);
        float G = cP / (cP + lrP + FLT_EPSILON);
        float re, im;

        re = center[i].re * (original + vad * G * enhance);
        im = center[i].im * (original + vad * G * enhance);

        center[i].re = re;
        center[i].im = im;
    }
}

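/* Process one hop of `overlap` samples: shift the sliding input/output
 * buffers, window and forward-transform L and R, extract the center spectrum,
 * derive and smooth the voice-activity score, boost the center bins, then
 * inverse-transform and overlap-add into the center output channel while the
 * original L/R samples are passed through. */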
static int de_stereo(AVFilterContext *ctx, AVFrame *out)
{
    AudioDialogueEnhanceContext *s = ctx->priv;
    float *center          = (float *)s->center_frame->extended_data[0];
    float *center_prev     = (float *)s->center_frame->extended_data[1];
    float *left_in         = (float *)s->in_frame->extended_data[0];
    float *right_in        = (float *)s->in_frame->extended_data[1];
    float *left_out        = (float *)s->out_dist_frame->extended_data[0];
    float *right_out       = (float *)s->out_dist_frame->extended_data[1];
    float *left_samples    = (float *)s->in->extended_data[0];
    float *right_samples   = (float *)s->in->extended_data[1];
    float *windowed_left   = (float *)s->windowed_frame->extended_data[0];
    float *windowed_right  = (float *)s->windowed_frame->extended_data[1];
    float *windowed_oleft  = (float *)s->windowed_out->extended_data[0];
    float *windowed_oright = (float *)s->windowed_out->extended_data[1];
    float *windowed_pleft  = (float *)s->windowed_prev->extended_data[0];
    float *windowed_pright = (float *)s->windowed_prev->extended_data[1];
    float *left_osamples   = (float *)out->extended_data[0];
    float *right_osamples  = (float *)out->extended_data[1];
    float *center_osamples = (float *)out->extended_data[2];
    const int offset = s->fft_size - s->overlap;
    float vad;

    // shift in/out buffers
    memmove(left_in, &left_in[s->overlap], offset * sizeof(float));
    memmove(right_in, &right_in[s->overlap], offset * sizeof(float));
    memmove(left_out, &left_out[s->overlap], offset * sizeof(float));
    memmove(right_out, &right_out[s->overlap], offset * sizeof(float));

    memcpy(&left_in[offset], left_samples, s->overlap * sizeof(float));
    memcpy(&right_in[offset], right_samples, s->overlap * sizeof(float));
    memset(&left_out[offset], 0, s->overlap * sizeof(float));
    memset(&right_out[offset], 0, s->overlap * sizeof(float));

    apply_window(s, left_in,  windowed_left,  0);
    apply_window(s, right_in, windowed_right, 0);

    s->tx_fn(s->tx_ctx[0], windowed_oleft,  windowed_left,  sizeof(float));
    s->tx_fn(s->tx_ctx[1], windowed_oright, windowed_right, sizeof(float));

    get_centere((AVComplexFloat *)windowed_oleft,
                (AVComplexFloat *)windowed_oright,
                (AVComplexFloat *)center,
                s->fft_size / 2 + 1);

    vad = calc_vad(flux(center, center_prev, s->fft_size / 2 + 1),
                   fluxlr(windowed_oleft, windowed_pleft,
                          windowed_oright, windowed_pright, s->fft_size / 2 + 1), s->voice);
    vad = vad * 0.1 + 0.9 * s->prev_vad;
    s->prev_vad = vad;

    memcpy(center_prev,     center,          s->fft_size * sizeof(float));
    memcpy(windowed_pleft,  windowed_oleft,  s->fft_size * sizeof(float));
    memcpy(windowed_pright, windowed_oright, s->fft_size * sizeof(float));

    get_final(center, windowed_oleft, windowed_oright, vad, s->fft_size / 2 + 1,
              s->original, s->enhance);

    s->itx_fn(s->itx_ctx, windowed_oleft, center, sizeof(AVComplexFloat));

    apply_window(s, windowed_oleft, left_out, 1);

    for (int i = 0; i < s->overlap; i++) {
        // 4x overlap with a squared Hanning window results in a 1.5x increase in amplitude
        if (!ctx->is_disabled)
            center_osamples[i] = left_out[i] / 1.5f;
        else
            center_osamples[i] = 0.f;
        left_osamples[i]  = left_in[i];
        right_osamples[i] = right_in[i];
    }

    return 0;
}

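/* Consume one hop worth of input, run the analysis, and emit `overlap`
 * output samples with the input frame's properties copied over. */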
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioDialogueEnhanceContext *s = ctx->priv;
    AVFrame *out;
    int ret;

    out = ff_get_audio_buffer(outlink, s->overlap);
    if (!out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->in = in;
    de_stereo(ctx, out);

    av_frame_copy_props(out, in);
    out->nb_samples = in->nb_samples;
    ret = ff_filter_frame(outlink, out);
fail:
    av_frame_free(&in);
    s->in = NULL;
    return ret < 0 ? ret : 0;
}

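/* activate() callback: forward status changes between the links, pull exactly
 * `overlap` samples per invocation, and reschedule the filter or request more
 * input as needed. */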
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioDialogueEnhanceContext *s = ctx->priv;
    AVFrame *in = NULL;
    int ret = 0, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    ret = ff_inlink_consume_samples(inlink, s->overlap, s->overlap, &in);
    if (ret < 0)
        return ret;

    if (ret > 0) {
        return filter_frame(inlink, in);
    } else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    } else {
        if (ff_inlink_queued_samples(inlink) >= s->overlap) {
            ff_filter_set_ready(ctx, 10);
        } else if (ff_outlink_frame_wanted(outlink)) {
            ff_inlink_request_frame(inlink);
        }
        return 0;
    }
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioDialogueEnhanceContext *s = ctx->priv;

    av_freep(&s->window);

    av_frame_free(&s->in_frame);
    av_frame_free(&s->center_frame);
    av_frame_free(&s->out_dist_frame);
    av_frame_free(&s->windowed_frame);
    av_frame_free(&s->windowed_out);
    av_frame_free(&s->windowed_prev);

    av_tx_uninit(&s->tx_ctx[0]);
    av_tx_uninit(&s->tx_ctx[1]);
    av_tx_uninit(&s->itx_ctx);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};

const AVFilter ff_af_dialoguenhance = {
    .name            = "dialoguenhance",
    .description     = NULL_IF_CONFIG_SMALL("Audio Dialogue Enhancement."),
    .priv_size       = sizeof(AudioDialogueEnhanceContext),
    .priv_class      = &dialoguenhance_class,
    .uninit          = uninit,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(ff_audio_default_filterpad),
    FILTER_QUERY_FUNC(query_formats),
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .activate        = activate,
    .process_command = ff_filter_process_command,
};