/* af_aphasemeter.c excerpts */
#define MAX_DURATION (24*60*60*1000000LL)
#define OFFSET(x) offsetof(AudioPhaseMeterContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define get_duration(index) (index[1] - index[0])

/* mpc ("median phase color") set to "none" disables drawing the median phase */
if (!strcmp(s->mpc_str, "none"))

/* get_x(): map a phase value in [-1, 1] to a column in [0, w - 1] */
return (phase + 1.) / 2. * (w - 1);

/* add_metadata(): build the "lavfi.aphasemeter.*" metadata key */
snprintf(buf, sizeof(buf), "lavfi.aphasemeter.%s", key);
/* update_mono_detection(): track how long the signal stays mono */
int64_t mono_duration;
if (!s->is_mono && mono_measurement) {
if (s->is_mono && !mono_measurement) {

/* update_out_phase_detection(): same pattern for out-of-phase stretches */
int64_t out_phase_duration;
if (out_phase_duration >= s->duration) {
if (out_phase_duration >= s->duration) {
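Both update functions follow the same pattern: remember the pts at which the condition started, then compare the elapsed span against the user-supplied duration threshold. A minimal sketch of that pattern using only public libavutil API; the helper name elapsed_us is illustrative, not the filter's actual code:

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

#define get_duration(index) ((index)[1] - (index)[0])

/* idx[0] = pts where the condition began, idx[1] = current pts,
 * both in time_base units; result is the elapsed time in microseconds. */
static int64_t elapsed_us(const int64_t idx[2], AVRational time_base)
{
    return av_rescale_q(get_duration(idx), time_base, AV_TIME_BASE_Q);
}

/* e.g.: if (elapsed_us(s->mono_idx, s->time_base) >= s->duration) ...
 * (the duration option, capped by MAX_DURATION, is expressed in microseconds) */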
/* filter_frame(): measurement and (optional) video drawing */
int mono_measurement;
int out_phase_measurement;

/* zero the RGBA output picture (4 bytes per pixel) */
for (i = 0; i < outlink->h; i++)
    memset(out->data[0] + i * out->linesize[0], 0, outlink->w * 4);

for (i = outlink->h - 1; i >= 10; i--)
for (i = 0; i < outlink->w; i++)

/* per-sample phase value from the interleaved stereo float samples */
const float *src = (float *)in->data[0] + i * 2;
const float f = src[0] * src[1] / (src[0]*src[0] + src[1] * src[1]) * 2;

/* map the phase to a column and accumulate the meter color there */
const int x = get_x(phase, s->w);
dst = out->data[0] + x * 4;
dst[0] = FFMIN(255, dst[0] + rc);
dst[1] = FFMIN(255, dst[1] + gc);
dst[2] = FFMIN(255, dst[2] + bc);

for (i = 1; i < 10 && i < outlink->h; i++)

/* format the frame's mean phase for metadata/log output */
snprintf(value, sizeof(value), "%f", fphase);

/* thresholds driving the mono / out-of-phase detection timers */
mono_measurement = (tolerance - fphase) < FLT_EPSILON;
out_phase_measurement = (angle - fphase) > FLT_EPSILON;
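The per-sample value f = 2*L*R/(L*L + R*R) is what the meter visualizes: it is 1 for identical (mono) channels, -1 for inverted channels, and 0 when one channel is silent; the per-frame mean is exported as fphase. A tiny standalone check of those three cases (the sample values are arbitrary):

#include <stdio.h>

static float phase_value(float l, float r)
{
    return l * r / (l * l + r * r) * 2;
}

int main(void)
{
    printf("mono:     %f\n", phase_value(0.5f,  0.5f));  /*  1.0 */
    printf("inverted: %f\n", phase_value(0.5f, -0.5f));  /* -1.0 */
    printf("one-side: %f\n", phase_value(0.5f,  0.0f));  /*  0.0 */
    return 0;
}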
/* AVFilter ff_avf_aphasemeter (excerpt) */
.name       = "aphasemeter",
.priv_class = &aphasemeter_class,
static void add_metadata(AVFrame *insamples, const char *key, char *value)
This structure describes decoded (raw) audio or video data.
AVFILTER_DEFINE_CLASS(aphasemeter)
Main libavfilter public API header.
int max_samples
Maximum number of samples to filter at once.
static int get_x(float phase, int w)
int h
agreed upon image height
static av_cold int init(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink)
static const AVOption aphasemeter_options[]
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
AVFilterFormatsConfig outcfg
Lists of supported formats / etc.
#define AV_CH_LAYOUT_STEREO
int start_out_phase_presence
static int query_formats(AVFilterContext *ctx)
const char * name
Pad name.
AVFilterLink ** inputs
array of pointers to input links
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
timestamp utils, mostly useful for debugging/logging purposes
static int config_video_output(AVFilterLink *outlink)
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
AVDictionary * metadata
metadata.
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
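av_parse_color() is how color option strings (such as the mpc median-phase color checked against "none" above) become RGBA bytes. A minimal usage sketch; the color name "green" is just an example value:

#include <stdio.h>
#include <stdint.h>
#include <libavutil/parseutils.h>

int main(void)
{
    uint8_t rgba[4];
    /* slen = -1 means the string is NUL-terminated; returns < 0 on error */
    if (av_parse_color(rgba, "green", -1, NULL) >= 0)
        printf("r=%d g=%d b=%d a=%d\n", rgba[0], rgba[1], rgba[2], rgba[3]);
    return 0;
}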
A filter pad used for either input or output.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
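A quick illustration of av_rescale_q(), which moves a value from one time base to another (the numbers are arbitrary):

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational tb_audio = {1, 44100};   /* e.g. an audio link time base */
    AVRational tb_us    = {1, 1000000}; /* microseconds */
    /* 44100 ticks at 1/44100 s is exactly one second, i.e. 1000000 us */
    printf("%lld\n", (long long)av_rescale_q(44100, tb_audio, tb_us));
    return 0;
}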
A link between two filters.
int min_samples
Minimum number of samples to filter at once.
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0, will be automatically copied from the first input of the source filter if it exists.
int sample_rate
samples per second
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
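av_ts2timestr() formats a timestamp in a given time base as a human-readable string; as the note above says, the result should only be used directly in function arguments. A minimal sketch:

#include <stdio.h>
#include <libavutil/timestamp.h>

int main(void)
{
    AVRational tb = {1, 44100};
    /* prints the timestamp converted to seconds: "2" */
    printf("pts = %s\n", av_ts2timestr(88200, &tb));
    return 0;
}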
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
void * priv
private data for use by the filter
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link...
simple assert() macros that are a bit more flexible than ISO C assert().
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
int w
agreed upon image width
static void update_out_phase_detection(AudioPhaseMeterContext *s, AVFrame *insamples, int out_phase_measurement)
audio channel layout utility functions
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
#define AV_TIME_BASE
Internal time base represented as integer.
AVFilterContext * src
source filter
int partial_buf_size
Size of the partial buffer to allocate.
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
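av_frame_clone() and av_frame_free() (both listed above) pair up as follows; the function name duplicate_frame is illustrative only:

#include <libavutil/frame.h>

/* Create a new reference to the same data as src, use it, then release it. */
static int duplicate_frame(const AVFrame *src)
{
    AVFrame *copy = av_frame_clone(src);
    if (!copy)
        return AVERROR(ENOMEM);
    /* ... read fields such as copy->pts or copy->metadata ... */
    av_frame_free(&copy);  /* frees the frame and unreferences its buffers */
    return 0;
}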
static const AVFilterPad outputs[]
A list of supported channel layouts.
#define get_duration(index)
#define AV_LOG_INFO
Standard information.
AVSampleFormat
Audio sample formats.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
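Combined with the AVFrame metadata dictionary listed above, av_dict_set() is what lets add_metadata() publish measurements under "lavfi.aphasemeter.*" keys. A hedged sketch of that idea; set_phase_metadata is an illustrative name, not the filter's function:

#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/dict.h>

static void set_phase_metadata(AVFrame *frame, float fphase)
{
    char key[128], value[128];
    snprintf(key, sizeof(key), "lavfi.aphasemeter.%s", "phase");
    snprintf(value, sizeof(value), "%f", fphase);
    /* flags = 0: key and value are copied, any existing entry is overwritten */
    av_dict_set(&frame->metadata, key, value, 0);
}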
Describe the class of an AVClass context structure.
Rational number (pair of numerator and denominator).
offset must point to AVRational
const char * name
Filter name.
AVRational sample_aspect_ratio
agreed upon sample aspect ratio
offset must point to two consecutive integers
AVFilterLink ** outputs
array of pointers to output links
static enum AVPixelFormat pix_fmts[]
static const AVFilterPad inputs[]
#define flags(name, subs,...)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
The exact code depends on how similar the blocks are and how related they are to the rest of the filter, and it needs to apply these operations to the correct inlink or outlink if there are several; macros are available to factor that out when no extra processing is needed.
AVFilterFormatsConfig incfg
Lists of supported formats / etc.
AVFilterContext * dst
dest filter
static enum AVSampleFormat sample_fmts[]
static av_cold void uninit(AVFilterContext *ctx)
static void update_mono_detection(AudioPhaseMeterContext *s, AVFrame *insamples, int mono_measurement)
Filter design notes (from the libavfilter documentation): the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, a filter holds the list of supported formats. For video that means pixel format; for audio that means channel layout, sample format and sample rate. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats() can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions are documented separately.
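Concretely, the sample_fmts[] and pix_fmts[] arrays referenced in this file are the terminator-ended lists handed to that negotiation machinery. A sketch of what they plausibly contain, inferred from the RGBA video output and the interleaved stereo float reads above; the exact entries and the helper calls that register them vary by libavfilter version, so treat this as an assumption:

#include <libavutil/samplefmt.h>
#include <libavutil/pixfmt.h>

/* Assumed contents: interleaved float stereo audio in, packed RGBA video out. */
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
static const enum AVPixelFormat  pix_fmts[]    = { AV_PIX_FMT_RGBA,  AV_PIX_FMT_NONE };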
AVFilter ff_avf_aphasemeter
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
AVPixelFormat
Pixel format.
int nb_samples
number of audio samples (per channel) described by this frame