/* Excerpts from the FFmpeg "aphasemeter" audio phase meter filter (libavfilter). */
#define MAX_DURATION (24*60*60*1000000LL)
#define OFFSET(x) offsetof(AudioPhaseMeterContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define get_duration(index) (index[1] - index[0])
/* from aphasemeter_options[]: */
    { "phasing", "set mono and out-of-phase detection output", OFFSET(do_phasing_detection),
      AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
/* in config_input(): decide whether the median phase marker is drawn
 * (in the full source the non-"none" branch also parses the colour string,
 * e.g. via av_parse_color()) */
    if (!strcmp(s->mpc_str, "none"))
        s->draw_median_phase = 0;
    else
        s->draw_median_phase = 1;
static inline int get_x(float phase, int w)
{
    return (phase + 1.f) / 2.f * (w - 1.f);
}
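/*
 * Illustration (not part of the original source): get_x() maps a phase value in
 * [-1, 1] onto a pixel column in [0, w-1]. For example, with w == 800:
 *
 *   get_x(-1.f, 800) == 0     // fully out of phase -> left edge
 *   get_x( 0.f, 800) == 399   // uncorrelated       -> centre
 *   get_x(+1.f, 800) == 799   // mono               -> right edge
 */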
/* in add_metadata(): build the exported metadata key */
    snprintf(buf, sizeof(buf), "lavfi.aphasemeter.%s", key);
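/*
 * Illustration (assumption, not verbatim from this excerpt): the helper is
 * typically called with a printf-formatted value, e.g. to attach the mean
 * phase of the current frame to its metadata:
 *
 *   char value[128];
 *   snprintf(value, sizeof(value), "%f", fphase);
 *   add_metadata(in, "phase", value);   // exported as "lavfi.aphasemeter.phase"
 */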
/* in update_mono_detection(): track when the signal enters and leaves mono */
    if (!s->is_mono && mono_measurement) {
        s->start_mono_presence = 1;
        s->mono_idx[0] = insamples->pts;
    }
    if (s->is_mono && mono_measurement && s->start_mono_presence) {
        s->mono_idx[1] = s->frame_end;
        /* mono_duration computation elided in this excerpt */
        if (mono_duration >= s->duration) {
            s->start_mono_presence = 0;
        }
    }
    if (s->is_mono && !mono_measurement) {
        s->mono_idx[1] = insamples ? insamples->pts : s->frame_end;
        /* mono_duration computation elided in this excerpt */
        if (mono_duration >= s->duration) {
            /* ... */
        }
    }
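/*
 * Illustration (assumption, not verbatim from this excerpt): the elided
 * mono_duration is a wall-clock duration obtained from the stored timestamp
 * pair, e.g. by rescaling the pts difference from the link time base into
 * AV_TIME_BASE units:
 *
 *   int64_t mono_duration = av_rescale_q(get_duration(s->mono_idx),
 *                                        s->time_base, AV_TIME_BASE_Q);
 */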
/* in update_out_phase_detection(): the same state machine for out-of-phase audio */
    if (!s->is_out_phase && out_phase_measurement) {
        s->start_out_phase_presence = 1;
        s->out_phase_idx[0] = insamples->pts;
    }
    if (s->is_out_phase && out_phase_measurement && s->start_out_phase_presence) {
        s->out_phase_idx[1] = s->frame_end;
        /* out_phase_duration computation elided in this excerpt */
        if (out_phase_duration >= s->duration) {
            s->start_out_phase_presence = 0;
        }
    }
    if (s->is_out_phase && !out_phase_measurement) {
        s->out_phase_idx[1] = insamples ? insamples->pts : s->frame_end;
        /* out_phase_duration computation elided in this excerpt */
        if (out_phase_duration >= s->duration) {
            /* ... */
        }
    }
/* in filter_frame(): local state derived from the filter options */
    const int rc = s->contrast[0];
    const int gc = s->contrast[1];
    const int bc = s->contrast[2];
    int mono_measurement;
    int out_phase_measurement;
    float tolerance = 1.0f - s->tolerance;
    if (s->do_video && (!s->out || s->out->width  != outlink->w ||
                                   s->out->height != outlink->h)) {
        /* video frame (re)allocated and assigned to out (elided in this excerpt) */
        for (i = 0; i < outlink->h; i++)
            memset(out->data[0] + i * out->linesize[0], 0, outlink->w * 4);
    } else if (s->do_video) {
        /* scroll the existing display down by one row */
        for (i = outlink->h - 1; i >= 10; i--)
            memmove(out->data[0] + (i  ) * out->linesize[0],
                    out->data[0] + (i-1) * out->linesize[0],
                    outlink->w * 4);
        for (i = 0; i < outlink->w; i++) {
            /* per-pixel reset of the row about to be drawn, elided in this excerpt */
        }
    }
    /* per-sample loop: compute the phase of each stereo sample and plot it */
    for (i = 0; i < in->nb_samples; i++) {
        const float *src = (float *)in->data[0] + i * 2;
        /* f: per-sample phase correlation of the stereo pair (computation elided) */
        const float phase = isnan(f) ? 1 : f;
        const int x = get_x(phase, s->w);

        dst = out->data[0] + x * 4;
        dst[0] = FFMIN(255, dst[0] + rc);
        dst[1] = FFMIN(255, dst[1] + gc);
        dst[2] = FFMIN(255, dst[2] + bc);
    }
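/*
 * Illustration (assumption, not verbatim from this excerpt): a common per-sample
 * phase-correlation measure for an interleaved stereo pair l = src[0], r = src[1]
 * is
 *
 *   f = 2 * l * r / (l*l + r*r)
 *
 * which is +1 when both channels are identical (mono), 0 when one channel is
 * silent, and -1 when the channels are identical but inverted (out of phase);
 * the isnan() guard above maps the 0/0 case of digital silence to +1.
 */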
    if (s->draw_median_phase) {
        dst = out->data[0] + get_x(fphase, s->w) * 4;
        /* pixel write in the configured median phase colour elided in this excerpt */
    }

    /* duplicate the freshly drawn row so the newest value forms a 10-pixel band */
    for (i = 1; i < 10 && i < outlink->h; i++)
        memcpy(out->data[0] + i * out->linesize[0], out->data[0], outlink->w * 4);
    if (s->do_phasing_detection) {
        s->time_base = inlink->time_base;
        /* ... (s->frame_end updated here in the full source) ... */
        mono_measurement      = (tolerance - fphase) < FLT_EPSILON;
        out_phase_measurement = (angle - fphase) > FLT_EPSILON;
        /* followed by update_mono_detection() / update_out_phase_detection() in the full source */
    }
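/*
 * Illustration (assumption, not verbatim from this excerpt): with fphase being the
 * mean phase of the frame, the two flags above are simple thresholds. The frame
 * counts as mono once fphase reaches 1.0 minus the user 'tolerance' (note
 * tolerance = 1.0f - s->tolerance above), and as out-of-phase once fphase drops
 * below the 'angle' threshold, e.g. a value derived from the configured angle
 * such as
 *
 *   float angle = cosf(s->angle / 180.0f * (float)M_PI);
 *
 * so that 170 degrees corresponds to fphase below roughly -0.985.
 */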
    /* push a video frame only when its timestamp advances */
    if (s->do_video && new_pts != s->last_pts) {
        s->out->pts = s->last_pts = new_pts;
        s->out->duration = 1;
        /* ... */
    }
    /* elsewhere in the filter (activate()/uninit() paths, bodies elided): */
    if (s->nb_samples > 0) {
        /* ... */
    }

    if (s->do_phasing_detection) {
        /* ... */
    }
const AVFilter ff_avf_aphasemeter = {
    .name       = "aphasemeter",
    /* ... */
    .priv_class = &aphasemeter_class,
    /* ... */
};
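/*
 * Usage sketch (assumption, not part of the original source): with phasing
 * detection enabled the filter exports its measurements as frame metadata under
 * the "lavfi.aphasemeter." prefix, which can be printed with the ametadata
 * filter; assuming the video output is disabled via the filter's video option:
 *
 *   ffmpeg -i input.wav -af aphasemeter=video=0:phasing=1,ametadata=mode=print -f null -
 */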