/* FFmpeg libavfilter/vf_signalstats.c -- excerpts */
#define OFFSET(x) offsetof(SignalstatsContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* init(): make sure the filter selected for visual output is also enabled */
s->filters |= 1 << s->outfilter;
/* init(): convert the user-specified RGB burn color to YUV */
s->yuv_color[0] = (( 66*r + 129*g +  25*b + (1<<7)) >> 8) +  16;
s->yuv_color[1] = ((-38*r + -74*g + 112*b + (1<<7)) >> 8) + 128;
s->yuv_color[2] = ((112*r + -94*g + -18*b + (1<<7)) >> 8) + 128;
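/* Example (not part of the filter): the coefficients above are the usual
 * 8-bit fixed-point approximation of the BT.601 limited-range RGB->YUV
 * matrix; (1<<7) rounds before the >>8, luma gets a +16 offset and chroma
 * is centered on 128. A minimal standalone sketch with a hypothetical
 * rgb2yuv() helper: */
#include <stdio.h>
#include <stdint.h>

static void rgb2yuv(int r, int g, int b, uint8_t yuv[3])
{
    /* relies on arithmetic right shift for negative sums, as above */
    yuv[0] = (( 66*r + 129*g +  25*b + (1<<7)) >> 8) +  16;
    yuv[1] = ((-38*r -  74*g + 112*b + (1<<7)) >> 8) + 128;
    yuv[2] = ((112*r -  94*g -  18*b + (1<<7)) >> 8) + 128;
}

int main(void)
{
    uint8_t yuv[3];
    rgb2yuv(255, 0, 0, yuv);  /* pure red */
    printf("Y=%d U=%d V=%d\n", yuv[0], yuv[1], yuv[2]);  /* Y=82 U=90 V=240 */
    return 0;
}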
/* config_output(): cache chroma subsampling shifts and bit depth */
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
s->depth = desc->comp[0].depth;
s->maxsize = 1 << s->depth;
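/* Example (not part of the filter): log2_chroma_w/log2_chroma_h express the
 * chroma subsampling as shift amounts, so a luma coordinate maps to its
 * chroma sample with a plain right shift, and maxsize gives one histogram
 * bin per representable code value. A sketch assuming 4:2:0 at 10 bits: */
#include <stdio.h>

int main(void)
{
    const int hsub = 1, vsub = 1;  /* 4:2:0: chroma halved both ways */
    const int depth = 10;
    const int x = 101, y = 57;

    printf("luma (%d,%d) -> chroma (%d,%d)\n", x, y, x >> hsub, y >> vsub);
    printf("depth %d -> %d histogram bins\n", depth, 1 << depth);  /* 1024 */
    return 0;
}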
if (!s->histy || !s->histu || !s->histv || !s->histsat)
s->cfs = s->chromaw * s->chromah;
if (!s->frame_sat || !s->frame_hue)
/* burn_frame8(): paint one flagged pixel with the burn color */
const int chromax = x >> s->hsub;
const int chromay = y >> s->vsub;
f->data[0][y * f->linesize[0] + x] = s->yuv_color[0];
f->data[1][chromay * f->linesize[1] + chromax] = s->yuv_color[1];
f->data[2][chromay * f->linesize[2] + chromax] = s->yuv_color[2];
/* burn_frame16(): same, writing 16-bit samples */
const int chromax = x >> s->hsub;
const int chromay = y >> s->vsub;
const int mult = 1 << (s->depth - 8);
AV_WN16(f->data[0] + y * f->linesize[0] + x * 2, s->yuv_color[0] * mult);
AV_WN16(f->data[1] + chromay * f->linesize[1] + chromax * 2, s->yuv_color[1] * mult);
AV_WN16(f->data[2] + chromay * f->linesize[2] + chromax * 2, s->yuv_color[2] * mult);
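/* Example (not part of the filter): in the 16-bit variant, byte offsets
 * double (x * 2) because each sample is two bytes, and the 8-bit burn color
 * is rescaled by mult = 1 << (depth - 8). A quick check of the scaling: */
#include <stdio.h>

int main(void)
{
    for (int depth = 8; depth <= 16; depth += 2) {
        int mult = 1 << (depth - 8);
        printf("depth %2d: mult %3d, Y 235 -> %5d, U/V 128 -> %5d\n",
               depth, mult, 235 * mult, 128 * mult);
    }
    return 0;
}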
/* filter8_brng(): flag pixels outside broadcast range */
const int slice_start = (h *  jobnr   ) / nb_jobs;
const int slice_end   = (h * (jobnr+1)) / nb_jobs;
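/* This (h * jobnr) / nb_jobs split hands each worker a contiguous band of
 * rows with no gaps and no overlap; e.g. h = 1080, nb_jobs = 4 yields
 * [0,270) [270,540) [540,810) [810,1080). The same pattern recurs in every
 * slice-threaded worker below. */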
for (y = slice_start; y < slice_end; y++) {
    const int yc = y >> s->vsub;
    const uint8_t *pluma    = &in->data[0][y  * in->linesize[0]];
    const uint8_t *pchromau = &in->data[1][yc * in->linesize[1]];
    const uint8_t *pchromav = &in->data[2][yc * in->linesize[2]];

    for (x = 0; x < w; x++) {
        const int xc = x >> s->hsub;
        const int luma    = pluma[x];
        const int chromau = pchromau[xc];
        const int chromav = pchromav[xc];
        const int filt = luma    < 16 || luma    > 235 ||
                         chromau < 16 || chromau > 240 ||
                         chromav < 16 || chromav > 240;
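/* Example (not part of the filter): BRNG counts samples outside the nominal
 * 8-bit broadcast range (luma 16..235, chroma 16..240). A self-contained
 * sketch of the same test, with a hypothetical is_brng() helper: */
#include <stdio.h>

static int is_brng(int luma, int u, int v)
{
    return luma < 16 || luma > 235 ||
           u    < 16 || u    > 240 ||
           v    < 16 || v    > 240;
}

int main(void)
{
    printf("%d\n", is_brng(128, 128, 128));  /* 0: legal midtone  */
    printf("%d\n", is_brng(  5, 128, 128));  /* 1: super-black    */
    printf("%d\n", is_brng(235, 250, 128));  /* 1: chroma too hot */
    return 0;
}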
/* filter16_brng(): same test with limits scaled to the actual bit depth */
const int mult = 1 << (s->depth - 8);

const int slice_start = (h *  jobnr   ) / nb_jobs;
const int slice_end   = (h * (jobnr+1)) / nb_jobs;

for (y = slice_start; y < slice_end; y++) {
    const int yc = y >> s->vsub;
    const uint16_t *pluma    = (uint16_t *)&in->data[0][y  * in->linesize[0]];
    const uint16_t *pchromau = (uint16_t *)&in->data[1][yc * in->linesize[1]];
    const uint16_t *pchromav = (uint16_t *)&in->data[2][yc * in->linesize[2]];

    for (x = 0; x < w; x++) {
        const int xc = x >> s->hsub;
        const int luma    = pluma[x];
        const int chromau = pchromau[xc];
        const int chromav = pchromav[xc];
        const int filt = luma    < 16 * mult || luma    > 235 * mult ||
                         chromau < 16 * mult || chromau > 240 * mult ||
                         chromav < 16 * mult || chromav > 240 * mult;
/* filter_tout_outlier(): outlier test on three vertically adjacent
 * pixels, with y the center sample */
return ((abs(x - y) + abs(z - y)) / 2) - abs(z - x) > 4;
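/* Example (not part of the filter): the predicate flags the center pixel y
 * when it sits well away from the mean of its vertical neighbours x and z
 * while x and z themselves agree -- an isolated one-line spike rather than
 * a smooth gradient: */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static int outlier(uint8_t x, uint8_t y, uint8_t z)
{
    return ((abs(x - y) + abs(z - y)) / 2) - abs(z - x) > 4;
}

int main(void)
{
    printf("%d\n", outlier(100, 120, 100));  /* 1: spike between agreeing rows */
    printf("%d\n", outlier(100, 120, 140));  /* 0: smooth gradient             */
    printf("%d\n", outlier(100, 103, 100));  /* 0: below the >4 threshold      */
    return 0;
}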
/* filter8_tout(): detect temporal outliers (dropouts) on the luma plane */
const int slice_start = (h *  jobnr   ) / nb_jobs;
const int slice_end   = (h * (jobnr+1)) / nb_jobs;
const uint8_t *p = in->data[0];
int x, y, score = 0, filt;

for (y = slice_start; y < slice_end; y++) {

    if (y - 1 < 0 || y + 1 >= h)

#define FILTER(i, j) \
    filter_tout_outlier(p[(y-j) * lw + x + i], \
                        p[ y    * lw + x + i], \
                        p[(y+j) * lw + x + i])

#define FILTER3(j) (FILTER(-1, j) && FILTER(0, j) && FILTER(1, j))

    if (y - 2 >= 0 && y + 2 < h) {
        for (x = 1; x < w - 1; x++) {

        for (x = 1; x < w - 1; x++) {
/* filter16_tout(): high-bit-depth variant */
const int slice_start = (h *  jobnr   ) / nb_jobs;
const int slice_end   = (h * (jobnr+1)) / nb_jobs;
const uint16_t *p = (uint16_t *)in->data[0];
int x, y, score = 0, filt;

if (y - 1 < 0 || y + 1 >= h)

if (y - 2 >= 0 && y + 2 < h) {
    for (x = 1; x < w - 1; x++) {

    for (x = 1; x < w - 1; x++) {
/* filter8_vrep(): detect vertically repeated lines */
const int slice_start = (h *  jobnr   ) / nb_jobs;
const int slice_end   = (h * (jobnr+1)) / nb_jobs;
const uint8_t *p = in->data[0];

for (y = slice_start; y < slice_end; y++) {
    const int ylw = y * lw;
    int filt, totdiff = 0;

    for (x = 0; x < w; x++)
        totdiff += abs(p[y2lw + x] - p[ylw + x]);

    for (x = 0; x < w; x++)
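/* Example (not part of the filter): VREP sums each row against a row a few
 * lines above it (y2lw) and treats a near-zero total difference as a
 * repeated line. A rough sketch of the per-row test; the "average diff
 * below 1 per pixel" threshold (totdiff < w) is an assumption read off
 * this fragment: */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static int row_is_repeat(const uint8_t *above, const uint8_t *row, int w)
{
    int totdiff = 0;
    for (int x = 0; x < w; x++)
        totdiff += abs(above[x] - row[x]);
    return totdiff < w;  /* assumed threshold: < 1 level per pixel on average */
}

int main(void)
{
    uint8_t a[8] = {10, 20, 30, 40, 50, 60, 70, 80};
    uint8_t b[8] = {10, 20, 30, 40, 50, 61, 70, 80};  /* near-identical */
    uint8_t c[8] = {90, 80, 70, 60, 50, 40, 30, 20};  /* unrelated      */
    printf("%d %d\n", row_is_repeat(a, b, 8), row_is_repeat(a, c, 8));  /* 1 0 */
    return 0;
}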
/* filter16_vrep(): high-bit-depth variant */
const int slice_start = (h *  jobnr   ) / nb_jobs;
const int slice_end   = (h * (jobnr+1)) / nb_jobs;
const uint16_t *p = (uint16_t *)in->data[0];

const int ylw = y * lw;

for (x = 0; x < w; x++)
    totdiff += abs(p[y2lw + x] - p[ylw + x]);

for (x = 0; x < w; x++)
/* filters_def[]: per-filter name and process8/process16 dispatch table */
static const struct {
/* compute_sat_hue_metrics8(): per-pixel chroma saturation and hue */
const int slice_start = (s->chromah *  jobnr   ) / nb_jobs;
const int slice_end   = (s->chromah * (jobnr+1)) / nb_jobs;

const int lsz_u = src->linesize[1];
const int lsz_v = src->linesize[2];
const uint8_t *p_u = src->data[1] + slice_start * lsz_u;
const uint8_t *p_v = src->data[2] + slice_start * lsz_v;

const int lsz_sat = dst_sat->linesize[0];
const int lsz_hue = dst_hue->linesize[0];
uint8_t *p_sat = dst_sat->data[0] + slice_start * lsz_sat;
uint8_t *p_hue = dst_hue->data[0] + slice_start * lsz_hue;

for (j = slice_start; j < slice_end; j++) {
    for (i = 0; i < s->chromaw; i++) {
        const int yuvu = p_u[i];
        const int yuvv = p_v[i];
        p_sat[i] = hypotf(yuvu - 128, yuvv - 128);
        ((int16_t*)p_hue)[i] = fmodf(floorf((180.f / M_PI) * atan2f(yuvu-128, yuvv-128) + 180.f), 360.f);
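/* Example (not part of the filter): saturation is the distance of (U,V)
 * from the neutral point (128 at 8 bits) and hue is the angle of the same
 * vector folded into 0..359 degrees, reusing the exact expressions above.
 * Compile with -lm: */
#include <stdio.h>
#include <math.h>

int main(void)
{
    const int yuvu = 90, yuvv = 240;  /* chroma of pure red, per rgb2yuv above */

    float sat = hypotf(yuvu - 128, yuvv - 128);
    float hue = fmodf(floorf((180.f / M_PI) * atan2f(yuvu - 128, yuvv - 128)
                             + 180.f), 360.f);

    printf("sat=%.1f hue=%.0f\n", sat, hue);  /* sat=118.3 hue=161 */
    return 0;
}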
/* compute_sat_hue_metrics16(): same, with the neutral point at mid */
const int mid = 1 << (s->depth - 1);

const int slice_start = (s->chromah *  jobnr   ) / nb_jobs;
const int slice_end   = (s->chromah * (jobnr+1)) / nb_jobs;

const int lsz_u = src->linesize[1] / 2;
const int lsz_v = src->linesize[2] / 2;
const uint16_t *p_u = (uint16_t*)src->data[1] + slice_start * lsz_u;
const uint16_t *p_v = (uint16_t*)src->data[2] + slice_start * lsz_v;

const int lsz_sat = dst_sat->linesize[0] / 2;
const int lsz_hue = dst_hue->linesize[0] / 2;
uint16_t *p_sat = (uint16_t*)dst_sat->data[0] + slice_start * lsz_sat;
uint16_t *p_hue = (uint16_t*)dst_hue->data[0] + slice_start * lsz_hue;

for (j = slice_start; j < slice_end; j++) {
    for (i = 0; i < s->chromaw; i++) {
        const int yuvu = p_u[i];
        const int yuvv = p_v[i];
        p_sat[i] = hypotf(yuvu - mid, yuvv - mid);
        ((int16_t*)p_hue)[i] = fmodf(floorf((180.f / M_PI) * atan2f(yuvu-mid, yuvv-mid) + 180.f), 360.f);
/* filter_frame8(): gather the per-frame statistics */
unsigned int *histy = s->histy,
             *histu = s->histu,
             *histv = s->histv,
             *histsat = s->histsat;
int miny = -1, minu = -1, minv = -1;
int maxy = -1, maxu = -1, maxv = -1;
int lowy = -1, lowu = -1, lowv = -1;
int highy = -1, highu = -1, highv = -1;
int minsat = -1, maxsat = -1, lowsat = -1, highsat = -1;
int lowp, highp, clowp, chighp;
int accy, accu, accv;
int accsat, acchue = 0;
int toty = 0, totu = 0, totv = 0, totsat = 0;
int dify = 0, difu = 0, difv = 0;
uint16_t masky = 0, masku = 0, maskv = 0;

const uint8_t *p_sat = sat->data[0];
const uint8_t *p_hue = hue->data[0];
const int lsz_sat = sat->linesize[0];
const int lsz_hue = hue->linesize[0];

prev = s->frame_prev;
memset(s->histy, 0, s->maxsize * sizeof(*s->histy));
for (j = 0; j < link->h; j++) {
    for (i = 0; i < link->w; i++) {
        const int yuv = in->data[0][w + i];

        dify += abs(yuv - prev->data[0][pw + i]);
memset(s->histu, 0, s->maxsize * sizeof(*s->histu));
memset(s->histv, 0, s->maxsize * sizeof(*s->histv));
memset(s->histsat, 0, s->maxsize * sizeof(*s->histsat));
for (j = 0; j < s->chromah; j++) {
    for (i = 0; i < s->chromaw; i++) {
        const int yuvu = in->data[1][cw+i];
        const int yuvv = in->data[2][cw+i];

        difu += abs(yuvu - prev->data[1][cpw+i]);
        difv += abs(yuvv - prev->data[2][cpw+i]);

        histhue[((int16_t*)p_hue)[i]]++;
/* run every enabled filter across the frame via the slice-threaded executor */
if (s->filters & 1<<fil) {
    ThreadData td = {
        .in  = in,
        .out = out != in && s->outfilter == fil ? out : NULL,
    };
    memset(s->jobs_rets, 0, s->nb_jobs * sizeof(*s->jobs_rets));
    ff_filter_execute(ctx, filters_def[fil].process8,
                      &td, s->jobs_rets, s->nb_jobs);
    for (i = 0; i < s->nb_jobs; i++)
        filtot[fil] += s->jobs_rets[i];
lowp   = lrint(s->fs  * 10 / 100.);
highp  = lrint(s->fs  * 90 / 100.);
clowp  = lrint(s->cfs * 10 / 100.);
chighp = lrint(s->cfs * 90 / 100.);
accy = accu = accv = accsat = 0;
for (fil = 0; fil < s->maxsize; fil++) {
    if (miny   < 0 && histy[fil])   miny   = fil;
    if (minu   < 0 && histu[fil])   minu   = fil;
    if (minv   < 0 && histv[fil])   minv   = fil;
    if (minsat < 0 && histsat[fil]) minsat = fil;

    if (histy[fil])   maxy   = fil;
    if (histu[fil])   maxu   = fil;
    if (histv[fil])   maxv   = fil;
    if (histsat[fil]) maxsat = fil;

    toty   += histy[fil]   * fil;
    totu   += histu[fil]   * fil;
    totv   += histv[fil]   * fil;
    totsat += histsat[fil] * fil;

    accy   += histy[fil];
    accu   += histu[fil];
    accv   += histv[fil];
    accsat += histsat[fil];

    if (lowy   == -1 && accy   >= lowp)  lowy   = fil;
    if (lowu   == -1 && accu   >= clowp) lowu   = fil;
    if (lowv   == -1 && accv   >= clowp) lowv   = fil;
    if (lowsat == -1 && accsat >= clowp) lowsat = fil;

    if (highy   == -1 && accy   >= highp)  highy   = fil;
    if (highu   == -1 && accu   >= chighp) highu   = fil;
    if (highv   == -1 && accv   >= chighp) highv   = fil;
    if (highsat == -1 && accsat >= chighp) highsat = fil;
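/* Example (not part of the filter): the LOW/HIGH statistics (e.g.
 * lavfi.signalstats.YLOW) are 10th/90th percentiles read straight off the
 * histograms -- accumulate bin counts and record the first bin where the
 * running total crosses each threshold. A minimal sketch with a made-up
 * 8-bin histogram of 100 samples; compile with -lm: */
#include <stdio.h>
#include <math.h>

int main(void)
{
    int hist[8] = {5, 10, 20, 30, 20, 10, 4, 1};
    int n = 100;
    int lowp = lrint(n * 10 / 100.), highp = lrint(n * 90 / 100.);
    int acc = 0, low = -1, high = -1;

    for (int bin = 0; bin < 8; bin++) {
        acc += hist[bin];
        if (low  == -1 && acc >= lowp)  low  = bin;  /* bin 1: acc = 15 */
        if (high == -1 && acc >= highp) high = bin;  /* bin 5: acc = 95 */
    }
    printf("low=%d high=%d\n", low, high);  /* low=1 high=5 */
    return 0;
}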
for (fil = 0; fil < 360; fil++) {
    tothue += histhue[fil] * fil;
    acchue += histhue[fil];

    if (medhue == -1 && acchue > s->cfs / 2)
        medhue = fil;

    if (histhue[fil] > maxhue) {
        maxhue = histhue[fil];
#define SET_META(key, fmt, val) do {                                   \
    snprintf(metabuf, sizeof(metabuf), fmt, val);                      \
    av_dict_set(&out->metadata, "lavfi.signalstats." key, metabuf, 0); \
SET_META("YAVG",   "%g", 1.0 * toty   / s->fs);
SET_META("UAVG",   "%g", 1.0 * totu   / s->cfs);
SET_META("VAVG",   "%g", 1.0 * totv   / s->cfs);
SET_META("SATAVG", "%g", 1.0 * totsat / s->cfs);
SET_META("HUEAVG", "%g", 1.0 * tothue / s->cfs);

SET_META("YDIF", "%g", 1.0 * dify / s->fs);
SET_META("UDIF", "%g", 1.0 * difu / s->cfs);
SET_META("VDIF", "%g", 1.0 * difv / s->cfs);
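/* Each statistic lands in the output frame's metadata dictionary under a
 * "lavfi.signalstats.*" key, so downstream filters and tools can read it
 * back, e.g.:  ffmpeg -i in.mkv -vf signalstats,metadata=print -f null -
 * A hypothetical API-side consumer (print_yavg() is not part of FFmpeg)
 * might look like: */
#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/dict.h>

static void print_yavg(const AVFrame *frame)
{
    const AVDictionaryEntry *e =
        av_dict_get(frame->metadata, "lavfi.signalstats.YAVG", NULL, 0);
    if (e)
        printf("YAVG = %s\n", e->value);
}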
if (s->filters & 1<<fil) {
    snprintf(metabuf, sizeof(metabuf), "%g", 1.0 * filtot[fil] / s->fs);
/* filter_frame16(): the same statistics pass with 64-bit accumulators */
unsigned int *histy = s->histy,
             *histu = s->histu,
             *histv = s->histv,
             *histsat = s->histsat;
int miny = -1, minu = -1, minv = -1;
int maxy = -1, maxu = -1, maxv = -1;
int lowy = -1, lowu = -1, lowv = -1;
int highy = -1, highu = -1, highv = -1;
int minsat = -1, maxsat = -1, lowsat = -1, highsat = -1;
int lowp, highp, clowp, chighp;
int accy, accu, accv;
int accsat, acchue = 0;
int64_t toty = 0, totu = 0, totv = 0, totsat = 0;
int64_t dify = 0, difu = 0, difv = 0;
uint16_t masky = 0, masku = 0, maskv = 0;

const uint16_t *p_sat = (uint16_t *)sat->data[0];
const uint16_t *p_hue = (uint16_t *)hue->data[0];
const int lsz_sat = sat->linesize[0] / 2;
const int lsz_hue = hue->linesize[0] / 2;

prev = s->frame_prev;
memset(s->histy, 0, s->maxsize * sizeof(*s->histy));
for (j = 0; j < link->h; j++) {
    for (i = 0; i < link->w; i++) {
memset(s->histu, 0, s->maxsize * sizeof(*s->histu));
memset(s->histv, 0, s->maxsize * sizeof(*s->histv));
memset(s->histsat, 0, s->maxsize * sizeof(*s->histsat));
for (j = 0; j < s->chromah; j++) {
    for (i = 0; i < s->chromaw; i++) {

        histhue[((int16_t*)p_hue)[i]]++;
if (s->filters & 1<<fil) {
    ThreadData td = {
        .in  = in,
        .out = out != in && s->outfilter == fil ? out : NULL,
    };
    memset(s->jobs_rets, 0, s->nb_jobs * sizeof(*s->jobs_rets));
    ff_filter_execute(ctx, filters_def[fil].process16,
                      &td, s->jobs_rets, s->nb_jobs);
    for (i = 0; i < s->nb_jobs; i++)
        filtot[fil] += s->jobs_rets[i];
lowp   = lrint(s->fs  * 10 / 100.);
highp  = lrint(s->fs  * 90 / 100.);
clowp  = lrint(s->cfs * 10 / 100.);
chighp = lrint(s->cfs * 90 / 100.);
accy = accu = accv = accsat = 0;
for (fil = 0; fil < s->maxsize; fil++) {
    if (miny   < 0 && histy[fil])   miny   = fil;
    if (minu   < 0 && histu[fil])   minu   = fil;
    if (minv   < 0 && histv[fil])   minv   = fil;
    if (minsat < 0 && histsat[fil]) minsat = fil;

    if (histy[fil])   maxy   = fil;
    if (histu[fil])   maxu   = fil;
    if (histv[fil])   maxv   = fil;
    if (histsat[fil]) maxsat = fil;

    toty   += histy[fil]   * fil;
    totu   += histu[fil]   * fil;
    totv   += histv[fil]   * fil;
    totsat += histsat[fil] * fil;

    accy   += histy[fil];
    accu   += histu[fil];
    accv   += histv[fil];
    accsat += histsat[fil];

    if (lowy   == -1 && accy   >= lowp)  lowy   = fil;
    if (lowu   == -1 && accu   >= clowp) lowu   = fil;
    if (lowv   == -1 && accv   >= clowp) lowv   = fil;
    if (lowsat == -1 && accsat >= clowp) lowsat = fil;

    if (highy   == -1 && accy   >= highp)  highy   = fil;
    if (highu   == -1 && accu   >= chighp) highu   = fil;
    if (highv   == -1 && accv   >= chighp) highv   = fil;
    if (highsat == -1 && accsat >= chighp) highsat = fil;
for (fil = 0; fil < 360; fil++) {
    tothue += histhue[fil] * fil;
    acchue += histhue[fil];

    if (medhue == -1 && acchue > s->cfs / 2)
        medhue = fil;

    if (histhue[fil] > maxhue) {
        maxhue = histhue[fil];
SET_META("YAVG",   "%g", 1.0 * toty   / s->fs);
SET_META("UAVG",   "%g", 1.0 * totu   / s->cfs);
SET_META("VAVG",   "%g", 1.0 * totv   / s->cfs);
SET_META("SATAVG", "%g", 1.0 * totsat / s->cfs);
SET_META("HUEAVG", "%g", 1.0 * tothue / s->cfs);

SET_META("YDIF", "%g", 1.0 * dify / s->fs);
SET_META("UDIF", "%g", 1.0 * difu / s->cfs);
SET_META("VDIF", "%g", 1.0 * difv / s->cfs);
if (s->filters & 1<<fil) {
    snprintf(metabuf, sizeof(metabuf), "%g", 1.0 * filtot[fil] / s->fs);
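/* Example (not part of the filter): the 16-bit path accumulates into
 * int64_t because the plain int sums of the 8-bit path can overflow at
 * high bit depths -- a 10-bit 3840x2160 frame can push the luma sum to
 * 1023 * 3840 * 2160, about 8.5e9, well past INT32_MAX: */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int64_t worst = (int64_t)1023 * 3840 * 2160;  /* worst-case 10-bit UHD sum */
    printf("%lld > %d ? %s\n", (long long)worst, INT32_MAX,
           worst > INT32_MAX ? "yes" : "no");  /* 8485171200 > 2147483647 ? yes */
    return 0;
}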
/* ff_vf_signalstats: filter definition */
.name        = "signalstats",
.description = "Generate statistics from video analysis.",
.priv_class  = &signalstats_class,