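/* Source excerpts from FFmpeg's libavfilter/vf_fieldmatch.c
 * (fieldmatch: field matching filter for inverse telecine). */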
#define INPUT_CLEANSRC 1

#define OFFSET(x) offsetof(FieldMatchContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
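/* Excerpt of the fieldmatch_options[] AVOption table. */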
{ "pc_n_ub",  "2-way match + 3rd match on combed + 4th/5th matches if still combed (p/c + u + u/b)",
              0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
{ "ppsrc",    "mark main input as a pre-processed input and activate clean source input stream",
              OFFSET(ppsrc),   AV_OPT_TYPE_BOOL, {.i64=0},  0, 1,       FLAGS },
{ "mchroma",  "set whether or not chroma is included during the match comparisons",
              OFFSET(mchroma), AV_OPT_TYPE_BOOL, {.i64=1},  0, 1,       FLAGS },
{ "y0",       "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision",
              OFFSET(y0),      AV_OPT_TYPE_INT,  {.i64=0},  0, INT_MAX, FLAGS },
{ "y1",       "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision",
              OFFSET(y1),      AV_OPT_TYPE_INT,  {.i64=0},  0, INT_MAX, FLAGS },
{ "cthresh",  "set the area combing threshold used for combed frame detection",
              OFFSET(cthresh), AV_OPT_TYPE_INT,  {.i64=9},  -1, 0xff,   FLAGS },
{ "blockx",   "set the x-axis size of the window used during combed frame detection",
              OFFSET(blockx),  AV_OPT_TYPE_INT,  {.i64=16}, 4, 1<<9,    FLAGS },
{ "blocky",   "set the y-axis size of the window used during combed frame detection",
              OFFSET(blocky),  AV_OPT_TYPE_INT,  {.i64=16}, 4, 1<<9,    FLAGS },
{ "combpel",  "set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed",
              OFFSET(combpel), AV_OPT_TYPE_INT,  {.i64=80}, 0, INT_MAX, FLAGS },
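/* luma_abs_diff(): sum of absolute luma differences between two frames f1 and f2. */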
const uint8_t *srcp1 = f1->data[0];
const uint8_t *srcp2 = f2->data[0];
const int src1_linesize = f1->linesize[0];
const int src2_linesize = f2->linesize[0];

for (y = 0; y < height; y++) {
    for (x = 0; x < width; x++)
        acc += abs(srcp1[x] - srcp2[x]);
    srcp1 += src1_linesize;
    srcp2 += src2_linesize;
}
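/* fill_buf(): fill a w x h buffer with the value v, one line at a time. */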
for (y = 0; y < h; y++) {
    /* ... */
}
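/* calc_combed_score(): build a per-plane comb mask, then return the highest
 * combed-pixel count found in any blockx x blocky window of the frame. */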
int x, y, plane, max_v = 0;
const int cthresh = fm->cthresh;
const int cthresh6 = cthresh * 6;

for (plane = 0; plane < (fm->chroma ? 3 : 1); plane++) {
    const uint8_t *srcp = src->data[plane];
    const int src_linesize = src->linesize[plane];
    /* width, height and the comb mask plane (cmkp / cmk_linesize) are set up
     * in elided lines */

    /* [1 -3 4 -3 1] vertical filter around srcp[x]; a pixel is a comb
     * candidate when the filtered value exceeds cthresh * 6 */
#define FILTER(xm2, xm1, xp1, xp2) \
    abs(  4 * srcp[x]                                                        \
         -3 * (srcp[x + (xm1)*src_linesize] + srcp[x + (xp1)*src_linesize]) \
         +    (srcp[x + (xm2)*src_linesize] + srcp[x + (xp2)*src_linesize])) > cthresh6

    /* first line: only the lines below are available */
    for (x = 0; x < width; x++) {
        const int s1 = abs(srcp[x] - srcp[x + src_linesize]);
        if (s1 > cthresh && FILTER(2, 1, 1, 2))
            cmkp[x] = 0xff;
    }
    srcp += src_linesize;
    cmkp += cmk_linesize;

    /* second line */
    for (x = 0; x < width; x++) {
        const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
        const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
        if (s1 > cthresh && s2 > cthresh && FILTER(2, -1, 1, 2))
            cmkp[x] = 0xff;
    }
    srcp += src_linesize;
    cmkp += cmk_linesize;

    /* all lines except the first two and last two */
    for (y = 2; y < height - 2; y++) {
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
            const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
            if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, 2))
                cmkp[x] = 0xff;
        }
        srcp += src_linesize;
        cmkp += cmk_linesize;
    }

    /* before-last line */
    for (x = 0; x < width; x++) {
        const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
        const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
        if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, -2))
            cmkp[x] = 0xff;
    }
    srcp += src_linesize;
    cmkp += cmk_linesize;

    /* last line: only the lines above are available */
    for (x = 0; x < width; x++) {
        const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
        if (s1 > cthresh && FILTER(-2, -1, -1, -2))
            cmkp[x] = 0xff;
    }
}
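/* Chroma linking: when chroma is included in the detection, a comb mark in
 * the U or V mask that has another marked chroma pixel next to it is
 * propagated into the co-located cells of the luma comb mask. */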
uint8_t *cmkpp  = cmkp - (cmk_linesize>>1);
uint8_t *cmkpn  = cmkp + (cmk_linesize>>1);
uint8_t *cmkpnn = cmkp +  cmk_linesize;

for (y = 1; y < height - 1; y++) {
    cmkpp  += cmk_linesize;
    cmkp   += cmk_linesize;
    cmkpn  += cmk_linesize;
    cmkpnn += cmk_linesize;
    cmkpV  += cmk_linesizeUV;
    cmkpU  += cmk_linesizeUV;
    for (x = 1; x < width - 1; x++) {
#define HAS_FF_AROUND(p, lz) (p[(x)-1 - (lz)] == 0xff || p[(x) - (lz)] == 0xff || p[(x)+1 - (lz)] == 0xff || \
                              p[(x)-1       ] == 0xff ||                          p[(x)+1       ] == 0xff || \
                              p[(x)-1 + (lz)] == 0xff || p[(x) + (lz)] == 0xff || p[(x)+1 + (lz)] == 0xff)
        if ((cmkpV[x] == 0xff && HAS_FF_AROUND(cmkpV, cmk_linesizeUV)) ||
            (cmkpU[x] == 0xff && HAS_FF_AROUND(cmkpU, cmk_linesizeUV))) {
            ((uint16_t*)cmkp)[x]  = 0xffff;
            ((uint16_t*)cmkpn)[x] = 0xffff;
            if (y&1) ((uint16_t*)cmkpp)[x]  = 0xffff;
            else     ((uint16_t*)cmkpnn)[x] = 0xffff;
        }
    }
}
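/* Combed-score accumulation: slide blockx x blocky windows (including the
 * half-block-shifted ones) over the comb mask and count marked pixels; the
 * largest per-window count becomes the frame's combed score. */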
const int blockx = fm->blockx;
const int blocky = fm->blocky;
const int xhalf = blockx/2;
const int yhalf = blocky/2;
const uint8_t *cmkp = fm->cmask_data[0] + cmk_linesize;
const int xblocks  = ((width  + xhalf) / blockx) + 1;
const int xblocks4 = xblocks<<2;
const int yblocks  = ((height + yhalf) / blocky) + 1;
const int arraysize = (xblocks * yblocks) << 2;
int heighta = (height / (blocky/2)) * (blocky/2);
const int widtha = (width  / (blockx/2)) * (blockx/2);

memset(c_array, 0, arraysize * sizeof(*c_array));

/* each hit contributes to the four windows overlapping (x, y): the whole
 * block plus the three half-block-shifted ones */
#define C_ARRAY_ADD(v) do {                          \
    const int box1 = (x          / blockx) * 4;      \
    const int box2 = ((x + xhalf) / blockx) * 4;     \
    c_array[temp1 + box1    ] += v;                  \
    c_array[temp1 + box2 + 1] += v;                  \
    c_array[temp2 + box1 + 2] += v;                  \
    c_array[temp2 + box2 + 3] += v;                  \
} while (0)

/* VERTICAL_HALF() handles the top and bottom half-block rows (calls elided) */
#define VERTICAL_HALF(y_start, y_end) do {                    \
    for (y = y_start; y < y_end; y++) {                       \
        const int temp1 = (y / blocky) * xblocks4;            \
        const int temp2 = ((y + yhalf) / blocky) * xblocks4;  \
        for (x = 0; x < width; x++)                           \
            if (cmkp[x - cmk_linesize] == 0xff &&             \
                cmkp[x               ] == 0xff &&             \
                cmkp[x + cmk_linesize] == 0xff)               \
                C_ARRAY_ADD(1);                               \
        cmkp += cmk_linesize;                                 \
    }                                                         \
} while (0)

for (y = yhalf; y < heighta; y += yhalf) {
    const int temp1 = (y / blocky) * xblocks4;
    const int temp2 = ((y + yhalf) / blocky) * xblocks4;

    for (x = 0; x < widtha; x += xhalf) {
        const uint8_t *cmkp_tmp = cmkp + x;
        int count = 0;
        for (u = 0; u < yhalf; u++) {
            for (v = 0; v < xhalf; v++)
                if (cmkp_tmp[v - cmk_linesize] == 0xff &&
                    cmkp_tmp[v               ] == 0xff &&
                    cmkp_tmp[v + cmk_linesize] == 0xff) count++;
            cmkp_tmp += cmk_linesize;
        }
        if (count) C_ARRAY_ADD(count);
    }

    /* right edge of the frame, handled one column at a time */
    for (x = widtha; x < width; x++) {
        const uint8_t *cmkp_tmp = cmkp + x;
        int count = 0;
        for (u = 0; u < yhalf; u++) {
            if (cmkp_tmp[-cmk_linesize] == 0xff &&
                cmkp_tmp[            0] == 0xff &&
                cmkp_tmp[ cmk_linesize] == 0xff) count++;
            cmkp_tmp += cmk_linesize;
        }
        if (count) C_ARRAY_ADD(count);
    }

    cmkp += cmk_linesize * yhalf;
}

for (x = 0; x < arraysize; x++)
    if (c_array[x] > max_v)
        max_v = c_array[x];
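/* build_abs_diff_mask(): store the absolute difference |prvp - nxtp| of every
 * pixel into tbuffer. */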
static void build_abs_diff_mask(const uint8_t *prvp, int prv_linesize,
                                const uint8_t *nxtp, int nxt_linesize,
                                uint8_t *tbuffer, int tbuf_linesize,
                                int width, int height)
{
    int x, y;

    prvp -= prv_linesize;
    nxtp -= nxt_linesize;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            tbuffer[x] = FFABS(prvp[x] - nxtp[x]);
        prvp    += prv_linesize;
        nxtp    += nxt_linesize;
        tbuffer += tbuf_linesize;
    }
}
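/* build_diff_map(): build a map over which pixels differ a lot/a little
 * between the matched fields (excerpt; several branches elided). */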
static void build_diff_map(FieldMatchContext *fm,
                           const uint8_t *prvp, int prv_linesize,
                           const uint8_t *nxtp, int nxt_linesize,
                           uint8_t *dstp, int dst_linesize,
                           int height, int width, int plane)
{
    int x, y, u, diff, count;
    const uint8_t *dp = fm->tbuffer + tpitch; /* tpitch: linesize of the diff mask (setup elided) */

    /* ... */

    for (y = 2; y < height - 2; y += 2) {
        for (x = 1; x < width - 1; x++) {
            /* count neighbours of x (previous/current/next diff-mask line)
             * whose absolute difference exceeds 3 */
            for (count = 0, u = x-1; u < x+2 && count < 2; u++) {
                count += dp[u-tpitch] > 3;
                count += dp[u       ] > 3;
                count += dp[u+tpitch] > 3;
            }
            /* ... */
            int upper = 0, lower = 0;
            /* same count with the stronger threshold of 19 */
            for (count = 0, u = x-1; u < x+2 && count < 6; u++) {
                if (dp[u-tpitch] > 19) { count++; upper = 1; }
                if (dp[u       ] > 19)   count++;
                if (dp[u+tpitch] > 19) { count++; lower = 1; }
            }
            /* ... */
            if (upper && lower) {
                /* ... */
            } else {
                int upper2 = 0, lower2 = 0;
                /* ... */
                if (y != 2          && dp[u-2*tpitch] > 19) upper2 = 1;
                if (                   dp[u-  tpitch] > 19) upper  = 1;
                if (                   dp[u+  tpitch] > 19) lower  = 1;
                if (y != height - 4 && dp[u+2*tpitch] > 19) lower2 = 1;
                /* ... */
                if ((upper && (lower || upper2)) ||
                    (lower && (upper || lower2)))
                    /* ... */;
            }
            /* ... */
        }
        /* ... */
        dstp += dst_linesize;
    }
}
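/* select_frame(): return the prv/src/nxt frame a given match code refers to. */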
if      (match == mP || match == mB) return fm->prv;
else if (match == mN || match == mU) return fm->nxt;
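/* compare_fields(): decide between two candidate matches by accumulating
 * comb and motion metrics of the woven fields against the previous (accumP*)
 * and next (accumN*) frame. */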
uint64_t accumPc = 0, accumPm = 0, accumPml = 0;
uint64_t accumNc = 0, accumNm = 0, accumNml = 0;
int norm1, norm2, mtn1, mtn2;

for (plane = 0; plane < (fm->mchroma ? 3 : 1); plane++) {
    int x, y, temp1, temp2, fbase;
    uint8_t *mapp = fm->map_data[plane];
    const uint8_t *srcp = src->data[plane];
    const int src_linesize  = src->linesize[plane];
    const int srcf_linesize = src_linesize << 1;
    int prv_linesize,  nxt_linesize;
    int prvf_linesize, nxtf_linesize;
    /* prev/next: frames selected for the two candidate matches; width/height,
     * startx and the y0a..y1a exclusion band come from elided setup lines */
    const int stopx = width - startx;
    const uint8_t *srcpf, *srcf, *srcnf;
    const uint8_t *prvpf, *prvnf, *nxtpf, *nxtnf;

    /* ... */
    srcf  = srcp + (fbase + 1) * src_linesize;
    srcpf = srcf - srcf_linesize;
    srcnf = srcf + srcf_linesize;
    mapp  = mapp + fbase * map_linesize;

    prv_linesize  = prev->linesize[plane];
    prvf_linesize = prv_linesize << 1;
    prvpf = prev->data[plane] + fbase * prv_linesize;
    prvnf = prvpf + prvf_linesize;

    nxt_linesize  = next->linesize[plane];
    nxtf_linesize = nxt_linesize << 1;
    nxtpf = next->data[plane] + fbase * nxt_linesize;
    nxtnf = nxtpf + nxtf_linesize;

    if ((match1 >= 3 && field == 1) || (match1 < 3 && field != 1))
        build_diff_map(fm, prvpf, prvf_linesize, nxtpf, nxtf_linesize,
                       mapp, map_linesize, height, width, plane);
    else
        build_diff_map(fm, prvnf, prvf_linesize, nxtnf, nxtf_linesize,
                       mapp + map_linesize, map_linesize, height, width, plane);

    for (y = 2; y < height - 2; y += 2) {
        if (y0a == y1a || y < y0a || y > y1a) {
            for (x = startx; x < stopx; x++) {
                if (mapp[x] > 0 || mapp[x + map_linesize] > 0) {
                    temp1 = srcpf[x] + (srcf[x] << 2) + srcnf[x];

                    temp2 = abs(3 * (prvpf[x] + prvnf[x]) - temp1);
                    if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
                        accumPc += temp2;
                    if (temp2 > 42) {
                        if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
                            accumPm += temp2;
                        if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
                            accumPml += temp2;
                    }

                    temp2 = abs(3 * (nxtpf[x] + nxtnf[x]) - temp1);
                    if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
                        accumNc += temp2;
                    if (temp2 > 42) {
                        if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
                            accumNm += temp2;
                        if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
                            accumNml += temp2;
                    }
                }
            }
        }
        prvpf += prvf_linesize;
        prvnf += prvf_linesize;
        srcpf += srcf_linesize;
        srcf  += srcf_linesize;
        srcnf += srcf_linesize;
        nxtpf += nxtf_linesize;
        nxtnf += nxtf_linesize;
        mapp  += map_linesize;
    }
}

if (accumPm < 500 && accumNm < 500 && (accumPml >= 500 || accumNml >= 500) &&
    FFMAX(accumPml, accumNml) > 3 * FFMIN(accumPml, accumNml)) {
    /* ... */
}

norm1 = (int)((accumPc / 6.0f) + 0.5f);
norm2 = (int)((accumNc / 6.0f) + 0.5f);
mtn1  = (int)((accumPm / 6.0f) + 0.5f);
mtn2  = (int)((accumNm / 6.0f) + 0.5f);

if (((mtn1 >= 500  || mtn2 >= 500)  && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ||
    ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
    ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
    ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
    ret = mtn1 > mtn2 ? match2 : match1;
else if (mr > 0.005 && FFMAX(mtn1, mtn2) > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1))
    ret = mtn1 > mtn2 ? match2 : match1;
else
    ret = norm1 > norm2 ? match2 : match1;
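/* copy_fields(): copy one field (every second line) of each plane of src into dst. */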
for (plane = 0; plane < 4 && src->data[plane] && src->linesize[plane]; plane++) {
    const int plane_h = get_height(fm, src, plane, input);
    const int nb_copy_fields = (plane_h >> 1) + (field ? 0 : (plane_h & 1));
    av_image_copy_plane(dst->data[plane] + field*dst->linesize[plane], dst->linesize[plane] << 1,
                        src->data[plane] + field*src->linesize[plane], src->linesize[plane] << 1,
                        get_width(fm, src, plane, input) * fm->bpc, nb_copy_fields);
}
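/* LOAD_COMB() / checkmm(): lazily compute the combed score of candidate
 * matches and pick between two of them. */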
#define LOAD_COMB(mid) do {                                                  \
    if (combs[mid] < 0) {                                                    \
        if (!gen_frames[mid])                                                \
            gen_frames[mid] = create_weave_frame(ctx, mid, field,            \
                                                 fm->prv, fm->src, fm->nxt,  \
                                                 INPUT_MAIN);                \
        combs[mid] = calc_combed_score(fm, gen_frames[mid]);                 \
    }                                                                        \
} while (0)

/* ... */

if ((combs[m2] * 3 < combs[m1] || (combs[m2] * 2 < combs[m1] && combs[m1] > fm->combpel)) &&
    abs(combs[m2] - combs[m1]) >= 30 && combs[m2] < fm->combpel)
    return m2;
return m1;
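/* filter_frame(): run field matching on the prv/src/nxt sliding window and
 * output the chosen (possibly re-woven) frame. Heavily elided excerpt. */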
int combs[] = { -1, -1, -1, -1, -1 };
int order, field, i, match, interlaced_frame, sc = 0, ret = 0;

/* shift the prv/src/nxt sliding window of 3 frames (body mostly elided) */
#define SLIDING_FRAME_WINDOW(prv, src, nxt) do {    \
        /* ... */                                   \
        av_frame_free(&prv);                        \
        /* ... */                                   \
        av_assert0(prv && src && nxt);              \
    } while (0)

/* ... */
if (!gen_frames[i]) {
    /* ... */
}
/* ...
   combs[0], combs[1], combs[2], combs[3], combs[4]); */

if (!gen_frames[mC]) {
    /* ... */
}

/* the output frame is flagged as combed when the best match is still above combpel */
interlaced_frame = combs[match] >= fm->combpel;

/* ... */
if (!gen_frames[match]) {
    /* ... */
}
dst = gen_frames[match];
gen_frames[match] = NULL;

/* ... */
#if FF_API_INTERLACED_FRAME
/* ... */
#endif
if (interlaced_frame) {
    /* ... */
#if FF_API_INTERLACED_FRAME
    /* ... */
#endif
}

/* ...
   " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4], ... */
/* fieldmatch_init(): when ppsrc is set, a second input pad named "clean_src"
 * is appended for the pre-processed clean source stream */
pad.name = "clean_src";

/* config_input(): bytes per component, derived from the pixel format depth */
fm->bpc = (desc->comp[0].depth + 7) / 8;

const AVFilter ff_vf_fieldmatch = {
    .name        = "fieldmatch",
    /* ... */
    .priv_class  = &fieldmatch_class,
    /* ... */
};