#define INPUT_CLEANSRC 1

#define OFFSET(x) offsetof(FieldMatchContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption fieldmatch_options[] = {
    /* ... */
    { "pc_n_ub", "2-way match + 3rd match on combed + 4th/5th matches if still combed (p/c + u + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { "ppsrc",   "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "mchroma", "set whether or not chroma is included during the match comparisons", OFFSET(mchroma), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
    { "y0", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y0), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    { "y1", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y1), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    { "cthresh", "set the area combing threshold used for combed frame detection", OFFSET(cthresh), AV_OPT_TYPE_INT, {.i64= 9}, -1, 0xff, FLAGS },
    { "blockx",  "set the x-axis size of the window used during combed frame detection", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
    { "blocky",  "set the y-axis size of the window used during combed frame detection", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
    { "combpel", "set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed", OFFSET(combpel), AV_OPT_TYPE_INT, {.i64=80}, 0, INT_MAX, FLAGS },
    /* ... */
};
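The table above only declares option names, ranges and defaults; they are resolved at runtime through the AVOption API against the filter's private context. As a minimal sketch (not part of this file; the helper name is illustrative), the same options could be set programmatically before initialisation, with AV_OPT_SEARCH_CHILDREN so the lookup reaches the private context:

#include <libavfilter/avfilter.h>
#include <libavutil/opt.h>

/* Sketch only: create a fieldmatch instance and override two of the options
 * declared above (cthresh defaults to 9, "pc_n_ub" is one of the mode consts). */
static AVFilterContext *make_fieldmatch(AVFilterGraph *graph)
{
    AVFilterContext *fm = avfilter_graph_alloc_filter(graph,
                              avfilter_get_by_name("fieldmatch"), "fm");
    if (!fm)
        return NULL;
    av_opt_set_int(fm, "cthresh", 12, AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (fm, "mode", "pc_n_ub", AV_OPT_SEARCH_CHILDREN);
    if (avfilter_init_str(fm, NULL) < 0)
        return NULL;   /* error cleanup omitted in this sketch */
    return fm;
}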
/* luma_abs_diff(): sum of absolute luma differences between two frames */
    const uint8_t *srcp1 = f1->data[0];
    const uint8_t *srcp2 = f2->data[0];
    const int src1_linesize = f1->linesize[0];
    const int src2_linesize = f2->linesize[0];

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            acc += abs(srcp1[x] - srcp2[x]);
        srcp1 += src1_linesize;
        srcp2 += src2_linesize;
    }

/* fill_buf(): fill a w x h rectangle of a plane with a constant value */
    for (y = 0; y < h; y++) {
        /* ... */
    }
/* calc_combed_score(): per-pixel combing detection */
    int x, y, plane, max_v = 0;
    const int cthresh = fm->cthresh;
    const int cthresh6 = cthresh * 6;

    for (plane = 0; plane < (fm->chroma ? 3 : 1); plane++) {
        const uint8_t *srcp = src->data[plane];
        const int src_linesize = src->linesize[plane];
        /* ... */

/* vertical 5-tap comb test: |4*c - 3*(n1 + p1) + (n2 + p2)| against 6*cthresh */
#define FILTER(xm2, xm1, xp1, xp2) \
    abs( 4 * srcp[x] \
        -3 * (srcp[x + (xm1)*src_linesize] + srcp[x + (xp1)*src_linesize]) \
        +    (srcp[x + (xm2)*src_linesize] + srcp[x + (xp2)*src_linesize])) > cthresh6

        /* first line of the plane: only the lines below are available */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x + src_linesize]);
            if (s1 > cthresh && FILTER(2, 1, 1, 2))
                cmkp[x] = 0xff;
        }
        srcp += src_linesize;
        cmkp += cmk_linesize;

        /* second line: one line above, mirrored below */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
            const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
            if (s1 > cthresh && s2 > cthresh && FILTER(2, -1, 1, 2))
                cmkp[x] = 0xff;
        }
        srcp += src_linesize;
        cmkp += cmk_linesize;

        /* interior lines */
        for (y = 2; y < height-2; y++) {
            for (x = 0; x < width; x++) {
                const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
                const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
                if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, 2))
                    cmkp[x] = 0xff;
            }
            srcp += src_linesize;
            cmkp += cmk_linesize;
        }

        /* second-to-last line */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
            const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
            if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, -2))
                cmkp[x] = 0xff;
        }
        srcp += src_linesize;
        cmkp += cmk_linesize;

        /* last line: only the lines above are available */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
            if (s1 > cthresh && FILTER(-2, -1, -1, -2))
                cmkp[x] = 0xff;
        }
        /* spread combed chroma pixels into the luma comb mask */
        uint8_t *cmkpp  = cmkp - (cmk_linesize>>1);
        uint8_t *cmkpn  = cmkp + (cmk_linesize>>1);
        uint8_t *cmkpnn = cmkp +  cmk_linesize;
        for (y = 1; y < height - 1; y++) {
            cmkpp  += cmk_linesize;
            cmkp   += cmk_linesize;
            cmkpn  += cmk_linesize;
            cmkpnn += cmk_linesize;
            cmkpV  += cmk_linesizeUV;
            cmkpU  += cmk_linesizeUV;
            for (x = 1; x < width - 1; x++) {
#define HAS_FF_AROUND(p, lz) (p[(x)-1 - (lz)] == 0xff || p[(x) - (lz)] == 0xff || p[(x)+1 - (lz)] == 0xff || \
                              p[(x)-1       ] == 0xff ||                          p[(x)+1       ] == 0xff || \
                              p[(x)-1 + (lz)] == 0xff || p[(x) + (lz)] == 0xff || p[(x)+1 + (lz)] == 0xff)
                if ((cmkpV[x] == 0xff && HAS_FF_AROUND(cmkpV, cmk_linesizeUV)) ||
                    (cmkpU[x] == 0xff && HAS_FF_AROUND(cmkpU, cmk_linesizeUV))) {
                    ((uint16_t*)cmkp)[x]  = 0xffff;
                    ((uint16_t*)cmkpn)[x] = 0xffff;
                    if (y&1) ((uint16_t*)cmkpp)[x]  = 0xffff;
                    else     ((uint16_t*)cmkpnn)[x] = 0xffff;
                }
            }
        }
    /* block-based accumulation of the comb mask */
    const int blockx = fm->blockx;
    const int blocky = fm->blocky;
    const int xhalf  = blockx/2;
    const int yhalf  = blocky/2;
    const uint8_t *cmkp = fm->cmask_data[0] + cmk_linesize;
    const int xblocks  = ((width +xhalf)/blockx) + 1;
    const int xblocks4 = xblocks<<2;
    const int yblocks  = ((height+yhalf)/blocky) + 1;
    const int arraysize = (xblocks*yblocks)<<2;
    int heighta       = (height/(blocky/2))*(blocky/2);
    const int widtha  = (width /(blockx/2))*(blockx/2);

    memset(c_array, 0, arraysize * sizeof(*c_array));

/* each hit is credited to the four half-overlapping blocks covering (x, y) */
#define C_ARRAY_ADD(v) do {                          \
    const int box1 = (x / blockx) * 4;               \
    const int box2 = ((x + xhalf) / blockx) * 4;     \
    c_array[temp1 + box1    ] += v;                  \
    c_array[temp1 + box2 + 1] += v;                  \
    c_array[temp2 + box1 + 2] += v;                  \
    c_array[temp2 + box2 + 3] += v;                  \
} while (0)

#define VERTICAL_HALF(y_start, y_end) do {                    \
    for (y = y_start; y < y_end; y++) {                       \
        const int temp1 = (y / blocky) * xblocks4;            \
        const int temp2 = ((y + yhalf) / blocky) * xblocks4;  \
        for (x = 0; x < width; x++)                           \
            if (cmkp[x - cmk_linesize] == 0xff &&             \
                cmkp[x               ] == 0xff &&             \
                cmkp[x + cmk_linesize] == 0xff)               \
                C_ARRAY_ADD(1);                               \
        cmkp += cmk_linesize;                                 \
    }                                                         \
} while (0)

    for (y = yhalf; y < heighta; y += yhalf) {
        const int temp1 = (y / blocky) * xblocks4;
        const int temp2 = ((y + yhalf) / blocky) * xblocks4;

        for (x = 0; x < widtha; x += xhalf) {
            const uint8_t *cmkp_tmp = cmkp + x;
            /* ... */
            for (u = 0; u < yhalf; u++) {
                for (v = 0; v < xhalf; v++)
                    if (cmkp_tmp[v - cmk_linesize] == 0xff &&
                        cmkp_tmp[v               ] == 0xff &&
                        cmkp_tmp[v + cmk_linesize] == 0xff)
                        /* ... */;
                cmkp_tmp += cmk_linesize;
            }
            /* ... */
        }

        for (x = widtha; x < width; x++) {
            const uint8_t *cmkp_tmp = cmkp + x;
            /* ... */
            for (u = 0; u < yhalf; u++) {
                if (cmkp_tmp[-cmk_linesize] == 0xff &&
                    cmkp_tmp[            0] == 0xff &&
                    cmkp_tmp[ cmk_linesize] == 0xff)
                    /* ... */;
                cmkp_tmp += cmk_linesize;
            }
            /* ... */
        }

        cmkp += cmk_linesize * yhalf;
    }

    for (x = 0; x < arraysize; x++)
        if (c_array[x] > max_v)
            max_v = c_array[x];
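In other words, a pixel is flagged as combed when it differs from both vertical neighbours by more than cthresh and a 5-tap vertical high-pass exceeds 6*cthresh; the flags are then counted over overlapping blockx-by-blocky windows and the largest per-window count becomes the combed score compared against combpel. A minimal, self-contained sketch of the per-pixel test (the helper name is not the filter's; it mirrors the interior-line case s1, s2 plus FILTER(-2, -1, 1, 2)):

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: combing test for one interior pixel at offset x of a row. */
static int pixel_is_combed(const uint8_t *p, int stride, int x, int cthresh)
{
    const int c  = p[x];
    const int a  = p[x - stride],   b  = p[x + stride];    /* lines above/below  */
    const int a2 = p[x - 2*stride], b2 = p[x + 2*stride];  /* two lines away     */

    if (abs(c - a) <= cthresh || abs(c - b) <= cthresh)
        return 0;
    /* 5-tap vertical high-pass, same form as the FILTER() macro above */
    return abs(4*c - 3*(a + b) + (a2 + b2)) > 6*cthresh;
}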
/* build_abs_diff_mask(): per-pixel absolute difference between the previous
 * and next frames, written into a temporary buffer */
static void build_abs_diff_mask(const uint8_t *prvp, int prv_linesize,
                                const uint8_t *nxtp, int nxt_linesize,
                                uint8_t *tbuffer, int tbuf_linesize,
                                int width, int height)
{
    int x, y;

    prvp -= prv_linesize;
    nxtp -= nxt_linesize;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            tbuffer[x] = FFABS(prvp[x] - nxtp[x]);
        prvp += prv_linesize;
        nxtp += nxt_linesize;
        tbuffer += tbuf_linesize;
    }
}
/**
 * Build a map over which pixels differ a lot / a little.
 */
static void build_diff_map(FieldMatchContext *fm,
                           const uint8_t *prvp, int prv_linesize,
                           const uint8_t *nxtp, int nxt_linesize,
                           uint8_t *dstp, int dst_linesize,
                           int height, int width, int plane)
{
    int x, y, u, diff, count;
    const uint8_t *dp = fm->tbuffer + tpitch;
    /* ... */

    for (y = 2; y < height - 2; y += 2) {
        for (x = 1; x < width - 1; x++) {
            /* ... */
            for (count = 0, u = x-1; u < x+2 && count < 2; u++) {
                count += dp[u-tpitch] > 3;
                count += dp[u       ] > 3;
                count += dp[u+tpitch] > 3;
            }
            /* ... */
            int upper = 0, lower = 0;
            for (count = 0, u = x-1; u < x+2 && count < 6; u++) {
                if (dp[u-tpitch] > 19) { count++; upper = 1; }
                if (dp[u       ] > 19) count++;
                if (dp[u+tpitch] > 19) { count++; lower = 1; }
            }
            /* ... */
            if (upper && lower) {
                /* ... */
            }
            /* ... */
            int upper2 = 0, lower2 = 0;
            /* ... */
            if (y != 2          && dp[u-2*tpitch] > 19) upper2 = 1;
            if (                   dp[u-  tpitch] > 19) upper  = 1;
            if (                   dp[u+  tpitch] > 19) lower  = 1;
            if (y != height - 4 && dp[u+2*tpitch] > 19) lower2 = 1;
            /* ... */
            if ((upper && (lower || upper2)) ||
                (lower && (upper || lower2)))
                /* ... */;
        }
        /* ... */
        dstp += dst_linesize;
    }
}
/* select_frame(): pick which frame of the prv/src/nxt sliding window a match refers to */
    if      (match == mP || match == mB) return fm->prv;
    else if (match == mN || match == mU) return fm->nxt;
/* compare_fields(): decide between two candidate matches by accumulating
 * per-pixel differences against the previous and next frames */
    uint64_t accumPc = 0, accumPm = 0, accumPml = 0;
    uint64_t accumNc = 0, accumNm = 0, accumNml = 0;
    int norm1, norm2, mtn1, mtn2;

    for (plane = 0; plane < (fm->mchroma ? 3 : 1); plane++) {
        int x, y, temp1, temp2, fbase;
        /* ... */
        uint8_t *mapp = fm->map_data[plane];
        /* ... */
        const uint8_t *srcp = src->data[plane];
        const int src_linesize  = src->linesize[plane];
        const int srcf_linesize = src_linesize << 1;
        int prv_linesize,  nxt_linesize;
        int prvf_linesize, nxtf_linesize;
        /* ... */
        const int stopx = width - startx;
        const uint8_t *srcpf, *srcf, *srcnf;
        const uint8_t *prvpf, *prvnf, *nxtpf, *nxtnf;
        /* ... */
        srcf  = srcp + (fbase + 1) * src_linesize;
        srcpf = srcf - srcf_linesize;
        srcnf = srcf + srcf_linesize;
        mapp  = mapp + fbase * map_linesize;
        /* ... */
        prv_linesize  = prev->linesize[plane];
        prvf_linesize = prv_linesize << 1;
        prvpf = prev->data[plane] + fbase * prv_linesize;
        prvnf = prvpf + prvf_linesize;
        /* ... */
        nxt_linesize  = next->linesize[plane];
        nxtf_linesize = nxt_linesize << 1;
        nxtpf = next->data[plane] + fbase * nxt_linesize;
        nxtnf = nxtpf + nxtf_linesize;
        /* ... */
        if ((match1 >= 3 && field == 1) || (match1 < 3 && field != 1))
            /* ... */;
        /* ... build_diff_map() call, trailing arguments shown: */
                           mapp + map_linesize, map_linesize, height, width, plane);

        for (y = 2; y < height - 2; y += 2) {
            if (y0a == y1a || y < y0a || y > y1a) {
                for (x = startx; x < stopx; x++) {
                    if (mapp[x] > 0 || mapp[x + map_linesize] > 0) {
                        temp1 = srcpf[x] + (srcf[x] << 2) + srcnf[x];
                        /* ... */
                        temp2 = abs(3 * (prvpf[x] + prvnf[x]) - temp1);
                        if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
                            /* ... */;
                        if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
                            /* ... */;
                        if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
                            /* ... */;
                        /* ... */
                        temp2 = abs(3 * (nxtpf[x] + nxtnf[x]) - temp1);
                        if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
                            /* ... */;
                        if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
                            /* ... */;
                        if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
                            /* ... */;
                    }
                }
            }
            prvpf += prvf_linesize;
            prvnf += prvf_linesize;
            srcpf += srcf_linesize;
            srcf  += srcf_linesize;
            srcnf += srcf_linesize;
            nxtpf += nxtf_linesize;
            nxtnf += nxtf_linesize;
            mapp  += map_linesize;
        }
    }

    if (accumPm < 500 && accumNm < 500 && (accumPml >= 500 || accumNml >= 500) &&
        FFMAX(accumPml,accumNml) > 3*FFMIN(accumPml,accumNml)) {
        /* ... */
    }

    norm1 = (int)((accumPc / 6.0f) + 0.5f);
    norm2 = (int)((accumNc / 6.0f) + 0.5f);
    mtn1  = (int)((accumPm / 6.0f) + 0.5f);
    mtn2  = (int)((accumNm / 6.0f) + 0.5f);
    /* ... */
    if (((mtn1 >= 500  || mtn2 >= 500)  && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ||
        ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
        ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
        ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
        ret = mtn1 > mtn2 ? match2 : match1;
    else if (mr > 0.005 && FFMAX(mtn1, mtn2) > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1))
        ret = mtn1 > mtn2 ? match2 : match1;
    else
        ret = norm1 > norm2 ? match2 : match1;
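The accumulated sums are scaled down (divide by 6, round to nearest) into norm1/norm2 and mtn1/mtn2 for the previous and next candidates; the cascade above prefers the candidate with the smaller mtn value when the two differ strongly, and otherwise falls back to the smaller norm. The values c1, c2 and mr are computed in code not included in this excerpt, so the sketch below (names are illustrative, not the filter's) simply takes them as parameters to restate the visible cascade:

/* Sketch of the visible decision cascade only. */
static int pick_match(int match1, int match2,
                      int norm1, int norm2, int mtn1, int mtn2,
                      float c1, float c2, float mr)
{
    const int mtn_max = mtn1 > mtn2 ? mtn1 : mtn2;

    if (((mtn1 >= 500  || mtn2 >= 500)  && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ||
        ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
        ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
        ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
        return mtn1 > mtn2 ? match2 : match1;   /* strong motion difference wins */
    if (mr > 0.005 && mtn_max > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1))
        return mtn1 > mtn2 ? match2 : match1;
    return norm1 > norm2 ? match2 : match1;     /* default: smaller norm wins */
}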
/* copy_fields(): copy one field (every second line of each plane) from src into dst */
    for (plane = 0; plane < 4 && src->data[plane] && src->linesize[plane]; plane++) {
        /* ... */
        const int nb_copy_fields = (plane_h >> 1) + (field ? 0 : (plane_h & 1));
        /* ... plane copy call, source-side arguments shown: */
                            src->data[plane] + field*src->linesize[plane],
                            src->linesize[plane] << 1,
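The idea is to offset the start pointer to the requested field and double both linesizes so only every second line is touched. A minimal single-plane sketch of the same idea (the helper name and argument list are illustrative, not the filter's own):

#include <stdint.h>
#include <libavutil/imgutils.h>

/* Copy one field (field = 0 or 1) of a plane, leaving the other field untouched. */
static void weave_field(uint8_t *dst, int dst_linesize,
                        const uint8_t *src, int src_linesize,
                        int bytewidth, int plane_h, int field)
{
    const int nb_lines = (plane_h >> 1) + (field ? 0 : (plane_h & 1));
    av_image_copy_plane(dst + field * dst_linesize, dst_linesize << 1,
                        src + field * src_linesize, src_linesize << 1,
                        bytewidth, nb_lines);
}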
/* lazily weave a candidate match and compute its combed score */
#define LOAD_COMB(mid) do {                                                 \
    if (combs[mid] < 0) {                                                   \
        if (!gen_frames[mid])                                               \
            gen_frames[mid] = create_weave_frame(ctx, mid, field,           \
                                                 fm->prv, fm->src, fm->nxt, \
                                                 /* ... */);                \
        combs[mid] = calc_combed_score(fm, gen_frames[mid]);                \
    }                                                                       \
} while (0)

    /* checkmm(): compare the combed scores of two candidate matches */
    if ((combs[m2] * 3 < combs[m1] || (combs[m2] * 2 < combs[m1] && combs[m1] > fm->combpel)) &&
        abs(combs[m2] - combs[m1]) >= 30 && combs[m2] < fm->combpel)
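The consequent of that test is not part of this excerpt, but the condition itself reads as: presumably prefer the second candidate only when its score is much lower (a third, or half while the first exceeds combpel), the absolute gap is at least 30, and the second candidate itself stays below combpel. A sketch of the predicate under that reading, with a hypothetical helper name:

#include <stdlib.h>

/* Hypothetical helper restating the comparison above; combpel is the
 * combed-pixel threshold from the options table. */
static int prefer_m2(int comb_m1, int comb_m2, int combpel)
{
    return (comb_m2 * 3 < comb_m1 ||
            (comb_m2 * 2 < comb_m1 && comb_m1 > combpel)) &&
           abs(comb_m2 - comb_m1) >= 30 &&
           comb_m2 < combpel;
}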
/* activate(): per-frame driver that slides the prv/src/nxt window, picks the
 * best field match and pushes either the matched weave frame or the source */
    int combs[] = { -1, -1, -1, -1, -1 };
    int order, field, i, match, sc = 0, ret = 0;
    /* ... */

#define SLIDING_FRAME_WINDOW(prv, src, nxt) do { \
        /* ... */                                \
        av_frame_free(&prv);                     \
        /* ... */                                \
        av_assert0(prv && src && nxt);           \
        /* ... */                                \
    } while (0)

    /* ... */
    if (!gen_frames[i]) {
        /* ... */
    }
    /* ... debug log of the five comb scores, call head not shown: */
              combs[0], combs[1], combs[2], combs[3], combs[4]);
    /* ... */
    if (!gen_frames[mC]) {
        /* ... */
    }
    /* ... */
    if (!gen_frames[match]) {
        /* ... */
    }
    dst = gen_frames[match];
    gen_frames[match] = NULL;
    /* ... final log line, call head not shown: */
           " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4],
/* fieldmatch_init(): optional second input pad for the clean source (ppsrc) */
        pad.name = "clean_src";

/* config_input(): bytes per component derived from the pixel format depth */
    fm->bpc = (desc->comp[0].depth + 7) / 8;

const AVFilter ff_vf_fieldmatch = {
    .name          = "fieldmatch",
    /* ... */
    .priv_class    = &fieldmatch_class,
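As a usage sketch (not part of this file), the registered filter is typically chained with decimate for inverse telecine. The snippet below assumes an existing filter graph, uses illustrative option values, and leaves buffer source/sink wiring and error cleanup out:

#include <libavfilter/avfilter.h>

/* Sketch: create fieldmatch followed by decimate inside an existing graph. */
static int add_ivtc_chain(AVFilterGraph *graph,
                          AVFilterContext **fm_ctx, AVFilterContext **dec_ctx)
{
    int ret = avfilter_graph_create_filter(fm_ctx, avfilter_get_by_name("fieldmatch"),
                                           "fm", "order=tff:combmatch=full", NULL, graph);
    if (ret < 0)
        return ret;
    ret = avfilter_graph_create_filter(dec_ctx, avfilter_get_by_name("decimate"),
                                       "dec", NULL, NULL, graph);
    if (ret < 0)
        return ret;
    return avfilter_link(*fm_ctx, 0, *dec_ctx, 0);
}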