FFmpeg
vf_fieldmatch.c
1 /*
2  * Copyright (c) 2012 Fredrik Mellbin
3  * Copyright (c) 2013 Clément Bœsch
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Fieldmatching filter, ported from VFM filter (VapourSynth) by Clément.
25  * Fredrik Mellbin is the author of the VIVTC/VFM filter, which is itself a
26  * light clone of the TIVTC/TFM (AviSynth) filter written by Kevin Stone
27  * (tritical), the original author.
28  *
29  * @see http://bengal.missouri.edu/~kes25c/
30  * @see http://www.vapoursynth.com/about/
31  */
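/*
 * Typical usage: fieldmatch reconstructs progressive frames by matching
 * fields, but frames it cannot match are only flagged as interlaced, so the
 * filter is usually followed by a deinterlacer restricted to those frames and
 * by a decimation filter to drop the resulting duplicates, e.g.:
 *
 *   ffmpeg -i input.vob -vf fieldmatch,yadif=deint=interlaced,decimate output.mkv
 */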
32 
33 #include <inttypes.h>
34 
35 #include "libavutil/avassert.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/opt.h"
38 #include "libavutil/timestamp.h"
39 #include "avfilter.h"
40 #include "filters.h"
41 #include "internal.h"
42 
43 #define INPUT_MAIN 0
44 #define INPUT_CLEANSRC 1
45 
46 enum fieldmatch_parity {
47  FM_PARITY_AUTO = -1, ///< parity will be guessed
48  FM_PARITY_BOTTOM = 0, ///< bottom field first
49  FM_PARITY_TOP = 1, ///< top field first
50 };
51 
52 enum matching_mode {
53  MODE_PC,
54  MODE_PC_N,
55  MODE_PC_U,
56  MODE_PC_N_UB,
57  MODE_PCN,
58  MODE_PCN_UB,
59  NB_MODE
60 };
61 
62 enum comb_matching_mode {
63  COMBMATCH_NONE,
64  COMBMATCH_SC,
65  COMBMATCH_FULL,
66  NB_COMBMATCH
67 };
68 
69 enum comb_dbg {
70  COMBDBG_NONE,
71  COMBDBG_PCN,
72  COMBDBG_PCNUB,
73  NB_COMBDBG
74 };
75 
76 typedef struct FieldMatchContext {
77  const AVClass *class;
78 
79  AVFrame *prv, *src, *nxt; ///< main sliding window of 3 frames
80  AVFrame *prv2, *src2, *nxt2; ///< sliding window of the optional second stream
81  int got_frame[2]; ///< frame request flag for each input stream
82  int hsub, vsub; ///< chroma subsampling values
83  int bpc; ///< bytes per component
84  uint32_t eof; ///< bitmask for end of stream
85  int64_t lastscdiff;
86  int64_t lastn;
87 
88  /* options */
89  int order;
90  int ppsrc;
91  int mode; ///< matching_mode
92  int field;
93  int mchroma;
94  int y0, y1;
95  int64_t scthresh;
96  double scthresh_flt;
97  int combmatch; ///< comb_matching_mode
98  int combdbg;
99  int cthresh;
100  int chroma;
101  int blockx, blocky;
102  int combpel;
103 
104  /* misc buffers */
105  uint8_t *map_data[4];
106  int map_linesize[4];
107  uint8_t *cmask_data[4];
108  int cmask_linesize[4];
109  int *c_array;
110  int tpitchy, tpitchuv;
111  uint8_t *tbuffer;
112 } FieldMatchContext;
113 
114 #define OFFSET(x) offsetof(FieldMatchContext, x)
115 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
116 
117 static const AVOption fieldmatch_options[] = {
118  { "order", "specify the assumed field order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "order" },
119  { "auto", "auto detect parity", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "order" },
120  { "bff", "assume bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "order" },
121  { "tff", "assume top field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "order" },
122  { "mode", "set the matching mode or strategy to use", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_PC_N}, MODE_PC, NB_MODE-1, FLAGS, "mode" },
123  { "pc", "2-way match (p/c)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC}, INT_MIN, INT_MAX, FLAGS, "mode" },
124  { "pc_n", "2-way match + 3rd match on combed (p/c + n)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N}, INT_MIN, INT_MAX, FLAGS, "mode" },
125  { "pc_u", "2-way match + 3rd match (same order) on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_U}, INT_MIN, INT_MAX, FLAGS, "mode" },
126  { "pc_n_ub", "2-way match + 3rd match on combed + 4th/5th matches if still combed (p/c + n + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
127  { "pcn", "3-way match (p/c/n)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN}, INT_MIN, INT_MAX, FLAGS, "mode" },
128  { "pcn_ub", "3-way match + 4th/5th matches on combed (p/c/n + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
129  { "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
130  { "field", "set the field to match from", OFFSET(field), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "field" },
131  { "auto", "automatic (same value as 'order')", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "field" },
132  { "bottom", "bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field" },
133  { "top", "top field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "field" },
134  { "mchroma", "set whether or not chroma is included during the match comparisons", OFFSET(mchroma), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
135  { "y0", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y0), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
136  { "y1", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y1), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
137  { "scthresh", "set scene change detection threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl=12}, 0, 100, FLAGS },
138  { "combmatch", "set combmatching mode", OFFSET(combmatch), AV_OPT_TYPE_INT, {.i64=COMBMATCH_SC}, COMBMATCH_NONE, NB_COMBMATCH-1, FLAGS, "combmatching" },
139  { "none", "disable combmatching", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_NONE}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
140  { "sc", "enable combmatching only on scene change", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_SC}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
141  { "full", "enable combmatching all the time", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_FULL}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
142  { "combdbg", "enable comb debug", OFFSET(combdbg), AV_OPT_TYPE_INT, {.i64=COMBDBG_NONE}, COMBDBG_NONE, NB_COMBDBG-1, FLAGS, "dbglvl" },
143  { "none", "no forced calculation", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_NONE}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
144  { "pcn", "calculate p/c/n", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCN}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
145  { "pcnub", "calculate p/c/n/u/b", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCNUB}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
146  { "cthresh", "set the area combing threshold used for combed frame detection", OFFSET(cthresh), AV_OPT_TYPE_INT, {.i64= 9}, -1, 0xff, FLAGS },
147  { "chroma", "set whether or not chroma is considered in the combed frame decision", OFFSET(chroma), AV_OPT_TYPE_BOOL,{.i64= 0}, 0, 1, FLAGS },
148  { "blockx", "set the x-axis size of the window used during combed frame detection", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
149  { "blocky", "set the y-axis size of the window used during combed frame detection", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
150  { "combpel", "set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed", OFFSET(combpel), AV_OPT_TYPE_INT, {.i64=80}, 0, INT_MAX, FLAGS },
151  { NULL }
152 };
153 
154 AVFILTER_DEFINE_CLASS(fieldmatch);
155 
156 static int get_width(const FieldMatchContext *fm, const AVFrame *f, int plane)
157 {
158  return plane ? AV_CEIL_RSHIFT(f->width, fm->hsub) : f->width;
159 }
160 
161 static int get_height(const FieldMatchContext *fm, const AVFrame *f, int plane)
162 {
163  return plane ? AV_CEIL_RSHIFT(f->height, fm->vsub) : f->height;
164 }
165 
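/**
 * Sum of absolute luma differences between two frames; used for the scene
 * change detection and compared against the scaled scthresh value.
 */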
166 static int64_t luma_abs_diff(const AVFrame *f1, const AVFrame *f2)
167 {
168  int x, y;
169  const uint8_t *srcp1 = f1->data[0];
170  const uint8_t *srcp2 = f2->data[0];
171  const int src1_linesize = f1->linesize[0];
172  const int src2_linesize = f2->linesize[0];
173  const int width = f1->width;
174  const int height = f1->height;
175  int64_t acc = 0;
176 
177  for (y = 0; y < height; y++) {
178  for (x = 0; x < width; x++)
179  acc += abs(srcp1[x] - srcp2[x]);
180  srcp1 += src1_linesize;
181  srcp2 += src2_linesize;
182  }
183  return acc;
184 }
185 
186 static void fill_buf(uint8_t *data, int w, int h, int linesize, uint8_t v)
187 {
188  int y;
189 
190  for (y = 0; y < h; y++) {
191  memset(data, v, w);
192  data += linesize;
193  }
194 }
195 
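/**
 * Compute the combing score of a frame: build a per-pixel comb mask with a
 * vertical [1 -3 4 -3 1] filter (optionally folding the chroma planes into the
 * luma mask), then return the largest number of combed pixels found in any
 * blockx x blocky window. The caller compares this score against combpel.
 */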
196 static int calc_combed_score(const FieldMatchContext *fm, const AVFrame *src)
197 {
198  int x, y, plane, max_v = 0;
199  const int cthresh = fm->cthresh;
200  const int cthresh6 = cthresh * 6;
201 
202  for (plane = 0; plane < (fm->chroma ? 3 : 1); plane++) {
203  const uint8_t *srcp = src->data[plane];
204  const int src_linesize = src->linesize[plane];
205  const int width = get_width (fm, src, plane);
206  const int height = get_height(fm, src, plane);
207  uint8_t *cmkp = fm->cmask_data[plane];
208  const int cmk_linesize = fm->cmask_linesize[plane];
209 
210  if (cthresh < 0) {
211  fill_buf(cmkp, width, height, cmk_linesize, 0xff);
212  continue;
213  }
214  fill_buf(cmkp, width, height, cmk_linesize, 0);
215 
216  /* [1 -3 4 -3 1] vertical filter */
217 #define FILTER(xm2, xm1, xp1, xp2) \
218  abs( 4 * srcp[x] \
219  -3 * (srcp[x + (xm1)*src_linesize] + srcp[x + (xp1)*src_linesize]) \
220  + (srcp[x + (xm2)*src_linesize] + srcp[x + (xp2)*src_linesize])) > cthresh6
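/* For an ideal comb, same-parity lines share a value A and opposite-parity
 * lines a value B, so the filter response is 4*A + 2*A - 6*B = 6*(A - B);
 * comparing against cthresh6 = 6 * cthresh therefore thresholds the comb
 * amplitude |A - B| directly against cthresh. */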
221 
222  /* first line */
223  for (x = 0; x < width; x++) {
224  const int s1 = abs(srcp[x] - srcp[x + src_linesize]);
225  if (s1 > cthresh && FILTER(2, 1, 1, 2))
226  cmkp[x] = 0xff;
227  }
228  srcp += src_linesize;
229  cmkp += cmk_linesize;
230 
231  /* second line */
232  for (x = 0; x < width; x++) {
233  const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
234  const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
235  if (s1 > cthresh && s2 > cthresh && FILTER(2, -1, 1, 2))
236  cmkp[x] = 0xff;
237  }
238  srcp += src_linesize;
239  cmkp += cmk_linesize;
240 
241  /* all lines minus first two and last two */
242  for (y = 2; y < height-2; y++) {
243  for (x = 0; x < width; x++) {
244  const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
245  const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
246  if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, 2))
247  cmkp[x] = 0xff;
248  }
249  srcp += src_linesize;
250  cmkp += cmk_linesize;
251  }
252 
253  /* before-last line */
254  for (x = 0; x < width; x++) {
255  const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
256  const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
257  if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, -2))
258  cmkp[x] = 0xff;
259  }
260  srcp += src_linesize;
261  cmkp += cmk_linesize;
262 
263  /* last line */
264  for (x = 0; x < width; x++) {
265  const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
266  if (s1 > cthresh && FILTER(-2, -1, -1, -2))
267  cmkp[x] = 0xff;
268  }
269  }
270 
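/* Fold the chroma comb masks into the luma mask: each flagged chroma pixel
 * that has a flagged neighbour marks the two co-located luma columns on the
 * current and next luma rows, plus the field-paired row above or below
 * depending on the row parity (hence the uint16_t stores and the doubled
 * luma linesize); the 2x mapping effectively assumes 4:2:0-style chroma
 * subsampling. */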
271  if (fm->chroma) {
272  uint8_t *cmkp = fm->cmask_data[0];
273  uint8_t *cmkpU = fm->cmask_data[1];
274  uint8_t *cmkpV = fm->cmask_data[2];
275  const int width = AV_CEIL_RSHIFT(src->width, fm->hsub);
276  const int height = AV_CEIL_RSHIFT(src->height, fm->vsub);
277  const int cmk_linesize = fm->cmask_linesize[0] << 1;
278  const int cmk_linesizeUV = fm->cmask_linesize[2];
279  uint8_t *cmkpp = cmkp - (cmk_linesize>>1);
280  uint8_t *cmkpn = cmkp + (cmk_linesize>>1);
281  uint8_t *cmkpnn = cmkp + cmk_linesize;
282  for (y = 1; y < height - 1; y++) {
283  cmkpp += cmk_linesize;
284  cmkp += cmk_linesize;
285  cmkpn += cmk_linesize;
286  cmkpnn += cmk_linesize;
287  cmkpV += cmk_linesizeUV;
288  cmkpU += cmk_linesizeUV;
289  for (x = 1; x < width - 1; x++) {
290 #define HAS_FF_AROUND(p, lz) (p[(x)-1 - (lz)] == 0xff || p[(x) - (lz)] == 0xff || p[(x)+1 - (lz)] == 0xff || \
291  p[(x)-1 ] == 0xff || p[(x)+1 ] == 0xff || \
292  p[(x)-1 + (lz)] == 0xff || p[(x) + (lz)] == 0xff || p[(x)+1 + (lz)] == 0xff)
293  if ((cmkpV[x] == 0xff && HAS_FF_AROUND(cmkpV, cmk_linesizeUV)) ||
294  (cmkpU[x] == 0xff && HAS_FF_AROUND(cmkpU, cmk_linesizeUV))) {
295  ((uint16_t*)cmkp)[x] = 0xffff;
296  ((uint16_t*)cmkpn)[x] = 0xffff;
297  if (y&1) ((uint16_t*)cmkpp)[x] = 0xffff;
298  else ((uint16_t*)cmkpnn)[x] = 0xffff;
299  }
300  }
301  }
302  }
303 
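/* Block scoring: c_array keeps four counters per block cell so that every
 * combed pixel is accumulated into the four blockx x blocky windows that can
 * contain it (the base grid plus its half-block shifts in x, in y, and in
 * both). A pixel only counts when it and both vertical neighbours are
 * flagged; the maximum counter over all windows becomes the frame score. */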
304  {
305  const int blockx = fm->blockx;
306  const int blocky = fm->blocky;
307  const int xhalf = blockx/2;
308  const int yhalf = blocky/2;
309  const int cmk_linesize = fm->cmask_linesize[0];
310  const uint8_t *cmkp = fm->cmask_data[0] + cmk_linesize;
311  const int width = src->width;
312  const int height = src->height;
313  const int xblocks = ((width+xhalf)/blockx) + 1;
314  const int xblocks4 = xblocks<<2;
315  const int yblocks = ((height+yhalf)/blocky) + 1;
316  int *c_array = fm->c_array;
317  const int arraysize = (xblocks*yblocks)<<2;
318  int heighta = (height/(blocky/2))*(blocky/2);
319  const int widtha = (width /(blockx/2))*(blockx/2);
320  if (heighta == height)
321  heighta = height - yhalf;
322  memset(c_array, 0, arraysize * sizeof(*c_array));
323 
324 #define C_ARRAY_ADD(v) do { \
325  const int box1 = (x / blockx) * 4; \
326  const int box2 = ((x + xhalf) / blockx) * 4; \
327  c_array[temp1 + box1 ] += v; \
328  c_array[temp1 + box2 + 1] += v; \
329  c_array[temp2 + box1 + 2] += v; \
330  c_array[temp2 + box2 + 3] += v; \
331 } while (0)
332 
333 #define VERTICAL_HALF(y_start, y_end) do { \
334  for (y = y_start; y < y_end; y++) { \
335  const int temp1 = (y / blocky) * xblocks4; \
336  const int temp2 = ((y + yhalf) / blocky) * xblocks4; \
337  for (x = 0; x < width; x++) \
338  if (cmkp[x - cmk_linesize] == 0xff && \
339  cmkp[x ] == 0xff && \
340  cmkp[x + cmk_linesize] == 0xff) \
341  C_ARRAY_ADD(1); \
342  cmkp += cmk_linesize; \
343  } \
344 } while (0)
345 
346  VERTICAL_HALF(1, yhalf);
347 
348  for (y = yhalf; y < heighta; y += yhalf) {
349  const int temp1 = (y / blocky) * xblocks4;
350  const int temp2 = ((y + yhalf) / blocky) * xblocks4;
351 
352  for (x = 0; x < widtha; x += xhalf) {
353  const uint8_t *cmkp_tmp = cmkp + x;
354  int u, v, sum = 0;
355  for (u = 0; u < yhalf; u++) {
356  for (v = 0; v < xhalf; v++)
357  if (cmkp_tmp[v - cmk_linesize] == 0xff &&
358  cmkp_tmp[v ] == 0xff &&
359  cmkp_tmp[v + cmk_linesize] == 0xff)
360  sum++;
361  cmkp_tmp += cmk_linesize;
362  }
363  if (sum)
364  C_ARRAY_ADD(sum);
365  }
366 
367  for (x = widtha; x < width; x++) {
368  const uint8_t *cmkp_tmp = cmkp + x;
369  int u, sum = 0;
370  for (u = 0; u < yhalf; u++) {
371  if (cmkp_tmp[-cmk_linesize] == 0xff &&
372  cmkp_tmp[ 0] == 0xff &&
373  cmkp_tmp[ cmk_linesize] == 0xff)
374  sum++;
375  cmkp_tmp += cmk_linesize;
376  }
377  if (sum)
378  C_ARRAY_ADD(sum);
379  }
380 
381  cmkp += cmk_linesize * yhalf;
382  }
383 
384  VERTICAL_HALF(heighta, height - 1);
385 
386  for (x = 0; x < arraysize; x++)
387  if (c_array[x] > max_v)
388  max_v = c_array[x];
389  }
390  return max_v;
391 }
392 
393 // the secret is that tbuffer is an interlaced, offset subset of all the lines
394 static void build_abs_diff_mask(const uint8_t *prvp, int prv_linesize,
395  const uint8_t *nxtp, int nxt_linesize,
396  uint8_t *tbuffer, int tbuf_linesize,
397  int width, int height)
398 {
399  int y, x;
400 
401  prvp -= prv_linesize;
402  nxtp -= nxt_linesize;
403  for (y = 0; y < height; y++) {
404  for (x = 0; x < width; x++)
405  tbuffer[x] = FFABS(prvp[x] - nxtp[x]);
406  prvp += prv_linesize;
407  nxtp += nxt_linesize;
408  tbuffer += tbuf_linesize;
409  }
410 }
411 
412 /**
413  * Build a map over which pixels differ a lot/a little
414  */
415 static void build_diff_map(FieldMatchContext *fm,
416  const uint8_t *prvp, int prv_linesize,
417  const uint8_t *nxtp, int nxt_linesize,
418  uint8_t *dstp, int dst_linesize, int height,
419  int width, int plane)
420 {
421  int x, y, u, diff, count;
422  int tpitch = plane ? fm->tpitchuv : fm->tpitchy;
423  const uint8_t *dp = fm->tbuffer + tpitch;
424 
425  build_abs_diff_mask(prvp, prv_linesize, nxtp, nxt_linesize,
426  fm->tbuffer, tpitch, width, height>>1);
427 
428  for (y = 2; y < height - 2; y += 2) {
429  for (x = 1; x < width - 1; x++) {
430  diff = dp[x];
431  if (diff > 3) {
432  for (count = 0, u = x-1; u < x+2 && count < 2; u++) {
433  count += dp[u-tpitch] > 3;
434  count += dp[u ] > 3;
435  count += dp[u+tpitch] > 3;
436  }
437  if (count > 1) {
438  dstp[x] = 1;
439  if (diff > 19) {
440  int upper = 0, lower = 0;
441  for (count = 0, u = x-1; u < x+2 && count < 6; u++) {
442  if (dp[u-tpitch] > 19) { count++; upper = 1; }
443  if (dp[u ] > 19) count++;
444  if (dp[u+tpitch] > 19) { count++; lower = 1; }
445  }
446  if (count > 3) {
447  if (upper && lower) {
448  dstp[x] |= 1<<1;
449  } else {
450  int upper2 = 0, lower2 = 0;
451  for (u = FFMAX(x-4,0); u < FFMIN(x+5,width); u++) {
452  if (y != 2 && dp[u-2*tpitch] > 19) upper2 = 1;
453  if ( dp[u- tpitch] > 19) upper = 1;
454  if ( dp[u+ tpitch] > 19) lower = 1;
455  if (y != height-4 && dp[u+2*tpitch] > 19) lower2 = 1;
456  }
457  if ((upper && (lower || upper2)) ||
458  (lower && (upper || lower2)))
459  dstp[x] |= 1<<1;
460  else if (count > 5)
461  dstp[x] |= 1<<2;
462  }
463  }
464  }
465  }
466  }
467  }
468  dp += tpitch;
469  dstp += dst_linesize;
470  }
471 }
472 
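/* Field match candidates, following the TFM/VFM naming: mC keeps both fields
 * of the current frame, mP/mN replace the matched field with that field taken
 * from the previous/next frame, and mB/mU do the same for the other field
 * (mB from the previous frame, mU from the next); see get_field_base() and
 * create_weave_frame() for the exact assignment. */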
473 enum { mP, mC, mN, mB, mU };
474 
475 static int get_field_base(int match, int field)
476 {
477  return match < 3 ? 2 - field : 1 + field;
478 }
479 
480 static AVFrame *select_frame(FieldMatchContext *fm, int match)
481 {
482  if (match == mP || match == mB) return fm->prv;
483  else if (match == mN || match == mU) return fm->nxt;
484  else /* match == mC */ return fm->src;
485 }
486 
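/**
 * Pick the better of two match candidates: build a map of strongly differing
 * pixels between the fields the two candidates would contribute, accumulate
 * each candidate's deviation from the current frame's field over those areas
 * (split into small- and large-difference buckets), and choose the candidate
 * with the lower accumulated deviation according to a set of ratio
 * heuristics.
 */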
487 static int compare_fields(FieldMatchContext *fm, int match1, int match2, int field)
488 {
489  int plane, ret;
490  uint64_t accumPc = 0, accumPm = 0, accumPml = 0;
491  uint64_t accumNc = 0, accumNm = 0, accumNml = 0;
492  int norm1, norm2, mtn1, mtn2;
493  float c1, c2, mr;
494  const AVFrame *src = fm->src;
495 
496  for (plane = 0; plane < (fm->mchroma ? 3 : 1); plane++) {
497  int x, y, temp1, temp2, fbase;
498  const AVFrame *prev, *next;
499  uint8_t *mapp = fm->map_data[plane];
500  int map_linesize = fm->map_linesize[plane];
501  const uint8_t *srcp = src->data[plane];
502  const int src_linesize = src->linesize[plane];
503  const int srcf_linesize = src_linesize << 1;
504  int prv_linesize, nxt_linesize;
505  int prvf_linesize, nxtf_linesize;
506  const int width = get_width (fm, src, plane);
507  const int height = get_height(fm, src, plane);
508  const int y0a = fm->y0 >> (plane ? fm->vsub : 0);
509  const int y1a = fm->y1 >> (plane ? fm->vsub : 0);
510  const int startx = (plane == 0 ? 8 : 8 >> fm->hsub);
511  const int stopx = width - startx;
512  const uint8_t *srcpf, *srcf, *srcnf;
513  const uint8_t *prvpf, *prvnf, *nxtpf, *nxtnf;
514 
515  fill_buf(mapp, width, height, map_linesize, 0);
516 
517  /* match1 */
518  fbase = get_field_base(match1, field);
519  srcf = srcp + (fbase + 1) * src_linesize;
520  srcpf = srcf - srcf_linesize;
521  srcnf = srcf + srcf_linesize;
522  mapp = mapp + fbase * map_linesize;
523  prev = select_frame(fm, match1);
524  prv_linesize = prev->linesize[plane];
525  prvf_linesize = prv_linesize << 1;
526  prvpf = prev->data[plane] + fbase * prv_linesize; // previous frame, previous field
527  prvnf = prvpf + prvf_linesize; // previous frame, next field
528 
529  /* match2 */
530  fbase = get_field_base(match2, field);
531  next = select_frame(fm, match2);
532  nxt_linesize = next->linesize[plane];
533  nxtf_linesize = nxt_linesize << 1;
534  nxtpf = next->data[plane] + fbase * nxt_linesize; // next frame, previous field
535  nxtnf = nxtpf + nxtf_linesize; // next frame, next field
536 
537  map_linesize <<= 1;
538  if ((match1 >= 3 && field == 1) || (match1 < 3 && field != 1))
539  build_diff_map(fm, prvpf, prvf_linesize, nxtpf, nxtf_linesize,
540  mapp, map_linesize, height, width, plane);
541  else
542  build_diff_map(fm, prvnf, prvf_linesize, nxtnf, nxtf_linesize,
543  mapp + map_linesize, map_linesize, height, width, plane);
544 
545  for (y = 2; y < height - 2; y += 2) {
546  if (y0a == y1a || y < y0a || y > y1a) {
547  for (x = startx; x < stopx; x++) {
548  if (mapp[x] > 0 || mapp[x + map_linesize] > 0) {
549  temp1 = srcpf[x] + (srcf[x] << 2) + srcnf[x]; // [1 4 1]
550 
551  temp2 = abs(3 * (prvpf[x] + prvnf[x]) - temp1);
552  if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
553  accumPc += temp2;
554  if (temp2 > 42) {
555  if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
556  accumPm += temp2;
557  if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
558  accumPml += temp2;
559  }
560 
561  temp2 = abs(3 * (nxtpf[x] + nxtnf[x]) - temp1);
562  if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
563  accumNc += temp2;
564  if (temp2 > 42) {
565  if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
566  accumNm += temp2;
567  if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
568  accumNml += temp2;
569  }
570  }
571  }
572  }
573  prvpf += prvf_linesize;
574  prvnf += prvf_linesize;
575  srcpf += srcf_linesize;
576  srcf += srcf_linesize;
577  srcnf += srcf_linesize;
578  nxtpf += nxtf_linesize;
579  nxtnf += nxtf_linesize;
580  mapp += map_linesize;
581  }
582  }
583 
584  if (accumPm < 500 && accumNm < 500 && (accumPml >= 500 || accumNml >= 500) &&
585  FFMAX(accumPml,accumNml) > 3*FFMIN(accumPml,accumNml)) {
586  accumPm = accumPml;
587  accumNm = accumNml;
588  }
589 
590  norm1 = (int)((accumPc / 6.0f) + 0.5f);
591  norm2 = (int)((accumNc / 6.0f) + 0.5f);
592  mtn1 = (int)((accumPm / 6.0f) + 0.5f);
593  mtn2 = (int)((accumNm / 6.0f) + 0.5f);
594  c1 = ((float)FFMAX(norm1,norm2)) / ((float)FFMAX(FFMIN(norm1,norm2),1));
595  c2 = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMIN(mtn1, mtn2), 1));
596  mr = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMAX(norm1,norm2),1));
597  if (((mtn1 >= 500 || mtn2 >= 500) && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ||
598  ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
599  ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
600  ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
601  ret = mtn1 > mtn2 ? match2 : match1;
602  else if (mr > 0.005 && FFMAX(mtn1, mtn2) > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1))
603  ret = mtn1 > mtn2 ? match2 : match1;
604  else
605  ret = norm1 > norm2 ? match2 : match1;
606  return ret;
607 }
608 
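/**
 * Copy one field (every second line, starting at the given parity) from src
 * into dst, leaving the other field of dst untouched; create_weave_frame()
 * combines two such copies to weave a frame out of two sources.
 */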
609 static void copy_fields(const FieldMatchContext *fm, AVFrame *dst,
610  const AVFrame *src, int field)
611 {
612  int plane;
613  for (plane = 0; plane < 4 && src->data[plane] && src->linesize[plane]; plane++) {
614  const int plane_h = get_height(fm, src, plane);
615  const int nb_copy_fields = (plane_h >> 1) + (field ? 0 : (plane_h & 1));
616  av_image_copy_plane(dst->data[plane] + field*dst->linesize[plane], dst->linesize[plane] << 1,
617  src->data[plane] + field*src->linesize[plane], src->linesize[plane] << 1,
618  get_width(fm, src, plane) * fm->bpc, nb_copy_fields);
619  }
620 }
621 
622 static AVFrame *create_weave_frame(AVFilterContext *ctx, int match, int field,
623  const AVFrame *prv, AVFrame *src, const AVFrame *nxt)
624 {
625  AVFrame *dst;
626  FieldMatchContext *fm = ctx->priv;
627 
628  if (match == mC) {
629  dst = av_frame_clone(src);
630  } else {
631  AVFilterLink *outlink = ctx->outputs[0];
632 
633  dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
634  if (!dst)
635  return NULL;
636  av_frame_copy_props(dst, src);
637 
638  switch (match) {
639  case mP: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, prv, field); break;
640  case mN: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, nxt, field); break;
641  case mB: copy_fields(fm, dst, src, field); copy_fields(fm, dst, prv, 1-field); break;
642  case mU: copy_fields(fm, dst, src, field); copy_fields(fm, dst, nxt, 1-field); break;
643  default: av_assert0(0);
644  }
645  }
646  return dst;
647 }
648 
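/**
 * Re-check a match against an alternative: lazily weave and score both
 * candidates (frames and scores are cached in gen_frames/combs) and switch to
 * m2 only if it is clearly less combed than m1 while staying below the
 * combpel threshold.
 */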
649 static int checkmm(AVFilterContext *ctx, int *combs, int m1, int m2,
650  AVFrame **gen_frames, int field)
651 {
652  const FieldMatchContext *fm = ctx->priv;
653 
654 #define LOAD_COMB(mid) do { \
655  if (combs[mid] < 0) { \
656  if (!gen_frames[mid]) \
657  gen_frames[mid] = create_weave_frame(ctx, mid, field, \
658  fm->prv, fm->src, fm->nxt); \
659  combs[mid] = calc_combed_score(fm, gen_frames[mid]); \
660  } \
661 } while (0)
662 
663  LOAD_COMB(m1);
664  LOAD_COMB(m2);
665 
666  if ((combs[m2] * 3 < combs[m1] || (combs[m2] * 2 < combs[m1] && combs[m1] > fm->combpel)) &&
667  abs(combs[m2] - combs[m1]) >= 30 && combs[m2] < fm->combpel)
668  return m2;
669  else
670  return m1;
671 }
672 
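/* Candidate remapping tables selected by (field ^ order) in filter_frame():
 * when the matched field does not correspond to the assumed field order, the
 * previous/next (and u/b) candidates swap roles; "fxo" presumably stands for
 * "field xor order". */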
673 static const int fxo0m[] = { mP, mC, mN, mB, mU };
674 static const int fxo1m[] = { mN, mC, mP, mU, mB };
675 
676 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
677 {
678  AVFilterContext *ctx = inlink->dst;
679  AVFilterLink *outlink = ctx->outputs[0];
680  FieldMatchContext *fm = ctx->priv;
681  int combs[] = { -1, -1, -1, -1, -1 };
682  int order, field, i, match, sc = 0;
683  const int *fxo;
684  AVFrame *gen_frames[] = { NULL, NULL, NULL, NULL, NULL };
685  AVFrame *dst;
686 
687  /* update frames queue(s) */
688 #define SLIDING_FRAME_WINDOW(prv, src, nxt) do { \
689  if (prv != src) /* 2nd loop exception (1st has prv==src and we don't want to lose src) */ \
690  av_frame_free(&prv); \
691  prv = src; \
692  src = nxt; \
693  if (in) \
694  nxt = in; \
695  if (!prv) \
696  prv = src; \
697  if (!prv) /* received only one frame at that point */ \
698  return 0; \
699  av_assert0(prv && src && nxt); \
700 } while (0)
701  if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
702  av_assert0(fm->got_frame[INPUT_MAIN] == 0);
703  SLIDING_FRAME_WINDOW(fm->prv, fm->src, fm->nxt);
704  fm->got_frame[INPUT_MAIN] = 1;
705  } else {
706  av_assert0(fm->got_frame[INPUT_CLEANSRC] == 0);
707  SLIDING_FRAME_WINDOW(fm->prv2, fm->src2, fm->nxt2);
708  fm->got_frame[INPUT_CLEANSRC] = 1;
709  }
710  if (!fm->got_frame[INPUT_MAIN] || (fm->ppsrc && !fm->got_frame[INPUT_CLEANSRC]))
711  return 0;
712  fm->got_frame[INPUT_MAIN] = fm->got_frame[INPUT_CLEANSRC] = 0;
713  in = fm->src;
714 
715  /* parity */
716  order = fm->order != FM_PARITY_AUTO ? fm->order : (in->interlaced_frame ? in->top_field_first : 1);
717  field = fm->field != FM_PARITY_AUTO ? fm->field : order;
718  av_assert0(order == 0 || order == 1 || field == 0 || field == 1);
719  fxo = field ^ order ? fxo1m : fxo0m;
720 
721  /* debug mode: we generate all the field combinations and their associated
722  * combed score. XXX: inject as frame metadata? */
723  if (fm->combdbg) {
724  for (i = 0; i < FF_ARRAY_ELEMS(combs); i++) {
725  if (i > mN && fm->combdbg == COMBDBG_PCN)
726  break;
727  gen_frames[i] = create_weave_frame(ctx, i, field, fm->prv, fm->src, fm->nxt);
728  if (!gen_frames[i])
729  return AVERROR(ENOMEM);
730  combs[i] = calc_combed_score(fm, gen_frames[i]);
731  }
732  av_log(ctx, AV_LOG_INFO, "COMBS: %3d %3d %3d %3d %3d\n",
733  combs[0], combs[1], combs[2], combs[3], combs[4]);
734  } else {
735  gen_frames[mC] = av_frame_clone(fm->src);
736  if (!gen_frames[mC])
737  return AVERROR(ENOMEM);
738  }
739 
740  /* p/c selection and optional 3-way p/c/n matches */
741  match = compare_fields(fm, fxo[mC], fxo[mP], field);
742  if (fm->mode == MODE_PCN || fm->mode == MODE_PCN_UB)
743  match = compare_fields(fm, match, fxo[mN], field);
744 
745  /* scene change check */
746  if (fm->combmatch == COMBMATCH_SC) {
747  if (fm->lastn == outlink->frame_count_in - 1) {
748  if (fm->lastscdiff > fm->scthresh)
749  sc = 1;
750  } else if (luma_abs_diff(fm->prv, fm->src) > fm->scthresh) {
751  sc = 1;
752  }
753 
754  if (!sc) {
755  fm->lastn = outlink->frame_count_in;
756  fm->lastscdiff = luma_abs_diff(fm->src, fm->nxt);
757  sc = fm->lastscdiff > fm->scthresh;
758  }
759  }
760 
761  if (fm->combmatch == COMBMATCH_FULL || (fm->combmatch == COMBMATCH_SC && sc)) {
762  switch (fm->mode) {
763  /* 2-way p/c matches */
764  case MODE_PC:
765  match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
766  break;
767  case MODE_PC_N:
768  match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
769  break;
770  case MODE_PC_U:
771  match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
772  break;
773  case MODE_PC_N_UB:
774  match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
775  match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
776  match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
777  break;
778  /* 3-way p/c/n matches */
779  case MODE_PCN:
780  match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
781  break;
782  case MODE_PCN_UB:
783  match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
784  match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
785  break;
786  default:
787  av_assert0(0);
788  }
789  }
790 
791  /* get output frame and drop the others */
792  if (fm->ppsrc) {
793  /* field matching was based on a filtered/post-processed input, so we now
794  * pick the untouched fields from the clean source */
795  dst = create_weave_frame(ctx, match, field, fm->prv2, fm->src2, fm->nxt2);
796  } else {
797  if (!gen_frames[match]) { // XXX: is that possible?
798  dst = create_weave_frame(ctx, match, field, fm->prv, fm->src, fm->nxt);
799  } else {
800  dst = gen_frames[match];
801  gen_frames[match] = NULL;
802  }
803  }
804  if (!dst)
805  return AVERROR(ENOMEM);
806  for (i = 0; i < FF_ARRAY_ELEMS(gen_frames); i++)
807  av_frame_free(&gen_frames[i]);
808 
809  /* mark frames we are unable to match properly as interlaced so a proper
810  * de-interlacer can take over */
811  dst->interlaced_frame = combs[match] >= fm->combpel;
812  if (dst->interlaced_frame) {
813  av_log(ctx, AV_LOG_WARNING, "Frame #%"PRId64" at %s is still interlaced\n",
814  outlink->frame_count_in, av_ts2timestr(in->pts, &inlink->time_base));
815  dst->top_field_first = field;
816  }
817 
818  av_log(ctx, AV_LOG_DEBUG, "SC:%d | COMBS: %3d %3d %3d %3d %3d (combpel=%d)"
819  " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4],
820  fm->combpel, match, dst->interlaced_frame ? "YES" : "NO");
821 
822  return ff_filter_frame(outlink, dst);
823 }
824 
825 static int activate(AVFilterContext *ctx)
826 {
827  FieldMatchContext *fm = ctx->priv;
828  AVFrame *frame = NULL;
829  int ret = 0, status;
830  int64_t pts;
831 
832  if ((fm->got_frame[INPUT_MAIN] == 0) &&
833  (ret = ff_inlink_consume_frame(ctx->inputs[INPUT_MAIN], &frame)) > 0) {
834  ret = filter_frame(ctx->inputs[INPUT_MAIN], frame);
835  if (ret < 0)
836  return ret;
837  }
838  if (ret < 0)
839  return ret;
840  if (fm->ppsrc &&
841  (fm->got_frame[INPUT_CLEANSRC] == 0) &&
842  (ret = ff_inlink_consume_frame(ctx->inputs[INPUT_CLEANSRC], &frame)) > 0) {
843  ret = filter_frame(ctx->inputs[INPUT_CLEANSRC], frame);
844  if (ret < 0)
845  return ret;
846  }
847  if (ret < 0) {
848  return ret;
849  } else if (ff_inlink_acknowledge_status(ctx->inputs[INPUT_MAIN], &status, &pts)) {
850  if (status == AVERROR_EOF) { // flushing
851  fm->eof |= 1 << INPUT_MAIN;
852  ret = filter_frame(ctx->inputs[INPUT_MAIN], NULL);
853  }
854  ff_outlink_set_status(ctx->outputs[0], status, pts);
855  return ret;
856  } else if (fm->ppsrc && ff_inlink_acknowledge_status(ctx->inputs[INPUT_CLEANSRC], &status, &pts)) {
857  if (status == AVERROR_EOF) { // flushing
858  fm->eof |= 1 << INPUT_CLEANSRC;
859  ret = filter_frame(ctx->inputs[INPUT_CLEANSRC], NULL);
860  }
861  ff_outlink_set_status(ctx->outputs[0], status, pts);
862  return ret;
863  } else {
864  if (ff_outlink_frame_wanted(ctx->outputs[0])) {
865  if (fm->got_frame[INPUT_MAIN] == 0)
866  ff_inlink_request_frame(ctx->inputs[INPUT_MAIN]);
867  if (fm->ppsrc && (fm->got_frame[INPUT_CLEANSRC] == 0))
868  ff_inlink_request_frame(ctx->inputs[INPUT_CLEANSRC]);
869  }
870  return 0;
871  }
872 }
873 
874 static int query_formats(AVFilterContext *ctx)
875 {
876  FieldMatchContext *fm = ctx->priv;
877 
878  static const enum AVPixelFormat pix_fmts[] = {
879  AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
880  AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
881  AV_PIX_FMT_NONE
882  };
883  static const enum AVPixelFormat unproc_pix_fmts[] = {
884  AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
885  AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
886  AV_PIX_FMT_YUVJ411P,
887  AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P,
888  AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
889  AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV420P9,
890  AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10,
891  AV_PIX_FMT_YUV440P10,
892  AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
893  AV_PIX_FMT_YUV440P12,
894  AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
895  AV_PIX_FMT_YUV444P16, AV_PIX_FMT_YUV422P16,
896  AV_PIX_FMT_YUV420P16,
897  AV_PIX_FMT_NONE
898  };
899  int ret;
900 
901  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
902  if (!fmts_list)
903  return AVERROR(ENOMEM);
904  if (!fm->ppsrc) {
905  return ff_set_common_formats(ctx, fmts_list);
906  }
907 
908  if ((ret = ff_formats_ref(fmts_list, &ctx->inputs[INPUT_MAIN]->out_formats)) < 0)
909  return ret;
910  fmts_list = ff_make_format_list(unproc_pix_fmts);
911  if (!fmts_list)
912  return AVERROR(ENOMEM);
913  if ((ret = ff_formats_ref(fmts_list, &ctx->outputs[0]->in_formats)) < 0)
914  return ret;
915  if ((ret = ff_formats_ref(fmts_list, &ctx->inputs[INPUT_CLEANSRC]->out_formats)) < 0)
916  return ret;
917  return 0;
918 }
919 
920 static int config_input(AVFilterLink *inlink)
921 {
922  int ret;
923  AVFilterContext *ctx = inlink->dst;
924  FieldMatchContext *fm = ctx->priv;
925  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
926  const int w = inlink->w;
927  const int h = inlink->h;
928 
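/* convert the scene change threshold from a percentage to an absolute sum of
 * absolute luma differences: 255 * w * h is the largest value that
 * luma_abs_diff() can return for 8-bit input */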
929  fm->scthresh = (int64_t)((w * h * 255.0 * fm->scthresh_flt) / 100.0);
930 
931  if ((ret = av_image_alloc(fm->map_data, fm->map_linesize, w, h, inlink->format, 32)) < 0 ||
932  (ret = av_image_alloc(fm->cmask_data, fm->cmask_linesize, w, h, inlink->format, 32)) < 0)
933  return ret;
934 
935  fm->hsub = pix_desc->log2_chroma_w;
936  fm->vsub = pix_desc->log2_chroma_h;
937 
938  fm->tpitchy = FFALIGN(w, 16);
939  fm->tpitchuv = FFALIGN(w >> 1, 16);
940 
941  fm->tbuffer = av_calloc((h/2 + 4) * fm->tpitchy, sizeof(*fm->tbuffer));
942  fm->c_array = av_malloc((((w + fm->blockx/2)/fm->blockx)+1) *
943  (((h + fm->blocky/2)/fm->blocky)+1) *
944  4 * sizeof(*fm->c_array));
945  if (!fm->tbuffer || !fm->c_array)
946  return AVERROR(ENOMEM);
947 
948  return 0;
949 }
950 
951 static av_cold int fieldmatch_init(AVFilterContext *ctx)
952 {
953  const FieldMatchContext *fm = ctx->priv;
954  AVFilterPad pad = {
955  .name = av_strdup("main"),
956  .type = AVMEDIA_TYPE_VIDEO,
957  .config_props = config_input,
958  };
959  int ret;
960 
961  if (!pad.name)
962  return AVERROR(ENOMEM);
963  if ((ret = ff_insert_inpad(ctx, INPUT_MAIN, &pad)) < 0) {
964  av_freep(&pad.name);
965  return ret;
966  }
967 
968  if (fm->ppsrc) {
969  pad.name = av_strdup("clean_src");
970  pad.config_props = NULL;
971  if (!pad.name)
972  return AVERROR(ENOMEM);
973  if ((ret = ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad)) < 0) {
974  av_freep(&pad.name);
975  return ret;
976  }
977  }
978 
979  if ((fm->blockx & (fm->blockx - 1)) ||
980  (fm->blocky & (fm->blocky - 1))) {
981  av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
982  return AVERROR(EINVAL);
983  }
984 
985  if (fm->combpel > fm->blockx * fm->blocky) {
986  av_log(ctx, AV_LOG_ERROR, "Combed pixel should not be larger than blockx x blocky\n");
987  return AVERROR(EINVAL);
988  }
989 
990  return 0;
991 }
992 
993 static av_cold void fieldmatch_uninit(AVFilterContext *ctx)
994 {
995  int i;
996  FieldMatchContext *fm = ctx->priv;
997 
998  if (fm->prv != fm->src)
999  av_frame_free(&fm->prv);
1000  if (fm->nxt != fm->src)
1001  av_frame_free(&fm->nxt);
1002  if (fm->prv2 != fm->src2)
1003  av_frame_free(&fm->prv2);
1004  if (fm->nxt2 != fm->src2)
1005  av_frame_free(&fm->nxt2);
1006  av_frame_free(&fm->src);
1007  av_frame_free(&fm->src2);
1008  av_freep(&fm->map_data[0]);
1009  av_freep(&fm->cmask_data[0]);
1010  av_freep(&fm->tbuffer);
1011  av_freep(&fm->c_array);
1012  for (i = 0; i < ctx->nb_inputs; i++)
1013  av_freep(&ctx->input_pads[i].name);
1014 }
1015 
1016 static int config_output(AVFilterLink *outlink)
1017 {
1018  AVFilterContext *ctx = outlink->src;
1019  FieldMatchContext *fm = ctx->priv;
1020  const AVFilterLink *inlink =
1021  ctx->inputs[fm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
1022  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
1023
1024  fm->bpc = (desc->comp[0].depth + 7) / 8;
1025  outlink->time_base = inlink->time_base;
1026  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
1027  outlink->frame_rate = inlink->frame_rate;
1028  outlink->w = inlink->w;
1029  outlink->h = inlink->h;
1030  return 0;
1031 }
1032 
1033 static const AVFilterPad fieldmatch_outputs[] = {
1034  {
1035  .name = "default",
1036  .type = AVMEDIA_TYPE_VIDEO,
1037  .config_props = config_output,
1038  },
1039  { NULL }
1040 };
1041 
1042 AVFilter ff_vf_fieldmatch = {
1043  .name = "fieldmatch",
1044  .description = NULL_IF_CONFIG_SMALL("Field matching for inverse telecine."),
1045  .query_formats = query_formats,
1046  .priv_size = sizeof(FieldMatchContext),
1047  .init = fieldmatch_init,
1048  .activate = activate,
1049  .uninit = fieldmatch_uninit,
1050  .inputs = NULL,
1051  .outputs = fieldmatch_outputs,
1052  .priv_class = &fieldmatch_class,
1053  .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
1054 };