FFmpeg
vf_dedot.c
/*
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

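/*
 * Filter state. "m" selects which passes run (bit 0: dot-crawl reduction on
 * the luma plane, bit 1: rainbow reduction on the chroma planes). lt, tl, tc
 * and ct are user-supplied thresholds in the 0..1 range; config_output()
 * scales them to the integer thresholds luma2d, lumaT, chromaT1 and chromaT2
 * for the negotiated bit depth. frames[] is a sliding window of five input
 * frames, with frames[2] being the frame that actually gets filtered.
 */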
typedef struct DedotContext {
    const AVClass *class;
    int m;
    float lt;
    float tl;
    float tc;
    float ct;

    const AVPixFmtDescriptor *desc;
    int depth;
    int max;
    int luma2d;
    int lumaT;
    int chromaT1;
    int chromaT2;

    int eof;
    int eof_frames;
    int nb_planes;
    int planewidth[4];
    int planeheight[4];

    AVFrame *frames[5];

    int (*dedotcrawl)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
    int (*derainbow)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} DedotContext;

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts[] = {
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_NONE
    };
    return ff_set_common_formats_from_list(ctx, pixel_fmts);
}

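/*
 * Dot-crawl (cross-luminance) pass, instantiated below for 8- and 16-bit
 * samples. For each interior luma pixel it first skips locations that are
 * already spatially smooth (both the vertical and horizontal second
 * differences stay within luma2d). A remaining pixel is treated as dot crawl
 * when it is temporally stable two frames away (|cur - p0|, |cur - p4| and
 * |p1 - p3| all within lumaT); it is then averaged with whichever of the two
 * neighbouring frames (p1 or p3) is closer in value.
 */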
#define DEFINE_DEDOTCRAWL(name, type, div)                                  \
static int dedotcrawl##name(AVFilterContext *ctx, void *arg,                \
                            int jobnr, int nb_jobs)                         \
{                                                                           \
    DedotContext *s = ctx->priv;                                            \
    AVFrame *out = arg;                                                     \
    int src_linesize = s->frames[2]->linesize[0] / div;                     \
    int dst_linesize = out->linesize[0] / div;                              \
    int p0_linesize = s->frames[0]->linesize[0] / div;                      \
    int p1_linesize = s->frames[1]->linesize[0] / div;                      \
    int p3_linesize = s->frames[3]->linesize[0] / div;                      \
    int p4_linesize = s->frames[4]->linesize[0] / div;                      \
    const int h = s->planeheight[0];                                        \
    int slice_start = (h * jobnr) / nb_jobs;                                \
    int slice_end = (h * (jobnr+1)) / nb_jobs;                              \
    type *p0 = (type *)s->frames[0]->data[0];                               \
    type *p1 = (type *)s->frames[1]->data[0];                               \
    type *p3 = (type *)s->frames[3]->data[0];                               \
    type *p4 = (type *)s->frames[4]->data[0];                               \
    type *src = (type *)s->frames[2]->data[0];                              \
    type *dst = (type *)out->data[0];                                       \
    const int luma2d = s->luma2d;                                           \
    const int lumaT = s->lumaT;                                             \
                                                                            \
    if (!slice_start) {                                                     \
        slice_start++;                                                      \
    }                                                                       \
    p0 += p0_linesize * slice_start;                                        \
    p1 += p1_linesize * slice_start;                                        \
    p3 += p3_linesize * slice_start;                                        \
    p4 += p4_linesize * slice_start;                                        \
    src += src_linesize * slice_start;                                      \
    dst += dst_linesize * slice_start;                                      \
    if (slice_end == h) {                                                   \
        slice_end--;                                                        \
    }                                                                       \
    for (int y = slice_start; y < slice_end; y++) {                         \
        for (int x = 1; x < s->planewidth[0] - 1; x++) {                    \
            int above = src[x - src_linesize];                              \
            int bellow = src[x + src_linesize];                             \
            int cur = src[x];                                               \
            int left = src[x - 1];                                          \
            int right = src[x + 1];                                         \
                                                                            \
            if (FFABS(above + bellow - 2 * cur) <= luma2d &&                \
                FFABS(left + right - 2 * cur) <= luma2d)                    \
                continue;                                                   \
                                                                            \
            if (FFABS(cur - p0[x]) <= lumaT &&                              \
                FFABS(cur - p4[x]) <= lumaT &&                              \
                FFABS(p1[x] - p3[x]) <= lumaT) {                            \
                int diff1 = FFABS(cur - p1[x]);                             \
                int diff2 = FFABS(cur - p3[x]);                             \
                                                                            \
                if (diff1 < diff2)                                          \
                    dst[x] = (src[x] + p1[x] + 1) >> 1;                     \
                else                                                        \
                    dst[x] = (src[x] + p3[x] + 1) >> 1;                     \
            }                                                               \
        }                                                                   \
                                                                            \
        dst += dst_linesize;                                                \
        src += src_linesize;                                                \
        p0 += p0_linesize;                                                  \
        p1 += p1_linesize;                                                  \
        p3 += p3_linesize;                                                  \
        p4 += p4_linesize;                                                  \
    }                                                                       \
    return 0;                                                               \
}

DEFINE_DEDOTCRAWL(8, uint8_t, 1)
DEFINE_DEDOTCRAWL(16, uint16_t, 2)

typedef struct ThreadData {
    AVFrame *out;
    int plane;
} ThreadData;

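/*
 * Rainbow (cross-color) pass, run once per chroma plane via ThreadData.
 * A chroma sample is treated as rainbow artifacting when it is stable with
 * respect to the frames two steps away (|cur - p0|, |cur - p4| and |p1 - p3|
 * within chromaT1) but deviates from both direct temporal neighbours by more
 * than chromaT2; such samples are averaged with whichever of p1 or p3 is
 * closer in value.
 */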
#define DEFINE_DERAINBOW(name, type, div)                                   \
static int derainbow##name(AVFilterContext *ctx, void *arg,                 \
                           int jobnr, int nb_jobs)                          \
{                                                                           \
    DedotContext *s = ctx->priv;                                            \
    ThreadData *td = arg;                                                   \
    AVFrame *out = td->out;                                                 \
    const int plane = td->plane;                                            \
    const int h = s->planeheight[plane];                                    \
    int slice_start = (h * jobnr) / nb_jobs;                                \
    int slice_end = (h * (jobnr+1)) / nb_jobs;                              \
    int src_linesize = s->frames[2]->linesize[plane] / div;                 \
    int dst_linesize = out->linesize[plane] / div;                          \
    int p0_linesize = s->frames[0]->linesize[plane] / div;                  \
    int p1_linesize = s->frames[1]->linesize[plane] / div;                  \
    int p3_linesize = s->frames[3]->linesize[plane] / div;                  \
    int p4_linesize = s->frames[4]->linesize[plane] / div;                  \
    type *p0 = (type *)s->frames[0]->data[plane];                           \
    type *p1 = (type *)s->frames[1]->data[plane];                           \
    type *p3 = (type *)s->frames[3]->data[plane];                           \
    type *p4 = (type *)s->frames[4]->data[plane];                           \
    type *src = (type *)s->frames[2]->data[plane];                          \
    type *dst = (type *)out->data[plane];                                   \
    const int chromaT1 = s->chromaT1;                                       \
    const int chromaT2 = s->chromaT2;                                       \
                                                                            \
    p0 += slice_start * p0_linesize;                                        \
    p1 += slice_start * p1_linesize;                                        \
    p3 += slice_start * p3_linesize;                                        \
    p4 += slice_start * p4_linesize;                                        \
    src += slice_start * src_linesize;                                      \
    dst += slice_start * dst_linesize;                                      \
    for (int y = slice_start; y < slice_end; y++) {                         \
        for (int x = 0; x < s->planewidth[plane]; x++) {                    \
            int cur = src[x];                                               \
                                                                            \
            if (FFABS(cur - p0[x]) <= chromaT1 &&                           \
                FFABS(cur - p4[x]) <= chromaT1 &&                           \
                FFABS(p1[x] - p3[x]) <= chromaT1 &&                         \
                FFABS(cur - p1[x]) > chromaT2 &&                            \
                FFABS(cur - p3[x]) > chromaT2) {                            \
                int diff1 = FFABS(cur - p1[x]);                             \
                int diff2 = FFABS(cur - p3[x]);                             \
                                                                            \
                if (diff1 < diff2)                                          \
                    dst[x] = (src[x] + p1[x] + 1) >> 1;                     \
                else                                                        \
                    dst[x] = (src[x] + p3[x] + 1) >> 1;                     \
            }                                                               \
        }                                                                   \
                                                                            \
        dst += dst_linesize;                                                \
        src += src_linesize;                                                \
        p0 += p0_linesize;                                                  \
        p1 += p1_linesize;                                                  \
        p3 += p3_linesize;                                                  \
        p4 += p4_linesize;                                                  \
    }                                                                       \
    return 0;                                                               \
}

DEFINE_DERAINBOW(8, uint8_t, 1)
DEFINE_DERAINBOW(16, uint16_t, 2)

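/*
 * Output configuration: plane sizes are derived from the input link and the
 * pixel format descriptor, the float thresholds are scaled to the sample
 * range ((1 << depth) - 1), and the 8- or 16-bit worker functions are
 * selected according to the bit depth.
 */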
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    DedotContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->depth = s->desc->comp[0].depth;
    s->max = (1 << s->depth) - 1;
    s->luma2d = s->lt * s->max;
    s->lumaT = s->tl * s->max;
    s->chromaT1 = s->tc * s->max;
    s->chromaT2 = s->ct * s->max;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, s->desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    if (s->depth <= 8) {
        s->dedotcrawl = dedotcrawl8;
        s->derainbow = derainbow8;
    } else {
        s->dedotcrawl = dedotcrawl16;
        s->derainbow = derainbow16;
    }

    return 0;
}

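/*
 * activate() maintains the five-frame window. A newly consumed frame fills
 * frames[2..4] on the first iteration and only frames[4] afterwards; once all
 * five slots are populated, frames[2] is cloned into the output frame, made
 * writable and filtered. After each iteration the window is shifted by one.
 * On EOF the last buffered frame (frames[3]) is duplicated so the remaining
 * frames can still be filtered and flushed.
 */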
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    DedotContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int64_t pts;
    int status;
    int ret = 0;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (s->eof == 0) {
        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
    }
    if (frame || s->eof_frames > 0) {
        AVFrame *out = NULL;

        if (frame) {
            for (int i = 2; i < 5; i++) {
                if (!s->frames[i])
                    s->frames[i] = av_frame_clone(frame);
            }
            av_frame_free(&frame);
        } else if (s->frames[3]) {
            s->eof_frames--;
            s->frames[4] = av_frame_clone(s->frames[3]);
        }

        if (s->frames[0] &&
            s->frames[1] &&
            s->frames[2] &&
            s->frames[3] &&
            s->frames[4]) {
            out = av_frame_clone(s->frames[2]);
            if (out && !ctx->is_disabled) {
                ret = av_frame_make_writable(out);
                if (ret >= 0) {
                    if (s->m & 1)
                        ff_filter_execute(ctx, s->dedotcrawl, out, NULL,
                                          FFMIN(ff_filter_get_nb_threads(ctx),
                                                s->planeheight[0]));
                    if (s->m & 2) {
                        ThreadData td;
                        td.out = out; td.plane = 1;
                        ff_filter_execute(ctx, s->derainbow, &td, NULL,
                                          FFMIN(ff_filter_get_nb_threads(ctx),
                                                s->planeheight[1]));
                        td.plane = 2;
                        ff_filter_execute(ctx, s->derainbow, &td, NULL,
                                          FFMIN(ff_filter_get_nb_threads(ctx),
                                                s->planeheight[2]));
                    }
                } else
                    av_frame_free(&out);
            } else if (!out) {
                ret = AVERROR(ENOMEM);
            }
        }

        av_frame_free(&s->frames[0]);
        s->frames[0] = s->frames[1];
        s->frames[1] = s->frames[2];
        s->frames[2] = s->frames[3];
        s->frames[3] = s->frames[4];
        s->frames[4] = NULL;

        if (ret < 0)
            return ret;
        if (out)
            return ff_filter_frame(outlink, out);
    }

    if (s->eof) {
        if (s->eof_frames <= 0) {
            ff_outlink_set_status(outlink, AVERROR_EOF, s->frames[2]->pts);
        } else {
            ff_filter_set_ready(ctx, 10);
        }
        return 0;
    }

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            s->eof = 1;
            s->eof_frames = !!s->frames[0] + !!s->frames[1];
            if (s->eof_frames <= 0) {
                ff_outlink_set_status(outlink, AVERROR_EOF, pts);
                return 0;
            }
            ff_filter_set_ready(ctx, 10);
            return 0;
        }
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    DedotContext *s = ctx->priv;

    for (int i = 0; i < 5; i++)
        av_frame_free(&s->frames[i]);
}

#define OFFSET(x) offsetof(DedotContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM

static const AVOption dedot_options[] = {
    { "m",  "set filtering mode",                          OFFSET( m), AV_OPT_TYPE_FLAGS, {.i64=3},    0, 3, FLAGS, "m" },
    { "dotcrawl",                                       0,          0, AV_OPT_TYPE_CONST, {.i64=1},    0, 0, FLAGS, "m" },
    { "rainbows",                                       0,          0, AV_OPT_TYPE_CONST, {.i64=2},    0, 0, FLAGS, "m" },
    { "lt", "set spatial luma threshold",                  OFFSET(lt), AV_OPT_TYPE_FLOAT, {.dbl=.079}, 0, 1, FLAGS },
    { "tl", "set tolerance for temporal luma",             OFFSET(tl), AV_OPT_TYPE_FLOAT, {.dbl=.079}, 0, 1, FLAGS },
    { "tc", "set tolerance for chroma temporal variation", OFFSET(tc), AV_OPT_TYPE_FLOAT, {.dbl=.058}, 0, 1, FLAGS },
    { "ct", "set temporal chroma threshold",               OFFSET(ct), AV_OPT_TYPE_FLOAT, {.dbl=.019}, 0, 1, FLAGS },
    { NULL },
};

static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

AVFILTER_DEFINE_CLASS(dedot);

const AVFilter ff_vf_dedot = {
    .name = "dedot",
    .description = NULL_IF_CONFIG_SMALL("Reduce cross-luminance and cross-color."),
    .priv_size = sizeof(DedotContext),
    .priv_class = &dedot_class,
    .query_formats = query_formats,
    .activate = activate,
    .uninit = uninit,
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
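
/*
 * Illustrative usage (not part of this file): both passes are enabled by
 * default (m=3); individual passes and thresholds can be selected with the
 * options declared above, using the standard AVOption flags syntax, e.g.
 *
 *     ffmpeg -i input.mkv -vf dedot=m=dotcrawl+rainbows:lt=0.079:tc=0.058 output.mkv
 *
 * where "dotcrawl+rainbows" combines the two AV_OPT_TYPE_CONST flag values.
 */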