FFmpeg
vf_neighbor.c
/*
 * Copyright (c) 2012-2013 Oka Motofumi (chikuzen.mo at gmail dot com)
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;

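/* Per-instance state shared by erosion, dilation, deflate and inflate:
 * per-plane dimensions and thresholds, bit-depth information and the
 * per-row filter callback selected in config_input(). */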
typedef struct NContext {
    const AVClass *class;
    int planeheight[4];
    int planewidth[4];
    int nb_planes;
    int threshold[4];
    int coordinates;

    int depth;
    int max;
    int bpc;

    void (*filter)(uint8_t *dst, const uint8_t *p1, int width,
                   int threshold, const uint8_t *coordinates[], int coord,
                   int maxc);
} NContext;

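/* Advertise the pixel formats the filter accepts: planar YUV/GBR/grayscale
 * variants at 8 to 16 bits per component. */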
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        /* ... full format list elided in this listing ... */
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}

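/* Erosion: each output pixel is the minimum of the source pixel and its
 * selected 3x3 neighbours (bitmask 'coord'), clamped so it never drops more
 * than 'threshold' below the source value.  erosion16() below is the same
 * operation for formats with more than 8 bits per component. */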
static void erosion(uint8_t *dst, const uint8_t *p1, int width,
                    int threshold, const uint8_t *coordinates[], int coord,
                    int maxc)
{
    int x, i;

    for (x = 0; x < width; x++) {
        int min = p1[x];
        int limit = FFMAX(min - threshold, 0);

        for (i = 0; i < 8; i++) {
            if (coord & (1 << i)) {
                min = FFMIN(min, *(coordinates[i] + x));
            }
            min = FFMAX(min, limit);
        }

        dst[x] = min;
    }
}

static void erosion16(uint8_t *dstp, const uint8_t *p1, int width,
                      int threshold, const uint8_t *coordinates[], int coord,
                      int maxc)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x, i;

    for (x = 0; x < width; x++) {
        int min = AV_RN16A(&p1[2 * x]);
        int limit = FFMAX(min - threshold, 0);

        for (i = 0; i < 8; i++) {
            if (coord & (1 << i)) {
                min = FFMIN(min, AV_RN16A(coordinates[i] + x * 2));
            }
            min = FFMAX(min, limit);
        }

        dst[x] = min;
    }
}

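/* Dilation: the mirror image of erosion; each output pixel is the maximum of
 * the source pixel and its selected neighbours, clamped to at most
 * 'threshold' above the source value.  dilation16() handles >8-bit depths,
 * clamping against the format maximum 'maxc' instead of 255. */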
static void dilation(uint8_t *dst, const uint8_t *p1, int width,
                     int threshold, const uint8_t *coordinates[], int coord,
                     int maxc)
{
    int x, i;

    for (x = 0; x < width; x++) {
        int max = p1[x];
        int limit = FFMIN(max + threshold, 255);

        for (i = 0; i < 8; i++) {
            if (coord & (1 << i)) {
                max = FFMAX(max, *(coordinates[i] + x));
            }
            max = FFMIN(max, limit);
        }

        dst[x] = max;
    }
}

static void dilation16(uint8_t *dstp, const uint8_t *p1, int width,
                       int threshold, const uint8_t *coordinates[], int coord,
                       int maxc)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x, i;

    for (x = 0; x < width; x++) {
        int max = AV_RN16A(&p1[x * 2]);
        int limit = FFMIN(max + threshold, maxc);

        for (i = 0; i < 8; i++) {
            if (coord & (1 << i)) {
                max = FFMAX(max, AV_RN16A(coordinates[i] + x * 2));
            }
            max = FFMIN(max, limit);
        }

        dst[x] = max;
    }
}

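/* Deflate: replace each pixel by the average of its eight neighbours, but
 * only if that lowers the value, and never by more than 'threshold'.
 * deflate16() is the >8-bit variant. */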
static void deflate(uint8_t *dst, const uint8_t *p1, int width,
                    int threshold, const uint8_t *coordinates[], int coord,
                    int maxc)
{
    int x, i;

    for (x = 0; x < width; x++) {
        int sum = 0;
        int limit = FFMAX(p1[x] - threshold, 0);

        for (i = 0; i < 8; sum += *(coordinates[i++] + x));

        dst[x] = FFMAX(FFMIN(sum / 8, p1[x]), limit);
    }
}

static void deflate16(uint8_t *dstp, const uint8_t *p1, int width,
                      int threshold, const uint8_t *coordinates[], int coord,
                      int maxc)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x, i;

    for (x = 0; x < width; x++) {
        int sum = 0;
        int limit = FFMAX(AV_RN16A(&p1[2 * x]) - threshold, 0);

        for (i = 0; i < 8; sum += AV_RN16A(coordinates[i++] + x * 2));

        dst[x] = FFMAX(FFMIN(sum / 8, AV_RN16A(&p1[2 * x])), limit);
    }
}

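/* Inflate: the counterpart of deflate; the neighbour average is applied only
 * if it raises the value, and by at most 'threshold'.  inflate16() is the
 * >8-bit variant. */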
static void inflate(uint8_t *dst, const uint8_t *p1, int width,
                    int threshold, const uint8_t *coordinates[], int coord,
                    int maxc)
{
    int x, i;

    for (x = 0; x < width; x++) {
        int sum = 0;
        int limit = FFMIN(p1[x] + threshold, 255);

        for (i = 0; i < 8; sum += *(coordinates[i++] + x));

        dst[x] = FFMIN(FFMAX(sum / 8, p1[x]), limit);
    }
}

static void inflate16(uint8_t *dstp, const uint8_t *p1, int width,
                      int threshold, const uint8_t *coordinates[], int coord,
                      int maxc)
{
    uint16_t *dst = (uint16_t *)dstp;
    int x, i;

    for (x = 0; x < width; x++) {
        int sum = 0;
        int limit = FFMIN(AV_RN16A(&p1[2 * x]) + threshold, maxc);

        for (i = 0; i < 8; sum += AV_RN16A(coordinates[i++] + x * 2));

        dst[x] = FFMIN(FFMAX(sum / 8, AV_RN16A(&p1[x * 2])), limit);
    }
}

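/* Per-link setup: derive the bit depth, per-plane dimensions and the per-row
 * filter implementation from the negotiated input format. */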
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    NContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->depth = desc->comp[0].depth;
    s->max = (1 << s->depth) - 1;
    s->bpc = (s->depth + 7) / 8;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    if (!strcmp(ctx->filter->name, "erosion"))
        s->filter = s->depth > 8 ? erosion16 : erosion;
    else if (!strcmp(ctx->filter->name, "dilation"))
        s->filter = s->depth > 8 ? dilation16 : dilation;
    else if (!strcmp(ctx->filter->name, "deflate"))
        s->filter = s->depth > 8 ? deflate16 : deflate;
    else if (!strcmp(ctx->filter->name, "inflate"))
        s->filter = s->depth > 8 ? inflate16 : inflate;

    return 0;
}

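/* Threaded worker: each job filters a horizontal slice of every plane.
 * Planes with a zero threshold are copied through unchanged.  For each row,
 * three neighbour-pointer tables are built: one for the left border pixel,
 * one for the bulk of the row and one for the right border pixel; columns
 * outside the frame are replaced by their mirrored in-frame neighbour, and
 * the top/bottom rows reuse themselves via the nh/ph offsets. */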
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    NContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    int plane, y;

    for (plane = 0; plane < s->nb_planes; plane++) {
        const int bpc = s->bpc;
        const int threshold = s->threshold[plane];
        const int stride = in->linesize[plane];
        const int dstride = out->linesize[plane];
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        const int slice_start = (height * jobnr) / nb_jobs;
        const int slice_end = (height * (jobnr+1)) / nb_jobs;
        const uint8_t *src = (const uint8_t *)in->data[plane] + slice_start * stride;
        uint8_t *dst = out->data[plane] + slice_start * dstride;

        if (!threshold) {
            av_image_copy_plane(dst, dstride, src, stride, width * bpc, slice_end - slice_start);
            continue;
        }

        for (y = slice_start; y < slice_end; y++) {
            const int nh = y > 0;
            const int ph = y < height - 1;
            const uint8_t *coordinates[] = { src - nh * stride, src + 1 * bpc - nh * stride, src + 2 * bpc - nh * stride,
                                             src, src + 2 * bpc,
                                             src + ph * stride, src + 1 * bpc + ph * stride, src + 2 * bpc + ph * stride};

            const uint8_t *coordinateslb[] = { src + 1 * bpc - nh * stride, src - nh * stride, src + 1 * bpc - nh * stride,
                                               src + 1 * bpc, src + 1 * bpc,
                                               src + 1 * bpc + ph * stride, src + ph * stride, src + 1 * bpc + ph * stride};

            const uint8_t *coordinatesrb[] = { src + (width - 2) * bpc - nh * stride, src + (width - 1) * bpc - nh * stride, src + (width - 2) * bpc - nh * stride,
                                               src + (width - 2) * bpc, src + (width - 2) * bpc,
                                               src + (width - 2) * bpc + ph * stride, src + (width - 1) * bpc + ph * stride, src + (width - 2) * bpc + ph * stride};

            s->filter(dst, src, 1, threshold, coordinateslb, s->coordinates, s->max);
            if (width > 1) {
                s->filter(dst + 1 * bpc, src + 1 * bpc, width - 2, threshold, coordinates, s->coordinates, s->max);
                s->filter(dst + (width - 1) * bpc, src + (width - 1) * bpc, 1, threshold, coordinatesrb, s->coordinates, s->max);
            }

            src += stride;
            dst += dstride;
        }
    }

    return 0;
}

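/* Entry point for each input frame: allocate the output buffer, run
 * filter_slice() across the available worker threads, then forward the
 * result downstream. */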
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    NContext *s = ctx->priv;
    ThreadData td;
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    td.in = in;
    td.out = out;
    ctx->internal->execute(ctx, filter_slice, &td, NULL,
                           FFMIN(s->planeheight[1], ff_filter_get_nb_threads(ctx)));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static const AVFilterPad neighbor_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad neighbor_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

#define OFFSET(x) offsetof(NContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

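/* The four filters differ only in name, description and option table, so a
 * single macro instantiates the AVClass and AVFilter definitions for each. */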
#define DEFINE_NEIGHBOR_FILTER(name_, description_)           \
AVFILTER_DEFINE_CLASS(name_);                                  \
                                                               \
AVFilter ff_vf_##name_ = {                                     \
    .name            = #name_,                                 \
    .description     = NULL_IF_CONFIG_SMALL(description_),     \
    .priv_size       = sizeof(NContext),                       \
    .priv_class      = &name_##_class,                         \
    .query_formats   = query_formats,                          \
    .inputs          = neighbor_inputs,                        \
    .outputs         = neighbor_outputs,                       \
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC| \
                       AVFILTER_FLAG_SLICE_THREADS,            \
    .process_command = ff_filter_process_command,              \
}

#if CONFIG_EROSION_FILTER

static const AVOption erosion_options[] = {
    { "threshold0", "set threshold for 1st plane", OFFSET(threshold[0]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold1", "set threshold for 2nd plane", OFFSET(threshold[1]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold2", "set threshold for 3rd plane", OFFSET(threshold[2]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold3", "set threshold for 4th plane", OFFSET(threshold[3]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "coordinates", "set coordinates", OFFSET(coordinates), AV_OPT_TYPE_INT, {.i64=255}, 0, 255, FLAGS },
    { NULL }
};

DEFINE_NEIGHBOR_FILTER(erosion, "Apply erosion effect.");

#endif /* CONFIG_EROSION_FILTER */

#if CONFIG_DILATION_FILTER

static const AVOption dilation_options[] = {
    { "threshold0", "set threshold for 1st plane", OFFSET(threshold[0]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold1", "set threshold for 2nd plane", OFFSET(threshold[1]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold2", "set threshold for 3rd plane", OFFSET(threshold[2]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold3", "set threshold for 4th plane", OFFSET(threshold[3]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "coordinates", "set coordinates", OFFSET(coordinates), AV_OPT_TYPE_INT, {.i64=255}, 0, 255, FLAGS },
    { NULL }
};

DEFINE_NEIGHBOR_FILTER(dilation, "Apply dilation effect.");

#endif /* CONFIG_DILATION_FILTER */

#if CONFIG_DEFLATE_FILTER

static const AVOption deflate_options[] = {
    { "threshold0", "set threshold for 1st plane", OFFSET(threshold[0]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold1", "set threshold for 2nd plane", OFFSET(threshold[1]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold2", "set threshold for 3rd plane", OFFSET(threshold[2]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold3", "set threshold for 4th plane", OFFSET(threshold[3]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { NULL }
};

DEFINE_NEIGHBOR_FILTER(deflate, "Apply deflate effect.");

#endif /* CONFIG_DEFLATE_FILTER */

#if CONFIG_INFLATE_FILTER

static const AVOption inflate_options[] = {
    { "threshold0", "set threshold for 1st plane", OFFSET(threshold[0]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold1", "set threshold for 2nd plane", OFFSET(threshold[1]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold2", "set threshold for 3rd plane", OFFSET(threshold[2]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { "threshold3", "set threshold for 4th plane", OFFSET(threshold[3]), AV_OPT_TYPE_INT, {.i64=65535}, 0, 65535, FLAGS },
    { NULL }
};

DEFINE_NEIGHBOR_FILTER(inflate, "Apply inflate effect.");

#endif /* CONFIG_INFLATE_FILTER */
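/* Illustrative usage (not part of this file): each filter exposes per-plane
 * "thresholdN" options limiting how much a pixel may change, and erosion and
 * dilation additionally take a "coordinates" bitmask selecting which of the
 * eight 3x3 neighbours are considered, e.g.
 *
 *     ffmpeg -i in.mp4 -vf "dilation=threshold0=20:coordinates=255" out.mp4
 */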