FFmpeg: vf_gblur.c
/*
 * Copyright (c) 2011 Pascal Getreuer
 * Copyright (c) 2016 Paul B Mahol
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "gblur.h"
#include "internal.h"
#include "video.h"

#define OFFSET(x) offsetof(GBlurContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption gblur_options[] = {
    { "sigma",  "set sigma",            OFFSET(sigma),  AV_OPT_TYPE_FLOAT, {.dbl=0.5},  0.0, 1024, FLAGS },
    { "steps",  "set number of steps",  OFFSET(steps),  AV_OPT_TYPE_INT,   {.i64=1},      1,    6, FLAGS },
    { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT,   {.i64=0xF},    0,  0xF, FLAGS },
    { "sigmaV", "set vertical sigma",   OFFSET(sigmaV), AV_OPT_TYPE_FLOAT, {.dbl=-1},    -1, 1024, FLAGS },
    { NULL }
};
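
/*
 * Illustrative filtergraph usage of the options above (example string, not
 * taken from this file): "gblur=sigma=2:steps=2:planes=0xf:sigmaV=4" blurs
 * all planes with a horizontal sigma of 2 and a vertical sigma of 4. When
 * sigmaV is left at its default of -1, config_input() copies the horizontal
 * sigma into it, so the blur is isotropic.
 */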

AVFILTER_DEFINE_CLASS(gblur);

typedef struct ThreadData {
    int height;
    int width;
} ThreadData;

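/* Apply the accumulated IIR gain correction to every sample and clip it to
 * the valid range for the pixel format (min/max are chosen in filter_postscale()). */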
static void postscale_c(float *buffer, int length,
                        float postscale, float min, float max)
{
    for (int i = 0; i < length; i++) {
        buffer[i] *= postscale;
        buffer[i] = av_clipf(buffer[i], min, max);
    }
}

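/*
 * Horizontal pass: for each row, run 'steps' iterations of a first-order
 * recursive filter, sweeping left-to-right (causal) and then right-to-left
 * (anticausal). The first sample is rescaled by 'bscale' before the rightward
 * sweep and the last sample before the leftward sweep to handle the boundary.
 * 'localbuf' is only used by optimized (SIMD) implementations; this C
 * reference version ignores it.
 */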
static void horiz_slice_c(float *buffer, int width, int height, int steps,
                          float nu, float bscale, float *localbuf)
{
    int step, x, y;
    float *ptr;
    for (y = 0; y < height; y++) {
        for (step = 0; step < steps; step++) {
            ptr = buffer + width * y;
            ptr[0] *= bscale;

            /* Filter rightwards */
            for (x = 1; x < width; x++)
                ptr[x] += nu * ptr[x - 1];
            ptr[x = width - 1] *= bscale;

            /* Filter leftwards */
            for (; x > 0; x--)
                ptr[x - 1] += nu * ptr[x];
        }
    }
}

static int filter_horizontally(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GBlurContext *s = ctx->priv;
    ThreadData *td = arg;
    const int height = td->height;
    const int width = td->width;
    const int slice_start = (height *  jobnr   ) / nb_jobs;
    const int slice_end   = (height * (jobnr+1)) / nb_jobs;
    const float boundaryscale = s->boundaryscale;
    const int steps = s->steps;
    const float nu = s->nu;
    float *buffer = s->buffer;
    float *localbuf = NULL;

    if (s->localbuf)
        localbuf = s->localbuf + s->stride * width * slice_start;

    s->horiz_slice(buffer + width * slice_start, width, slice_end - slice_start,
                   steps, nu, boundaryscale, localbuf);
    emms_c();
    return 0;
}

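/*
 * Vertical pass over a range of columns. To stay cache friendly, columns are
 * processed 'column_step' at a time: each group is swept top-to-bottom
 * (causal) and then bottom-to-top (anticausal), 'steps' times, with the
 * boundary rows rescaled by 'boundaryscale' just like in the horizontal pass.
 */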
static void do_vertical_columns(float *buffer, int width, int height,
                                int column_begin, int column_end, int steps,
                                float nu, float boundaryscale, int column_step)
{
    const int numpixels = width * height;
    int i, x, k, step;
    float *ptr;
    for (x = column_begin; x < column_end;) {
        for (step = 0; step < steps; step++) {
            ptr = buffer + x;
            for (k = 0; k < column_step; k++) {
                ptr[k] *= boundaryscale;
            }
            /* Filter downwards */
            for (i = width; i < numpixels; i += width) {
                for (k = 0; k < column_step; k++) {
                    ptr[i + k] += nu * ptr[i - width + k];
                }
            }
            i = numpixels - width;

            for (k = 0; k < column_step; k++)
                ptr[i + k] *= boundaryscale;

            /* Filter upwards */
            for (; i > 0; i -= width) {
                for (k = 0; k < column_step; k++)
                    ptr[i - width + k] += nu * ptr[i + k];
            }
        }
        x += column_step;
    }
}

static void verti_slice_c(float *buffer, int width, int height,
                          int slice_start, int slice_end, int steps,
                          float nu, float boundaryscale)
{
    int aligned_end = slice_start + (((slice_end - slice_start) >> 3) << 3);
    /* Filter vertically along columns (process 8 columns in each step) */
    do_vertical_columns(buffer, width, height, slice_start, aligned_end,
                        steps, nu, boundaryscale, 8);
    /* Filter un-aligned columns one by one */
    do_vertical_columns(buffer, width, height, aligned_end, slice_end,
                        steps, nu, boundaryscale, 1);
}

static int filter_vertically(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GBlurContext *s = ctx->priv;
    ThreadData *td = arg;
    const int height = td->height;
    const int width = td->width;
    const int slice_start = (width *  jobnr   ) / nb_jobs;
    const int slice_end   = (width * (jobnr+1)) / nb_jobs;
    const float boundaryscale = s->boundaryscaleV;
    const int steps = s->steps;
    const float nu = s->nuV;
    float *buffer = s->buffer;

    s->verti_slice(buffer, width, height, slice_start, slice_end,
                   steps, nu, boundaryscale);

    return 0;
}

static int filter_postscale(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GBlurContext *s = ctx->priv;
    ThreadData *td = arg;
    const float max = s->flt ? FLT_MAX : (1 << s->depth) - 1;
    const float min = s->flt ? -FLT_MAX : 0.f;
    const int height = td->height;
    const int width = td->width;
    const int awidth = FFALIGN(width, 64);
    const int slice_start = (height *  jobnr   ) / nb_jobs;
    const int slice_end   = (height * (jobnr+1)) / nb_jobs;
    const float postscale = s->postscale * s->postscaleV;
    const int slice_size = slice_end - slice_start;

    s->postscale_slice(s->buffer + slice_start * awidth,
                       slice_size * awidth, postscale, min, max);

    return 0;
}

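/*
 * Run the separable IIR approximation of a 2D Gaussian on one plane:
 * a threaded horizontal pass, a threaded vertical pass, and a final
 * postscale/clip pass, all operating on the float working buffer.
 */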
static void gaussianiir2d(AVFilterContext *ctx, int plane)
{
    GBlurContext *s = ctx->priv;
    const int width = s->planewidth[plane];
    const int height = s->planeheight[plane];
    const int nb_threads = ff_filter_get_nb_threads(ctx);
    ThreadData td;

    if (s->sigma <= 0 || s->steps < 0)
        return;

    td.width = width;
    td.height = height;
    ff_filter_execute(ctx, filter_horizontally, &td,
                      NULL, FFMIN(height, nb_threads));
    ff_filter_execute(ctx, filter_vertically, &td,
                      NULL, FFMIN(width, nb_threads));
    ff_filter_execute(ctx, filter_postscale, &td,
                      NULL, FFMIN(width * height, nb_threads));
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, AV_PIX_FMT_GRAYF32,
    AV_PIX_FMT_NONE
};

void ff_gblur_init(GBlurContext *s)
{
    s->localbuf = NULL;
    s->horiz_slice = horiz_slice_c;
    s->verti_slice = verti_slice_c;
    s->postscale_slice = postscale_c;
    if (ARCH_X86)
        ff_gblur_init_x86(s);
}

static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    GBlurContext *s = inlink->dst->priv;

    s->depth = desc->comp[0].depth;
    s->flt = !!(desc->flags & AV_PIX_FMT_FLAG_FLOAT);
    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    s->buffer = av_malloc_array(FFALIGN(inlink->w, 64), FFALIGN(inlink->h, 64) * sizeof(*s->buffer));
    if (!s->buffer)
        return AVERROR(ENOMEM);

    if (s->sigmaV < 0) {
        s->sigmaV = s->sigma;
    }
    ff_gblur_init(s);

    return 0;
}

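/*
 * Derive the recursion coefficients for one axis. With K = steps and
 * lambda = sigma^2 / (2K), the per-pass feedback coefficient is
 * nu = (1 + 2*lambda - sqrt(1 + 4*lambda)) / (2*lambda), boundary samples
 * are pre-scaled by 1/(1 - nu), and the final gain correction is
 * (nu/lambda)^K. These formulas follow the first-order recursive Gaussian
 * approximation in the Alvarez-Mazorra style, as used in Getreuer's
 * original gaussianiir2d code from which this filter derives.
 */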
static void set_params(float sigma, int steps, float *postscale, float *boundaryscale, float *nu)
{
    double dnu, lambda;

    lambda = (sigma * sigma) / (2.0 * steps);
    dnu = (1.0 + 2.0 * lambda - sqrt(1.0 + 4.0 * lambda)) / (2.0 * lambda);
    *postscale = pow(dnu / lambda, steps);
    *boundaryscale = 1.0 / (1.0 - dnu);
    *nu = (float)dnu;
}

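/*
 * Per-frame entry point: recompute the coefficients (the options are
 * runtime-tweakable via process_command), copy each selected plane into the
 * float working buffer, run gaussianiir2d() on it, and write the result back
 * at the frame's native depth.
 */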
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    GBlurContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int plane;

    set_params(s->sigma,  s->steps, &s->postscale,  &s->boundaryscale,  &s->nu);
    set_params(s->sigmaV, s->steps, &s->postscaleV, &s->boundaryscaleV, &s->nuV);

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    for (plane = 0; plane < s->nb_planes; plane++) {
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        float *bptr = s->buffer;
        const uint8_t *src = in->data[plane];
        const uint16_t *src16 = (const uint16_t *)in->data[plane];
        uint8_t *dst = out->data[plane];
        uint16_t *dst16 = (uint16_t *)out->data[plane];
        int y, x;

        if (!s->sigma || !(s->planes & (1 << plane))) {
            if (out != in)
                av_image_copy_plane(out->data[plane], out->linesize[plane],
                                    in->data[plane], in->linesize[plane],
                                    width * ((s->depth + 7) / 8), height);
            continue;
        }

        if (s->flt) {
            av_image_copy_plane((uint8_t *)bptr, width * sizeof(float),
                                in->data[plane], in->linesize[plane],
                                width * sizeof(float), height);
        } else if (s->depth == 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    bptr[x] = src[x];
                }
                bptr += width;
                src += in->linesize[plane];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    bptr[x] = src16[x];
                }
                bptr += width;
                src16 += in->linesize[plane] / 2;
            }
        }

        gaussianiir2d(ctx, plane);

        bptr = s->buffer;
        if (s->flt) {
            av_image_copy_plane(out->data[plane], out->linesize[plane],
                                (uint8_t *)bptr, width * sizeof(float),
                                width * sizeof(float), height);
        } else if (s->depth == 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    dst[x] = bptr[x];
                }
                bptr += width;
                dst += out->linesize[plane];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    dst16[x] = bptr[x];
                }
                bptr += width;
                dst16 += out->linesize[plane] / 2;
            }
        }
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    GBlurContext *s = ctx->priv;

    av_freep(&s->buffer);
    if (s->localbuf)
        av_free(s->localbuf);
}

static const AVFilterPad gblur_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

static const AVFilterPad gblur_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
};

const AVFilter ff_vf_gblur = {
    .name            = "gblur",
    .description     = NULL_IF_CONFIG_SMALL("Apply Gaussian Blur filter."),
    .priv_size       = sizeof(GBlurContext),
    .priv_class      = &gblur_class,
    .uninit          = uninit,
    FILTER_INPUTS(gblur_inputs),
    FILTER_OUTPUTS(gblur_outputs),
    FILTER_PIXFMTS_ARRAY(pix_fmts),
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};