FFmpeg
vf_gblur.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2011 Pascal Getreuer
3  * Copyright (c) 2016 Paul B Mahol
4  *
5  * Redistribution and use in source and binary forms, with or without modification,
6  * are permitted provided that the following conditions are met:
7  *
8  * * Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  * * Redistributions in binary form must reproduce the above
11  * copyright notice, this list of conditions and the following
12  * disclaimer in the documentation and/or other materials provided
13  * with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
18  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19  * HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include "libavutil/imgutils.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/pixdesc.h"
31 #include "avfilter.h"
32 #include "formats.h"
33 #include "internal.h"
34 #include "video.h"
35 
36 typedef struct GBlurContext {
37  const AVClass *class;
38 
39  float sigma;
40  float sigmaV;
41  int steps;
42  int planes;
43 
44  int depth;
45  int planewidth[4];
46  int planeheight[4];
47  float *buffer;
50  float postscale;
51  float postscaleV;
52  float nu;
53  float nuV;
54  int nb_planes;
55 } GBlurContext;
56 
57 #define OFFSET(x) offsetof(GBlurContext, x)
58 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
59 
60 static const AVOption gblur_options[] = {
61  { "sigma", "set sigma", OFFSET(sigma), AV_OPT_TYPE_FLOAT, {.dbl=0.5}, 0.0, 1024, FLAGS },
62  { "steps", "set number of steps", OFFSET(steps), AV_OPT_TYPE_INT, {.i64=1}, 1, 6, FLAGS },
63  { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF, FLAGS },
64  { "sigmaV", "set vertical sigma", OFFSET(sigmaV), AV_OPT_TYPE_FLOAT, {.dbl=-1}, -1, 1024, FLAGS },
65  { NULL }
66 };
67 
69 
/* Per-frame geometry passed to the slice-threaded worker callbacks. */
typedef struct ThreadData {
    int height;  ///< plane height in samples
    int width;   ///< plane width in samples
} ThreadData;
74 
75 static int filter_horizontally(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
76 {
77  GBlurContext *s = ctx->priv;
78  ThreadData *td = arg;
79  const int height = td->height;
80  const int width = td->width;
81  const int slice_start = (height * jobnr ) / nb_jobs;
82  const int slice_end = (height * (jobnr+1)) / nb_jobs;
83  const float boundaryscale = s->boundaryscale;
84  const int steps = s->steps;
85  const float nu = s->nu;
86  float *buffer = s->buffer;
87  int y, x, step;
88  float *ptr;
89 
90  /* Filter horizontally along each row */
91  for (y = slice_start; y < slice_end; y++) {
92  for (step = 0; step < steps; step++) {
93  ptr = buffer + width * y;
94  ptr[0] *= boundaryscale;
95 
96  /* Filter rightwards */
97  for (x = 1; x < width; x++)
98  ptr[x] += nu * ptr[x - 1];
99 
100  ptr[x = width - 1] *= boundaryscale;
101 
102  /* Filter leftwards */
103  for (; x > 0; x--)
104  ptr[x - 1] += nu * ptr[x];
105  }
106  }
107 
108  return 0;
109 }
110 
/* Apply the vertical IIR sweeps to columns [column_begin, column_end) of a
 * width*height float buffer, 'column_step' adjacent columns at a time (the
 * caller uses 8 for the bulk and 1 for the unaligned tail).  Each of the
 * 'steps' iterations performs a boundary-corrected causal (downward) sweep
 * followed by an anti-causal (upward) sweep. */
static void do_vertical_columns(float *buffer, int width, int height,
                                int column_begin, int column_end, int steps,
                                float nu, float boundaryscale, int column_step)
{
    const int last_row = (height - 1) * width;
    int x, step, off, k;

    for (x = column_begin; x < column_end; x += column_step) {
        float *col = buffer + x;

        for (step = 0; step < steps; step++) {
            /* top boundary correction */
            for (k = 0; k < column_step; k++)
                col[k] *= boundaryscale;

            /* causal (downward) sweep */
            for (off = width; off <= last_row; off += width)
                for (k = 0; k < column_step; k++)
                    col[off + k] += nu * col[off - width + k];

            /* bottom boundary correction */
            for (k = 0; k < column_step; k++)
                col[last_row + k] *= boundaryscale;

            /* anti-causal (upward) sweep */
            for (off = last_row; off > 0; off -= width)
                for (k = 0; k < column_step; k++)
                    col[off - width + k] += nu * col[off + k];
        }
    }
}
144 
145 static int filter_vertically(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
146 {
147  GBlurContext *s = ctx->priv;
148  ThreadData *td = arg;
149  const int height = td->height;
150  const int width = td->width;
151  const int slice_start = (width * jobnr ) / nb_jobs;
152  const int slice_end = (width * (jobnr+1)) / nb_jobs;
153  const float boundaryscale = s->boundaryscaleV;
154  const int steps = s->steps;
155  const float nu = s->nuV;
156  float *buffer = s->buffer;
157  int aligned_end;
158 
159  aligned_end = slice_start + (((slice_end - slice_start) >> 3) << 3);
160  /* Filter vertically along columns (process 8 columns in each step) */
161  do_vertical_columns(buffer, width, height, slice_start, aligned_end,
162  steps, nu, boundaryscale, 8);
163 
164  /* Filter un-aligned columns one by one */
165  do_vertical_columns(buffer, width, height, aligned_end, slice_end,
166  steps, nu, boundaryscale, 1);
167  return 0;
168 }
169 
170 
171 static int filter_postscale(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
172 {
173  GBlurContext *s = ctx->priv;
174  ThreadData *td = arg;
175  const int height = td->height;
176  const int width = td->width;
177  const int64_t numpixels = width * (int64_t)height;
178  const unsigned slice_start = (numpixels * jobnr ) / nb_jobs;
179  const unsigned slice_end = (numpixels * (jobnr+1)) / nb_jobs;
180  const float postscale = s->postscale * s->postscaleV;
181  float *buffer = s->buffer;
182  unsigned i;
183 
184  for (i = slice_start; i < slice_end; i++)
185  buffer[i] *= postscale;
186 
187  return 0;
188 }
189 
191 {
192  GBlurContext *s = ctx->priv;
193  const int width = s->planewidth[plane];
194  const int height = s->planeheight[plane];
195  const int nb_threads = ff_filter_get_nb_threads(ctx);
196  ThreadData td;
197 
198  if (s->sigma <= 0 || s->steps < 0)
199  return;
200 
201  td.width = width;
202  td.height = height;
203  ctx->internal->execute(ctx, filter_horizontally, &td, NULL, FFMIN(height, nb_threads));
204  ctx->internal->execute(ctx, filter_vertically, &td, NULL, FFMIN(width, nb_threads));
205  ctx->internal->execute(ctx, filter_postscale, &td, NULL, FFMIN(width * height, nb_threads));
206 }
207 
209 {
210  static const enum AVPixelFormat pix_fmts[] = {
229  };
230 
231  return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
232 }
233 
235 {
237  GBlurContext *s = inlink->dst->priv;
238 
239  s->depth = desc->comp[0].depth;
240  s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
241  s->planewidth[0] = s->planewidth[3] = inlink->w;
242  s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
243  s->planeheight[0] = s->planeheight[3] = inlink->h;
244 
246 
247  s->buffer = av_malloc_array(inlink->w, inlink->h * sizeof(*s->buffer));
248  if (!s->buffer)
249  return AVERROR(ENOMEM);
250 
251  if (s->sigmaV < 0) {
252  s->sigmaV = s->sigma;
253  }
254 
255  return 0;
256 }
257 
258 static void set_params(float sigma, int steps, float *postscale, float *boundaryscale, float *nu)
259 {
260  double dnu, lambda;
261 
262  lambda = (sigma * sigma) / (2.0 * steps);
263  dnu = (1.0 + 2.0 * lambda - sqrt(1.0 + 4.0 * lambda)) / (2.0 * lambda);
264  *postscale = pow(dnu / lambda, steps);
265  *boundaryscale = 1.0 / (1.0 - dnu);
266  *nu = (float)dnu;
267 }
268 
270 {
271  AVFilterContext *ctx = inlink->dst;
272  GBlurContext *s = ctx->priv;
273  AVFilterLink *outlink = ctx->outputs[0];
274  AVFrame *out;
275  int plane;
276 
277  set_params(s->sigma, s->steps, &s->postscale, &s->boundaryscale, &s->nu);
278  set_params(s->sigmaV, s->steps, &s->postscaleV, &s->boundaryscaleV, &s->nuV);
279 
280  if (av_frame_is_writable(in)) {
281  out = in;
282  } else {
283  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
284  if (!out) {
285  av_frame_free(&in);
286  return AVERROR(ENOMEM);
287  }
288  av_frame_copy_props(out, in);
289  }
290 
291  for (plane = 0; plane < s->nb_planes; plane++) {
292  const int height = s->planeheight[plane];
293  const int width = s->planewidth[plane];
294  float *bptr = s->buffer;
295  const uint8_t *src = in->data[plane];
296  const uint16_t *src16 = (const uint16_t *)in->data[plane];
297  uint8_t *dst = out->data[plane];
298  uint16_t *dst16 = (uint16_t *)out->data[plane];
299  int y, x;
300 
301  if (!s->sigma || !(s->planes & (1 << plane))) {
302  if (out != in)
303  av_image_copy_plane(out->data[plane], out->linesize[plane],
304  in->data[plane], in->linesize[plane],
305  width * ((s->depth + 7) / 8), height);
306  continue;
307  }
308 
309  if (s->depth == 8) {
310  for (y = 0; y < height; y++) {
311  for (x = 0; x < width; x++) {
312  bptr[x] = src[x];
313  }
314  bptr += width;
315  src += in->linesize[plane];
316  }
317  } else {
318  for (y = 0; y < height; y++) {
319  for (x = 0; x < width; x++) {
320  bptr[x] = src16[x];
321  }
322  bptr += width;
323  src16 += in->linesize[plane] / 2;
324  }
325  }
326 
327  gaussianiir2d(ctx, plane);
328 
329  bptr = s->buffer;
330  if (s->depth == 8) {
331  for (y = 0; y < height; y++) {
332  for (x = 0; x < width; x++) {
333  dst[x] = bptr[x];
334  }
335  bptr += width;
336  dst += out->linesize[plane];
337  }
338  } else {
339  for (y = 0; y < height; y++) {
340  for (x = 0; x < width; x++) {
341  dst16[x] = bptr[x];
342  }
343  bptr += width;
344  dst16 += out->linesize[plane] / 2;
345  }
346  }
347  }
348 
349  if (out != in)
350  av_frame_free(&in);
351  return ff_filter_frame(outlink, out);
352 }
353 
355 {
356  GBlurContext *s = ctx->priv;
357 
358  av_freep(&s->buffer);
359 }
360 
361 static const AVFilterPad gblur_inputs[] = {
362  {
363  .name = "default",
364  .type = AVMEDIA_TYPE_VIDEO,
365  .config_props = config_input,
366  .filter_frame = filter_frame,
367  },
368  { NULL }
369 };
370 
371 static const AVFilterPad gblur_outputs[] = {
372  {
373  .name = "default",
374  .type = AVMEDIA_TYPE_VIDEO,
375  },
376  { NULL }
377 };
378 
380  .name = "gblur",
381  .description = NULL_IF_CONFIG_SMALL("Apply Gaussian Blur filter."),
382  .priv_size = sizeof(GBlurContext),
383  .priv_class = &gblur_class,
384  .uninit = uninit,
386  .inputs = gblur_inputs,
387  .outputs = gblur_outputs,
389 };
float nu
Definition: vf_gblur.c:52
int plane
Definition: avisynth_c.h:384
float boundaryscale
Definition: vf_gblur.c:48
#define NULL
Definition: coverity.c:32
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:430
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:422
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:268
static void gaussianiir2d(AVFilterContext *ctx, int plane)
Definition: vf_gblur.c:190
static const AVFilterPad gblur_inputs[]
Definition: vf_gblur.c:361
AVOption.
Definition: opt.h:246
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:424
float nuV
Definition: vf_gblur.c:53
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:397
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:407
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:425
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2562
Main libavfilter public API header.
#define FLAGS
Definition: vf_gblur.c:58
const char * desc
Definition: nvenc.c:68
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
#define AV_PIX_FMT_GRAY9
Definition: pixfmt.h:367
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
#define src
Definition: vp8dsp.c:254
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
int planeheight[4]
Definition: vf_gblur.c:46
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
#define AV_PIX_FMT_GRAY10
Definition: pixfmt.h:368
int planewidth[4]
Definition: vf_gblur.c:45
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:125
const char * name
Pad name.
Definition: internal.h:60
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:369
float sigma
Definition: vf_gblur.c:39
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_cold
Definition: attributes.h:82
AVOptions.
float boundaryscaleV
Definition: vf_gblur.c:49
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:421
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
int height
Definition: vf_avgblur.c:61
#define height
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range...
Definition: pixfmt.h:100
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_gblur.c:354
A filter pad used for either input or output.
Definition: internal.h:54
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:260
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
#define td
Definition: regdef.h:70
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
float sigmaV
Definition: vf_gblur.c:40
void * priv
private data for use by the filter
Definition: avfilter.h:353
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
const char * arg
Definition: jacosubdec.c:66
#define AV_PIX_FMT_GBRAP12
Definition: pixfmt.h:408
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
AVFILTER_DEFINE_CLASS(gblur)
#define AV_PIX_FMT_GBRAP16
Definition: pixfmt.h:409
static const AVOption gblur_options[]
Definition: vf_gblur.c:60
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:385
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
#define FFMIN(a, b)
Definition: common.h:96
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define width
AVFormatContext * ctx
Definition: movenc.c:48
#define s(width, name)
Definition: cbs_vp9.c:257
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:426
float * buffer
Definition: vf_gblur.c:47
float postscaleV
Definition: vf_gblur.c:51
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:386
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
static void set_params(float sigma, int steps, float *postscale, float *boundaryscale, float *nu)
Definition: vf_gblur.c:258
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
static int config_input(AVFilterLink *inlink)
Definition: vf_gblur.c:234
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:395
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:594
Used for passing data between threads.
Definition: af_adeclick.c:487
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:299
static int query_formats(AVFilterContext *ctx)
Definition: vf_gblur.c:208
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
#define AV_PIX_FMT_GRAY14
Definition: pixfmt.h:370
int planes
Definition: vf_gblur.c:42
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
AVFilter ff_vf_gblur
Definition: vf_gblur.c:379
int nb_planes
Definition: vf_gblur.c:54
float postscale
Definition: vf_gblur.c:50
const char * name
Filter name.
Definition: avfilter.h:148
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:393
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_gblur.c:269
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:384
static void do_vertical_columns(float *buffer, int width, int height, int column_begin, int column_end, int steps, float nu, float boundaryscale, int column_step)
Definition: vf_gblur.c:111
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:396
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
static int filter_vertically(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_gblur.c:145
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:378
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:282
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:423
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
#define OFFSET(x)
Definition: vf_gblur.c:57
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
avfilter_execute_func * execute
Definition: internal.h:155
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2029
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:258
An instance of a filter.
Definition: avfilter.h:338
FILE * out
Definition: movenc.c:54
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
static const AVFilterPad gblur_outputs[]
Definition: vf_gblur.c:371
#define av_malloc_array(a, b)
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
Definition: imgutils.c:338
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int depth
Number of bits in the component.
Definition: pixdesc.h:58
static int filter_postscale(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_gblur.c:171
static int filter_horizontally(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_gblur.c:75
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
GLuint buffer
Definition: opengl_enc.c:101
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58