FFmpeg
vf_smartblur.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
3  * Copyright (c) 2012 Jeremy Tran
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License along
18  * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21 
22 /**
23  * @file
24  * Apply a smartblur filter to the input video
25  * Ported from MPlayer libmpcodecs/vf_smartblur.c by Michael Niedermayer.
26  */
27 
28 #include "libavutil/opt.h"
29 #include "libavutil/pixdesc.h"
30 #include "libswscale/swscale.h"
31 
32 #include "avfilter.h"
33 #include "formats.h"
34 #include "internal.h"
35 
36 #define RADIUS_MIN 0.1
37 #define RADIUS_MAX 5.0
38 
39 #define STRENGTH_MIN -1.0
40 #define STRENGTH_MAX 1.0
41 
42 #define THRESHOLD_MIN -30
43 #define THRESHOLD_MAX 30
44 
/**
 * Per-plane blur parameters; one instance is kept for luma and one for
 * the chroma planes.
 */
typedef struct FilterParam {
    float radius;                       ///< Gaussian radius, in [RADIUS_MIN, RADIUS_MAX]
    float strength;                     ///< blur strength, in [STRENGTH_MIN, STRENGTH_MAX]
    int threshold;                      ///< edge threshold, in [THRESHOLD_MIN, THRESHOLD_MAX]
    float quality;                      ///< quality passed to sws_getGaussianVec()
    struct SwsContext *filter_context;  ///< swscale context that performs the convolution
} FilterParam;
52 
53 typedef struct SmartblurContext {
54  const AVClass *class;
57  int hsub;
58  int vsub;
59  unsigned int sws_flags;
61 
#define OFFSET(x) offsetof(SmartblurContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* Option table. Every option has a long name and a two-letter alias.
 * The chroma options default to one below their legal minimum: init()
 * uses that sentinel to detect "not explicitly set" and then inherits
 * the corresponding luma value. */
static const AVOption smartblur_options[] = {
    { "luma_radius", "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
    { "lr"         , "set luma radius", OFFSET(luma.radius), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, RADIUS_MIN, RADIUS_MAX, .flags=FLAGS },
    { "luma_strength", "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
    { "ls",            "set luma strength", OFFSET(luma.strength), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, STRENGTH_MIN, STRENGTH_MAX, .flags=FLAGS },
    { "luma_threshold", "set luma threshold", OFFSET(luma.threshold), AV_OPT_TYPE_INT, {.i64=0}, THRESHOLD_MIN, THRESHOLD_MAX, .flags=FLAGS },
    { "lt",             "set luma threshold", OFFSET(luma.threshold), AV_OPT_TYPE_INT, {.i64=0}, THRESHOLD_MIN, THRESHOLD_MAX, .flags=FLAGS },

    /* chroma defaults are out-of-range sentinels, see comment above */
    { "chroma_radius", "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
    { "cr",            "set chroma radius", OFFSET(chroma.radius), AV_OPT_TYPE_FLOAT, {.dbl=RADIUS_MIN-1}, RADIUS_MIN-1, RADIUS_MAX, .flags=FLAGS },
    { "chroma_strength", "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
    { "cs",              "set chroma strength", OFFSET(chroma.strength), AV_OPT_TYPE_FLOAT, {.dbl=STRENGTH_MIN-1}, STRENGTH_MIN-1, STRENGTH_MAX, .flags=FLAGS },
    { "chroma_threshold", "set chroma threshold", OFFSET(chroma.threshold), AV_OPT_TYPE_INT, {.i64=THRESHOLD_MIN-1}, THRESHOLD_MIN-1, THRESHOLD_MAX, .flags=FLAGS },
    { "ct",               "set chroma threshold", OFFSET(chroma.threshold), AV_OPT_TYPE_INT, {.i64=THRESHOLD_MIN-1}, THRESHOLD_MIN-1, THRESHOLD_MAX, .flags=FLAGS },

    { NULL }
};

AVFILTER_DEFINE_CLASS(smartblur);
84 
86 {
87  SmartblurContext *s = ctx->priv;
88 
89  /* make chroma default to luma values, if not explicitly set */
90  if (s->chroma.radius < RADIUS_MIN)
91  s->chroma.radius = s->luma.radius;
92  if (s->chroma.strength < STRENGTH_MIN)
93  s->chroma.strength = s->luma.strength;
96 
97  s->luma.quality = s->chroma.quality = 3.0;
99 
100  av_log(ctx, AV_LOG_VERBOSE,
101  "luma_radius:%f luma_strength:%f luma_threshold:%d "
102  "chroma_radius:%f chroma_strength:%f chroma_threshold:%d\n",
103  s->luma.radius, s->luma.strength, s->luma.threshold,
105 
106  return 0;
107 }
108 
110 {
111  SmartblurContext *s = ctx->priv;
112 
115 }
116 
118 {
119  static const enum AVPixelFormat pix_fmts[] = {
125  };
126 
127  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
128  if (!fmts_list)
129  return AVERROR(ENOMEM);
130  return ff_set_common_formats(ctx, fmts_list);
131 }
132 
133 static int alloc_sws_context(FilterParam *f, int width, int height, unsigned int flags)
134 {
135  SwsVector *vec;
136  SwsFilter sws_filter;
137 
138  vec = sws_getGaussianVec(f->radius, f->quality);
139 
140  if (!vec)
141  return AVERROR(EINVAL);
142 
143  sws_scaleVec(vec, f->strength);
144  vec->coeff[vec->length / 2] += 1.0 - f->strength;
145  sws_filter.lumH = sws_filter.lumV = vec;
146  sws_filter.chrH = sws_filter.chrV = NULL;
148  width, height, AV_PIX_FMT_GRAY8,
149  width, height, AV_PIX_FMT_GRAY8,
150  flags, &sws_filter, NULL, NULL);
151 
152  sws_freeVec(vec);
153 
154  if (!f->filter_context)
155  return AVERROR(EINVAL);
156 
157  return 0;
158 }
159 
161 {
162  SmartblurContext *s = inlink->dst->priv;
164 
165  s->hsub = desc->log2_chroma_w;
166  s->vsub = desc->log2_chroma_h;
167 
168  alloc_sws_context(&s->luma, inlink->w, inlink->h, s->sws_flags);
170  AV_CEIL_RSHIFT(inlink->w, s->hsub),
171  AV_CEIL_RSHIFT(inlink->h, s->vsub),
172  s->sws_flags);
173 
174  return 0;
175 }
176 
/**
 * Blur one plane: run the Gaussian convolution through swscale, then,
 * if a non-zero threshold is set, selectively restore pixels so that
 * strong edges are preserved (threshold > 0) or only strong edges are
 * blurred (threshold < 0).
 */
static void blur(uint8_t *dst, const int dst_linesize,
                 const uint8_t *src, const int src_linesize,
                 const int w, const int h, const int threshold,
                 struct SwsContext *filter_context)
{
    int row, col;
    int orig, filtered;
    int delta;
    /* sws_scale() takes 4-entry plane arrays; only plane 0 is used here.
     * Arrays of 4 also keep the data aligned. */
    const uint8_t *const src_planes[4] = { src };
    uint8_t *dst_planes[4]            = { dst };
    int src_strides[4]                = { src_linesize };
    int dst_strides[4]                = { dst_linesize };

    sws_scale(filter_context, src_planes, src_strides,
              0, h, dst_planes, dst_strides);

    if (threshold > 0) {
        /* edge-preserving mode: large differences mean an edge, keep the
         * original pixel; medium differences are limited to 'threshold' */
        for (row = 0; row < h; row++) {
            const uint8_t *src_row = src + row * src_linesize;
            uint8_t       *dst_row = dst + row * dst_linesize;

            for (col = 0; col < w; col++) {
                orig     = src_row[col];
                filtered = dst_row[col];
                delta    = orig - filtered;

                if (delta > 0) {
                    if (delta > 2 * threshold)
                        dst_row[col] = orig;
                    else if (delta > threshold)
                        dst_row[col] = orig - threshold;
                } else {
                    if (-delta > 2 * threshold)
                        dst_row[col] = orig;
                    else if (-delta > threshold)
                        dst_row[col] = orig + threshold;
                }
            }
        }
    } else if (threshold < 0) {
        /* inverse mode: small differences keep the original pixel, only
         * strong deviations take the (offset) filtered value */
        for (row = 0; row < h; row++) {
            const uint8_t *src_row = src + row * src_linesize;
            uint8_t       *dst_row = dst + row * dst_linesize;

            for (col = 0; col < w; col++) {
                orig     = src_row[col];
                filtered = dst_row[col];
                delta    = orig - filtered;

                if (delta > 0) {
                    if (delta <= -threshold)
                        dst_row[col] = orig;
                    else if (delta <= -2 * threshold)
                        dst_row[col] = filtered - threshold;
                } else {
                    if (delta >= threshold)
                        dst_row[col] = orig;
                    else if (delta >= 2 * threshold)
                        dst_row[col] = filtered + threshold;
                }
            }
        }
    }
    /* threshold == 0: plain blur, the sws_scale() output is final */
}
240 
242 {
243  SmartblurContext *s = inlink->dst->priv;
244  AVFilterLink *outlink = inlink->dst->outputs[0];
245  AVFrame *outpic;
246  int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
247  int ch = AV_CEIL_RSHIFT(inlink->h, s->vsub);
248 
249  outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
250  if (!outpic) {
251  av_frame_free(&inpic);
252  return AVERROR(ENOMEM);
253  }
254  av_frame_copy_props(outpic, inpic);
255 
256  blur(outpic->data[0], outpic->linesize[0],
257  inpic->data[0], inpic->linesize[0],
258  inlink->w, inlink->h, s->luma.threshold,
259  s->luma.filter_context);
260 
261  if (inpic->data[2]) {
262  blur(outpic->data[1], outpic->linesize[1],
263  inpic->data[1], inpic->linesize[1],
264  cw, ch, s->chroma.threshold,
266  blur(outpic->data[2], outpic->linesize[2],
267  inpic->data[2], inpic->linesize[2],
268  cw, ch, s->chroma.threshold,
270  }
271 
272  av_frame_free(&inpic);
273  return ff_filter_frame(outlink, outpic);
274 }
275 
/* Single video input; config_props() sizes the swscale contexts once
 * the link dimensions are known. */
static const AVFilterPad smartblur_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};
285 
/* Single video output; no output-side configuration needed. */
static const AVFilterPad smartblur_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
293 
295  .name = "smartblur",
296  .description = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."),
297  .priv_size = sizeof(SmartblurContext),
298  .init = init,
299  .uninit = uninit,
301  .inputs = smartblur_inputs,
302  .outputs = smartblur_outputs,
303  .priv_class = &smartblur_class,
305 };
#define OFFSET(x)
Definition: vf_smartblur.c:62
SwsVector * chrV
Definition: swscale.h:119
#define NULL
Definition: coverity.c:32
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
int radius
Definition: boxblur.h:33
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
#define SWS_BICUBIC
Definition: swscale.h:60
AVOption.
Definition: opt.h:246
AVFILTER_DEFINE_CLASS(smartblur)
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
SwsVector * lumV
Definition: swscale.h:117
Main libavfilter public API header.
const char * desc
Definition: nvenc.c:68
SwsVector * sws_getGaussianVec(double variance, double quality)
Return a normalized Gaussian curve used to filter stuff quality = 3 is high quality, lower is lower quality.
Definition: utils.c:2028
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
#define src
Definition: vp8dsp.c:254
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:125
float strength
Definition: vf_sab.c:37
#define FLAGS
Definition: vf_smartblur.c:63
const char * name
Pad name.
Definition: internal.h:60
FilterParam chroma
Definition: vf_smartblur.c:56
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
uint8_t
#define av_cold
Definition: attributes.h:82
int length
number of coefficients in the vector
Definition: swscale.h:111
AVOptions.
#define f(width, name)
Definition: cbs_vp9.c:255
av_frame_free & inpic
Definition: vf_mcdeint.c:278
#define THRESHOLD_MIN
Definition: vf_smartblur.c:42
#define height
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
external API header
unsigned int sws_flags
Definition: vf_smartblur.c:59
#define av_log(a,...)
struct SwsContext * filter_context
Definition: vf_smartblur.c:50
A filter pad used for either input or output.
Definition: internal.h:54
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:569
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
static av_always_inline void chroma(WaveformContext *s, AVFrame *in, AVFrame *out, int component, int intensity, int offset_y, int offset_x, int column, int mirror, int jobnr, int nb_jobs)
Definition: vf_waveform.c:1542
static int config_props(AVFilterLink *inlink)
Definition: vf_smartblur.c:160
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
void sws_scaleVec(SwsVector *a, double scalar)
Scale all the coefficients of a by the scalar value.
Definition: utils.c:2098
SwsVector * lumH
Definition: swscale.h:116
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2371
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
static int alloc_sws_context(FilterParam *f, int width, int height, unsigned int flags)
Definition: vf_smartblur.c:133
#define STRENGTH_MAX
Definition: vf_smartblur.c:40
float quality
Definition: vf_sab.c:38
SwsVector * chrH
Definition: swscale.h:118
#define width
uint8_t w
Definition: llviddspenc.c:38
AVFormatContext * ctx
Definition: movenc.c:48
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2311
#define s(width, name)
Definition: cbs_vp9.c:257
static int query_formats(AVFilterContext *ctx)
Definition: vf_smartblur.c:117
static const AVOption smartblur_options[]
Definition: vf_smartblur.c:65
#define RADIUS_MIN
Definition: vf_smartblur.c:36
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
FilterParam luma
Definition: vf_smartblur.c:55
double * coeff
pointer to the list of coefficients
Definition: swscale.h:110
#define THRESHOLD_MAX
Definition: vf_smartblur.c:43
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
static const AVFilterPad smartblur_inputs[]
Definition: vf_smartblur.c:276
void sws_freeVec(SwsVector *a)
Definition: utils.c:2290
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don&#39;t need to export the SwsContext.
Definition: swscale.c:753
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_smartblur.c:109
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
const char * name
Filter name.
Definition: avfilter.h:148
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
#define flags(name, subs,...)
Definition: cbs_av1.c:561
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
AVFilter ff_vf_smartblur
Definition: vf_smartblur.c:294
#define RADIUS_MAX
Definition: vf_smartblur.c:37
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
static void blur(uint8_t *dst, const int dst_linesize, const uint8_t *src, const int src_linesize, const int w, const int h, const int threshold, struct SwsContext *filter_context)
Definition: vf_smartblur.c:177
static av_always_inline int diff(const uint32_t a, const uint32_t b)
A list of supported formats for one end of a filter link.
Definition: formats.h:64
#define STRENGTH_MIN
Definition: vf_smartblur.c:39
An instance of a filter.
Definition: avfilter.h:338
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
static const AVFilterPad smartblur_outputs[]
Definition: vf_smartblur.c:286
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
Definition: vf_smartblur.c:241
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
static av_cold int init(AVFilterContext *ctx)
Definition: vf_smartblur.c:85