#ifndef AVFILTER_GBLUR_INIT_H
#define AVFILTER_GBLUR_INIT_H

#include "config.h"
#include "libavutil/attributes.h"
#include "gblur.h"

static void postscale_c(float *buffer, int length,
                        float postscale, float min, float max)
{
    for (int i = 0; i < length; i++) {
        buffer[i] *= postscale;
        buffer[i] = av_clipf(buffer[i], min, max);
    }
}

static void horiz_slice_c(float *buffer, int width, int height, int steps,
                          float nu, float bscale, float *localbuf)
{
    int x;

    /* localbuf is scratch space for the SIMD versions; the C path ignores it. */
    for (int y = 0; y < height; y++) {
        for (int step = 0; step < steps; step++) {
            float *ptr = buffer + width * y;

            ptr[0] *= bscale;
            /* Filter rightwards (causal pass) */
            for (x = 1; x < width; x++)
                ptr[x] += nu * ptr[x - 1];
            ptr[x = width - 1] *= bscale;
            /* Filter leftwards (anti-causal pass) */
            for (; x > 0; x--)
                ptr[x - 1] += nu * ptr[x];
        }
    }
}

static void do_vertical_columns(float *buffer, int width, int height,
                                int column_begin, int column_end, int steps,
                                float nu, float boundaryscale, int column_step)
{
    const int numpixels = width * height;
    int i;

    for (int x = column_begin; x < column_end;) {
        for (int step = 0; step < steps; step++) {
            float *ptr = buffer + x;

            for (int k = 0; k < column_step; k++)
                ptr[k] *= boundaryscale;

            /* Filter downwards (causal pass) */
            for (i = width; i < numpixels; i += width)
                for (int k = 0; k < column_step; k++)
                    ptr[i + k] += nu * ptr[i - width + k];

            i = numpixels - width;

            for (int k = 0; k < column_step; k++)
                ptr[i + k] *= boundaryscale;

            /* Filter upwards (anti-causal pass) */
            for (; i > 0; i -= width)
                for (int k = 0; k < column_step; k++)
                    ptr[i - width + k] += nu * ptr[i + k];
        }
        x += column_step;
    }
}

static void verti_slice_c(float *buffer, int width, int height,
                          int slice_start, int slice_end, int steps,
                          float nu, float boundaryscale)
{
    int aligned_end = slice_start + (((slice_end - slice_start) >> 3) << 3);

    /* Filter vertically along columns, 8 columns at a time for better
     * cache locality, then the remaining columns one at a time. */
    do_vertical_columns(buffer, width, height, slice_start, aligned_end,
                        steps, nu, boundaryscale, 8);
    do_vertical_columns(buffer, width, height, aligned_end, slice_end,
                        steps, nu, boundaryscale, 1);
}

static av_unused void ff_gblur_init(GBlurContext *s)
{
    s->horiz_slice = horiz_slice_c;
    s->verti_slice = verti_slice_c;
    s->postscale_slice = postscale_c;
#if ARCH_X86
    ff_gblur_init_x86(s);
#endif
}

#endif /* AVFILTER_GBLUR_INIT_H */
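The C kernels above implement a recursive (IIR) approximation of Gaussian blur in the style of Alvarez-Mazorra: each pass scales the boundary sample by boundaryscale and then accumulates nu times the neighbouring sample, first in one direction and then back. The coefficients themselves are not computed in this header; the gblur filter derives them from the requested sigma and the number of steps. The following is a minimal sketch of that derivation for the case where the same sigma is used horizontally and vertically; the helper name gblur_set_params_sketch is illustrative and not taken from this file, only the formulas are the point.

#include <math.h>

/* Sketch: derive nu, boundaryscale and postscale for an Alvarez-Mazorra
 * style recursive Gaussian of standard deviation `sigma` applied in
 * `steps` forward/backward passes per direction. */
static void gblur_set_params_sketch(float sigma, int steps,
                                    float *postscale, float *boundaryscale,
                                    float *nu)
{
    double lambda = (sigma * sigma) / (2.0 * steps);
    double dnu    = (1.0 + 2.0 * lambda - sqrt(1.0 + 4.0 * lambda)) /
                    (2.0 * lambda);

    *nu            = (float)dnu;                   /* feedback coefficient of each pass        */
    *boundaryscale = 1.0 / (1.0 - dnu);            /* approximates constant boundary extension */
    *postscale     = pow(dnu / lambda, 2 * steps); /* undoes the DC gain of all horizontal and
                                                      vertical passes (same sigma both ways)   */
}

Each causal pass has DC gain 1/(1 - nu), and since (1 - nu)^2 = nu/lambda, a forward/backward pair per dimension contributes lambda/nu; postscale cancels that accumulated gain in a single multiplication at the end.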
Functions defined in or referenced by this header:

static void verti_slice_c(float *buffer, int width, int height, int slice_start, int slice_end, int steps, float nu, float boundaryscale)
static void do_vertical_columns(float *buffer, int width, int height, int column_begin, int column_end, int steps, float nu, float boundaryscale, int column_step)
static void postscale_c(float *buffer, int length, float postscale, float min, float max)
void ff_gblur_init_x86(GBlurContext *s)
static void horiz_slice_c(float *buffer, int width, int height, int steps, float nu, float bscale, float *localbuf)
static av_unused void ff_gblur_init(GBlurContext *s)
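To show how these pieces fit together, here is a hedged sketch of blurring a single width x height float plane in place. The helper name blur_plane_sketch, the 0..255 clip range and the single-threaded calls are illustrative assumptions; gblur_set_params_sketch refers to the derivation sketch above, and the real filter instead drives the slice callbacks per thread slice through GBlurContext.

/* Sketch: blur one float plane in place with the C kernels above.
 * Assumes gblur_init.h and gblur_set_params_sketch() are in scope. */
static void blur_plane_sketch(float *plane, int width, int height,
                              float sigma, int steps)
{
    float postscale, boundaryscale, nu;

    gblur_set_params_sketch(sigma, steps, &postscale, &boundaryscale, &nu);

    /* Horizontal IIR passes over all rows; NULL is fine for localbuf,
     * which only the SIMD paths use. */
    horiz_slice_c(plane, width, height, steps, nu, boundaryscale, NULL);

    /* Vertical IIR passes over all columns (slice covers columns 0..width). */
    verti_slice_c(plane, width, height, 0, width, steps, nu, boundaryscale);

    /* Normalize the accumulated gain and clip; the 0..255 range assumes
     * 8-bit content copied into the float buffer. */
    postscale_c(plane, width * height, postscale, 0.f, 255.f);
}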