FFmpeg — libavfilter/vsrc_mandelbrot.c (Mandelbrot fractal video source)
1 /*
2  * Copyright (c) 2011 Michael Niedermayer
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  *
20  * The vsrc_color filter from Stefano Sabatini was used as template to create
21  * this
22  */
23 
24 /**
25  * @file
26  * Mandelbrot fractal renderer
27  */
28 
29 #include "avfilter.h"
30 #include "formats.h"
31 #include "video.h"
32 #include "internal.h"
33 #include "libavutil/imgutils.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include <float.h>
37 #include <math.h>
38 
39 #define SQR(a) ((a)*(a))
40 
/**
 * Outer (escaped-orbit) coloring modes, selected by the "outer" option.
 * NOTE(review): the enumerator list was lost in extraction; restored from
 * the constants referenced by mandelbrot_options and draw_mandelbrot().
 */
enum Outer{
    ITERATION_COUNT,            ///< color from raw escape iteration count
    NORMALIZED_ITERATION_COUNT, ///< smooth/normalized iteration count
    WHITE,                      ///< plain white outside the set
    OUTZ,                       ///< color from the escaping z value
};
47 
/**
 * Inner (non-escaping orbit) coloring modes, selected by the "inner" option.
 * NOTE(review): the enumerator list was lost in extraction; restored from
 * the constants referenced by mandelbrot_options and draw_mandelbrot().
 */
enum Inner{
    BLACK,    ///< plain black inside the set
    PERIOD,   ///< color from the detected orbit period
    CONVTIME, ///< gray level from time until convergence
    MINCOL,   ///< color from the orbit point closest to the origin
};
54 
/**
 * One cached pixel sample: its position in the complex plane and the
 * packed color that was computed for it.  Reused across frames while
 * zooming so unchanged pixels need not be re-iterated.
 */
typedef struct Point {
    double p[2];  ///< complex-plane coordinates: p[0] = re, p[1] = im
    uint32_t val; ///< packed 0xAARRGGBB color for this point
} Point;
59 
60 typedef struct MBContext {
61  const AVClass *class;
62  int w, h;
64  uint64_t pts;
65  int maxiter;
66  double start_x;
67  double start_y;
68  double start_scale;
69  double end_scale;
70  double end_pts;
71  double bailout;
72  int outer;
73  int inner;
78  double (*zyklus)[2];
79  uint32_t dither;
80 
81  double morphxf;
82  double morphyf;
83  double morphamp;
84 } MBContext;
85 
86 #define OFFSET(x) offsetof(MBContext, x)
87 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
88 
89 static const AVOption mandelbrot_options[] = {
90  {"size", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS },
91  {"s", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, CHAR_MIN, CHAR_MAX, FLAGS },
92  {"rate", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
93  {"r", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, CHAR_MIN, CHAR_MAX, FLAGS },
94  {"maxiter", "set max iterations number", OFFSET(maxiter), AV_OPT_TYPE_INT, {.i64=7189}, 1, INT_MAX, FLAGS },
95  {"start_x", "set the initial x position", OFFSET(start_x), AV_OPT_TYPE_DOUBLE, {.dbl=-0.743643887037158704752191506114774}, -100, 100, FLAGS },
96  {"start_y", "set the initial y position", OFFSET(start_y), AV_OPT_TYPE_DOUBLE, {.dbl=-0.131825904205311970493132056385139}, -100, 100, FLAGS },
97  {"start_scale", "set the initial scale value", OFFSET(start_scale), AV_OPT_TYPE_DOUBLE, {.dbl=3.0}, 0, FLT_MAX, FLAGS },
98  {"end_scale", "set the terminal scale value", OFFSET(end_scale), AV_OPT_TYPE_DOUBLE, {.dbl=0.3}, 0, FLT_MAX, FLAGS },
99  {"end_pts", "set the terminal pts value", OFFSET(end_pts), AV_OPT_TYPE_DOUBLE, {.dbl=400}, 0, INT64_MAX, FLAGS },
100  {"bailout", "set the bailout value", OFFSET(bailout), AV_OPT_TYPE_DOUBLE, {.dbl=10}, 0, FLT_MAX, FLAGS },
101  {"morphxf", "set morph x frequency", OFFSET(morphxf), AV_OPT_TYPE_DOUBLE, {.dbl=0.01}, -FLT_MAX, FLT_MAX, FLAGS },
102  {"morphyf", "set morph y frequency", OFFSET(morphyf), AV_OPT_TYPE_DOUBLE, {.dbl=0.0123}, -FLT_MAX, FLT_MAX, FLAGS },
103  {"morphamp", "set morph amplitude", OFFSET(morphamp), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -FLT_MAX, FLT_MAX, FLAGS },
104 
105  {"outer", "set outer coloring mode", OFFSET(outer), AV_OPT_TYPE_INT, {.i64=NORMALIZED_ITERATION_COUNT}, 0, INT_MAX, FLAGS, "outer" },
106  {"iteration_count", "set iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" },
107  {"normalized_iteration_count", "set normalized iteration count mode", 0, AV_OPT_TYPE_CONST, {.i64=NORMALIZED_ITERATION_COUNT}, INT_MIN, INT_MAX, FLAGS, "outer" },
108  {"white", "set white mode", 0, AV_OPT_TYPE_CONST, {.i64=WHITE}, INT_MIN, INT_MAX, FLAGS, "outer" },
109  {"outz", "set outz mode", 0, AV_OPT_TYPE_CONST, {.i64=OUTZ}, INT_MIN, INT_MAX, FLAGS, "outer" },
110 
111  {"inner", "set inner coloring mode", OFFSET(inner), AV_OPT_TYPE_INT, {.i64=MINCOL}, 0, INT_MAX, FLAGS, "inner" },
112  {"black", "set black mode", 0, AV_OPT_TYPE_CONST, {.i64=BLACK}, INT_MIN, INT_MAX, FLAGS, "inner"},
113  {"period", "set period mode", 0, AV_OPT_TYPE_CONST, {.i64=PERIOD}, INT_MIN, INT_MAX, FLAGS, "inner"},
114  {"convergence", "show time until convergence", 0, AV_OPT_TYPE_CONST, {.i64=CONVTIME}, INT_MIN, INT_MAX, FLAGS, "inner"},
115  {"mincol", "color based on point closest to the origin of the iterations", 0, AV_OPT_TYPE_CONST, {.i64=MINCOL}, INT_MIN, INT_MAX, FLAGS, "inner"},
116 
117  {NULL},
118 };
119 
120 AVFILTER_DEFINE_CLASS(mandelbrot);
121 
123 {
124  MBContext *s = ctx->priv;
125 
126  s->bailout *= s->bailout;
127 
128  s->start_scale /=s->h;
129  s->end_scale /=s->h;
130 
131  s->cache_allocated = s->w * s->h * 3;
132  s->cache_used = 0;
134  s-> next_cache= av_malloc_array(s->cache_allocated, sizeof(*s-> next_cache));
135  s-> zyklus = av_malloc_array(s->maxiter + 16, sizeof(*s->zyklus));
136 
137  return 0;
138 }
139 
141 {
142  MBContext *s = ctx->priv;
143 
144  av_freep(&s->point_cache);
145  av_freep(&s-> next_cache);
146  av_freep(&s->zyklus);
147 }
148 
150 {
151  static const enum AVPixelFormat pix_fmts[] = {
154  };
155 
156  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
157  if (!fmts_list)
158  return AVERROR(ENOMEM);
159  return ff_set_common_formats(ctx, fmts_list);
160 }
161 
163 {
164  AVFilterContext *ctx = inlink->src;
165  MBContext *s = ctx->priv;
166 
167  if (av_image_check_size(s->w, s->h, 0, ctx) < 0)
168  return AVERROR(EINVAL);
169 
170  inlink->w = s->w;
171  inlink->h = s->h;
172  inlink->time_base = av_inv_q(s->frame_rate);
173 
174  return 0;
175 }
176 
177 static void fill_from_cache(AVFilterContext *ctx, uint32_t *color, int *in_cidx, int *out_cidx, double py, double scale){
178  MBContext *s = ctx->priv;
179  if(s->morphamp)
180  return;
181  for(; *in_cidx < s->cache_used; (*in_cidx)++){
182  Point *p= &s->point_cache[*in_cidx];
183  int x;
184  if(p->p[1] > py)
185  break;
186  x= lrint((p->p[0] - s->start_x) / scale + s->w/2);
187  if(x<0 || x >= s->w)
188  continue;
189  if(color) color[x] = p->val;
190  if(out_cidx && *out_cidx < s->cache_allocated)
191  s->next_cache[(*out_cidx)++]= *p;
192  }
193 }
194 
195 static int interpol(MBContext *s, uint32_t *color, int x, int y, int linesize)
196 {
197  uint32_t a,b,c,d, i;
198  uint32_t ipol=0xFF000000;
199  int dist;
200 
201  if(!x || !y || x+1==s->w || y+1==s->h)
202  return 0;
203 
204  dist= FFMAX(FFABS(x-(s->w>>1))*s->h, FFABS(y-(s->h>>1))*s->w);
205 
206  if(dist<(s->w*s->h>>3))
207  return 0;
208 
209  a=color[(x+1) + (y+0)*linesize];
210  b=color[(x-1) + (y+1)*linesize];
211  c=color[(x+0) + (y+1)*linesize];
212  d=color[(x+1) + (y+1)*linesize];
213 
214  if(a&&c){
215  b= color[(x-1) + (y+0)*linesize];
216  d= color[(x+0) + (y-1)*linesize];
217  }else if(b&&d){
218  a= color[(x+1) + (y-1)*linesize];
219  c= color[(x-1) + (y-1)*linesize];
220  }else if(c){
221  d= color[(x+0) + (y-1)*linesize];
222  a= color[(x-1) + (y+0)*linesize];
223  b= color[(x+1) + (y-1)*linesize];
224  }else if(d){
225  c= color[(x-1) + (y-1)*linesize];
226  a= color[(x-1) + (y+0)*linesize];
227  b= color[(x+1) + (y-1)*linesize];
228  }else
229  return 0;
230 
231  for(i=0; i<3; i++){
232  int s= 8*i;
233  uint8_t ac= a>>s;
234  uint8_t bc= b>>s;
235  uint8_t cc= c>>s;
236  uint8_t dc= d>>s;
237  int ipolab= (ac + bc);
238  int ipolcd= (cc + dc);
239  if(FFABS(ipolab - ipolcd) > 5)
240  return 0;
241  if(FFABS(ac-bc)+FFABS(cc-dc) > 20)
242  return 0;
243  ipol |= ((ipolab + ipolcd + 2)/4)<<s;
244  }
245  color[x + y*linesize]= ipol;
246  return 1;
247 }
248 
249 static void draw_mandelbrot(AVFilterContext *ctx, uint32_t *color, int linesize, int64_t pts)
250 {
251  MBContext *s = ctx->priv;
252  int x,y,i, in_cidx=0, next_cidx=0, tmp_cidx;
253  double scale= s->start_scale*pow(s->end_scale/s->start_scale, pts/s->end_pts);
254  int use_zyklus=0;
255  fill_from_cache(ctx, NULL, &in_cidx, NULL, s->start_y+scale*(-s->h/2-0.5), scale);
256  tmp_cidx= in_cidx;
257  memset(color, 0, sizeof(*color)*s->w);
258  for(y=0; y<s->h; y++){
259  int y1= y+1;
260  const double ci=s->start_y+scale*(y-s->h/2);
261  fill_from_cache(ctx, NULL, &in_cidx, &next_cidx, ci, scale);
262  if(y1<s->h){
263  memset(color+linesize*y1, 0, sizeof(*color)*s->w);
264  fill_from_cache(ctx, color+linesize*y1, &tmp_cidx, NULL, ci + 3*scale/2, scale);
265  }
266 
267  for(x=0; x<s->w; x++){
268  float av_uninit(epsilon);
269  const double cr=s->start_x+scale*(x-s->w/2);
270  double zr=cr;
271  double zi=ci;
272  uint32_t c=0;
273  double dv= s->dither / (double)(1LL<<32);
274  s->dither= s->dither*1664525+1013904223;
275 
276  if(color[x + y*linesize] & 0xFF000000)
277  continue;
278  if(!s->morphamp){
279  if(interpol(s, color, x, y, linesize)){
280  if(next_cidx < s->cache_allocated){
281  s->next_cache[next_cidx ].p[0]= cr;
282  s->next_cache[next_cidx ].p[1]= ci;
283  s->next_cache[next_cidx++].val = color[x + y*linesize];
284  }
285  continue;
286  }
287  }else{
288  zr += cos(pts * s->morphxf) * s->morphamp;
289  zi += sin(pts * s->morphyf) * s->morphamp;
290  }
291 
292  use_zyklus= (x==0 || s->inner!=BLACK ||color[x-1 + y*linesize] == 0xFF000000);
293  if(use_zyklus)
294  epsilon= scale*(abs(x-s->w/2) + abs(y-s->h/2))/s->w;
295 
296 #define Z_Z2_C(outr,outi,inr,ini)\
297  outr= inr*inr - ini*ini + cr;\
298  outi= 2*inr*ini + ci;
299 
300 #define Z_Z2_C_ZYKLUS(outr,outi,inr,ini, Z)\
301  Z_Z2_C(outr,outi,inr,ini)\
302  if(use_zyklus){\
303  if(Z && fabs(s->zyklus[i>>1][0]-outr)+fabs(s->zyklus[i>>1][1]-outi) <= epsilon)\
304  break;\
305  }\
306  s->zyklus[i][0]= outr;\
307  s->zyklus[i][1]= outi;\
308 
309 
310 
311  for(i=0; i<s->maxiter-8; i++){
312  double t;
313  Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
314  i++;
315  Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
316  i++;
317  Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
318  i++;
319  Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
320  i++;
321  Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
322  i++;
323  Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
324  i++;
325  Z_Z2_C_ZYKLUS(t, zi, zr, zi, 0)
326  i++;
327  Z_Z2_C_ZYKLUS(zr, zi, t, zi, 1)
328  if(zr*zr + zi*zi > s->bailout){
329  i-= FFMIN(7, i);
330  for(; i<s->maxiter; i++){
331  zr= s->zyklus[i][0];
332  zi= s->zyklus[i][1];
333  if(zr*zr + zi*zi > s->bailout){
334  switch(s->outer){
335  case ITERATION_COUNT:
336  zr = i;
337  c = lrintf((sinf(zr)+1)*127) + lrintf((sinf(zr/1.234)+1)*127)*256*256 + lrintf((sinf(zr/100)+1)*127)*256;
338  break;
340  zr = i + log2(log(s->bailout) / log(zr*zr + zi*zi));
341  c = lrintf((sinf(zr)+1)*127) + lrintf((sinf(zr/1.234)+1)*127)*256*256 + lrintf((sinf(zr/100)+1)*127)*256;
342  break;
343  case WHITE:
344  c = 0xFFFFFF;
345  break;
346  case OUTZ:
347  zr /= s->bailout;
348  zi /= s->bailout;
349  c = (((int)(zr*128+128))&0xFF)*256 + (((int)(zi*128+128))&0xFF);
350  }
351  break;
352  }
353  }
354  break;
355  }
356  }
357  if(!c){
358  if(s->inner==PERIOD){
359  int j;
360  for(j=i-1; j; j--)
361  if(SQR(s->zyklus[j][0]-zr) + SQR(s->zyklus[j][1]-zi) < epsilon*epsilon*10)
362  break;
363  if(j){
364  c= i-j;
365  c= ((c<<5)&0xE0) + ((c<<10)&0xE000) + ((c<<15)&0xE00000);
366  }
367  }else if(s->inner==CONVTIME){
368  c= floor(i*255.0/s->maxiter+dv)*0x010101;
369  } else if(s->inner==MINCOL){
370  int j;
371  double closest=9999;
372  int closest_index=0;
373  for(j=i-1; j>=0; j--)
374  if(SQR(s->zyklus[j][0]) + SQR(s->zyklus[j][1]) < closest){
375  closest= SQR(s->zyklus[j][0]) + SQR(s->zyklus[j][1]);
376  closest_index= j;
377  }
378  closest = sqrt(closest);
379  c= lrintf((s->zyklus[closest_index][0]/closest+1)*127+dv) + lrintf((s->zyklus[closest_index][1]/closest+1)*127+dv)*256;
380  }
381  }
382  c |= 0xFF000000;
383  color[x + y*linesize]= c;
384  if(next_cidx < s->cache_allocated){
385  s->next_cache[next_cidx ].p[0]= cr;
386  s->next_cache[next_cidx ].p[1]= ci;
387  s->next_cache[next_cidx++].val = c;
388  }
389  }
390  fill_from_cache(ctx, NULL, &in_cidx, &next_cidx, ci + scale/2, scale);
391  }
392  FFSWAP(void*, s->next_cache, s->point_cache);
393  s->cache_used = next_cidx;
394  if(s->cache_used == s->cache_allocated)
395  av_log(ctx, AV_LOG_INFO, "Mandelbrot cache is too small!\n");
396 }
397 
399 {
400  MBContext *s = link->src->priv;
401  AVFrame *picref = ff_get_video_buffer(link, s->w, s->h);
402  if (!picref)
403  return AVERROR(ENOMEM);
404 
405  picref->sample_aspect_ratio = (AVRational) {1, 1};
406  picref->pts = s->pts++;
407 
408  draw_mandelbrot(link->src, (uint32_t*)picref->data[0], picref->linesize[0]/4, picref->pts);
409  return ff_filter_frame(link, picref);
410 }
411 
412 static const AVFilterPad mandelbrot_outputs[] = {
413  {
414  .name = "default",
415  .type = AVMEDIA_TYPE_VIDEO,
416  .request_frame = request_frame,
417  .config_props = config_props,
418  },
419  { NULL }
420 };
421 
423  .name = "mandelbrot",
424  .description = NULL_IF_CONFIG_SMALL("Render a Mandelbrot fractal."),
425  .priv_size = sizeof(MBContext),
426  .priv_class = &mandelbrot_class,
427  .init = init,
428  .uninit = uninit,
430  .inputs = NULL,
431  .outputs = mandelbrot_outputs,
432 };
#define NULL
Definition: coverity.c:32
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AVOption.
Definition: opt.h:246
static int query_formats(AVFilterContext *ctx)
AVFilter ff_vsrc_mandelbrot
static av_cold int init(AVFilterContext *ctx)
double p[2]
uint64_t pts
misc image utilities
uint8_t y
Definition: signature.h:56
Main libavfilter public API header.
double start_y
static int interpol(MBContext *s, uint32_t *color, int x, int y, int linesize)
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
static const AVOption mandelbrot_options[]
static av_cold void uninit(AVFilterContext *ctx)
#define FLAGS
#define log2(x)
Definition: libm.h:404
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
double(* zyklus)[2]
const char * name
Pad name.
Definition: internal.h:60
double morphamp
#define Z_Z2_C(outr, outi, inr, ini)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
uint8_t
static void draw_mandelbrot(AVFilterContext *ctx, uint32_t *color, int linesize, int64_t pts)
#define av_cold
Definition: attributes.h:82
AVOptions.
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
Inner
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
double end_pts
#define lrintf(x)
Definition: libm_mips.h:70
static const AVFilterPad mandelbrot_outputs[]
#define av_log(a,...)
A filter pad used for either input or output.
Definition: internal.h:54
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
#define OFFSET(x)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
double morphyf
void * priv
private data for use by the filter
Definition: avfilter.h:353
uint8_t x
Definition: signature.h:55
#define Z_Z2_C_ZYKLUS(outr, outi, inr, ini, Z)
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:365
#define FFMAX(a, b)
Definition: common.h:94
Outer
Point * point_cache
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
#define b
Definition: input.c:41
#define FFMIN(a, b)
Definition: common.h:96
uint8_t w
Definition: llviddspenc.c:38
AVFormatContext * ctx
Definition: movenc.c:48
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
double morphxf
static void fill_from_cache(AVFilterContext *ctx, uint32_t *color, int *in_cidx, int *out_cidx, double py, double scale)
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
static int request_frame(AVFilterLink *link)
#define sinf(x)
Definition: libm.h:419
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
#define abs(x)
Definition: cuda_runtime.h:35
double start_scale
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
double end_scale
double start_x
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:383
#define SQR(a)
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
Rational number (pair of numerator and denominator).
Definition: rational.h:58
offset must point to AVRational
Definition: opt.h:236
const char * name
Filter name.
Definition: avfilter.h:148
double bailout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
offset must point to two consecutive integers
Definition: opt.h:233
misc parsing utilities
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
static int64_t pts
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVRational frame_rate
int
Point * next_cache
uint32_t val
AVFILTER_DEFINE_CLASS(mandelbrot)
A list of supported formats for one end of a filter link.
Definition: formats.h:64
#define lrint
Definition: tablegen.h:53
An instance of a filter.
Definition: avfilter.h:338
#define av_uninit(x)
Definition: attributes.h:148
static int ipol(uint8_t *src, int x, int y)
Definition: rotozoom.c:65
#define av_freep(p)
#define av_malloc_array(a, b)
#define FFSWAP(type, a, b)
Definition: common.h:99
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:113
uint32_t dither
static int config_props(AVFilterLink *inlink)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int cache_allocated