/* FFmpeg — libavfilter/vsrc_sierpinski.c */
1 /*
2  * Copyright (c) 2019 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Sierpinski carpet fractal renderer
24  */
25 
26 #include "avfilter.h"
27 #include "formats.h"
28 #include "video.h"
29 #include "internal.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/intreadwrite.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/parseutils.h"
34 #include "libavutil/lfg.h"
35 #include "libavutil/random_seed.h"
36 #include <float.h>
37 #include <math.h>
38 
39 typedef struct SierpinskiContext {
40  const AVClass *class;
41  int w, h;
42  int type;
44  uint64_t pts;
45 
46  unsigned int seed;
47  int jump;
48 
49  int pos_x, pos_y;
50  int dest_x, dest_y;
51 
53  int (*draw_slice)(AVFilterContext *ctx, void *arg, int job, int nb_jobs);
55 
56 #define OFFSET(x) offsetof(SierpinskiContext, x)
57 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
58 
59 static const AVOption sierpinski_options[] = {
60  {"size", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, 0, 0, FLAGS },
61  {"s", "set frame size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="640x480"}, 0, 0, FLAGS },
62  {"rate", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, 0, FLAGS },
63  {"r", "set frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, 0, FLAGS },
64  {"seed", "set the seed", OFFSET(seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, UINT32_MAX, FLAGS },
65  {"jump", "set the jump", OFFSET(jump), AV_OPT_TYPE_INT, {.i64=100}, 1, 10000, FLAGS },
66  {"type","set fractal type",OFFSET(type), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "type" },
67  {"carpet", "sierpinksi carpet", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "type" },
68  {"triangle", "sierpinksi triangle", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "type" },
69  {NULL},
70 };
71 
72 AVFILTER_DEFINE_CLASS(sierpinski);
73 
75 {
76  static const enum AVPixelFormat pix_fmts[] = {
79  };
80 
81  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
82  if (!fmts_list)
83  return AVERROR(ENOMEM);
84  return ff_set_common_formats(ctx, fmts_list);
85 }
86 
87 static int fill_sierpinski(SierpinskiContext *s, int x, int y)
88 {
89  int pos_x = x + s->pos_x;
90  int pos_y = y + s->pos_y;
91 
92  while (pos_x != 0 && pos_y != 0) {
93  if (FFABS(pos_x % 3) == 1 && FFABS(pos_y % 3) == 1)
94  return 1;
95 
96  pos_x /= 3;
97  pos_y /= 3;
98  }
99 
100  return 0;
101 }
102 
103 static int draw_triangle_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
104 {
105  SierpinskiContext *s = ctx->priv;
106  AVFrame *frame = arg;
107  const int width = frame->width;
108  const int height = frame->height;
109  const int start = (height * job ) / nb_jobs;
110  const int end = (height * (job+1)) / nb_jobs;
111  uint8_t *dst = frame->data[0] + start * frame->linesize[0];
112 
113  for (int y = start; y < end; y++) {
114  for (int x = 0; x < width; x++) {
115  if ((s->pos_x + x) & (s->pos_y + y)) {
116  AV_WL32(&dst[x*4], 0x00000000);
117  } else {
118  AV_WL32(&dst[x*4], 0xFFFFFFFF);
119  }
120  }
121 
122  dst += frame->linesize[0];
123  }
124 
125  return 0;
126 }
127 
128 static int draw_carpet_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
129 {
130  SierpinskiContext *s = ctx->priv;
131  AVFrame *frame = arg;
132  const int width = frame->width;
133  const int height = frame->height;
134  const int start = (height * job ) / nb_jobs;
135  const int end = (height * (job+1)) / nb_jobs;
136  uint8_t *dst = frame->data[0] + start * frame->linesize[0];
137 
138  for (int y = start; y < end; y++) {
139  for (int x = 0; x < width; x++) {
140  if (fill_sierpinski(s, x, y)) {
141  AV_WL32(&dst[x*4], 0x00000000);
142  } else {
143  AV_WL32(&dst[x*4], 0xFFFFFFFF);
144  }
145  }
146 
147  dst += frame->linesize[0];
148  }
149 
150  return 0;
151 }
152 
154 {
155  AVFilterContext *ctx = inlink->src;
156  SierpinskiContext *s = ctx->priv;
157 
158  if (av_image_check_size(s->w, s->h, 0, ctx) < 0)
159  return AVERROR(EINVAL);
160 
161  inlink->w = s->w;
162  inlink->h = s->h;
163  inlink->time_base = av_inv_q(s->frame_rate);
164  inlink->sample_aspect_ratio = (AVRational) {1, 1};
165  if (s->seed == -1)
166  s->seed = av_get_random_seed();
167  av_lfg_init(&s->lfg, s->seed);
168 
170 
171  return 0;
172 }
173 
175 {
176  SierpinskiContext *s = ctx->priv;
177  AVFilterLink *outlink = ctx->outputs[0];
178 
179  if (s->pos_x == s->dest_x && s->pos_y == s->dest_y) {
180  unsigned int rnd = av_lfg_get(&s->lfg);
181  int mod = 2 * s->jump + 1;
182 
183  s->dest_x += (int)((rnd & 0xffff) % mod) - s->jump;
184  s->dest_y += (int)((rnd >> 16) % mod) - s->jump;
185  } else {
186  if (s->pos_x < s->dest_x)
187  s->pos_x++;
188  else if (s->pos_x > s->dest_x)
189  s->pos_x--;
190 
191  if (s->pos_y < s->dest_y)
192  s->pos_y++;
193  else if (s->pos_y > s->dest_y)
194  s->pos_y--;
195  }
196 
197  ctx->internal->execute(ctx, s->draw_slice, frame, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
198 }
199 
201 {
202  SierpinskiContext *s = link->src->priv;
203  AVFrame *frame = ff_get_video_buffer(link, s->w, s->h);
204 
205  if (!frame)
206  return AVERROR(ENOMEM);
207 
208  frame->sample_aspect_ratio = (AVRational) {1, 1};
209  frame->pts = s->pts++;
210 
211  draw_sierpinski(link->src, frame);
212 
213  return ff_filter_frame(link, frame);
214 }
215 
/* Single video output pad; frames are produced on demand via request_frame. */
static const AVFilterPad sierpinski_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = sierpinski_request_frame,
        .config_props  = config_output,
    },
    { NULL }  /* sentinel terminating the pad list */
};
225 
227  .name = "sierpinski",
228  .description = NULL_IF_CONFIG_SMALL("Render a Sierpinski fractal."),
229  .priv_size = sizeof(SierpinskiContext),
230  .priv_class = &sierpinski_class,
232  .inputs = NULL,
233  .outputs = sierpinski_outputs,
235 };
Context structure for the Lagged Fibonacci PRNG.
Definition: lfg.h:33
#define NULL
Definition: coverity.c:32
AVRational frame_rate
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AVOption.
Definition: opt.h:246
misc image utilities
Main libavfilter public API header.
static int draw_carpet_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
AVFilter ff_vsrc_sierpinski
GLint GLenum type
Definition: opengl_enc.c:104
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
static int config_output(AVFilterLink *inlink)
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
const char * name
Pad name.
Definition: internal.h:60
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1093
uint8_t
AVOptions.
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int(* draw_slice)(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
static int sierpinski_request_frame(AVFilterLink *link)
static int draw_triangle_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
#define height
unsigned int seed
A filter pad used for either input or output.
Definition: internal.h:54
int width
Definition: frame.h:353
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:569
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
#define OFFSET(x)
const char * arg
Definition: jacosubdec.c:66
static const AVFilterPad sierpinski_outputs[]
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:365
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
#define FFMIN(a, b)
Definition: common.h:96
static const AVOption sierpinski_options[]
#define width
AVFormatContext * ctx
Definition: movenc.c:48
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
AVFILTER_DEFINE_CLASS(sierpinski)
static int query_formats(AVFilterContext *ctx)
static void draw_sierpinski(AVFilterContext *ctx, AVFrame *frame)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
static int mod(int a, int b)
Modulo operation with only positive remainders.
Definition: vf_v360.c:474
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:383
static unsigned int av_lfg_get(AVLFG *c)
Get the next random unsigned 32-bit number using an ALFG.
Definition: lfg.h:53
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
Rational number (pair of numerator and denominator).
Definition: rational.h:58
offset must point to AVRational
Definition: opt.h:236
const char * name
Filter name.
Definition: avfilter.h:148
av_cold void av_lfg_init(AVLFG *c, unsigned int seed)
Definition: lfg.c:32
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
offset must point to two consecutive integers
Definition: opt.h:233
misc parsing utilities
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:378
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
#define FLAGS
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
int
#define rnd()
Definition: checkasm.h:106
avfilter_execute_func * execute
Definition: internal.h:155
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:338
static int fill_sierpinski(SierpinskiContext *s, int x, int y)
int height
Definition: frame.h:353
void INT64 start
Definition: avisynth_c.h:766
uint32_t av_get_random_seed(void)
Get a seed to use in conjunction with random functions.
Definition: random_seed.c:120
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
#define AV_WL32(p, v)
Definition: intreadwrite.h:426