19 #define OFFSET(x) offsetof(StackHWContext, x)
20 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)
22 #define SET_OUTPUT_REGION(region, rx, ry, rw, rh) do { \
23 region->x = rx; \
24 region->y = ry; \
25 region->width = rw; \
26 region->height = rh; \
27 } while (0)
91 int xpos = 0, ypos = 0;
119 char *arg2, *p2, *saveptr2 = NULL;
120 char *arg3, *p3, *saveptr3 = NULL;
121 int xpos, ypos, size;
141 for (int j = 0; j < 3; j++) {
142 if (!(arg2 = av_strtok(p2, "_", &saveptr2))) {
161 while ((arg3 = av_strtok(p3, "+", &saveptr3))) {
163 if (sscanf(arg3, "w%d", &size) == 1) {
171 } else if (sscanf(arg3, "h%d", &size) == 1) {
179 } else if (sscanf(arg3, "%d", &size) == 1) {
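The tokens handled at lines 119-179 follow the xstack layout syntax: each input position is an "x_y" pair, and each axis is a '+'-separated sum of "wN" (width of input N), "hN" (height of input N) or a plain pixel count. The standalone sketch below mirrors that parsing; it substitutes strtok_r() for av_strtok() so it builds without libavutil, and its function names and reduced error handling are illustrative only, not taken from this file.

/*
 * Sketch only: evaluate one axis expression such as "h0+16" against
 * known input widths/heights, in the style of lines 161-179 above.
 */
#include <stdio.h>
#include <string.h>

static int eval_axis(char *expr, const int *w, const int *h, char **saveptr)
{
    int total = 0, size;

    for (char *tok = strtok_r(expr, "+", saveptr); tok;
         tok = strtok_r(NULL, "+", saveptr)) {
        if (sscanf(tok, "w%d", &size) == 1)
            total += w[size];               /* width of input N  */
        else if (sscanf(tok, "h%d", &size) == 1)
            total += h[size];               /* height of input N */
        else if (sscanf(tok, "%d", &size) == 1)
            total += size;                  /* plain pixel count */
        else
            return -1;
    }
    return total;
}

int main(void)
{
    int widths[]  = { 640, 640 };
    int heights[] = { 360, 360 };
    char spec[]   = "w0_h0+16";   /* right of input 0, 16 px below its height */
    char *save1 = NULL, *save2 = NULL;

    char *xs = strtok_r(spec, "_", &save1);
    char *ys = strtok_r(NULL, "_", &save1);
    int xpos = eval_axis(xs, widths, heights, &save2);
    int ypos = eval_axis(ys, widths, heights, &save2);

    printf("xpos=%d ypos=%d\n", xpos, ypos);   /* prints xpos=640 ypos=376 */
    return 0;
}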
210 "Video inputs have different frame rates, output will be VFR\n");
241 if (sctx->layout && is_grid) {
242 av_log(avctx, AV_LOG_ERROR, "Both layout and grid were specified. Only one is allowed.\n");
246 if (!sctx->layout && !is_grid) {
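When the fixed "grid" layout is chosen instead of a custom "layout" string (the two cases validated at lines 241 and 246), each input maps to an equally sized tile. The sketch below is illustrative only, not code from this file; it shows that mapping in the spirit of the SET_OUTPUT_REGION() macro at line 22.

/* Illustrative sketch: place nb_inputs tiles of tile_w x tile_h pixels
 * on an nb_cols-wide grid, row by row. */
#include <stdio.h>

struct region { int x, y, width, height; };

static void grid_regions(struct region *r, int nb_inputs, int nb_cols,
                         int tile_w, int tile_h)
{
    for (int i = 0; i < nb_inputs; i++) {
        r[i].x      = (i % nb_cols) * tile_w;
        r[i].y      = (i / nb_cols) * tile_h;
        r[i].width  = tile_w;
        r[i].height = tile_h;
    }
}

int main(void)
{
    struct region r[4];

    grid_regions(r, 4, 2, 640, 360);    /* a 2x2 grid of 640x360 tiles */
    for (int i = 0; i < 4; i++)
        printf("input %d -> %dx%d at (%d,%d)\n",
               i, r[i].width, r[i].height, r[i].x, r[i].y);
    return 0;
}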
310 #define STACK_COMMON_OPTS \
311 { "inputs", "Set number of inputs", OFFSET(base.nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 2, UINT16_MAX, .flags = FLAGS }, \
312 { "shortest", "Force termination when the shortest input terminates", OFFSET(base.shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
314 #define DEFINE_HSTACK_OPTIONS(api) \
315 static const AVOption hstack_##api##_options[] = { \
317 { "height", "Set output height (0 to use the height of input 0)", OFFSET(base.tile_height), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, FLAGS }, \
321 #define DEFINE_VSTACK_OPTIONS(api) \
322 static const AVOption vstack_##api##_options[] = { \
324 { "width", "Set output width (0 to use the width of input 0)", OFFSET(base.tile_width), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, FLAGS }, \
328 #define DEFINE_XSTACK_OPTIONS(api) \
329 static const AVOption xstack_##api##_options[] = { \
331 { "layout", "Set custom layout", OFFSET(base.layout), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, .flags = FLAGS }, \
332 { "grid", "set fixed size grid layout", OFFSET(base.nb_grid_columns), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, .flags = FLAGS }, \
333 { "grid_tile_size", "set tile size in grid layout", OFFSET(base.tile_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, .flags = FLAGS }, \
334 { "fill", "Set the color for unused pixels", OFFSET(base.fillcolor_str), AV_OPT_TYPE_STRING, {.str = "none"}, .flags = FLAGS }, \
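These tables are ordinary AVOption arrays, so they are consumed through the generic option API. A minimal sketch follows; the backend name "xstack_vaapi" is an assumption about the local FFmpeg build, so the lookup is checked at runtime rather than presumed.

/* Sketch: allocate one of the xstack hardware filters and feed it the
 * "inputs", "grid", "grid_tile_size" and "fill" options defined above. */
#include <stdio.h>
#include <libavfilter/avfilter.h>

int main(void)
{
    AVFilterGraph   *graph = avfilter_graph_alloc();
    const AVFilter  *f     = avfilter_get_by_name("xstack_vaapi");
    AVFilterContext *ctx;
    int ret;

    if (!graph || !f) {
        fprintf(stderr, "graph alloc failed or filter not in this build\n");
        return 1;
    }

    ctx = avfilter_graph_alloc_filter(graph, f, "stack");
    if (!ctx)
        return 1;

    ret = avfilter_init_str(ctx, "inputs=4:grid=2x2:grid_tile_size=640x360:fill=black");
    fprintf(stderr, "init returned %d\n", ret);

    avfilter_graph_free(&graph);
    return 0;
}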
338 #define DEFINE_STACK_FILTER(category, api, capi, filter_flags) \
339 static const AVClass category##_##api##_class = { \
340 .class_name = #category "_" #api, \
341 .item_name = av_default_item_name, \
342 .option = category##_##api##_options, \
343 .version = LIBAVUTIL_VERSION_INT, \
345 const AVFilter ff_vf_##category##_##api = { \
346 .name = #category "_" #api, \
347 .description = NULL_IF_CONFIG_SMALL(#capi " " #category), \
348 .priv_size = sizeof(StackHWContext), \
349 .priv_class = &category##_##api##_class, \
350 .init = api##_stack_init, \
351 .uninit = api##_stack_uninit, \
352 .activate = stack_activate, \
353 FILTER_PIXFMTS_ARRAY(api ## _stack_pix_fmts), \
354 FILTER_OUTPUTS(stack_outputs), \
355 .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE, \
356 .flags = AVFILTER_FLAG_DYNAMIC_INPUTS | filter_flags, \
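A backend source file expands these macros once per filter. The sketch below is hypothetical: the API token "myhw", the MyHW label, and the myhw_stack_init()/myhw_stack_uninit()/myhw_stack_pix_fmts symbols the macros expect (see the .init, .uninit and pix_fmts lines above) are placeholders, not code from FFmpeg.

/* Hypothetical backend sketch; "myhw" and MyHW are placeholders. */
DEFINE_HSTACK_OPTIONS(myhw);
DEFINE_VSTACK_OPTIONS(myhw);
DEFINE_XSTACK_OPTIONS(myhw);

DEFINE_STACK_FILTER(hstack, myhw, MyHW, 0);   /* -> ff_vf_hstack_myhw, description "MyHW hstack" */
DEFINE_STACK_FILTER(vstack, myhw, MyHW, 0);
DEFINE_STACK_FILTER(xstack, myhw, MyHW, 0);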
AVRational time_base
Time base for the incoming frames.
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
char * av_asprintf(const char *fmt,...)
AVRational time_base
Time base for the output events.
#define AV_LOG_VERBOSE
Detailed information.
const char * name
Filter name.
@ EXT_INFINITY
Extend the frame to infinity.
A link between two filters.
Link properties exposed to filter code, but not external callers.
static int process_frame(FFFrameSync *fs)
@ EXT_STOP
Completely stop all streams with this one.
int(* on_event)(struct FFFrameSync *fs)
Callback called when a frame event is ready.
void * priv
private data for use by the filter
unsigned sync
Synchronization level: frames on input at the highest sync level will generate output frame events.
A filter pad used for either input or output.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static av_cold void stack_uninit(AVFilterContext *avctx)
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
AVRational sample_aspect_ratio
agreed upon sample aspect ratio
#define av_assert0(cond)
assert() equivalent, that is always enabled.
FFFrameSyncIn * in
Pointer to array of inputs.
int ff_append_inpad_free_name(AVFilterContext *f, AVFilterPad *p)
AVFilterLink ** inputs
array of pointers to input links
static FilterLink * ff_filter_link(AVFilterLink *link)
unsigned nb_inputs
number of input pads
static int config_output(AVFilterLink *outlink)
void * opaque
Opaque pointer, not used by the API.
static AVRational av_make_q(int num, int den)
Create an AVRational.
static int init_framesync(AVFilterContext *avctx)
static const AVFilterPad stack_outputs[]
AVFilterContext * src
source filter
int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str)
Parse str and put in width_ptr and height_ptr the detected values.
#define SET_OUTPUT_REGION(region, rx, ry, rw, rh)
StackItemRegion * regions
int w
agreed upon image width
static int config_comm_output(AVFilterLink *outlink)
const char * name
Pad name.
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
void * av_calloc(size_t nmemb, size_t size)
enum AVMediaType type
AVFilterPad type.
int ff_framesync_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in)
Initialize a frame sync structure.
enum FFFrameSyncExtMode before
Extrapolation mode for timestamps before the first frame.
int h
agreed upon image height
static int stack_activate(AVFilterContext *avctx)
static int stack_init(AVFilterContext *avctx)
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
enum FFFrameSyncExtMode after
Extrapolation mode for timestamps after the last frame.
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable.
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
const AVFilter * filter
the AVFilter of which this is an instance