FFmpeg
f_graphmonitor.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "float.h"
22 
23 #include "libavutil/pixdesc.h"
24 #include "libavutil/eval.h"
25 #include "libavutil/intreadwrite.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/timestamp.h"
29 #include "avfilter.h"
30 #include "filters.h"
31 #include "formats.h"
32 #include "internal.h"
33 #include "video.h"
34 
35 typedef struct GraphMonitorContext {
36  const AVClass *class;
37 
38  int w, h;
39  float opacity;
40  int mode;
41  int flags;
43 
44  int64_t pts;
45  int64_t next_pts;
51  uint8_t bg[4];
53 
/* Per-link items that can be rendered, selected via the "flags" option. */
enum {
    MODE_QUEUE = 0x001, /* number of frames queued on the link */
    MODE_FCIN  = 0x002, /* input frame count */
    MODE_FCOUT = 0x004, /* output frame count */
    MODE_PTS   = 0x008, /* current pts */
    MODE_TIME  = 0x010, /* current time */
    MODE_TB    = 0x020, /* link time base */
    MODE_FMT   = 0x040, /* pixel/sample format name */
    MODE_SIZE  = 0x080, /* video size / audio channel count */
    MODE_RATE  = 0x100, /* frame rate / sample rate */
    MODE_EOF   = 0x200, /* EOF status of the link */
};
66 
67 #define OFFSET(x) offsetof(GraphMonitorContext, x)
68 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
69 
70 static const AVOption graphmonitor_options[] = {
71  { "size", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
72  { "s", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
73  { "opacity", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
74  { "o", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
75  { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
76  { "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
77  { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, VF, "mode" },
78  { "compact", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, VF, "mode" },
79  { "flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
80  { "f", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
81  { "queue", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_QUEUE}, 0, 0, VF, "flags" },
82  { "frame_count_in", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCOUT}, 0, 0, VF, "flags" },
83  { "frame_count_out", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCIN}, 0, 0, VF, "flags" },
84  { "pts", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_PTS}, 0, 0, VF, "flags" },
85  { "time", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TIME}, 0, 0, VF, "flags" },
86  { "timebase", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TB}, 0, 0, VF, "flags" },
87  { "format", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FMT}, 0, 0, VF, "flags" },
88  { "size", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_SIZE}, 0, 0, VF, "flags" },
89  { "rate", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_RATE}, 0, 0, VF, "flags" },
90  { "eof", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_EOF}, 0, 0, VF, "flags" },
91  { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
92  { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
93  { NULL }
94 };
95 
97 {
98  AVFilterLink *outlink = ctx->outputs[0];
99  static const enum AVPixelFormat pix_fmts[] = {
102  };
103  int ret;
104 
105  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
106  if ((ret = ff_formats_ref(fmts_list, &outlink->incfg.formats)) < 0)
107  return ret;
108 
109  return 0;
110 }
111 
113 {
114  int bg = AV_RN32(s->bg);
115 
116  for (int i = 0; i < out->height; i++)
117  for (int j = 0; j < out->width; j++)
118  AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg);
119 }
120 
121 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
122 {
123  const uint8_t *font;
124  int font_height;
125  int i;
126 
127  font = avpriv_cga_font, font_height = 8;
128 
129  if (y + 8 >= pic->height ||
130  x + strlen(txt) * 8 >= pic->width)
131  return;
132 
133  for (i = 0; txt[i]; i++) {
134  int char_y, mask;
135 
136  uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*4;
137  for (char_y = 0; char_y < font_height; char_y++) {
138  for (mask = 0x80; mask; mask >>= 1) {
139  if (font[txt[i] * font_height + char_y] & mask) {
140  p[0] = color[0];
141  p[1] = color[1];
142  p[2] = color[2];
143  }
144  p += 4;
145  }
146  p += pic->linesize[0] - 8 * 4;
147  }
148  }
149 }
150 
152 {
153  for (int j = 0; j < filter->nb_inputs; j++) {
154  AVFilterLink *l = filter->inputs[j];
155  size_t frames = ff_inlink_queued_frames(l);
156 
157  if (frames)
158  return 1;
159  }
160 
161  for (int j = 0; j < filter->nb_outputs; j++) {
162  AVFilterLink *l = filter->outputs[j];
163  size_t frames = ff_inlink_queued_frames(l);
164 
165  if (frames)
166  return 1;
167  }
168 
169  return 0;
170 }
171 
173  int xpos, int ypos,
174  AVFilterLink *l,
175  size_t frames)
176 {
177  GraphMonitorContext *s = ctx->priv;
178  char buffer[1024] = { 0 };
179 
180  if (s->flags & MODE_FMT) {
181  if (l->type == AVMEDIA_TYPE_VIDEO) {
182  snprintf(buffer, sizeof(buffer)-1, " | format: %s",
184  } else if (l->type == AVMEDIA_TYPE_AUDIO) {
185  snprintf(buffer, sizeof(buffer)-1, " | format: %s",
187  }
188  drawtext(out, xpos, ypos, buffer, s->white);
189  xpos += strlen(buffer) * 8;
190  }
191  if (s->flags & MODE_SIZE) {
192  if (l->type == AVMEDIA_TYPE_VIDEO) {
193  snprintf(buffer, sizeof(buffer)-1, " | size: %dx%d", l->w, l->h);
194  } else if (l->type == AVMEDIA_TYPE_AUDIO) {
195  snprintf(buffer, sizeof(buffer)-1, " | channels: %d", l->channels);
196  }
197  drawtext(out, xpos, ypos, buffer, s->white);
198  xpos += strlen(buffer) * 8;
199  }
200  if (s->flags & MODE_RATE) {
201  if (l->type == AVMEDIA_TYPE_VIDEO) {
202  snprintf(buffer, sizeof(buffer)-1, " | fps: %d/%d", l->frame_rate.num, l->frame_rate.den);
203  } else if (l->type == AVMEDIA_TYPE_AUDIO) {
204  snprintf(buffer, sizeof(buffer)-1, " | samplerate: %d", l->sample_rate);
205  }
206  drawtext(out, xpos, ypos, buffer, s->white);
207  xpos += strlen(buffer) * 8;
208  }
209  if (s->flags & MODE_TB) {
210  snprintf(buffer, sizeof(buffer)-1, " | tb: %d/%d", l->time_base.num, l->time_base.den);
211  drawtext(out, xpos, ypos, buffer, s->white);
212  xpos += strlen(buffer) * 8;
213  }
214  if (s->flags & MODE_QUEUE) {
215  snprintf(buffer, sizeof(buffer)-1, " | queue: ");
216  drawtext(out, xpos, ypos, buffer, s->white);
217  xpos += strlen(buffer) * 8;
218  snprintf(buffer, sizeof(buffer)-1, "%"SIZE_SPECIFIER, frames);
219  drawtext(out, xpos, ypos, buffer, frames > 0 ? frames >= 10 ? frames >= 50 ? s->red : s->yellow : s->green : s->white);
220  xpos += strlen(buffer) * 8;
221  }
222  if (s->flags & MODE_FCIN) {
223  snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in);
224  drawtext(out, xpos, ypos, buffer, s->white);
225  xpos += strlen(buffer) * 8;
226  }
227  if (s->flags & MODE_FCOUT) {
228  snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out);
229  drawtext(out, xpos, ypos, buffer, s->white);
230  xpos += strlen(buffer) * 8;
231  }
232  if (s->flags & MODE_PTS) {
233  snprintf(buffer, sizeof(buffer)-1, " | pts: %s", av_ts2str(l->current_pts_us));
234  drawtext(out, xpos, ypos, buffer, s->white);
235  xpos += strlen(buffer) * 8;
236  }
237  if (s->flags & MODE_TIME) {
238  snprintf(buffer, sizeof(buffer)-1, " | time: %s", av_ts2timestr(l->current_pts_us, &AV_TIME_BASE_Q));
239  drawtext(out, xpos, ypos, buffer, s->white);
240  xpos += strlen(buffer) * 8;
241  }
242  if (s->flags & MODE_EOF && ff_outlink_get_status(l)) {
243  snprintf(buffer, sizeof(buffer)-1, " | eof");
244  drawtext(out, xpos, ypos, buffer, s->blue);
245  xpos += strlen(buffer) * 8;
246  }
247 }
248 
249 static int create_frame(AVFilterContext *ctx, int64_t pts)
250 {
251  GraphMonitorContext *s = ctx->priv;
252  AVFilterLink *outlink = ctx->outputs[0];
253  AVFrame *out;
254  int xpos, ypos = 0;
255 
256  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
257  if (!out)
258  return AVERROR(ENOMEM);
259 
260  clear_image(s, out, outlink);
261 
262  for (int i = 0; i < ctx->graph->nb_filters; i++) {
264  char buffer[1024] = { 0 };
265 
266  if (s->mode && !filter_have_queued(filter))
267  continue;
268 
269  xpos = 0;
270  drawtext(out, xpos, ypos, filter->name, s->white);
271  xpos += strlen(filter->name) * 8 + 10;
272  drawtext(out, xpos, ypos, filter->filter->name, s->white);
273  ypos += 10;
274  for (int j = 0; j < filter->nb_inputs; j++) {
275  AVFilterLink *l = filter->inputs[j];
276  size_t frames = ff_inlink_queued_frames(l);
277 
278  if (s->mode && !frames)
279  continue;
280 
281  xpos = 10;
282  snprintf(buffer, sizeof(buffer)-1, "in%d: ", j);
283  drawtext(out, xpos, ypos, buffer, s->white);
284  xpos += strlen(buffer) * 8;
285  drawtext(out, xpos, ypos, l->src->name, s->white);
286  xpos += strlen(l->src->name) * 8 + 10;
287  draw_items(ctx, out, xpos, ypos, l, frames);
288  ypos += 10;
289  }
290 
291  ypos += 2;
292  for (int j = 0; j < filter->nb_outputs; j++) {
293  AVFilterLink *l = filter->outputs[j];
294  size_t frames = ff_inlink_queued_frames(l);
295 
296  if (s->mode && !frames)
297  continue;
298 
299  xpos = 10;
300  snprintf(buffer, sizeof(buffer)-1, "out%d: ", j);
301  drawtext(out, xpos, ypos, buffer, s->white);
302  xpos += strlen(buffer) * 8;
303  drawtext(out, xpos, ypos, l->dst->name, s->white);
304  xpos += strlen(l->dst->name) * 8 + 10;
305  draw_items(ctx, out, xpos, ypos, l, frames);
306  ypos += 10;
307  }
308  ypos += 5;
309  }
310 
311  out->pts = pts;
312  s->pts = pts + 1;
313  return ff_filter_frame(outlink, out);
314 }
315 
317 {
318  GraphMonitorContext *s = ctx->priv;
319  AVFilterLink *inlink = ctx->inputs[0];
320  AVFilterLink *outlink = ctx->outputs[0];
321  int64_t pts = AV_NOPTS_VALUE;
322 
323  FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
324 
325  if (ff_inlink_queued_frames(inlink)) {
326  AVFrame *frame = NULL;
327  int ret;
328 
329  ret = ff_inlink_consume_frame(inlink, &frame);
330  if (ret < 0)
331  return ret;
332  if (ret > 0) {
333  pts = frame->pts;
334  av_frame_free(&frame);
335  }
336  }
337 
338  if (pts != AV_NOPTS_VALUE) {
339  pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
340  if (s->pts == AV_NOPTS_VALUE)
341  s->pts = pts;
342  s->next_pts = pts;
343  }
344 
345  if (s->pts < s->next_pts && ff_outlink_frame_wanted(outlink))
346  return create_frame(ctx, s->pts);
347 
348  FF_FILTER_FORWARD_STATUS(inlink, outlink);
349  FF_FILTER_FORWARD_WANTED(outlink, inlink);
350 
351  return FFERROR_NOT_READY;
352 }
353 
354 static int config_output(AVFilterLink *outlink)
355 {
356  GraphMonitorContext *s = outlink->src->priv;
357 
358  s->bg[3] = 255 * s->opacity;
359  s->white[0] = s->white[1] = s->white[2] = 255;
360  s->yellow[0] = s->yellow[1] = 255;
361  s->red[0] = 255;
362  s->green[1] = 255;
363  s->blue[2] = 255;
364  s->pts = AV_NOPTS_VALUE;
366  outlink->w = s->w;
367  outlink->h = s->h;
368  outlink->sample_aspect_ratio = (AVRational){1,1};
369  outlink->frame_rate = s->frame_rate;
370  outlink->time_base = av_inv_q(s->frame_rate);
371 
372  return 0;
373 }
374 
375 #if CONFIG_GRAPHMONITOR_FILTER
376 
377 AVFILTER_DEFINE_CLASS(graphmonitor);
378 
379 static const AVFilterPad graphmonitor_inputs[] = {
380  {
381  .name = "default",
382  .type = AVMEDIA_TYPE_VIDEO,
383  },
384  { NULL }
385 };
386 
387 static const AVFilterPad graphmonitor_outputs[] = {
388  {
389  .name = "default",
390  .type = AVMEDIA_TYPE_VIDEO,
391  .config_props = config_output,
392  },
393  { NULL }
394 };
395 
397  .name = "graphmonitor",
398  .description = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
399  .priv_size = sizeof(GraphMonitorContext),
400  .priv_class = &graphmonitor_class,
402  .activate = activate,
403  .inputs = graphmonitor_inputs,
404  .outputs = graphmonitor_outputs,
405 };
406 
407 #endif // CONFIG_GRAPHMONITOR_FILTER
408 
409 #if CONFIG_AGRAPHMONITOR_FILTER
410 
411 #define agraphmonitor_options graphmonitor_options
412 AVFILTER_DEFINE_CLASS(agraphmonitor);
413 
414 static const AVFilterPad agraphmonitor_inputs[] = {
415  {
416  .name = "default",
417  .type = AVMEDIA_TYPE_AUDIO,
418  },
419  { NULL }
420 };
421 
422 static const AVFilterPad agraphmonitor_outputs[] = {
423  {
424  .name = "default",
425  .type = AVMEDIA_TYPE_VIDEO,
426  .config_props = config_output,
427  },
428  { NULL }
429 };
430 
432  .name = "agraphmonitor",
433  .description = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
434  .priv_size = sizeof(GraphMonitorContext),
435  .priv_class = &agraphmonitor_class,
437  .activate = activate,
438  .inputs = agraphmonitor_inputs,
439  .outputs = agraphmonitor_outputs,
440 };
441 #endif // CONFIG_AGRAPHMONITOR_FILTER
AVFilterContext ** filters
Definition: avfilter.h:855
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link&#39;s FIFO and update the link&#39;s stats.
Definition: avfilter.c:1489
#define NULL
Definition: coverity.c:32
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
AVOption.
Definition: opt.h:248
Main libavfilter public API header.
FF_FILTER_FORWARD_STATUS(inlink, outlink)
int num
Numerator.
Definition: rational.h:59
return FFERROR_NOT_READY
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
AVFilter ff_avf_agraphmonitor
if it could not because there are no more frames
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:287
struct AVFilterGraph * graph
filtergraph this filter belongs to
Definition: avfilter.h:356
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:347
char * name
name of this filter instance
Definition: avfilter.h:344
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1091
uint8_t
AVOptions.
timestamp utils, mostly useful for debugging/logging purposes
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:401
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function.If this function returns true
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
A filter pad used for either input or output.
Definition: internal.h:54
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
int width
Definition: frame.h:366
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
static const uint16_t mask[17]
Definition: lzw.c:38
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
unsigned nb_outputs
number of output pads
Definition: avfilter.h:352
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
void * priv
private data for use by the filter
Definition: avfilter.h:354
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:443
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
unsigned nb_inputs
number of input pads
Definition: avfilter.h:348
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:467
static int config_output(AVFilterLink *outlink)
AVFormatContext * ctx
Definition: movenc.c:48
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
AVFilter ff_vf_graphmonitor
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
static const AVOption graphmonitor_options[]
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:145
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
Definition: avfilter.c:1638
static int filter_have_queued(AVFilterContext *filter)
Rational number (pair of numerator and denominator).
Definition: rational.h:58
offset must point to AVRational
Definition: opt.h:238
const char * name
Filter name.
Definition: avfilter.h:149
unsigned nb_filters
Definition: avfilter.h:856
#define snprintf
Definition: snprintf.h:34
size_t ff_inlink_queued_frames(AVFilterLink *link)
Get the number of frames available on the link.
Definition: avfilter.c:1459
offset must point to two consecutive integers
Definition: opt.h:235
#define AV_RN32(p)
Definition: intreadwrite.h:364
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:351
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:300
#define SIZE_SPECIFIER
Definition: internal.h:229
#define VF
static int activate(AVFilterContext *ctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
#define AV_WN32(p, v)
Definition: intreadwrite.h:376
static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
int den
Denominator.
Definition: rational.h:60
FF_FILTER_FORWARD_WANTED(outlink, inlink)
static int query_formats(AVFilterContext *ctx)
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
#define AVFILTER_DEFINE_CLASS(fname)
Definition: internal.h:288
#define OFFSET(x)
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
A list of supported formats for one end of a filter link.
Definition: formats.h:65
An instance of a filter.
Definition: avfilter.h:339
int height
Definition: frame.h:366
FILE * out
Definition: movenc.c:54
static void draw_items(AVFilterContext *ctx, AVFrame *out, int xpos, int ypos, AVFilterLink *l, size_t frames)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
static int create_frame(AVFilterContext *ctx, int64_t pts)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:342
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
GLuint buffer
Definition: opengl_enc.c:101
CGA/EGA/VGA ROM font data.
simple arithmetic expression evaluator