FFmpeg — libavfilter/f_graphmonitor.c
Filtergraph monitoring filter: renders per-link statistics of a running
filtergraph (queued frames, counters, pts, formats) onto an RGBA video stream.
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "float.h"
22 
23 #include "libavutil/pixdesc.h"
24 #include "libavutil/eval.h"
25 #include "libavutil/intreadwrite.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/timestamp.h"
29 #include "avfilter.h"
30 #include "filters.h"
31 #include "formats.h"
32 #include "internal.h"
33 #include "video.h"
34 
35 typedef struct GraphMonitorContext {
36  const AVClass *class;
37 
38  int w, h;
39  float opacity;
40  int mode;
41  int flags;
43 
44  int64_t pts;
49  uint8_t bg[4];
51 
/* Bits for GraphMonitorContext.flags — each enables one column of stats. */
enum {
    MODE_QUEUE = 1 << 0,  ///< number of frames queued on the link
    MODE_FCIN  = 1 << 1,  ///< link frame_count_in counter
    MODE_FCOUT = 1 << 2,  ///< link frame_count_out counter
    MODE_PTS   = 1 << 3,  ///< current link pts
    MODE_TIME  = 1 << 4,  ///< current link time in seconds
    MODE_TB    = 1 << 5,  ///< link time base
    MODE_FMT   = 1 << 6,  ///< pixel/sample format name
    MODE_SIZE  = 1 << 7,  ///< video size or audio channel count
    MODE_RATE  = 1 << 8,  ///< video frame rate or audio sample rate
};
63 
64 #define OFFSET(x) offsetof(GraphMonitorContext, x)
65 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
66 
67 static const AVOption graphmonitor_options[] = {
68  { "size", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
69  { "s", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
70  { "opacity", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
71  { "o", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
72  { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
73  { "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
74  { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, VF, "mode" },
75  { "compact", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, VF, "mode" },
76  { "flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
77  { "f", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
78  { "queue", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_QUEUE}, 0, 0, VF, "flags" },
79  { "frame_count_in", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCOUT}, 0, 0, VF, "flags" },
80  { "frame_count_out", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCIN}, 0, 0, VF, "flags" },
81  { "pts", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_PTS}, 0, 0, VF, "flags" },
82  { "time", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TIME}, 0, 0, VF, "flags" },
83  { "timebase", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TB}, 0, 0, VF, "flags" },
84  { "format", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FMT}, 0, 0, VF, "flags" },
85  { "size", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_SIZE}, 0, 0, VF, "flags" },
86  { "rate", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_RATE}, 0, 0, VF, "flags" },
87  { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
88  { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
89  { NULL }
90 };
91 
93 {
94  AVFilterLink *outlink = ctx->outputs[0];
95  static const enum AVPixelFormat pix_fmts[] = {
98  };
99  int ret;
100 
102  if ((ret = ff_formats_ref(fmts_list, &outlink->in_formats)) < 0)
103  return ret;
104 
105  return 0;
106 }
107 
109 {
110  int bg = AV_RN32(s->bg);
111 
112  for (int i = 0; i < out->height; i++)
113  for (int j = 0; j < out->width; j++)
114  AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg);
115 }
116 
117 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
118 {
119  const uint8_t *font;
120  int font_height;
121  int i;
122 
123  font = avpriv_cga_font, font_height = 8;
124 
125  if (y + 8 >= pic->height ||
126  x + strlen(txt) * 8 >= pic->width)
127  return;
128 
129  for (i = 0; txt[i]; i++) {
130  int char_y, mask;
131 
132  uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*4;
133  for (char_y = 0; char_y < font_height; char_y++) {
134  for (mask = 0x80; mask; mask >>= 1) {
135  if (font[txt[i] * font_height + char_y] & mask) {
136  p[0] = color[0];
137  p[1] = color[1];
138  p[2] = color[2];
139  }
140  p += 4;
141  }
142  p += pic->linesize[0] - 8 * 4;
143  }
144  }
145 }
146 
148 {
149  for (int j = 0; j < filter->nb_inputs; j++) {
150  AVFilterLink *l = filter->inputs[j];
151  size_t frames = ff_inlink_queued_frames(l);
152 
153  if (frames)
154  return 1;
155  }
156 
157  for (int j = 0; j < filter->nb_outputs; j++) {
158  AVFilterLink *l = filter->outputs[j];
159  size_t frames = ff_inlink_queued_frames(l);
160 
161  if (frames)
162  return 1;
163  }
164 
165  return 0;
166 }
167 
169  int xpos, int ypos,
170  AVFilterLink *l,
171  size_t frames)
172 {
173  GraphMonitorContext *s = ctx->priv;
174  char buffer[1024] = { 0 };
175 
176  if (s->flags & MODE_FMT) {
177  if (l->type == AVMEDIA_TYPE_VIDEO) {
178  snprintf(buffer, sizeof(buffer)-1, " | format: %s",
180  } else if (l->type == AVMEDIA_TYPE_AUDIO) {
181  snprintf(buffer, sizeof(buffer)-1, " | format: %s",
183  }
184  drawtext(out, xpos, ypos, buffer, s->white);
185  xpos += strlen(buffer) * 8;
186  }
187  if (s->flags & MODE_SIZE) {
188  if (l->type == AVMEDIA_TYPE_VIDEO) {
189  snprintf(buffer, sizeof(buffer)-1, " | size: %dx%d", l->w, l->h);
190  } else if (l->type == AVMEDIA_TYPE_AUDIO) {
191  snprintf(buffer, sizeof(buffer)-1, " | channels: %d", l->channels);
192  }
193  drawtext(out, xpos, ypos, buffer, s->white);
194  xpos += strlen(buffer) * 8;
195  }
196  if (s->flags & MODE_RATE) {
197  if (l->type == AVMEDIA_TYPE_VIDEO) {
198  snprintf(buffer, sizeof(buffer)-1, " | fps: %d/%d", l->frame_rate.num, l->frame_rate.den);
199  } else if (l->type == AVMEDIA_TYPE_AUDIO) {
200  snprintf(buffer, sizeof(buffer)-1, " | samplerate: %d", l->sample_rate);
201  }
202  drawtext(out, xpos, ypos, buffer, s->white);
203  xpos += strlen(buffer) * 8;
204  }
205  if (s->flags & MODE_TB) {
206  snprintf(buffer, sizeof(buffer)-1, " | tb: %d/%d", l->time_base.num, l->time_base.den);
207  drawtext(out, xpos, ypos, buffer, s->white);
208  xpos += strlen(buffer) * 8;
209  }
210  if (s->flags & MODE_QUEUE) {
211  snprintf(buffer, sizeof(buffer)-1, " | queue: ");
212  drawtext(out, xpos, ypos, buffer, s->white);
213  xpos += strlen(buffer) * 8;
214  snprintf(buffer, sizeof(buffer)-1, "%"SIZE_SPECIFIER, frames);
215  drawtext(out, xpos, ypos, buffer, frames > 0 ? frames >= 10 ? frames >= 50 ? s->red : s->yellow : s->green : s->white);
216  xpos += strlen(buffer) * 8;
217  }
218  if (s->flags & MODE_FCIN) {
219  snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in);
220  drawtext(out, xpos, ypos, buffer, s->white);
221  xpos += strlen(buffer) * 8;
222  }
223  if (s->flags & MODE_FCOUT) {
224  snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out);
225  drawtext(out, xpos, ypos, buffer, s->white);
226  xpos += strlen(buffer) * 8;
227  }
228  if (s->flags & MODE_PTS) {
229  snprintf(buffer, sizeof(buffer)-1, " | pts: %s", av_ts2str(l->current_pts_us));
230  drawtext(out, xpos, ypos, buffer, s->white);
231  xpos += strlen(buffer) * 8;
232  }
233  if (s->flags & MODE_TIME) {
234  snprintf(buffer, sizeof(buffer)-1, " | time: %s", av_ts2timestr(l->current_pts_us, &AV_TIME_BASE_Q));
235  drawtext(out, xpos, ypos, buffer, s->white);
236  xpos += strlen(buffer) * 8;
237  }
238 }
239 
240 static int create_frame(AVFilterContext *ctx, int64_t pts)
241 {
242  GraphMonitorContext *s = ctx->priv;
243  AVFilterLink *outlink = ctx->outputs[0];
244  AVFrame *out;
245  int xpos, ypos = 0;
246 
247  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
248  if (!out)
249  return AVERROR(ENOMEM);
250 
251  clear_image(s, out, outlink);
252 
253  for (int i = 0; i < ctx->graph->nb_filters; i++) {
254  AVFilterContext *filter = ctx->graph->filters[i];
255  char buffer[1024] = { 0 };
256 
257  if (s->mode && !filter_have_queued(filter))
258  continue;
259 
260  xpos = 0;
261  drawtext(out, xpos, ypos, filter->name, s->white);
262  xpos += strlen(filter->name) * 8 + 10;
263  drawtext(out, xpos, ypos, filter->filter->name, s->white);
264  ypos += 10;
265  for (int j = 0; j < filter->nb_inputs; j++) {
266  AVFilterLink *l = filter->inputs[j];
267  size_t frames = ff_inlink_queued_frames(l);
268 
269  if (s->mode && !frames)
270  continue;
271 
272  xpos = 10;
273  snprintf(buffer, sizeof(buffer)-1, "in%d: ", j);
274  drawtext(out, xpos, ypos, buffer, s->white);
275  xpos += strlen(buffer) * 8;
276  drawtext(out, xpos, ypos, l->src->name, s->white);
277  xpos += strlen(l->src->name) * 8 + 10;
278  draw_items(ctx, out, xpos, ypos, l, frames);
279  ypos += 10;
280  }
281 
282  ypos += 2;
283  for (int j = 0; j < filter->nb_outputs; j++) {
284  AVFilterLink *l = filter->outputs[j];
285  size_t frames = ff_inlink_queued_frames(l);
286 
287  if (s->mode && !frames)
288  continue;
289 
290  xpos = 10;
291  snprintf(buffer, sizeof(buffer)-1, "out%d: ", j);
292  drawtext(out, xpos, ypos, buffer, s->white);
293  xpos += strlen(buffer) * 8;
294  drawtext(out, xpos, ypos, l->dst->name, s->white);
295  xpos += strlen(l->dst->name) * 8 + 10;
296  draw_items(ctx, out, xpos, ypos, l, frames);
297  ypos += 10;
298  }
299  ypos += 5;
300  }
301 
302  out->pts = pts;
303  s->pts = pts;
304  return ff_filter_frame(outlink, out);
305 }
306 
308 {
309  GraphMonitorContext *s = ctx->priv;
310  AVFilterLink *inlink = ctx->inputs[0];
311  AVFilterLink *outlink = ctx->outputs[0];
312  int64_t pts = AV_NOPTS_VALUE;
313 
315 
317  AVFrame *frame = NULL;
318  int ret;
319 
321  if (ret < 0)
322  return ret;
323  if (ret > 0) {
324  pts = frame->pts;
326  }
327  }
328 
329  if (pts != AV_NOPTS_VALUE) {
330  pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
331  if (s->pts < pts && ff_outlink_frame_wanted(outlink))
332  return create_frame(ctx, pts);
333  }
334 
337 
338  return FFERROR_NOT_READY;
339 }
340 
341 static int config_output(AVFilterLink *outlink)
342 {
343  GraphMonitorContext *s = outlink->src->priv;
344 
345  s->bg[3] = 255 * s->opacity;
346  s->white[0] = s->white[1] = s->white[2] = 255;
347  s->yellow[0] = s->yellow[1] = 255;
348  s->red[0] = 255;
349  s->green[1] = 255;
350  outlink->w = s->w;
351  outlink->h = s->h;
352  outlink->sample_aspect_ratio = (AVRational){1,1};
353  outlink->frame_rate = s->frame_rate;
354  outlink->time_base = av_inv_q(s->frame_rate);
355 
356  return 0;
357 }
358 
359 #if CONFIG_GRAPHMONITOR_FILTER
360 
361 AVFILTER_DEFINE_CLASS(graphmonitor);
362 
363 static const AVFilterPad graphmonitor_inputs[] = {
364  {
365  .name = "default",
366  .type = AVMEDIA_TYPE_VIDEO,
367  },
368  { NULL }
369 };
370 
371 static const AVFilterPad graphmonitor_outputs[] = {
372  {
373  .name = "default",
374  .type = AVMEDIA_TYPE_VIDEO,
375  .config_props = config_output,
376  },
377  { NULL }
378 };
379 
381  .name = "graphmonitor",
382  .description = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
383  .priv_size = sizeof(GraphMonitorContext),
384  .priv_class = &graphmonitor_class,
386  .activate = activate,
387  .inputs = graphmonitor_inputs,
388  .outputs = graphmonitor_outputs,
389 };
390 
391 #endif // CONFIG_GRAPHMONITOR_FILTER
392 
393 #if CONFIG_AGRAPHMONITOR_FILTER
394 
395 #define agraphmonitor_options graphmonitor_options
396 AVFILTER_DEFINE_CLASS(agraphmonitor);
397 
398 static const AVFilterPad agraphmonitor_inputs[] = {
399  {
400  .name = "default",
401  .type = AVMEDIA_TYPE_AUDIO,
402  },
403  { NULL }
404 };
405 
406 static const AVFilterPad agraphmonitor_outputs[] = {
407  {
408  .name = "default",
409  .type = AVMEDIA_TYPE_VIDEO,
410  .config_props = config_output,
411  },
412  { NULL }
413 };
414 
416  .name = "agraphmonitor",
417  .description = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
418  .priv_size = sizeof(GraphMonitorContext),
419  .priv_class = &agraphmonitor_class,
421  .activate = activate,
422  .inputs = agraphmonitor_inputs,
423  .outputs = agraphmonitor_outputs,
424 };
425 #endif // CONFIG_AGRAPHMONITOR_FILTER
GraphMonitorContext::mode
int mode
Definition: f_graphmonitor.c:40
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
MODE_FMT
@ MODE_FMT
Definition: f_graphmonitor.c:59
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
out
FILE * out
Definition: movenc.c:54
color
Definition: vf_paletteuse.c:588
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:236
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
GraphMonitorContext::opacity
float opacity
Definition: f_graphmonitor.c:39
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
pixdesc.h
AVFrame::width
int width
Definition: frame.h:353
w
uint8_t w
Definition: llviddspenc.c:38
OFFSET
#define OFFSET(x)
Definition: f_graphmonitor.c:64
AVOption
AVOption.
Definition: opt.h:246
float.h
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
MODE_TIME
@ MODE_TIME
Definition: f_graphmonitor.c:57
video.h
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
create_frame
static int create_frame(AVFilterContext *ctx, int64_t pts)
Definition: f_graphmonitor.c:240
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
ff_avf_agraphmonitor
AVFilter ff_avf_agraphmonitor
formats.h
ff_inlink_consume_frame
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
Definition: avfilter.c:1481
GraphMonitorContext::bg
uint8_t bg[4]
Definition: f_graphmonitor.c:49
MODE_SIZE
@ MODE_SIZE
Definition: f_graphmonitor.c:60
AVFilterContext::priv
void * priv
private data for use by the filter
Definition: avfilter.h:353
MODE_PTS
@ MODE_PTS
Definition: f_graphmonitor.c:56
frames
if it could not because there are no more frames
Definition: filter_design.txt:266
MODE_FCOUT
@ MODE_FCOUT
Definition: f_graphmonitor.c:55
pts
static int64_t pts
Definition: transcode_aac.c:647
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: f_graphmonitor.c:92
AVRational::num
int num
Numerator.
Definition: rational.h:59
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
GraphMonitorContext::green
uint8_t green[4]
Definition: f_graphmonitor.c:48
mask
static const uint16_t mask[17]
Definition: lzw.c:38
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:440
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
filters.h
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
GraphMonitorContext::white
uint8_t white[4]
Definition: f_graphmonitor.c:45
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
GraphMonitorContext::pts
int64_t pts
Definition: f_graphmonitor.c:44
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
NULL
#define NULL
Definition: coverity.c:32
MODE_TB
@ MODE_TB
Definition: f_graphmonitor.c:58
GraphMonitorContext::red
uint8_t red[4]
Definition: f_graphmonitor.c:47
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:233
AV_RN32
#define AV_RN32(p)
Definition: intreadwrite.h:364
AVFilterContext::name
char * name
name of this filter instance
Definition: avfilter.h:343
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
VF
#define VF
Definition: f_graphmonitor.c:65
ff_inlink_queued_frames
size_t ff_inlink_queued_frames(AVFilterLink *link)
Get the number of frames available on the link.
Definition: avfilter.c:1451
activate
static int activate(AVFilterContext *ctx)
Definition: f_graphmonitor.c:307
eval.h
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:376
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
ff_vf_graphmonitor
AVFilter ff_vf_graphmonitor
GraphMonitorContext
Definition: f_graphmonitor.c:35
FF_FILTER_FORWARD_WANTED
FF_FILTER_FORWARD_WANTED(outlink, inlink)
xga_font_data.h
drawtext
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
Definition: f_graphmonitor.c:117
internal.h
AVFILTER_DEFINE_CLASS
#define AVFILTER_DEFINE_CLASS(fname)
Definition: internal.h:334
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:226
MODE_RATE
@ MODE_RATE
Definition: f_graphmonitor.c:61
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
uint8_t
uint8_t
Definition: audio_convert.c:194
graphmonitor_options
static const AVOption graphmonitor_options[]
Definition: f_graphmonitor.c:67
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
GraphMonitorContext::frame_rate
AVRational frame_rate
Definition: f_graphmonitor.c:42
AVFilter
Filter definition.
Definition: avfilter.h:144
ret
ret
Definition: filter_design.txt:187
filter_have_queued
static int filter_have_queued(AVFilterContext *filter)
Definition: f_graphmonitor.c:147
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
GraphMonitorContext::w
int w
Definition: f_graphmonitor.c:38
clear_image
static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
Definition: f_graphmonitor.c:108
config_output
static int config_output(AVFilterLink *outlink)
Definition: f_graphmonitor.c:341
SIZE_SPECIFIER
#define SIZE_SPECIFIER
Definition: internal.h:264
AVFrame::height
int height
Definition: frame.h:353
GraphMonitorContext::h
int h
Definition: f_graphmonitor.c:38
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
avfilter.h
MODE_FCIN
@ MODE_FCIN
Definition: f_graphmonitor.c:54
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
GraphMonitorContext::flags
int flags
Definition: f_graphmonitor.c:41
avpriv_cga_font
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
FF_FILTER_FORWARD_STATUS
FF_FILTER_FORWARD_STATUS(inlink, outlink)
AV_OPT_TYPE_FLAGS
@ AV_OPT_TYPE_FLAGS
Definition: opt.h:222
timestamp.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
MODE_QUEUE
@ MODE_QUEUE
Definition: f_graphmonitor.c:53
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
GraphMonitorContext::yellow
uint8_t yellow[4]
Definition: f_graphmonitor.c:46
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
snprintf
#define snprintf
Definition: snprintf.h:34
draw_items
static void draw_items(AVFilterContext *ctx, AVFrame *out, int xpos, int ypos, AVFilterLink *l, size_t frames)
Definition: f_graphmonitor.c:168
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438