f_graphmonitor.c
/*
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "float.h"

#include "libavutil/pixdesc.h"
#include "libavutil/eval.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "libavutil/xga_font_data.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

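/*
 * Per-instance state: output frame geometry, the user options (opacity,
 * mode, flags, frame rate), timestamp tracking for paced output, and the
 * RGBA colors used when rendering the overlay.
 */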
typedef struct GraphMonitorContext {
    const AVClass *class;

    int w, h;
    float opacity;
    int mode;
    int flags;
    AVRational frame_rate;

    int64_t pts;
    int64_t next_pts;
    uint8_t white[4];
    uint8_t yellow[4];
    uint8_t red[4];
    uint8_t green[4];
    uint8_t blue[4];
    uint8_t bg[4];
} GraphMonitorContext;

enum {
    MODE_QUEUE = 1 << 0,
    MODE_FCIN  = 1 << 1,
    MODE_FCOUT = 1 << 2,
    MODE_PTS   = 1 << 3,
    MODE_TIME  = 1 << 4,
    MODE_TB    = 1 << 5,
    MODE_FMT   = 1 << 6,
    MODE_SIZE  = 1 << 7,
    MODE_RATE  = 1 << 8,
    MODE_EOF   = 1 << 9,
};

#define OFFSET(x) offsetof(GraphMonitorContext, x)
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

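/*
 * User options: monitor frame "size"/"s", background "opacity"/"o",
 * display "mode"/"m" (full or compact), the per-link stats to draw
 * ("flags"/"f"), and the output frame "rate"/"r".
 */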
static const AVOption graphmonitor_options[] = {
    { "size", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
    { "s",    "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
    { "opacity", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
    { "o",       "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
    { "m",    "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
    { "full",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, VF, "mode" },
    { "compact", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, VF, "mode" },
    { "flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
    { "f",     "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
    { "queue",           NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_QUEUE}, 0, 0, VF, "flags" },
    { "frame_count_in",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCIN},  0, 0, VF, "flags" },
    { "frame_count_out", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCOUT}, 0, 0, VF, "flags" },
    { "pts",      NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_PTS},  0, 0, VF, "flags" },
    { "time",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TIME}, 0, 0, VF, "flags" },
    { "timebase", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TB},   0, 0, VF, "flags" },
    { "format",   NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FMT},  0, 0, VF, "flags" },
    { "size",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_SIZE}, 0, 0, VF, "flags" },
    { "rate",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_RATE}, 0, 0, VF, "flags" },
    { "eof",      NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_EOF},  0, 0, VF, "flags" },
    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
    { "r",    "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
    { NULL }
};

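/* The monitor output is always packed RGBA. */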
static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGBA,
        AV_PIX_FMT_NONE,
    };
    int ret;

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(fmts_list, &outlink->incfg.formats)) < 0)
        return ret;

    return 0;
}

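/* Fill the whole output frame with the 32-bit packed background color. */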
static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
{
    int bg = AV_RN32(s->bg);

    for (int i = 0; i < out->height; i++)
        for (int j = 0; j < out->width; j++)
            AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg);
}

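/*
 * Render a string at (x, y) into the RGBA frame using the built-in 8x8
 * CGA bitmap font; text that would not fit inside the frame is skipped.
 */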
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;

    if (y + 8 >= pic->height ||
        x + strlen(txt) * 8 >= pic->width)
        return;

    for (i = 0; txt[i]; i++) {
        int char_y, mask;

        uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*4;
        for (char_y = 0; char_y < font_height; char_y++) {
            for (mask = 0x80; mask; mask >>= 1) {
                if (font[txt[i] * font_height + char_y] & mask) {
                    p[0] = color[0];
                    p[1] = color[1];
                    p[2] = color[2];
                }
                p += 4;
            }
            p += pic->linesize[0] - 8 * 4;
        }
    }
}

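/* Return 1 if any input or output link of the filter has queued frames. */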
static int filter_have_queued(AVFilterContext *filter)
{
    for (int j = 0; j < filter->nb_inputs; j++) {
        AVFilterLink *l = filter->inputs[j];
        size_t frames = ff_inlink_queued_frames(l);

        if (frames)
            return 1;
    }

    for (int j = 0; j < filter->nb_outputs; j++) {
        AVFilterLink *l = filter->outputs[j];
        size_t frames = ff_inlink_queued_frames(l);

        if (frames)
            return 1;
    }

    return 0;
}

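/*
 * Append the stats selected by the "flags" option (format, size/channels,
 * rate, time base, queue length, frame counts, pts/time, EOF) after the
 * link label, starting at (xpos, ypos). The queue length is color-coded:
 * white when empty, green below 10 frames, yellow from 10, red from 50.
 */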
static void draw_items(AVFilterContext *ctx, AVFrame *out,
                       int xpos, int ypos,
                       AVFilterLink *l,
                       size_t frames)
{
    GraphMonitorContext *s = ctx->priv;
    char buffer[1024] = { 0 };

    if (s->flags & MODE_FMT) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                     av_get_pix_fmt_name(l->format));
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                     av_get_sample_fmt_name(l->format));
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_SIZE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | size: %dx%d", l->w, l->h);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | channels: %d", l->channels);
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_RATE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | fps: %d/%d", l->frame_rate.num, l->frame_rate.den);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | samplerate: %d", l->sample_rate);
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_TB) {
        snprintf(buffer, sizeof(buffer)-1, " | tb: %d/%d", l->time_base.num, l->time_base.den);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_QUEUE) {
        snprintf(buffer, sizeof(buffer)-1, " | queue: ");
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
        snprintf(buffer, sizeof(buffer)-1, "%"SIZE_SPECIFIER, frames);
        drawtext(out, xpos, ypos, buffer, frames > 0 ? frames >= 10 ? frames >= 50 ? s->red : s->yellow : s->green : s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_FCIN) {
        snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_FCOUT) {
        snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_PTS) {
        snprintf(buffer, sizeof(buffer)-1, " | pts: %s", av_ts2str(l->current_pts_us));
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_TIME) {
        snprintf(buffer, sizeof(buffer)-1, " | time: %s", av_ts2timestr(l->current_pts_us, &AV_TIME_BASE_Q));
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_EOF && ff_outlink_get_status(l)) {
        snprintf(buffer, sizeof(buffer)-1, " | eof");
        drawtext(out, xpos, ypos, buffer, s->blue);
        xpos += strlen(buffer) * 8;
    }
}

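/*
 * Allocate one output frame, clear it to the background color and, for
 * every filter in the graph (only those with queued frames in compact
 * mode), draw its name followed by one line per input and output link.
 */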
static int create_frame(AVFilterContext *ctx, int64_t pts)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int xpos, ypos = 0;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    clear_image(s, out, outlink);

    for (int i = 0; i < ctx->graph->nb_filters; i++) {
        AVFilterContext *filter = ctx->graph->filters[i];
        char buffer[1024] = { 0 };

        if (s->mode && !filter_have_queued(filter))
            continue;

        xpos = 0;
        drawtext(out, xpos, ypos, filter->name, s->white);
        xpos += strlen(filter->name) * 8 + 10;
        drawtext(out, xpos, ypos, filter->filter->name, s->white);
        ypos += 10;
        for (int j = 0; j < filter->nb_inputs; j++) {
            AVFilterLink *l = filter->inputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if (s->mode && !frames)
                continue;

            xpos = 10;
            snprintf(buffer, sizeof(buffer)-1, "in%d: ", j);
            drawtext(out, xpos, ypos, buffer, s->white);
            xpos += strlen(buffer) * 8;
            drawtext(out, xpos, ypos, l->src->name, s->white);
            xpos += strlen(l->src->name) * 8 + 10;
            draw_items(ctx, out, xpos, ypos, l, frames);
            ypos += 10;
        }

        ypos += 2;
        for (int j = 0; j < filter->nb_outputs; j++) {
            AVFilterLink *l = filter->outputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if (s->mode && !frames)
                continue;

            xpos = 10;
            snprintf(buffer, sizeof(buffer)-1, "out%d: ", j);
            drawtext(out, xpos, ypos, buffer, s->white);
            xpos += strlen(buffer) * 8;
            drawtext(out, xpos, ypos, l->dst->name, s->white);
            xpos += strlen(l->dst->name) * 8 + 10;
            draw_items(ctx, out, xpos, ypos, l, frames);
            ypos += 10;
        }
        ypos += 5;
    }

    out->pts = pts;
    s->pts = pts + 1;
    return ff_filter_frame(outlink, out);
}

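/*
 * Input frames are consumed and freed right away; only their timestamps
 * are kept. One monitor frame is produced per output time-base tick while
 * the last seen input pts is ahead of the output clock; otherwise status
 * and frame requests are forwarded between the links.
 */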
static int activate(AVFilterContext *ctx)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = AV_NOPTS_VALUE;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (ff_inlink_queued_frames(inlink)) {
        AVFrame *frame = NULL;
        int ret;

        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            pts = frame->pts;
            av_frame_free(&frame);
        }
    }

    if (pts != AV_NOPTS_VALUE) {
        pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
        if (s->pts == AV_NOPTS_VALUE)
            s->pts = pts;
        s->next_pts = pts;
    }

    if (s->pts < s->next_pts && ff_outlink_frame_wanted(outlink))
        return create_frame(ctx, s->pts);

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

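/*
 * Set up the drawing colors and the output geometry and timing; the
 * output time base is the inverse of the requested frame rate.
 */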
static int config_output(AVFilterLink *outlink)
{
    GraphMonitorContext *s = outlink->src->priv;

    s->bg[3] = 255 * s->opacity;
    s->white[0] = s->white[1] = s->white[2] = 255;
    s->yellow[0] = s->yellow[1] = 255;
    s->red[0] = 255;
    s->green[1] = 255;
    s->blue[2] = 255;
    s->pts = AV_NOPTS_VALUE;
    s->next_pts = AV_NOPTS_VALUE;
    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->frame_rate = s->frame_rate;
    outlink->time_base = av_inv_q(s->frame_rate);

    return 0;
}

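/*
 * Illustrative usage (not part of this file): flag values are joined
 * with '+', e.g.
 *   ffmpeg -i INPUT -vf "graphmonitor=flags=queue+pts+time:m=compact" OUT
 */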
#if CONFIG_GRAPHMONITOR_FILTER

AVFILTER_DEFINE_CLASS(graphmonitor);

static const AVFilterPad graphmonitor_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad graphmonitor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_graphmonitor = {
    .name          = "graphmonitor",
    .description   = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size     = sizeof(GraphMonitorContext),
    .priv_class    = &graphmonitor_class,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = graphmonitor_inputs,
    .outputs       = graphmonitor_outputs,
};

#endif // CONFIG_GRAPHMONITOR_FILTER

#if CONFIG_AGRAPHMONITOR_FILTER

#define agraphmonitor_options graphmonitor_options
AVFILTER_DEFINE_CLASS(agraphmonitor);

static const AVFilterPad agraphmonitor_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad agraphmonitor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_agraphmonitor = {
    .name          = "agraphmonitor",
    .description   = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size     = sizeof(GraphMonitorContext),
    .priv_class    = &agraphmonitor_class,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = agraphmonitor_inputs,
    .outputs       = agraphmonitor_outputs,
};
#endif // CONFIG_AGRAPHMONITOR_FILTER