FFmpeg
f_graphmonitor.c
/*
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
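/*
 * graphmonitor/agraphmonitor: render statistics about every filter and link
 * in the running filtergraph (queue sizes, frame/sample counters, timestamps
 * and their deltas, EOF and disabled state) as an RGBA video stream, drawn
 * with the built-in 8x8 CGA bitmap font.
 */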
#include "config_components.h"

#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "libavutil/xga_font_data.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
typedef struct CacheItem {
    int64_t previous_pts_us;
} CacheItem;

typedef struct GraphMonitorContext {
    const AVClass *class;

    int w, h;
    float opacity;
    int mode;
    int flags;
    AVRational frame_rate;

    int eof;
    int eof_frames;
    int64_t pts;
    int64_t next_pts;
    uint8_t white[4];
    uint8_t yellow[4];
    uint8_t red[4];
    uint8_t green[4];
    uint8_t blue[4];
    uint8_t gray[4];
    uint8_t bg[4];

    CacheItem *cache;
    unsigned int cache_size;
    unsigned int cache_index;
} GraphMonitorContext;
enum {
    MODE_FULL       = 0,
    MODE_COMPACT    = 1 << 0,
    MODE_NOZERO     = 1 << 1,
    MODE_NOEOF      = 1 << 2,
    MODE_NODISABLED = 1 << 3,
    MODE_MAX        = 15
};
enum {
    FLAG_NONE       = 0 << 0,
    FLAG_QUEUE      = 1 << 0,
    FLAG_FCIN       = 1 << 1,
    FLAG_FCOUT      = 1 << 2,
    FLAG_PTS        = 1 << 3,
    FLAG_TIME       = 1 << 4,
    FLAG_TB         = 1 << 5,
    FLAG_FMT        = 1 << 6,
    FLAG_SIZE       = 1 << 7,
    FLAG_RATE       = 1 << 8,
    FLAG_EOF        = 1 << 9,
    FLAG_SCIN       = 1 << 10,
    FLAG_SCOUT      = 1 << 11,
    FLAG_PTS_DELTA  = 1 << 12,
    FLAG_TIME_DELTA = 1 << 13,
    FLAG_FC_DELTA   = 1 << 14,
    FLAG_SC_DELTA   = 1 << 15,
    FLAG_DISABLED   = 1 << 16,
};
#define OFFSET(x) offsetof(GraphMonitorContext, x)
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VFR AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption graphmonitor_options[] = {
    { "size",    "set monitor size",  OFFSET(w),       AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
    { "s",       "set monitor size",  OFFSET(w),       AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
    { "opacity", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT,      {.dbl=.9}, 0, 1, VFR },
    { "o",       "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT,      {.dbl=.9}, 0, 1, VFR },
    { "mode",    "set mode",          OFFSET(mode),    AV_OPT_TYPE_FLAGS,      {.i64=0}, 0, MODE_MAX, VFR, .unit = "mode" },
    { "m",       "set mode",          OFFSET(mode),    AV_OPT_TYPE_FLAGS,      {.i64=0}, 0, MODE_MAX, VFR, .unit = "mode" },
    { "full",       NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FULL},       0, 0, VFR, .unit = "mode" },
    { "compact",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_COMPACT},    0, 0, VFR, .unit = "mode" },
    { "nozero",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NOZERO},     0, 0, VFR, .unit = "mode" },
    { "noeof",      NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NOEOF},      0, 0, VFR, .unit = "mode" },
    { "nodisabled", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NODISABLED}, 0, 0, VFR, .unit = "mode" },
    { "flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=FLAG_QUEUE}, 0, INT_MAX, VFR, .unit = "flags" },
    { "f",     "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=FLAG_QUEUE}, 0, INT_MAX, VFR, .unit = "flags" },
    { "none",               NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_NONE},       0, 0, VFR, .unit = "flags" },
    { "all",                NULL, 0, AV_OPT_TYPE_CONST, {.i64=INT_MAX},         0, 0, VFR, .unit = "flags" },
    { "queue",              NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_QUEUE},      0, 0, VFR, .unit = "flags" },
    { "frame_count_in",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_FCOUT},      0, 0, VFR, .unit = "flags" },
    { "frame_count_out",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_FCIN},       0, 0, VFR, .unit = "flags" },
    { "frame_count_delta",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_FC_DELTA},   0, 0, VFR, .unit = "flags" },
    { "pts",                NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_PTS},        0, 0, VFR, .unit = "flags" },
    { "pts_delta",          NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_PTS_DELTA},  0, 0, VFR, .unit = "flags" },
    { "time",               NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_TIME},       0, 0, VFR, .unit = "flags" },
    { "time_delta",         NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_TIME_DELTA}, 0, 0, VFR, .unit = "flags" },
    { "timebase",           NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_TB},         0, 0, VFR, .unit = "flags" },
    { "format",             NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_FMT},        0, 0, VFR, .unit = "flags" },
    { "size",               NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_SIZE},       0, 0, VFR, .unit = "flags" },
    { "rate",               NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_RATE},       0, 0, VFR, .unit = "flags" },
    { "eof",                NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_EOF},        0, 0, VFR, .unit = "flags" },
    { "sample_count_in",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_SCOUT},      0, 0, VFR, .unit = "flags" },
    { "sample_count_out",   NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_SCIN},       0, 0, VFR, .unit = "flags" },
    { "sample_count_delta", NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_SC_DELTA},   0, 0, VFR, .unit = "flags" },
    { "disabled",           NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_DISABLED},   0, 0, VFR, .unit = "flags" },
    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
    { "r",    "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
    { NULL }
};
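/* Allocate the per-link timestamp cache used for the pts/time delta flags. */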
static av_cold int init(AVFilterContext *ctx)
{
    GraphMonitorContext *s = ctx->priv;

    s->cache = av_fast_realloc(NULL, &s->cache_size,
                               8192 * sizeof(*(s->cache)));
    if (!s->cache)
        return AVERROR(ENOMEM);

    return 0;
}
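/* The monitor always produces RGBA on its output link; the input side is left
 * to default format negotiation. */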
static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGBA,
        AV_PIX_FMT_NONE
    };
    int ret;

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(fmts_list, &outlink->incfg.formats)) < 0)
        return ret;

    return 0;
}
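/* Fill the frame with the (semi-transparent) background color: write the
 * first row, then replicate it down the image. */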
static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
{
    const int h = out->height;
    const int w = out->width;
    uint8_t *dst = out->data[0];
    int bg = AV_RN32(s->bg);

    for (int j = 0; j < w; j++)
        AV_WN32(dst + j * 4, bg);
    dst += out->linesize[0];
    for (int i = 1; i < h; i++) {
        memcpy(dst, out->data[0], w * 4);
        dst += out->linesize[0];
    }
}
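/* Render text at (x, y) with the 8x8 CGA bitmap font, writing only the RGB
 * channels; drawing is skipped entirely if the string would not fit. */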
static void drawtext(AVFrame *pic, int x, int y, const char *txt,
                     const int len, uint8_t *color)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;

    if (y + 8 >= pic->height ||
        x + len * 8 >= pic->width)
        return;

    for (i = 0; txt[i]; i++) {
        int char_y, mask;

        uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*4;
        for (char_y = 0; char_y < font_height; char_y++) {
            for (mask = 0x80; mask; mask >>= 1) {
                if (font[txt[i] * font_height + char_y] & mask) {
                    p[0] = color[0];
                    p[1] = color[1];
                    p[2] = color[2];
                }
                p += 4;
            }
            p += pic->linesize[0] - 8 * 4;
        }
    }
}
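/* Return 1 if every input and output link of the filter has its status (EOF) set. */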
static int filter_have_eof(AVFilterContext *filter)
{
    for (int j = 0; j < filter->nb_inputs; j++) {
        AVFilterLink *l = filter->inputs[j];

        if (!ff_outlink_get_status(l))
            return 0;
    }

    for (int j = 0; j < filter->nb_outputs; j++) {
        AVFilterLink *l = filter->outputs[j];

        if (!ff_outlink_get_status(l))
            return 0;
    }

    return 1;
}
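/* Return 1 if any input or output link of the filter still has frames queued. */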
static int filter_have_queued(AVFilterContext *filter)
{
    for (int j = 0; j < filter->nb_inputs; j++) {
        AVFilterLink *l = filter->inputs[j];
        size_t frames = ff_inlink_queued_frames(l);

        if (frames)
            return 1;
    }

    for (int j = 0; j < filter->nb_outputs; j++) {
        AVFilterLink *l = filter->outputs[j];
        size_t frames = ff_inlink_queued_frames(l);

        if (frames)
            return 1;
    }

    return 0;
}
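/* Draw the stats selected by the flags option for one link, starting at
 * (xpos, ypos): format, size/channels, rate, timebase, queue length (colored
 * green/yellow/red by depth), frame and sample counters with deltas, pts/time
 * with deltas, EOF and disabled markers. The link's current pts is stored in
 * the cache (growing it when needed) so the next frame can show deltas. */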
static int draw_items(AVFilterContext *ctx,
                      AVFilterContext *filter,
                      AVFrame *out,
                      int xpos, int ypos,
                      AVFilterLink *l,
                      size_t frames)
{
    GraphMonitorContext *s = ctx->priv;
    int64_t previous_pts_us = s->cache[s->cache_index].previous_pts_us;
    int64_t current_pts_us = l->current_pts_us;
    const int flags = s->flags;
    const int mode = s->mode;
    char buffer[1024] = { 0 };
    int len = 0;

    if (flags & FLAG_FMT) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                           av_get_pix_fmt_name(l->format));
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                           av_get_sample_fmt_name(l->format));
        }
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if (flags & FLAG_SIZE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | size: %dx%d", l->w, l->h);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | channels: %d", l->ch_layout.nb_channels);
        }
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if (flags & FLAG_RATE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | fps: %d/%d", l->frame_rate.num, l->frame_rate.den);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | samplerate: %d", l->sample_rate);
        }
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if (flags & FLAG_TB) {
        len = snprintf(buffer, sizeof(buffer)-1, " | tb: %d/%d", l->time_base.num, l->time_base.den);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_QUEUE) && (!(mode & MODE_NOZERO) || frames)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | queue: ");
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
        len = snprintf(buffer, sizeof(buffer)-1, "%"SIZE_SPECIFIER, frames);
        drawtext(out, xpos, ypos, buffer, len, frames > 0 ? frames >= 10 ? frames >= 50 ? s->red : s->yellow : s->green : s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_FCIN) && (!(mode & MODE_NOZERO) || l->frame_count_in)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_FCOUT) && (!(mode & MODE_NOZERO) || l->frame_count_out)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_FC_DELTA) && (!(mode & MODE_NOZERO) || (l->frame_count_in - l->frame_count_out))) {
        len = snprintf(buffer, sizeof(buffer)-1, " | delta: %"PRId64, l->frame_count_in - l->frame_count_out);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_SCIN) && (!(mode & MODE_NOZERO) || l->sample_count_in)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | sin: %"PRId64, l->sample_count_in);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_SCOUT) && (!(mode & MODE_NOZERO) || l->sample_count_out)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | sout: %"PRId64, l->sample_count_out);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_SC_DELTA) && (!(mode & MODE_NOZERO) || (l->sample_count_in - l->sample_count_out))) {
        len = snprintf(buffer, sizeof(buffer)-1, " | sdelta: %"PRId64, l->sample_count_in - l->sample_count_out);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_PTS) && (!(mode & MODE_NOZERO) || current_pts_us)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | pts: %s", av_ts2str(current_pts_us));
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_PTS_DELTA) && (!(mode & MODE_NOZERO) || (current_pts_us - previous_pts_us))) {
        len = snprintf(buffer, sizeof(buffer)-1, " | pts_delta: %s", av_ts2str(current_pts_us - previous_pts_us));
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_TIME) && (!(mode & MODE_NOZERO) || current_pts_us)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | time: %s", av_ts2timestr(current_pts_us, &AV_TIME_BASE_Q));
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_TIME_DELTA) && (!(mode & MODE_NOZERO) || (current_pts_us - previous_pts_us))) {
        len = snprintf(buffer, sizeof(buffer)-1, " | time_delta: %s", av_ts2timestr(current_pts_us - previous_pts_us, &AV_TIME_BASE_Q));
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_EOF) && ff_outlink_get_status(l)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | eof");
        drawtext(out, xpos, ypos, buffer, len, s->blue);
        xpos += len * 8;
    }
    if ((flags & FLAG_DISABLED) && filter->is_disabled) {
        len = snprintf(buffer, sizeof(buffer)-1, " | off");
        drawtext(out, xpos, ypos, buffer, len, s->gray);
        xpos += len * 8;
    }

    s->cache[s->cache_index].previous_pts_us = l->current_pts_us;

    if (s->cache_index + 1 >= s->cache_size / sizeof(*(s->cache))) {
        void *ptr = av_fast_realloc(s->cache, &s->cache_size, s->cache_size * 2);

        if (!ptr)
            return AVERROR(ENOMEM);
        s->cache = ptr;
    }
    s->cache_index++;

    return 0;
}
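/* Build one monitor frame: clear the RGBA canvas, then for every filter in the
 * graph draw a header line (instance name plus filter name) followed by one
 * line per input and output link, honoring the compact/nozero/noeof/nodisabled
 * modes. */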
static int create_frame(AVFilterContext *ctx, int64_t pts)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret, len, xpos, ypos = 0;
    char buffer[1024];
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    s->bg[3] = 255 * s->opacity;
    clear_image(s, out, outlink);

    s->cache_index = 0;

    for (int i = 0; i < ctx->graph->nb_filters; i++) {
        AVFilterContext *filter = ctx->graph->filters[i];

        if ((s->mode & MODE_COMPACT) && !filter_have_queued(filter))
            continue;

        if ((s->mode & MODE_NOEOF) && filter_have_eof(filter))
            continue;

        if ((s->mode & MODE_NODISABLED) && filter->is_disabled)
            continue;

        xpos = 0;
        len = strlen(filter->name);
        drawtext(out, xpos, ypos, filter->name, len, s->white);
        xpos += len * 8 + 10;
        len = strlen(filter->filter->name);
        drawtext(out, xpos, ypos, filter->filter->name, len, s->white);
        ypos += 10;
        for (int j = 0; j < filter->nb_inputs; j++) {
            AVFilterLink *l = filter->inputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if ((s->mode & MODE_COMPACT) && !frames)
                continue;

            if ((s->mode & MODE_NOEOF) && ff_outlink_get_status(l))
                continue;

            xpos = 10;
            len = snprintf(buffer, sizeof(buffer)-1, "in%d: ", j);
            drawtext(out, xpos, ypos, buffer, len, s->white);
            xpos += len * 8;
            len = strlen(l->src->name);
            drawtext(out, xpos, ypos, l->src->name, len, s->white);
            xpos += len * 8 + 10;
            ret = draw_items(ctx, filter, out, xpos, ypos, l, frames);
            if (ret < 0)
                goto error;
            ypos += 10;
        }

        ypos += 2;
        for (int j = 0; j < filter->nb_outputs; j++) {
            AVFilterLink *l = filter->outputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if ((s->mode & MODE_COMPACT) && !frames)
                continue;

            if ((s->mode & MODE_NOEOF) && ff_outlink_get_status(l))
                continue;

            xpos = 10;
            len = snprintf(buffer, sizeof(buffer)-1, "out%d: ", j);
            drawtext(out, xpos, ypos, buffer, len, s->white);
            xpos += len * 8;
            len = strlen(l->dst->name);
            drawtext(out, xpos, ypos, l->dst->name, len, s->white);
            xpos += len * 8 + 10;
            ret = draw_items(ctx, filter, out, xpos, ypos, l, frames);
            if (ret < 0)
                goto error;
            ypos += 10;
        }
        ypos += 5;
    }

    out->pts = pts;
    out->duration = 1;
    s->pts = pts + 1;
    if (s->eof_frames)
        s->eof_frames = 0;
    return ff_filter_frame(outlink, out);
error:
    av_frame_free(&out);
    return ret;
}
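/* Activate callback: input frames are consumed only to track their timestamps
 * (and are freed immediately); one monitor frame is produced per output tick,
 * one final frame is emitted after input EOF, then EOF is forwarded downstream. */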
static int activate(AVFilterContext *ctx)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = AV_NOPTS_VALUE;
    int status;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (!s->eof && ff_inlink_queued_frames(inlink)) {
        AVFrame *frame = NULL;
        int ret;

        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            pts = frame->pts;
            av_frame_free(&frame);
        }
    }

    if (pts != AV_NOPTS_VALUE) {
        pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
        if (s->pts == AV_NOPTS_VALUE)
            s->pts = pts;
        s->next_pts = pts;
    } else if (s->eof) {
        s->next_pts = s->pts + 1;
    }

    if (s->eof && s->eof_frames == 0) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
        return 0;
    }

    if (s->eof || (s->pts < s->next_pts && ff_outlink_frame_wanted(outlink)))
        return create_frame(ctx, s->pts);

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        s->eof = 1;
        s->eof_frames = 1;
        ff_filter_set_ready(ctx, 100);
        return 0;
    }

    if (!s->eof) {
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    } else {
        ff_filter_set_ready(ctx, 100);
        return 0;
    }

    return FFERROR_NOT_READY;
}
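/* Set up the palette, reset the timestamps and configure the output link
 * geometry, frame rate and time base. */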
static int config_output(AVFilterLink *outlink)
{
    GraphMonitorContext *s = outlink->src->priv;

    s->white[0] = s->white[1] = s->white[2] = 255;
    s->yellow[0] = s->yellow[1] = 255;
    s->red[0] = 255;
    s->green[1] = 255;
    s->blue[2] = 255;
    s->gray[0] = s->gray[1] = s->gray[2] = 128;
    s->pts = AV_NOPTS_VALUE;
    s->next_pts = AV_NOPTS_VALUE;
    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->frame_rate = s->frame_rate;
    outlink->time_base = av_inv_q(s->frame_rate);

    return 0;
}
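/* Free the per-link timestamp cache. */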
static av_cold void uninit(AVFilterContext *ctx)
{
    GraphMonitorContext *s = ctx->priv;

    av_freep(&s->cache);
    s->cache_size = s->cache_index = 0;
}
AVFILTER_DEFINE_CLASS_EXT(graphmonitor, "(a)graphmonitor", graphmonitor_options);

static const AVFilterPad graphmonitor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};
#if CONFIG_GRAPHMONITOR_FILTER

const AVFilter ff_vf_graphmonitor = {
    .name            = "graphmonitor",
    .description     = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size       = sizeof(GraphMonitorContext),
    .priv_class      = &graphmonitor_class,
    .init            = init,
    .uninit          = uninit,
    .activate        = activate,
    FILTER_INPUTS(ff_video_default_filterpad),
    FILTER_OUTPUTS(graphmonitor_outputs),
    FILTER_QUERY_FUNC(query_formats),
    .process_command = ff_filter_process_command,
};

#endif // CONFIG_GRAPHMONITOR_FILTER
#if CONFIG_AGRAPHMONITOR_FILTER

const AVFilter ff_avf_agraphmonitor = {
    .name            = "agraphmonitor",
    .description     = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_class      = &graphmonitor_class,
    .priv_size       = sizeof(GraphMonitorContext),
    .init            = init,
    .uninit          = uninit,
    .activate        = activate,
    FILTER_INPUTS(ff_audio_default_filterpad),
    FILTER_OUTPUTS(graphmonitor_outputs),
    FILTER_QUERY_FUNC(query_formats),
    .process_command = ff_filter_process_command,
};
#endif // CONFIG_AGRAPHMONITOR_FILTER
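/*
 * Illustrative usage (a sketch, not part of the original file): the filter,
 * option and flag names come from the tables above, while the input file and
 * surrounding graph are only assumptions.
 *
 *   ffmpeg -i input.mkv \
 *       -vf "scale=1280:720,graphmonitor=m=compact:f=queue+pts+time" \
 *       -f null -
 *
 * replaces the scaled video with an hd720 RGBA rendering of the graph state,
 * listing (in compact mode) only filters and links that have frames queued,
 * together with their queue depth, pts and time.
 */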