FFmpeg
f_graphmonitor.c
/*
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
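
/*
 * graphmonitor/agraphmonitor: render live filtergraph statistics (queue
 * sizes, frame/sample counts, timestamps, formats, EOF state, ...) as an
 * RGBA video stream, one row of text per filter and per link.
 *
 * Illustrative command line (hedged example, not part of this file):
 *
 *   ffmpeg -i INPUT -vf "graphmonitor=flags=queue+pts+time" -f null -
 */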

#include "config_components.h"

#include <float.h>

#include "libavutil/pixdesc.h"
#include "libavutil/eval.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "libavutil/xga_font_data.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct CacheItem {
    int64_t previous_pts_us;
} CacheItem;

typedef struct GraphMonitorContext {
    const AVClass *class;

    int w, h;
    float opacity;
    int mode;
    int flags;
    AVRational frame_rate;

    int eof;
    int eof_frames;
    int64_t pts;
    int64_t next_pts;
    uint8_t white[4];
    uint8_t yellow[4];
    uint8_t red[4];
    uint8_t green[4];
    uint8_t blue[4];
    uint8_t gray[4];
    uint8_t bg[4];

    CacheItem *cache;
    unsigned int cache_size;
    unsigned int cache_index;
} GraphMonitorContext;

enum {
    MODE_FULL = 0,
    MODE_COMPACT = 1 << 0,
    MODE_NOZERO = 1 << 1,
    MODE_NOEOF = 1 << 2,
    MODE_NODISABLED = 1 << 3,
    MODE_MAX = 15
};

enum {
    FLAG_NONE = 0 << 0,
    FLAG_QUEUE = 1 << 0,
    FLAG_FCIN = 1 << 1,
    FLAG_FCOUT = 1 << 2,
    FLAG_PTS = 1 << 3,
    FLAG_TIME = 1 << 4,
    FLAG_TB = 1 << 5,
    FLAG_FMT = 1 << 6,
    FLAG_SIZE = 1 << 7,
    FLAG_RATE = 1 << 8,
    FLAG_EOF = 1 << 9,
    FLAG_SCIN = 1 << 10,
    FLAG_SCOUT = 1 << 11,
    FLAG_PTS_DELTA = 1 << 12,
    FLAG_TIME_DELTA = 1 << 13,
    FLAG_FC_DELTA = 1 << 14,
    FLAG_SC_DELTA = 1 << 15,
    FLAG_DISABLED = 1 << 16,
};

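/*
 * MODE_* values modify what gets drawn (a small bitmask; MODE_MAX covers the
 * four modifier bits), while FLAG_* values select which per-link statistics
 * are shown. The option table below exposes both as AV_OPT_TYPE_FLAGS with
 * named constants.
 */
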
#define OFFSET(x) offsetof(GraphMonitorContext, x)
#define VF  AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VFR AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption graphmonitor_options[] = {
    { "size",    "set monitor size",  OFFSET(w),       AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
    { "s",       "set monitor size",  OFFSET(w),       AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
    { "opacity", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT,      {.dbl=.9},      0, 1, VFR },
    { "o",       "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT,      {.dbl=.9},      0, 1, VFR },
    { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, MODE_MAX, VFR, .unit = "mode" },
    { "m",    "set mode", OFFSET(mode), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, MODE_MAX, VFR, .unit = "mode" },
        { "full",       NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FULL},       0, 0, VFR, .unit = "mode" },
        { "compact",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_COMPACT},    0, 0, VFR, .unit = "mode" },
        { "nozero",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NOZERO},     0, 0, VFR, .unit = "mode" },
        { "noeof",      NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NOEOF},      0, 0, VFR, .unit = "mode" },
        { "nodisabled", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_NODISABLED}, 0, 0, VFR, .unit = "mode" },
    { "flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=FLAG_QUEUE}, 0, INT_MAX, VFR, .unit = "flags" },
    { "f",     "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=FLAG_QUEUE}, 0, INT_MAX, VFR, .unit = "flags" },
        { "none",               NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_NONE},       0, 0, VFR, .unit = "flags" },
        { "all",                NULL, 0, AV_OPT_TYPE_CONST, {.i64=INT_MAX},         0, 0, VFR, .unit = "flags" },
        { "queue",              NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_QUEUE},      0, 0, VFR, .unit = "flags" },
        { "frame_count_in",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_FCOUT},      0, 0, VFR, .unit = "flags" },
        { "frame_count_out",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_FCIN},       0, 0, VFR, .unit = "flags" },
        { "frame_count_delta",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_FC_DELTA},   0, 0, VFR, .unit = "flags" },
        { "pts",                NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_PTS},        0, 0, VFR, .unit = "flags" },
        { "pts_delta",          NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_PTS_DELTA},  0, 0, VFR, .unit = "flags" },
        { "time",               NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_TIME},       0, 0, VFR, .unit = "flags" },
        { "time_delta",         NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_TIME_DELTA}, 0, 0, VFR, .unit = "flags" },
        { "timebase",           NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_TB},         0, 0, VFR, .unit = "flags" },
        { "format",             NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_FMT},        0, 0, VFR, .unit = "flags" },
        { "size",               NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_SIZE},       0, 0, VFR, .unit = "flags" },
        { "rate",               NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_RATE},       0, 0, VFR, .unit = "flags" },
        { "eof",                NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_EOF},        0, 0, VFR, .unit = "flags" },
        { "sample_count_in",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_SCOUT},      0, 0, VFR, .unit = "flags" },
        { "sample_count_out",   NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_SCIN},       0, 0, VFR, .unit = "flags" },
        { "sample_count_delta", NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_SC_DELTA},   0, 0, VFR, .unit = "flags" },
        { "disabled",           NULL, 0, AV_OPT_TYPE_CONST, {.i64=FLAG_DISABLED},   0, 0, VFR, .unit = "flags" },
    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
    { "r",    "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
    { NULL }
};
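
/*
 * Options tagged VFR carry AV_OPT_FLAG_RUNTIME_PARAM and are routed through
 * ff_filter_process_command() (see the filter definitions below), so they can
 * be changed while the graph runs. Hedged example using the sendcmd filter;
 * the target name and timing are illustrative:
 *
 *   sendcmd=c='10.0 graphmonitor m compact'
 */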

static av_cold int init(AVFilterContext *ctx)
{
    GraphMonitorContext *s = ctx->priv;

    s->cache = av_fast_realloc(NULL, &s->cache_size,
                               8192 * sizeof(*(s->cache)));
    if (!s->cache)
        return AVERROR(ENOMEM);

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGBA,
        AV_PIX_FMT_NONE
    };
    int ret;

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(fmts_list, &outlink->incfg.formats)) < 0)
        return ret;

    return 0;
}
static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
{
    const int h = out->height;
    const int w = out->width;
    uint8_t *dst = out->data[0];
    int bg = AV_RN32(s->bg);

    for (int j = 0; j < w; j++)
        AV_WN32(dst + j * 4, bg);
    dst += out->linesize[0];
    for (int i = 1; i < h; i++) {
        memcpy(dst, out->data[0], w * 4);
        dst += out->linesize[0];
    }
}

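/* Minimal text renderer: blit 8x8 glyphs from the built-in CGA font
 * (avpriv_cga_font) into the packed RGBA frame, skipping strings that
 * would not fit inside the picture. */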
static void drawtext(AVFrame *pic, int x, int y, const char *txt,
                     const int len, uint8_t *color)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;

    if (y + 8 >= pic->height ||
        x + len * 8 >= pic->width)
        return;

    for (i = 0; txt[i]; i++) {
        int char_y, mask;

        uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*4;
        for (char_y = 0; char_y < font_height; char_y++) {
            for (mask = 0x80; mask; mask >>= 1) {
                if (font[txt[i] * font_height + char_y] & mask) {
                    p[0] = color[0];
                    p[1] = color[1];
                    p[2] = color[2];
                }
                p += 4;
            }
            p += pic->linesize[0] - 8 * 4;
        }
    }
}

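/* A filter is considered fully at EOF when every one of its input and output
 * links already carries a status; filter_have_queued() reports whether any of
 * its links still has frames waiting in its FIFO. */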
static int filter_have_eof(AVFilterContext *filter)
{
    for (int j = 0; j < filter->nb_inputs; j++) {
        AVFilterLink *l = filter->inputs[j];

        if (!ff_outlink_get_status(l))
            return 0;
    }

    for (int j = 0; j < filter->nb_outputs; j++) {
        AVFilterLink *l = filter->outputs[j];

        if (!ff_outlink_get_status(l))
            return 0;
    }

    return 1;
}

static int filter_have_queued(AVFilterContext *filter)
{
    for (int j = 0; j < filter->nb_inputs; j++) {
        AVFilterLink *l = filter->inputs[j];
        size_t frames = ff_inlink_queued_frames(l);

        if (frames)
            return 1;
    }

    for (int j = 0; j < filter->nb_outputs; j++) {
        AVFilterLink *l = filter->outputs[j];
        size_t frames = ff_inlink_queued_frames(l);

        if (frames)
            return 1;
    }

    return 0;
}

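/* Draw one line of statistics for a single link. The queue depth is
 * colour-coded (green, yellow from 10 queued frames, red from 50). A per-link
 * CacheItem keeps the previously seen pts so the *_delta columns can be
 * derived; the cache grows on demand via av_fast_realloc(). */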
static int draw_items(AVFilterContext *ctx,
                      AVFilterContext *filter,
                      AVFrame *out,
                      int xpos, int ypos,
                      AVFilterLink *l,
                      size_t frames)
{
    GraphMonitorContext *s = ctx->priv;
    int64_t previous_pts_us = s->cache[s->cache_index].previous_pts_us;
    int64_t current_pts_us = l->current_pts_us;
    const int flags = s->flags;
    const int mode = s->mode;
    char buffer[1024] = { 0 };
    int len = 0;

    if (flags & FLAG_FMT) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                           av_get_pix_fmt_name(l->format));
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                           av_get_sample_fmt_name(l->format));
        }
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if (flags & FLAG_SIZE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | size: %dx%d", l->w, l->h);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | channels: %d", l->ch_layout.nb_channels);
        }
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if (flags & FLAG_RATE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | fps: %d/%d", l->frame_rate.num, l->frame_rate.den);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            len = snprintf(buffer, sizeof(buffer)-1, " | samplerate: %d", l->sample_rate);
        }
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if (flags & FLAG_TB) {
        len = snprintf(buffer, sizeof(buffer)-1, " | tb: %d/%d", l->time_base.num, l->time_base.den);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_QUEUE) && (!(mode & MODE_NOZERO) || frames)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | queue: ");
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
        len = snprintf(buffer, sizeof(buffer)-1, "%"SIZE_SPECIFIER, frames);
        drawtext(out, xpos, ypos, buffer, len, frames > 0 ? frames >= 10 ? frames >= 50 ? s->red : s->yellow : s->green : s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_FCIN) && (!(mode & MODE_NOZERO) || l->frame_count_in)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_FCOUT) && (!(mode & MODE_NOZERO) || l->frame_count_out)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_FC_DELTA) && (!(mode & MODE_NOZERO) || (l->frame_count_in - l->frame_count_out))) {
        len = snprintf(buffer, sizeof(buffer)-1, " | delta: %"PRId64, l->frame_count_in - l->frame_count_out);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_SCIN) && (!(mode & MODE_NOZERO) || l->sample_count_in)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | sin: %"PRId64, l->sample_count_in);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_SCOUT) && (!(mode & MODE_NOZERO) || l->sample_count_out)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | sout: %"PRId64, l->sample_count_out);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_SC_DELTA) && (!(mode & MODE_NOZERO) || (l->sample_count_in - l->sample_count_out))) {
        len = snprintf(buffer, sizeof(buffer)-1, " | sdelta: %"PRId64, l->sample_count_in - l->sample_count_out);
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_PTS) && (!(mode & MODE_NOZERO) || current_pts_us)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | pts: %s", av_ts2str(current_pts_us));
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_PTS_DELTA) && (!(mode & MODE_NOZERO) || (current_pts_us - previous_pts_us))) {
        len = snprintf(buffer, sizeof(buffer)-1, " | pts_delta: %s", av_ts2str(current_pts_us - previous_pts_us));
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_TIME) && (!(mode & MODE_NOZERO) || current_pts_us)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | time: %s", av_ts2timestr(current_pts_us, &AV_TIME_BASE_Q));
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_TIME_DELTA) && (!(mode & MODE_NOZERO) || (current_pts_us - previous_pts_us))) {
        len = snprintf(buffer, sizeof(buffer)-1, " | time_delta: %s", av_ts2timestr(current_pts_us - previous_pts_us, &AV_TIME_BASE_Q));
        drawtext(out, xpos, ypos, buffer, len, s->white);
        xpos += len * 8;
    }
    if ((flags & FLAG_EOF) && ff_outlink_get_status(l)) {
        len = snprintf(buffer, sizeof(buffer)-1, " | eof");
        drawtext(out, xpos, ypos, buffer, len, s->blue);
        xpos += len * 8;
    }
    if ((flags & FLAG_DISABLED) && filter->is_disabled) {
        len = snprintf(buffer, sizeof(buffer)-1, " | off");
        drawtext(out, xpos, ypos, buffer, len, s->gray);
        xpos += len * 8;
    }

    s->cache[s->cache_index].previous_pts_us = l->current_pts_us;

    if (s->cache_index + 1 >= s->cache_size / sizeof(*(s->cache))) {
        void *ptr = av_fast_realloc(s->cache, &s->cache_size, s->cache_size * 2);

        if (!ptr)
            return AVERROR(ENOMEM);
        s->cache = ptr;
    }
    s->cache_index++;

    return 0;
}

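/* Render one monitor frame: allocate an output buffer, clear it with the
 * background at the configured opacity, then walk every filter in the graph
 * drawing a header row (instance name plus filter name) followed by one row
 * per input and output link, honouring the compact/nozero/noeof/nodisabled
 * modes. */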
static int create_frame(AVFilterContext *ctx, int64_t pts)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret, len, xpos, ypos = 0;
    char buffer[1024];
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    s->bg[3] = 255 * s->opacity;
    clear_image(s, out, outlink);

    s->cache_index = 0;

    for (int i = 0; i < ctx->graph->nb_filters; i++) {
        AVFilterContext *filter = ctx->graph->filters[i];

        if ((s->mode & MODE_COMPACT) && !filter_have_queued(filter))
            continue;

        if ((s->mode & MODE_NOEOF) && filter_have_eof(filter))
            continue;

        if ((s->mode & MODE_NODISABLED) && filter->is_disabled)
            continue;

        xpos = 0;
        len = strlen(filter->name);
        drawtext(out, xpos, ypos, filter->name, len, s->white);
        xpos += len * 8 + 10;
        len = strlen(filter->filter->name);
        drawtext(out, xpos, ypos, filter->filter->name, len, s->white);
        ypos += 10;
        for (int j = 0; j < filter->nb_inputs; j++) {
            AVFilterLink *l = filter->inputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if ((s->mode & MODE_COMPACT) && !frames)
                continue;

            if ((s->mode & MODE_NOEOF) && ff_outlink_get_status(l))
                continue;

            xpos = 10;
            len = snprintf(buffer, sizeof(buffer)-1, "in%d: ", j);
            drawtext(out, xpos, ypos, buffer, len, s->white);
            xpos += len * 8;
            len = strlen(l->src->name);
            drawtext(out, xpos, ypos, l->src->name, len, s->white);
            xpos += len * 8 + 10;
            ret = draw_items(ctx, filter, out, xpos, ypos, l, frames);
            if (ret < 0)
                goto error;
            ypos += 10;
        }

        ypos += 2;
        for (int j = 0; j < filter->nb_outputs; j++) {
            AVFilterLink *l = filter->outputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if ((s->mode & MODE_COMPACT) && !frames)
                continue;

            if ((s->mode & MODE_NOEOF) && ff_outlink_get_status(l))
                continue;

            xpos = 10;
            len = snprintf(buffer, sizeof(buffer)-1, "out%d: ", j);
            drawtext(out, xpos, ypos, buffer, len, s->white);
            xpos += len * 8;
            len = strlen(l->dst->name);
            drawtext(out, xpos, ypos, l->dst->name, len, s->white);
            xpos += len * 8 + 10;
            ret = draw_items(ctx, filter, out, xpos, ypos, l, frames);
            if (ret < 0)
                goto error;
            ypos += 10;
        }
        ypos += 5;
    }

    out->pts = pts;
    out->duration = 1;
    s->pts = pts + 1;
    if (s->eof_frames)
        s->eof_frames = 0;
    return ff_filter_frame(outlink, out);
error:
    av_frame_free(&out);
    return ret;
}

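/* activate(): consume at most one queued input frame per call to track the
 * input pts, emit a monitor frame whenever the output wants one (or, after
 * EOF, one final frame before forwarding the EOF status), and otherwise
 * forward frame requests back to the input. */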
static int activate(AVFilterContext *ctx)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = AV_NOPTS_VALUE;
    int status;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (!s->eof && ff_inlink_queued_frames(inlink)) {
        AVFrame *frame = NULL;
        int ret;

        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            pts = frame->pts;
            av_frame_free(&frame);
        }
    }

    if (pts != AV_NOPTS_VALUE) {
        pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
        if (s->pts == AV_NOPTS_VALUE)
            s->pts = pts;
        s->next_pts = pts;
    } else if (s->eof) {
        s->next_pts = s->pts + 1;
    }

    if (s->eof && s->eof_frames == 0) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
        return 0;
    }

    if (s->eof || (s->pts < s->next_pts && ff_outlink_frame_wanted(outlink)))
        return create_frame(ctx, s->pts);

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        s->eof = 1;
        s->eof_frames = 1;
        ff_filter_set_ready(ctx, 100);
        return 0;
    }

    if (!s->eof) {
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    } else {
        ff_filter_set_ready(ctx, 100);
        return 0;
    }

    return FFERROR_NOT_READY;
}

static int config_output(AVFilterLink *outlink)
{
    GraphMonitorContext *s = outlink->src->priv;

    s->white[0] = s->white[1] = s->white[2] = 255;
    s->yellow[0] = s->yellow[1] = 255;
    s->red[0] = 255;
    s->green[1] = 255;
    s->blue[2] = 255;
    s->gray[0] = s->gray[1] = s->gray[2] = 128;
    s->pts = AV_NOPTS_VALUE;
    s->next_pts = AV_NOPTS_VALUE;
    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->frame_rate = s->frame_rate;
    outlink->time_base = av_inv_q(s->frame_rate);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    GraphMonitorContext *s = ctx->priv;

    av_freep(&s->cache);
    s->cache_size = s->cache_index = 0;
}

AVFILTER_DEFINE_CLASS_EXT(graphmonitor, "(a)graphmonitor", graphmonitor_options);

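/* The video (graphmonitor) and audio (agraphmonitor) variants share one
 * option class and one output pad description; only the input side differs. */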
static const AVFilterPad graphmonitor_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
};

#if CONFIG_GRAPHMONITOR_FILTER

const AVFilter ff_vf_graphmonitor = {
    .name          = "graphmonitor",
    .description   = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size     = sizeof(GraphMonitorContext),
    .priv_class    = &graphmonitor_class,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    FILTER_INPUTS(ff_video_default_filterpad),
    FILTER_OUTPUTS(graphmonitor_outputs),
    FILTER_QUERY_FUNC(query_formats),
    .process_command = ff_filter_process_command,
};

#endif // CONFIG_GRAPHMONITOR_FILTER

#if CONFIG_AGRAPHMONITOR_FILTER

const AVFilter ff_avf_agraphmonitor = {
    .name          = "agraphmonitor",
    .description   = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_class    = &graphmonitor_class,
    .priv_size     = sizeof(GraphMonitorContext),
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    FILTER_INPUTS(ff_audio_default_filterpad),
    FILTER_OUTPUTS(graphmonitor_outputs),
    FILTER_QUERY_FUNC(query_formats),
    .process_command = ff_filter_process_command,
};
#endif // CONFIG_AGRAPHMONITOR_FILTER