avf_ahistogram.c
/*
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "audio.h"
#include "video.h"
#include "internal.h"

enum DisplayScale   { LINEAR, SQRT, CBRT, LOG, RLOG, NB_SCALES };
enum AmplitudeScale { ALINEAR, ALOG, NB_ASCALES };
enum SlideMode      { REPLACE, SCROLL, NB_SLIDES };
enum DisplayMode    { SINGLE, SEPARATE, NB_DMODES };

typedef struct AudioHistogramContext {
    const AVClass *class;
    AVFrame *out;
    int w, h;
    AVRational frame_rate;
    uint64_t *achistogram;
    uint64_t *shistogram;
    int ascale;
    int scale;
    float phisto;
    int histogram_h;
    int apos;
    int ypos;
    int slide;
    int dmode;
    int dchannels;
    int count;
    int frame_count;
    float *combine_buffer;
    AVFrame *in[101];
    int first;
    int nb_samples;
} AudioHistogramContext;

#define OFFSET(x) offsetof(AudioHistogramContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption ahistogram_options[] = {
    { "dmode", "set method to display channels", OFFSET(dmode), AV_OPT_TYPE_INT, {.i64=SINGLE}, 0, NB_DMODES-1, FLAGS, "dmode" },
    { "single",   "all channels use a single histogram", 0, AV_OPT_TYPE_CONST, {.i64=SINGLE},   0, 0, FLAGS, "dmode" },
    { "separate", "each channel has its own histogram",  0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "dmode" },
    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
    { "r",    "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, FLAGS },
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LOG}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
    { "log",  "logarithmic",         0, AV_OPT_TYPE_CONST, {.i64=LOG},    0, 0, FLAGS, "scale" },
    { "sqrt", "square root",         0, AV_OPT_TYPE_CONST, {.i64=SQRT},   0, 0, FLAGS, "scale" },
    { "cbrt", "cube root",           0, AV_OPT_TYPE_CONST, {.i64=CBRT},   0, 0, FLAGS, "scale" },
    { "lin",  "linear",              0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
    { "rlog", "reverse logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=RLOG},   0, 0, FLAGS, "scale" },
    { "ascale", "set amplitude scale", OFFSET(ascale), AV_OPT_TYPE_INT, {.i64=ALOG}, LINEAR, NB_ASCALES-1, FLAGS, "ascale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=ALOG},    0, 0, FLAGS, "ascale" },
    { "lin", "linear",      0, AV_OPT_TYPE_CONST, {.i64=ALINEAR}, 0, 0, FLAGS, "ascale" },
    { "acount", "how many frames to accumulate", OFFSET(count), AV_OPT_TYPE_INT, {.i64=1}, -1, 100, FLAGS },
    { "rheight", "set histogram ratio of window height", OFFSET(phisto), AV_OPT_TYPE_FLOAT, {.dbl=0.10}, 0, 1, FLAGS },
    { "slide", "set sonogram sliding", OFFSET(slide), AV_OPT_TYPE_INT, {.i64=REPLACE}, 0, NB_SLIDES-1, FLAGS, "slide" },
    { "replace", "replace old rows with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, FLAGS, "slide" },
    { "scroll",  "scroll from top to bottom", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL},  0, 0, FLAGS, "slide" },
    { NULL }
};
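
/*
 * Illustrative usage (not part of the original file): the option names above
 * are exactly what a filtergraph string takes, e.g.
 *     ffmpeg -i input.wav \
 *         -filter_complex "[0:a]ahistogram=dmode=separate:scale=log:slide=scroll[v]" \
 *         -map "[v]" output.mp4
 */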

AVFILTER_DEFINE_CLASS(ahistogram);

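/* The single audio input is pinned to planar float samples and the single
 * video output to YUVA444P; query_formats() below registers exactly those
 * formats on both ends of the filter. */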
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE };
    int ret = AVERROR(EINVAL);

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref         (formats, &inlink->out_formats        )) < 0 ||
        (layouts = ff_all_channel_counts()) == NULL ||
        (ret = ff_channel_layouts_ref (layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioHistogramContext *s = ctx->priv;

    s->nb_samples = FFMAX(1, av_rescale(inlink->sample_rate, s->frame_rate.den, s->frame_rate.num));
    s->dchannels = s->dmode == SINGLE ? 1 : inlink->channels;
    s->shistogram = av_calloc(s->w, s->dchannels * sizeof(*s->shistogram));
    if (!s->shistogram)
        return AVERROR(ENOMEM);

    s->achistogram = av_calloc(s->w, s->dchannels * sizeof(*s->achistogram));
    if (!s->achistogram)
        return AVERROR(ENOMEM);

    return 0;
}

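/* The output frame is split horizontally: the top h * rheight rows show the
 * current histogram, and each remaining row holds one past histogram line,
 * forming a sonogram that either scrolls or is overwritten in place. */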
static int config_output(AVFilterLink *outlink)
{
    AudioHistogramContext *s = outlink->src->priv;

    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->frame_rate = s->frame_rate;

    s->histogram_h = s->h * s->phisto;
    s->ypos = s->h * s->phisto;

    if (s->dmode == SEPARATE) {
        s->combine_buffer = av_malloc_array(outlink->w * 3, sizeof(*s->combine_buffer));
        if (!s->combine_buffer)
            return AVERROR(ENOMEM);
    }

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioHistogramContext *s = ctx->priv;
    const int H = s->histogram_h;
    const int w = s->w;
    int c, y, n, p, bin;
    uint64_t acmax = 1;

    if (!s->out || s->out->width  != outlink->w ||
                   s->out->height != outlink->h) {
        av_frame_free(&s->out);
        s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        for (n = H; n < s->h; n++) {
            memset(s->out->data[0] + n * s->out->linesize[0], 0, w);
            memset(s->out->data[1] + n * s->out->linesize[0], 127, w);
            memset(s->out->data[2] + n * s->out->linesize[0], 127, w);
            memset(s->out->data[3] + n * s->out->linesize[0], 0, w);
        }
    }

    if (s->dmode == SEPARATE) {
        for (y = 0; y < w; y++) {
            s->combine_buffer[3 * y    ] = 0;
            s->combine_buffer[3 * y + 1] = 127.5;
            s->combine_buffer[3 * y + 2] = 127.5;
        }
    }

    for (n = 0; n < H; n++) {
        memset(s->out->data[0] + n * s->out->linesize[0], 0, w);
        memset(s->out->data[1] + n * s->out->linesize[0], 127, w);
        memset(s->out->data[2] + n * s->out->linesize[0], 127, w);
        memset(s->out->data[3] + n * s->out->linesize[0], 0, w);
    }
    s->out->pts = in->pts;

    s->first = s->frame_count;

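    /* Accumulate amplitude counts into w bins.  With ascale=log the mapping
     * 1 + log10(|x|) / 6 puts 0 dBFS in the rightmost bin and roughly -120 dBFS
     * in the leftmost one; with ascale=lin the bin is simply |x| * (w - 1).
     * When acount >= 0, the frame about to be evicted from s->in[] is also
     * binned into shistogram so it can be subtracted from the totals below. */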
    switch (s->ascale) {
    case ALINEAR:
        for (c = 0; c < inlink->channels; c++) {
            const float *src = (const float *)in->extended_data[c];
            uint64_t *achistogram = &s->achistogram[(s->dmode == SINGLE ? 0: c) * w];

            for (n = 0; n < in->nb_samples; n++) {
                bin = lrint(av_clipf(fabsf(src[n]), 0, 1) * (w - 1));

                achistogram[bin]++;
            }

            if (s->in[s->first] && s->count >= 0) {
                uint64_t *shistogram = &s->shistogram[(s->dmode == SINGLE ? 0: c) * w];
                const float *src2 = (const float *)s->in[s->first]->extended_data[c];

                for (n = 0; n < in->nb_samples; n++) {
                    bin = lrint(av_clipf(fabsf(src2[n]), 0, 1) * (w - 1));

                    shistogram[bin]++;
                }
            }
        }
        break;
    case ALOG:
        for (c = 0; c < inlink->channels; c++) {
            const float *src = (const float *)in->extended_data[c];
            uint64_t *achistogram = &s->achistogram[(s->dmode == SINGLE ? 0: c) * w];

            for (n = 0; n < in->nb_samples; n++) {
                bin = lrint(av_clipf(1 + log10(fabsf(src[n])) / 6, 0, 1) * (w - 1));

                achistogram[bin]++;
            }

            if (s->in[s->first] && s->count >= 0) {
                uint64_t *shistogram = &s->shistogram[(s->dmode == SINGLE ? 0: c) * w];
                const float *src2 = (const float *)s->in[s->first]->extended_data[c];

                for (n = 0; n < in->nb_samples; n++) {
                    bin = lrint(av_clipf(1 + log10(fabsf(src2[n])) / 6, 0, 1) * (w - 1));

                    shistogram[bin]++;
                }
            }
        }
        break;
    }

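    /* Keep the last acount frames in a ring buffer.  achistogram counts every
     * sample ever seen, shistogram counts the samples of frames that have left
     * the window, so achistogram - shistogram is the histogram of the most
     * recent frames only (with acount < 0 it just keeps accumulating).  acmax
     * is the largest such count and is used to normalize bar heights. */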
    av_frame_free(&s->in[s->frame_count]);
    s->in[s->frame_count] = in;
    s->frame_count++;
    if (s->frame_count > s->count)
        s->frame_count = 0;

    for (n = 0; n < w * s->dchannels; n++) {
        acmax = FFMAX(s->achistogram[n] - s->shistogram[n], acmax);
    }

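    /* Draw the histogram bars.  In separate mode each channel gets its own
     * luma level plus a U/V offset taken from evenly spaced points on a circle
     * in the chroma plane, so channels are told apart by colour; the chosen
     * display scale only changes how a count is mapped to a bar height. */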
    for (c = 0; c < s->dchannels; c++) {
        uint64_t *shistogram = &s->shistogram[c * w];
        uint64_t *achistogram = &s->achistogram[c * w];
        float yf, uf, vf;

        if (s->dmode == SEPARATE) {
            yf = 256.0f / s->dchannels;
            uf = yf * M_PI;
            vf = yf * M_PI;
            uf *= 0.5 * sin((2 * M_PI * c) / s->dchannels);
            vf *= 0.5 * cos((2 * M_PI * c) / s->dchannels);
        }

        for (n = 0; n < w; n++) {
            double a, aa;
            int h;

            a = achistogram[n] - shistogram[n];

            switch (s->scale) {
            case LINEAR:
                aa = a / (double)acmax;
                break;
            case SQRT:
                aa = sqrt(a) / sqrt(acmax);
                break;
            case CBRT:
                aa = cbrt(a) / cbrt(acmax);
                break;
            case LOG:
                aa = log2(a + 1) / log2(acmax + 1);
                break;
            case RLOG:
                aa = 1. - log2(a + 1) / log2(acmax + 1);
                if (aa == 1.)
                    aa = 0;
                break;
            default:
                av_assert0(0);
            }

            h = aa * (H - 1);

            if (s->dmode == SINGLE) {

                for (y = H - h; y < H; y++) {
                    s->out->data[0][y * s->out->linesize[0] + n] = 255;
                    s->out->data[3][y * s->out->linesize[0] + n] = 255;
                }

                if (s->h - H > 0) {
                    h = aa * 255;

                    s->out->data[0][s->ypos * s->out->linesize[0] + n] = h;
                    s->out->data[1][s->ypos * s->out->linesize[1] + n] = 127;
                    s->out->data[2][s->ypos * s->out->linesize[2] + n] = 127;
                    s->out->data[3][s->ypos * s->out->linesize[3] + n] = 255;
                }
            } else if (s->dmode == SEPARATE) {
                float *out = &s->combine_buffer[3 * n];
                int old;

                old = s->out->data[0][(H - h) * s->out->linesize[0] + n];
                for (y = H - h; y < H; y++) {
                    if (s->out->data[0][y * s->out->linesize[0] + n] != old)
                        break;
                    old = s->out->data[0][y * s->out->linesize[0] + n];
                    s->out->data[0][y * s->out->linesize[0] + n] = yf;
                    s->out->data[1][y * s->out->linesize[1] + n] = 128 + uf;
                    s->out->data[2][y * s->out->linesize[2] + n] = 128 + vf;
                    s->out->data[3][y * s->out->linesize[3] + n] = 255;
                }

                out[0] += aa * yf;
                out[1] += aa * uf;
                out[2] += aa * vf;
            }
        }
    }

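    /* Emit the sonogram row below the histogram area.  In separate mode the
     * per-channel colours accumulated in combine_buffer are written at s->ypos;
     * with slide=scroll existing rows are pushed down one line and the newest
     * row stays at the top, with slide=replace s->ypos advances and wraps so
     * the oldest row is overwritten in place. */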
    if (s->h - H > 0) {
        if (s->dmode == SEPARATE) {
            for (n = 0; n < w; n++) {
                float *cb = &s->combine_buffer[3 * n];

                s->out->data[0][s->ypos * s->out->linesize[0] + n] = cb[0];
                s->out->data[1][s->ypos * s->out->linesize[1] + n] = cb[1];
                s->out->data[2][s->ypos * s->out->linesize[2] + n] = cb[2];
                s->out->data[3][s->ypos * s->out->linesize[3] + n] = 255;
            }
        }

        if (s->slide == SCROLL) {
            for (p = 0; p < 4; p++) {
                for (y = s->h - 1; y >= H + 1; y--) {
                    memmove(s->out->data[p] + (y  ) * s->out->linesize[p],
                            s->out->data[p] + (y-1) * s->out->linesize[p], w);
                }
            }
        }

        s->ypos++;
        if (s->slide == SCROLL || s->ypos >= s->h)
            s->ypos = H;
    }

    return ff_filter_frame(outlink, av_frame_clone(s->out));
}

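/* activate() consumes exactly nb_samples input samples per call, so each
 * audio chunk maps to one output video frame at the configured frame rate. */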
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioHistogramContext *s = ctx->priv;
    AVFrame *in;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioHistogramContext *s = ctx->priv;
    int i;

    av_frame_free(&s->out);
    av_freep(&s->shistogram);
    av_freep(&s->achistogram);
    av_freep(&s->combine_buffer);
    for (i = 0; i < 101; i++)
        av_frame_free(&s->in[i]);
}

static const AVFilterPad ahistogram_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad ahistogram_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_ahistogram = {
    .name          = "ahistogram",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to histogram video output."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioHistogramContext),
    .activate      = activate,
    .inputs        = ahistogram_inputs,
    .outputs       = ahistogram_outputs,
    .priv_class    = &ahistogram_class,
};