vf_photosensitivity.c
/*
 * Copyright (c) 2019 Vladimir Panteleev
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"

#include "formats.h"
#include "internal.h"
#include "video.h"

#define MAX_FRAMES 240
#define GRID_SIZE 8
#define NUM_CHANNELS 3

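/*
 * Each frame is summarized as a GRID_SIZE x GRID_SIZE grid of average
 * cell colors, one byte per channel (the fourth byte appears to be
 * padding, since only NUM_CHANNELS of them are used). Comparing these
 * small fingerprints instead of full frames keeps flash detection cheap.
 */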
typedef struct PhotosensitivityFrame {
    uint8_t grid[GRID_SIZE][GRID_SIZE][4];
} PhotosensitivityFrame;

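/*
 * Filter state: the user options, the badness threshold derived from
 * them in config_input(), and a circular buffer of per-frame badness
 * scores used for a weighted moving average of recent flashing.
 */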
typedef struct PhotosensitivityContext {
    const AVClass *class;

    int nb_frames;
    int skip;
    float threshold_multiplier;
    int bypass;

    int badness_threshold;

    /* Circular buffer */
    int history[MAX_FRAMES];
    int history_pos;

    PhotosensitivityFrame last_frame_e;
    AVFrame *last_frame_av;
} PhotosensitivityContext;

#define OFFSET(x) offsetof(PhotosensitivityContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption photosensitivity_options[] = {
    { "frames",    "set how many frames to use",                         OFFSET(nb_frames),            AV_OPT_TYPE_INT,   {.i64=30}, 2,   MAX_FRAMES, FLAGS },
    { "f",         "set how many frames to use",                         OFFSET(nb_frames),            AV_OPT_TYPE_INT,   {.i64=30}, 2,   MAX_FRAMES, FLAGS },
    { "threshold", "set detection threshold factor (lower is stricter)", OFFSET(threshold_multiplier), AV_OPT_TYPE_FLOAT, {.dbl=1},  0.1, FLT_MAX,    FLAGS },
    { "t",         "set detection threshold factor (lower is stricter)", OFFSET(threshold_multiplier), AV_OPT_TYPE_FLOAT, {.dbl=1},  0.1, FLT_MAX,    FLAGS },
    { "skip",      "set pixels to skip when sampling frames",            OFFSET(skip),                 AV_OPT_TYPE_INT,   {.i64=1},  1,   1024,       FLAGS },
    { "bypass",    "leave frames unchanged",                             OFFSET(bypass),               AV_OPT_TYPE_BOOL,  {.i64=0},  0,   1,          FLAGS },
    { NULL }
};
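
/*
 * Example use (hypothetical file names; option names as defined above):
 *   ffmpeg -i in.mp4 -vf "photosensitivity=frames=30:threshold=1:skip=2" out.mp4
 */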

AVFILTER_DEFINE_CLASS(photosensitivity);

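/*
 * Only packed 8-bit RGB formats are accepted; the sampling loops below
 * assume NUM_CHANNELS interleaved bytes per pixel.
 */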
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts[] = {
        AV_PIX_FMT_RGB24,
        AV_PIX_FMT_BGR24,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *formats = ff_make_format_list(pixel_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, formats);
}

typedef struct ThreadData_convert_frame
{
    AVFrame *in;
    PhotosensitivityFrame *out;
    int skip;
} ThreadData_convert_frame;

#define NUM_CELLS (GRID_SIZE * GRID_SIZE)

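/*
 * Slice worker for convert_frame(): each job averages a contiguous
 * range of grid cells, sampling every 'skip'-th pixel in both
 * dimensions to trade accuracy for speed.
 */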
static int convert_frame_partial(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    int cell, gx, gy, x0, x1, y0, y1, x, y, c, area;
    int sum[NUM_CHANNELS];
    const uint8_t *p;

    ThreadData_convert_frame *td = arg;

    const int slice_start = (NUM_CELLS * jobnr) / nb_jobs;
    const int slice_end = (NUM_CELLS * (jobnr+1)) / nb_jobs;

    int width = td->in->width, height = td->in->height, linesize = td->in->linesize[0], skip = td->skip;
    const uint8_t *data = td->in->data[0];

    for (cell = slice_start; cell < slice_end; cell++) {
        gx = cell % GRID_SIZE;
        gy = cell / GRID_SIZE;

        x0 = width  *  gx    / GRID_SIZE;
        x1 = width  * (gx+1) / GRID_SIZE;
        y0 = height *  gy    / GRID_SIZE;
        y1 = height * (gy+1) / GRID_SIZE;

        for (c = 0; c < NUM_CHANNELS; c++) {
            sum[c] = 0;
        }
        for (y = y0; y < y1; y += skip) {
            p = data + y * linesize + x0 * NUM_CHANNELS;
            for (x = x0; x < x1; x += skip) {
                //av_log(NULL, AV_LOG_VERBOSE, "%d %d %d : (%d,%d) (%d,%d) -> %d,%d | *%d\n", c, gx, gy, x0, y0, x1, y1, x, y, (int)row);
                sum[0] += p[0];
                sum[1] += p[1];
                sum[2] += p[2];
                p += NUM_CHANNELS * skip;
                // TODO: variable size
            }
        }

        area = ((x1 - x0 + skip - 1) / skip) * ((y1 - y0 + skip - 1) / skip);
        for (c = 0; c < NUM_CHANNELS; c++) {
            if (area)
                sum[c] /= area;
            td->out->grid[gy][gx][c] = sum[c];
        }
    }
    return 0;
}

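/*
 * Downsample a frame into a PhotosensitivityFrame fingerprint,
 * distributing the grid cells across the available filter threads.
 */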
static void convert_frame(AVFilterContext *ctx, AVFrame *in, PhotosensitivityFrame *out, int skip)
{
    ThreadData_convert_frame td;
    td.in = in;
    td.out = out;
    td.skip = skip;
    ctx->internal->execute(ctx, convert_frame_partial, &td, NULL, FFMIN(NUM_CELLS, ff_filter_get_nb_threads(ctx)));
}

typedef struct ThreadData_blend_frame
{
    AVFrame *target;
    AVFrame *source;
    uint16_t s_mul;
} ThreadData_blend_frame;

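/*
 * Slice worker for blend_frame(): blends 'source' into 'target' in
 * place with fixed-point weights (s_mul out of 256).
 */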
static int blend_frame_partial(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    int x, y;
    uint8_t *t, *s;

    ThreadData_blend_frame *td = arg;
    const uint16_t s_mul = td->s_mul;
    const uint16_t t_mul = 0x100 - s_mul;
    const int slice_start = (td->target->height * jobnr) / nb_jobs;
    const int slice_end = (td->target->height * (jobnr+1)) / nb_jobs;
    const int linesize = td->target->linesize[0];

    for (y = slice_start; y < slice_end; y++) {
        t = td->target->data[0] + y * td->target->linesize[0];
        s = td->source->data[0] + y * td->source->linesize[0];
        for (x = 0; x < linesize; x++) {
            *t = (*t * t_mul + *s * s_mul) >> 8;
            t++; s++;
        }
    }
    return 0;
}

static void blend_frame(AVFilterContext *ctx, AVFrame *target, AVFrame *source, float factor)
{
    ThreadData_blend_frame td;
    td.target = target;
    td.source = source;
    td.s_mul = (uint16_t)(factor * 0x100);
    ctx->internal->execute(ctx, blend_frame_partial, &td, NULL, FFMIN(ctx->outputs[0]->h, ff_filter_get_nb_threads(ctx)));
}

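/*
 * Badness metric: sum of absolute differences between two frame
 * fingerprints across all cells and channels.
 */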
static int get_badness(PhotosensitivityFrame *a, PhotosensitivityFrame *b)
{
    int badness, x, y, c;
    badness = 0;
    for (c = 0; c < NUM_CHANNELS; c++) {
        for (y = 0; y < GRID_SIZE; y++) {
            for (x = 0; x < GRID_SIZE; x++) {
                badness += abs((int)a->grid[y][x][c] - (int)b->grid[y][x][c]);
                //av_log(NULL, AV_LOG_VERBOSE, "%d - %d -> %d \n", a->grid[y][x], b->grid[y][x], badness);
                //av_log(NULL, AV_LOG_VERBOSE, "%d -> %d \n", abs((int)a->grid[y][x] - (int)b->grid[y][x]), badness);
            }
        }
    }
    return badness;
}

static int config_input(AVFilterLink *inlink)
{
    /* const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); */
    AVFilterContext *ctx = inlink->dst;
    PhotosensitivityContext *s = ctx->priv;

    s->badness_threshold = (int)(GRID_SIZE * GRID_SIZE * 4 * 256 * s->nb_frames * s->threshold_multiplier / 128);

    return 0;
}

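/*
 * Per-frame processing: score the new frame against the last emitted
 * one, then either pass it through, blend it toward the previous
 * frame just enough to stay under the threshold, or duplicate the
 * previous frame outright when no usable blend factor exists.
 */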
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    int this_badness, current_badness, fixed_badness, new_badness, i, res;
    PhotosensitivityFrame ef;
    AVFrame *src, *out;
    int free_in = 0;
    float factor;
    AVDictionary **metadata;

    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    PhotosensitivityContext *s = ctx->priv;

    /* weighted moving average of recent badness; more recent history
     * entries get proportionally larger weights */
    current_badness = 0;
    for (i = 1; i < s->nb_frames; i++)
        current_badness += i * s->history[(s->history_pos + i) % s->nb_frames];
    current_badness /= s->nb_frames;

    convert_frame(ctx, in, &ef, s->skip);
    this_badness = get_badness(&ef, &s->last_frame_e);
    new_badness = current_badness + this_badness;
    av_log(s, AV_LOG_VERBOSE, "badness: %6d -> %6d / %6d (%3d%% - %s)\n",
        current_badness, new_badness, s->badness_threshold,
        100 * new_badness / s->badness_threshold, new_badness < s->badness_threshold ? "OK" : "EXCEEDED");

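    /*
     * If the accumulated badness stays below the threshold (or bypass
     * is set), emit the frame unchanged; otherwise try to tone the
     * flash down by blending toward the previously emitted frame.
     */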
    fixed_badness = new_badness;
    if (new_badness < s->badness_threshold || !s->last_frame_av || s->bypass) {
        factor = 1; /* for metadata */
        av_frame_free(&s->last_frame_av);
        s->last_frame_av = src = in;
        s->last_frame_e = ef;
        s->history[s->history_pos] = this_badness;
    } else {
        factor = (float)(s->badness_threshold - current_badness) / (new_badness - current_badness);
        if (factor <= 0) {
            /* just duplicate the frame */
            s->history[s->history_pos] = 0; /* frame was duplicated, thus, delta is zero */
        } else {
            res = av_frame_make_writable(s->last_frame_av);
            if (res) {
                av_frame_free(&in);
                return res;
            }
            blend_frame(ctx, s->last_frame_av, in, factor);

            convert_frame(ctx, s->last_frame_av, &ef, s->skip);
            this_badness = get_badness(&ef, &s->last_frame_e);
            fixed_badness = current_badness + this_badness;
            av_log(s, AV_LOG_VERBOSE, " fixed: %6d -> %6d / %6d (%3d%%) factor=%5.3f\n",
                current_badness, fixed_badness, s->badness_threshold,
                100 * new_badness / s->badness_threshold, factor);
            s->last_frame_e = ef;
            s->history[s->history_pos] = this_badness;
        }
        src = s->last_frame_av;
        free_in = 1;
    }
    s->history_pos = (s->history_pos + 1) % s->nb_frames;

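    /* Emit a copy of the chosen source frame, tagging it with the
     * computed badness values as frame metadata. */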
    out = ff_get_video_buffer(outlink, in->width, in->height);
    if (!out) {
        if (free_in == 1)
            av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);
    metadata = &out->metadata;
    if (metadata) {
        char value[128];

        snprintf(value, sizeof(value), "%f", (float)new_badness / s->badness_threshold);
        av_dict_set(metadata, "lavfi.photosensitivity.badness", value, 0);

        snprintf(value, sizeof(value), "%f", (float)fixed_badness / s->badness_threshold);
        av_dict_set(metadata, "lavfi.photosensitivity.fixed-badness", value, 0);

        snprintf(value, sizeof(value), "%f", (float)this_badness / s->badness_threshold);
        av_dict_set(metadata, "lavfi.photosensitivity.frame-badness", value, 0);

        snprintf(value, sizeof(value), "%f", factor);
        av_dict_set(metadata, "lavfi.photosensitivity.factor", value, 0);
    }
    av_frame_copy(out, src);
    if (free_in == 1)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    PhotosensitivityContext *s = ctx->priv;

    av_frame_free(&s->last_frame_av);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_photosensitivity = {
    .name          = "photosensitivity",
    .description   = NULL_IF_CONFIG_SMALL("Filter out photosensitive epilepsy seizure-inducing flashes."),
    .priv_size     = sizeof(PhotosensitivityContext),
    .priv_class    = &photosensitivity_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
};