vf_find_rect.c
/*
 * Copyright (c) 2014-2015 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @todo switch to dualinput
 */

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "internal.h"

#include "lavfutils.h"

#define MAX_MIPMAPS 5

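/*
 * Filter context. needle_frame[] and haystack_frame[] hold successively
 * half-sized copies (a small mipmap pyramid, index 0 = full resolution) of
 * the reference object and of the current input frame; last_x/last_y cache
 * the previous match position so the next frame can first be searched
 * locally around it.
 */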
typedef struct FOCContext {
    AVClass *class;
    float threshold;
    int mipmaps;
    int xmin, ymin, xmax, ymax;
    char *obj_filename;
    int last_x, last_y;
    AVFrame *obj_frame;
    AVFrame *needle_frame[MAX_MIPMAPS];
    AVFrame *haystack_frame[MAX_MIPMAPS];
    int discard;
} FOCContext;

#define OFFSET(x) offsetof(FOCContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption find_rect_options[] = {
    { "object", "object bitmap filename", OFFSET(obj_filename), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
    { "threshold", "set threshold", OFFSET(threshold), AV_OPT_TYPE_FLOAT, {.dbl = 0.5}, 0, 1.0, FLAGS },
    { "mipmaps", "set mipmaps", OFFSET(mipmaps), AV_OPT_TYPE_INT, {.i64 = 3}, 1, MAX_MIPMAPS, FLAGS },
    { "xmin", "", OFFSET(xmin), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "ymin", "", OFFSET(ymin), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "xmax", "", OFFSET(xmax), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "ymax", "", OFFSET(ymax), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "discard", "", OFFSET(discard), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { NULL }
};
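
/*
 * Illustrative use from the ffmpeg command line (option values are examples
 * only):
 *     -vf find_rect=object=ref.pgm:threshold=0.3:mipmaps=5
 * The object file must decode to a gray8 image (see init() below).
 */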

AVFILTER_DEFINE_CLASS(find_rect);

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}

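/*
 * Produce a half-resolution copy of the first (luma/gray) plane of "in",
 * averaging each 2x2 block with rounding. Used to build the mipmap pyramids.
 */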
static AVFrame *downscale(AVFrame *in)
{
    int x, y;
    AVFrame *frame = av_frame_alloc();
    uint8_t *src, *dst;
    if (!frame)
        return NULL;

    frame->format = in->format;
    frame->width  = (in->width  + 1) / 2;
    frame->height = (in->height + 1) / 2;

    if (av_frame_get_buffer(frame, 0) < 0) {
        av_frame_free(&frame);
        return NULL;
    }
    src = in   ->data[0];
    dst = frame->data[0];

    for(y = 0; y < frame->height; y++) {
        for(x = 0; x < frame->width; x++) {
            dst[x] = (  src[2*x+0]
                      + src[2*x+1]
                      + src[2*x+0 + in->linesize[0]]
                      + src[2*x+1 + in->linesize[0]]
                      + 2) >> 2;
        }
        src += 2*in->linesize[0];
        dst += frame->linesize[0];
    }
    return frame;
}

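/*
 * Score how well the object matches the haystack at offset (offx, offy).
 * The score is 1 - |normalized cross-correlation| over the object area, so
 * 0.0 is a perfect (anti-)correlated match and 1.0 is no correlation at all;
 * constant (zero-variance) regions are treated as non-matches (score 1.0).
 */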
static float compare(const AVFrame *haystack, const AVFrame *obj, int offx, int offy)
{
    int x, y;
    int o_sum_v = 0;
    int h_sum_v = 0;
    int64_t oo_sum_v = 0;
    int64_t hh_sum_v = 0;
    int64_t oh_sum_v = 0;
    float c;
    int n = obj->height * obj->width;
    const uint8_t *odat = obj     ->data[0];
    const uint8_t *hdat = haystack->data[0] + offx + offy * haystack->linesize[0];
    int64_t o_sigma, h_sigma;

    for(y = 0; y < obj->height; y++) {
        for(x = 0; x < obj->width; x++) {
            int o_v = odat[x];
            int h_v = hdat[x];
            o_sum_v  += o_v;
            h_sum_v  += h_v;
            oo_sum_v += o_v * o_v;
            hh_sum_v += h_v * h_v;
            oh_sum_v += o_v * h_v;
        }
        odat += obj->linesize[0];
        hdat += haystack->linesize[0];
    }
    o_sigma = n*oo_sum_v - o_sum_v*(int64_t)o_sum_v;
    h_sigma = n*hh_sum_v - h_sum_v*(int64_t)h_sum_v;

    if (o_sigma == 0 || h_sigma == 0)
        return 1.0;

    c = (n*oh_sum_v - o_sum_v*(int64_t)h_sum_v) / (sqrt(o_sigma)*sqrt(h_sigma));

    return 1 - fabs(c);
}

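/*
 * xmax/ymax default to 0, which means "search the whole frame": clamp them
 * to the largest offset at which the object still fits inside the input.
 */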
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FOCContext *foc = ctx->priv;

    if (foc->xmax <= 0)
        foc->xmax = inlink->w - foc->obj_frame->width;
    if (foc->ymax <= 0)
        foc->ymax = inlink->h - foc->obj_frame->height;

    return 0;
}

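/*
 * Coarse-to-fine template search. "pass" is the current mipmap level; if a
 * coarser level exists (pass < maxpass) the function first recurses on it,
 * then refines within +/-4 pixels of the up-scaled coarse result. Returns
 * the best (lowest) score and reports its position through best_x/best_y.
 */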
static float search(FOCContext *foc, int pass, int maxpass, int xmin, int xmax, int ymin, int ymax, int *best_x, int *best_y, float best_score)
{
    int x, y;

    if (pass + 1 <= maxpass) {
        int sub_x, sub_y;
        search(foc, pass+1, maxpass, xmin>>1, (xmax+1)>>1, ymin>>1, (ymax+1)>>1, &sub_x, &sub_y, 2.0);
        xmin = FFMAX(xmin, 2*sub_x - 4);
        xmax = FFMIN(xmax, 2*sub_x + 4);
        ymin = FFMAX(ymin, 2*sub_y - 4);
        ymax = FFMIN(ymax, 2*sub_y + 4);
    }

    for (y = ymin; y <= ymax; y++) {
        for (x = xmin; x <= xmax; x++) {
            float score = compare(foc->haystack_frame[pass], foc->needle_frame[pass], x, y);
            if (score < best_score) {
                best_score = score;
                *best_x = x;
                *best_y = y;
            }
        }
    }
    return best_score;
}

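/*
 * Per-frame work: build the haystack mipmaps, try a cheap +/-8 pixel search
 * around the previous match at full resolution, then fall back to a full
 * coarse-to-fine search. Frames without a match below the threshold are
 * passed through unchanged (or dropped if "discard" is set); matches are
 * reported as lavfi.rect.* frame metadata.
 */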
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    FOCContext *foc = ctx->priv;
    float best_score;
    int best_x, best_y;
    int i;
    char buf[32];

    foc->haystack_frame[0] = av_frame_clone(in);
    for (i=1; i<foc->mipmaps; i++) {
        foc->haystack_frame[i] = downscale(foc->haystack_frame[i-1]);
    }

    best_score = search(foc, 0, 0,
                        FFMAX(foc->xmin, foc->last_x - 8),
                        FFMIN(foc->xmax, foc->last_x + 8),
                        FFMAX(foc->ymin, foc->last_y - 8),
                        FFMIN(foc->ymax, foc->last_y + 8),
                        &best_x, &best_y, 2.0);

    best_score = search(foc, 0, foc->mipmaps - 1, foc->xmin, foc->xmax, foc->ymin, foc->ymax,
                        &best_x, &best_y, best_score);

    for (i=0; i<MAX_MIPMAPS; i++) {
        av_frame_free(&foc->haystack_frame[i]);
    }

    if (best_score > foc->threshold) {
        if (foc->discard) {
            av_frame_free(&in);
            return 0;
        } else {
            return ff_filter_frame(ctx->outputs[0], in);
        }
    }

    av_log(ctx, AV_LOG_INFO, "Found at n=%"PRId64" pts_time=%f x=%d y=%d with score=%f\n",
           inlink->frame_count_out, TS2D(in->pts) * av_q2d(inlink->time_base),
           best_x, best_y, best_score);
    foc->last_x = best_x;
    foc->last_y = best_y;

    snprintf(buf, sizeof(buf), "%f", best_score);

    av_frame_make_writable(in);

    av_dict_set_int(&in->metadata, "lavfi.rect.w", foc->obj_frame->width, 0);
    av_dict_set_int(&in->metadata, "lavfi.rect.h", foc->obj_frame->height, 0);
    av_dict_set_int(&in->metadata, "lavfi.rect.x", best_x, 0);
    av_dict_set_int(&in->metadata, "lavfi.rect.y", best_y, 0);
    av_dict_set(&in->metadata, "lavfi.rect.score", buf, 0);

    return ff_filter_frame(ctx->outputs[0], in);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FOCContext *foc = ctx->priv;
    int i;

    for (i = 0; i < MAX_MIPMAPS; i++) {
        av_frame_free(&foc->needle_frame[i]);
        av_frame_free(&foc->haystack_frame[i]);
    }

    if (foc->obj_frame)
        av_freep(&foc->obj_frame->data[0]);
    av_frame_free(&foc->obj_frame);
}

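/*
 * Load the reference object image, insist on gray8, and pre-compute its
 * mipmap pyramid so per-frame work only has to downscale the input.
 */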
static av_cold int init(AVFilterContext *ctx)
{
    FOCContext *foc = ctx->priv;
    int ret, i;

    if (!foc->obj_filename) {
        av_log(ctx, AV_LOG_ERROR, "object filename not set\n");
        return AVERROR(EINVAL);
    }

    foc->obj_frame = av_frame_alloc();
    if (!foc->obj_frame)
        return AVERROR(ENOMEM);

    if ((ret = ff_load_image(foc->obj_frame->data, foc->obj_frame->linesize,
                             &foc->obj_frame->width, &foc->obj_frame->height,
                             &foc->obj_frame->format, foc->obj_filename, ctx)) < 0)
        return ret;

    if (foc->obj_frame->format != AV_PIX_FMT_GRAY8) {
        av_log(ctx, AV_LOG_ERROR, "object image is not a grayscale image\n");
        return AVERROR(EINVAL);
    }

    foc->needle_frame[0] = av_frame_clone(foc->obj_frame);
    for (i = 1; i < foc->mipmaps; i++) {
        foc->needle_frame[i] = downscale(foc->needle_frame[i-1]);
        if (!foc->needle_frame[i])
            return AVERROR(ENOMEM);
    }

    return 0;
}

static const AVFilterPad foc_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad foc_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_find_rect = {
    .name          = "find_rect",
    .description   = NULL_IF_CONFIG_SMALL("Find a user specified object."),
    .priv_size     = sizeof(FOCContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = foc_inputs,
    .outputs       = foc_outputs,
    .priv_class    = &find_rect_class,
};