FFmpeg
vf_yadif.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
3  * 2010 James Darnley <james.darnley@gmail.com>
4 
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/avassert.h"
23 #include "libavutil/cpu.h"
24 #include "libavutil/common.h"
25 #include "libavutil/pixdesc.h"
26 #include "libavutil/imgutils.h"
27 #include "avfilter.h"
28 #include "formats.h"
29 #include "internal.h"
30 #include "video.h"
31 #include "yadif.h"
32 
/* Per-call context passed to filter_slice() through the execute() callback. */
typedef struct ThreadData {
    AVFrame *frame;  // output frame being written by the worker threads
    int plane;       // index of the plane currently being filtered
    int w, h;        // dimensions of that plane (chroma-scaled for planes 1/2)
    int parity;      // parity of the field to interpolate
    int tff;         // top-field-first flag for the current frame pair
} ThreadData;
40 
/* Directional spatial check: score the diagonal with slope (j) between the
 * line above (cur[mrefs...]) and the line below (cur[prefs...]); if its
 * absolute-difference score beats the best so far, adopt the average along
 * that diagonal as the new spatial prediction.
 *
 * NOTE: this macro intentionally leaves two '{' unbalanced; the matching
 * "}} }}" sequences appear at the CHECK() call sites inside FILTER below. */
#define CHECK(j)\
    {   int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
                  + FFABS(cur[mrefs + (j)] - cur[prefs - (j)])\
                  + FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
        if (score < spatial_score) {\
            spatial_score = score;\
            spatial_pred = (cur[mrefs + (j)] + cur[prefs - (j)])>>1;\

/* The is_not_edge argument here controls when the code will enter a branch
 * which reads up to and including x-3 and x+3. */

/* Interpolate pixels [start, end) of one missing line.
 * Expects in scope: dst/cur/prev/next/prev2/next2 (sample pointers positioned
 * at 'start' on their respective lines), x, mode, and mrefs/prefs — the
 * element offsets of the lines above/below the one being built.  prev2/next2
 * select the temporal neighbours according to field parity (set up by the
 * callers below).  The "}} }}" runs close braces left open by CHECK(). */
#define FILTER(start, end, is_not_edge) \
    for (x = start; x < end; x++) { \
        int c = cur[mrefs];               /* pixel directly above */ \
        int d = (prev2[0] + next2[0])>>1; /* temporal average at x */ \
        int e = cur[prefs];               /* pixel directly below */ \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
        int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c+e) >> 1;    /* default: vertical average */ \
 \
        if (is_not_edge) {\
            /* search four diagonals on each side of vertical for a better
             * spatial prediction (reads up to x-3 / x+3) */ \
            int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c-e) \
                              + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
            CHECK(-1) CHECK(-2) }} }} \
            CHECK( 1) CHECK( 2) }} }} \
        }\
 \
        if (!(mode&2)) { \
            /* temporal clamp: widen 'diff' using the lines two above (b)
             * and two below (f) so fast motion is not over-smoothed */ \
            int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
            int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
            int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
            int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        /* clamp the spatial prediction into [d - diff, d + diff] */ \
        if (spatial_pred > d + diff) \
            spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
            spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }
93 
/* C reference implementation for one full line of 8-bit samples.
 * prefs/mrefs are the byte offsets of the lines below/above the current one;
 * parity selects which neighbours feed the temporal average. */
static void filter_line_c(void *dst1,
                          void *prev1, void *cur1, void *next1,
                          int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    /* The function is called with the pointers already pointing to data[3] and
     * with 6 subtracted from the width. This allows the FILTER macro to be
     * called so that it processes all the pixels normally. A constant value of
     * true for is_not_edge lets the compiler ignore the if statement. */
    FILTER(0, w, 1)
}
112 
/* Alignment (in bytes) required by the optimized filter_line implementations
 * (see YADIFContext.filter_line); it also fixes how many right-edge pixels
 * filter_edges has to cover. */
#define MAX_ALIGN 8
/* Handle the border pixels of one 8-bit line that filter_line must not touch:
 * the first 3 pixels, and the last 'edge' pixels (of which the final 3 are
 * filtered with the spatial search disabled). */
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
                         int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    const int edge = MAX_ALIGN - 1;

    /* Only edge pixels need to be processed here. A constant value of false
     * for is_not_edge should let the compiler ignore the whole branch. */
    FILTER(0, 3, 0)

    /* rebase all pointers to the start of the right-edge region; prev2/next2
     * must be recomputed from the rebased prev/cur/next */
    dst  = (uint8_t*)dst1  + w - edge;
    prev = (uint8_t*)prev1 + w - edge;
    cur  = (uint8_t*)cur1  + w - edge;
    next = (uint8_t*)next1 + w - edge;
    prev2 = (uint8_t*)(parity ? prev : cur);
    next2 = (uint8_t*)(parity ? cur  : next);

    FILTER(w - edge, w - 3, 1)
    FILTER(w - 3, w, 0)
}
141 
142 
/* C reference implementation for one full line of 9–16 bit samples.
 * Identical to filter_line_c() except pointers are uint16_t and the byte
 * strides mrefs/prefs are halved to become element strides. */
static void filter_line_c_16bit(void *dst1,
                                void *prev1, void *cur1, void *next1,
                                int w, int prefs, int mrefs, int parity,
                                int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    mrefs /= 2;  /* byte offset -> uint16_t element offset */
    prefs /= 2;

    FILTER(0, w, 1)
}
160 
/* 16-bit counterpart of filter_edges(): handles the first 3 and last 'edge'
 * samples of a line, with the spatial search disabled on the outermost 3.
 * mrefs/prefs arrive as byte strides and are halved to element strides. */
static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                               int w, int prefs, int mrefs, int parity, int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;

    /* MAX_ALIGN is in bytes; divide by the 2-byte sample size */
    const int edge = MAX_ALIGN / 2 - 1;

    mrefs /= 2;  /* byte offset -> uint16_t element offset */
    prefs /= 2;

    FILTER(0, 3, 0)

    /* rebase to the right-edge region; prev2/next2 recomputed from the
     * rebased prev/cur/next */
    dst   = (uint16_t*)dst1  + w - edge;
    prev  = (uint16_t*)prev1 + w - edge;
    cur   = (uint16_t*)cur1  + w - edge;
    next  = (uint16_t*)next1 + w - edge;
    prev2 = (uint16_t*)(parity ? prev : cur);
    next2 = (uint16_t*)(parity ? cur  : next);

    FILTER(w - edge, w - 3, 1)
    FILTER(w - 3, w, 0)
}
189 
/* Thread worker: deinterlace the rows [slice_start, slice_end) of one plane.
 * Rows belonging to the kept field are copied; rows of the missing field are
 * interpolated via the filter_line/filter_edges callbacks.  Returns 0. */
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    YADIFContext *s = ctx->priv;
    ThreadData *td  = arg;
    int refs = s->cur->linesize[td->plane];
    int df = (s->csp->comp[td->plane].depth + 7) / 8;  // bytes per sample (1 or 2)
    int pix_3 = 3 * df;                                // byte offset of pixel x == 3
    int slice_start = (td->h *  jobnr   ) / nb_jobs;
    int slice_end   = (td->h * (jobnr+1)) / nb_jobs;
    int y;
    int edge = 3 + MAX_ALIGN / df - 1;                 // pixels handled by filter_edges

    /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
     * we need to call the c variant which avoids this for border pixels
     */
    for (y = slice_start; y < slice_end; y++) {
        if ((y ^ td->parity) & 1) {
            /* row belongs to the missing field: interpolate it */
            uint8_t *prev = &s->prev->data[td->plane][y * refs];
            uint8_t *cur  = &s->cur ->data[td->plane][y * refs];
            uint8_t *next = &s->next->data[td->plane][y * refs];
            uint8_t *dst  = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
            /* on the outermost interpolated rows, force mode 2 so FILTER's
             * temporal clamp (which reads 2 rows away) is skipped */
            int mode = y == 1 || y + 2 == td->h ? 2 : s->mode;
            /* stride sign flips at the top/bottom row so the vertical taps
             * mirror back into the plane instead of reading out of bounds */
            s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
                           next + pix_3, td->w - edge,
                           y + 1 < td->h ? refs : -refs,
                           y ? -refs : refs,
                           td->parity ^ td->tff, mode);
            s->filter_edges(dst, prev, cur, next, td->w,
                            y + 1 < td->h ? refs : -refs,
                            y ? -refs : refs,
                            td->parity ^ td->tff, mode);
        } else {
            /* row belongs to the kept field: copy it through unchanged */
            memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
                   &s->cur->data[td->plane][y * refs], td->w * df);
        }
    }
    return 0;
}
228 
229 static void filter(AVFilterContext *ctx, AVFrame *dstpic,
230  int parity, int tff)
231 {
232  YADIFContext *yadif = ctx->priv;
233  ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };
234  int i;
235 
236  for (i = 0; i < yadif->csp->nb_components; i++) {
237  int w = dstpic->width;
238  int h = dstpic->height;
239 
240  if (i == 1 || i == 2) {
241  w = AV_CEIL_RSHIFT(w, yadif->csp->log2_chroma_w);
242  h = AV_CEIL_RSHIFT(h, yadif->csp->log2_chroma_h);
243  }
244 
245 
246  td.w = w;
247  td.h = h;
248  td.plane = i;
249 
250  ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ff_filter_get_nb_threads(ctx)));
251  }
252 
253  emms_c();
254 }
255 
257 {
258  YADIFContext *yadif = ctx->priv;
259 
260  av_frame_free(&yadif->prev);
261  av_frame_free(&yadif->cur );
262  av_frame_free(&yadif->next);
263 }
264 
266 {
267  static const enum AVPixelFormat pix_fmts[] = {
306  };
307 
308  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
309  if (!fmts_list)
310  return AVERROR(ENOMEM);
311  return ff_set_common_formats(ctx, fmts_list);
312 }
313 
315 {
316  AVFilterContext *ctx = link->src;
317  YADIFContext *s = ctx->priv;
318 
319  link->time_base.num = ctx->inputs[0]->time_base.num;
320  link->time_base.den = ctx->inputs[0]->time_base.den * 2;
321  link->w = ctx->inputs[0]->w;
322  link->h = ctx->inputs[0]->h;
323 
324  if(s->mode & 1)
325  link->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
326  (AVRational){2, 1});
327 
328  if (link->w < 3 || link->h < 3) {
329  av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
330  return AVERROR(EINVAL);
331  }
332 
333  s->csp = av_pix_fmt_desc_get(link->format);
334  s->filter = filter;
335  if (s->csp->comp[0].depth > 8) {
338  } else {
341  }
342 
343  if (ARCH_X86)
345 
346  return 0;
347 }
348 
349 
/* AVClass for AVOptions support; the option table itself (ff_yadif_options)
 * lives in yadif_common.c and is shared with related deinterlacers. */
static const AVClass yadif_class = {
    .class_name = "yadif",
    .item_name  = av_default_item_name,
    .option     = ff_yadif_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
357 
359  {
360  .name = "default",
361  .type = AVMEDIA_TYPE_VIDEO,
362  .filter_frame = ff_yadif_filter_frame,
363  },
364  { NULL }
365 };
366 
368  {
369  .name = "default",
370  .type = AVMEDIA_TYPE_VIDEO,
371  .request_frame = ff_yadif_request_frame,
372  .config_props = config_props,
373  },
374  { NULL }
375 };
376 
378  .name = "yadif",
379  .description = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
380  .priv_size = sizeof(YADIFContext),
381  .priv_class = &yadif_class,
382  .uninit = uninit,
384  .inputs = avfilter_vf_yadif_inputs,
385  .outputs = avfilter_vf_yadif_outputs,
387 };
#define NULL
Definition: coverity.c:32
static const AVFilterPad avfilter_vf_yadif_inputs[]
Definition: vf_yadif.c:358
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:397
#define MAX_ALIGN
Definition: vf_yadif.c:113
int tff
Definition: vf_bwdif.c:57
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
AVFrame * frame
Definition: dsddec.c:65
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Main libavfilter public API header.
void(* filter)(AVFilterContext *ctx, AVFrame *dstpic, int parity, int tff)
Definition: yadif.h:64
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int num
Numerator.
Definition: rational.h:59
int ff_yadif_request_frame(AVFilterLink *link)
Definition: yadif_common.c:159
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:403
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
static void filter_line_c(void *dst1, void *prev1, void *cur1, void *next1, int w, int prefs, int mrefs, int parity, int mode)
Definition: vf_yadif.c:94
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
const char * name
Pad name.
Definition: internal.h:60
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
static const AVClass yadif_class
Definition: vf_yadif.c:350
AVFrame * cur
Definition: yadif.h:59
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_cold
Definition: attributes.h:82
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:402
AVFrame * next
Definition: yadif.h:60
AVFrame * dst
Definition: vf_blend.c:55
int plane
Definition: vf_blend.c:57
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range...
Definition: pixfmt.h:100
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
static void filter(AVFilterContext *ctx, AVFrame *dstpic, int parity, int tff)
Definition: vf_yadif.c:229
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1, int w, int prefs, int mrefs, int parity, int mode)
Definition: vf_yadif.c:114
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
AVFrame * prev
Definition: yadif.h:61
#define av_log(a,...)
A filter pad used for either input or output.
Definition: internal.h:54
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:176
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:569
#define td
Definition: regdef.h:70
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
int parity
Definition: vf_bwdif.c:56
const char * arg
Definition: jacosubdec.c:66
simple assert() macros that are a bit more flexible than ISO C assert().
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_yadif.c:190
static int query_formats(AVFilterContext *ctx)
Definition: vf_yadif.c:265
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
void(* filter_edges)(void *dst, void *prev, void *cur, void *next, int w, int prefs, int mrefs, int parity, int mode)
Definition: yadif.h:72
static const AVFilterPad avfilter_vf_yadif_outputs[]
Definition: vf_yadif.c:367
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:385
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
#define FFMIN(a, b)
Definition: common.h:96
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVFormatContext * ctx
Definition: movenc.c:48
static int config_props(AVFilterLink *link)
Definition: vf_yadif.c:314
AVFilter ff_vf_yadif
Definition: vf_yadif.c:377
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:386
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:405
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:395
Used for passing data between threads.
Definition: dsddec.c:64
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
AVFrame * cur
Definition: vf_w3fdif.c:344
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int ff_yadif_filter_frame(AVFilterLink *link, AVFrame *frame)
Definition: yadif_common.c:92
const char * name
Filter name.
Definition: avfilter.h:148
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:384
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:133
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:396
void(* filter_line)(void *dst, void *prev, void *cur, void *next, int w, int prefs, int mrefs, int parity, int mode)
Required alignment for filter_line.
Definition: yadif.h:69
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:404
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:378
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
const AVPixFmtDescriptor * csp
Definition: yadif.h:75
#define FILTER(start, end, is_not_edge)
Definition: vf_yadif.c:52
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
common internal and external API header
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1, int w, int prefs, int mrefs, int parity, int mode)
Definition: vf_yadif.c:161
const AVOption ff_yadif_options[]
Definition: yadif_common.c:198
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
int den
Denominator.
Definition: rational.h:60
avfilter_execute_func * execute
Definition: internal.h:155
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2036
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_yadif.c:256
av_cold void ff_yadif_init_x86(YADIFContext *yadif)
Definition: vf_yadif_init.c:60
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:338
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int height
Definition: frame.h:353
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
#define df(A, B)
Definition: vf_xbr.c:90
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int depth
Number of bits in the component.
Definition: pixdesc.h:58
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
int mode
YADIFMode.
Definition: yadif.h:53
static void filter_line_c_16bit(void *dst1, void *prev1, void *cur1, void *next1, int w, int prefs, int mrefs, int parity, int mode)
Definition: vf_yadif.c:143
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58