FFmpeg
vf_pad.c
/*
 * Copyright (c) 2008 vmrsss
 * Copyright (c) 2009 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * video padding filter
 */
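
/*
 * Illustrative usage (an example, not part of this file): pad a smaller
 * input up to 1280x720 with the picture centred and black borders, using
 * the option names and expression variables defined below:
 *
 *   ffmpeg -i in.mp4 -vf "pad=w=1280:h=720:x=(ow-iw)/2:y=(oh-ih)/2:color=black" out.mp4
 */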

#include <float.h>  /* DBL_MAX */

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
#include "libavutil/colorspace.h"
#include "libavutil/imgutils.h"
#include "libavutil/parseutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"

#include "drawutils.h"

static const char *const var_names[] = {
    "in_w",   "iw",
    "in_h",   "ih",
    "out_w",  "ow",
    "out_h",  "oh",
    "x",
    "y",
    "a",
    "sar",
    "dar",
    "hsub",
    "vsub",
    NULL
};

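/* indices into var_values[]; the order must match var_names[] above */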
enum var_name {
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_X,
    VAR_Y,
    VAR_A,
    VAR_SAR,
    VAR_DAR,
    VAR_HSUB,
    VAR_VSUB,
    VARS_NB
};

static int query_formats(AVFilterContext *ctx)
{
    return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
}

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

typedef struct PadContext {
    const AVClass *class;
    int w, h;               ///< output dimensions, a value of 0 will result in the input size
    int x, y;               ///< offsets of the input area with respect to the padded area
    int in_w, in_h;         ///< width and height of the padded input video, aligned to the chroma subsampling values to avoid chroma issues
    int inlink_w, inlink_h;
    AVRational aspect;

    char *w_expr;           ///< width expression string
    char *h_expr;           ///< height expression string
    char *x_expr;           ///< x offset expression string
    char *y_expr;           ///< y offset expression string
    uint8_t rgba_color[4];  ///< color for the padding area
    FFDrawContext draw;
    FFDrawColor color;

    int eval_mode;          ///< expression evaluation mode
} PadContext;

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    PadContext *s = ctx->priv;
    AVRational adjusted_aspect = s->aspect;
    int ret;
    double var_values[VARS_NB], res;
    char *expr;

    ff_draw_init(&s->draw, inlink->format, 0);
    ff_draw_color(&s->draw, &s->color, s->rgba_color);

    var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
    var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
    var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
    var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
    var_values[VAR_A]     = (double) inlink->w / inlink->h;
    var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    var_values[VAR_DAR]   = var_values[VAR_A] * var_values[VAR_SAR];
    var_values[VAR_HSUB]  = 1 << s->draw.hsub_max;
    var_values[VAR_VSUB]  = 1 << s->draw.vsub_max;

    /* evaluate width and height */
    av_expr_parse_and_eval(&res, (expr = s->w_expr),
                           var_names, var_values,
                           NULL, NULL, NULL, NULL, NULL, 0, ctx);
    s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto eval_fail;
    s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
    if (!s->h)
        var_values[VAR_OUT_H] = var_values[VAR_OH] = s->h = inlink->h;

    /* evaluate the width again, as it may depend on the evaluated output height */
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto eval_fail;
    s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
    if (!s->w)
        var_values[VAR_OUT_W] = var_values[VAR_OW] = s->w = inlink->w;

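    /* if an output aspect was requested, enlarge whichever output dimension is
     * too small so that w:h (with the input SAR divided out) matches it */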
    if (adjusted_aspect.num && adjusted_aspect.den) {
        adjusted_aspect = av_div_q(adjusted_aspect, inlink->sample_aspect_ratio);
        if (s->h < av_rescale(s->w, adjusted_aspect.den, adjusted_aspect.num)) {
            s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = av_rescale(s->w, adjusted_aspect.den, adjusted_aspect.num);
        } else {
            s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = av_rescale(s->h, adjusted_aspect.num, adjusted_aspect.den);
        }
    }

    /* evaluate x and y */
    av_expr_parse_and_eval(&res, (expr = s->x_expr),
                           var_names, var_values,
                           NULL, NULL, NULL, NULL, NULL, 0, ctx);
    s->x = var_values[VAR_X] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto eval_fail;
    s->y = var_values[VAR_Y] = res;
    /* evaluate x again, as it may depend on the evaluated y value */
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto eval_fail;
    s->x = var_values[VAR_X] = res;

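    /* if the requested offsets would place the input outside the padded area,
     * fall back to centring it */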
    if (s->x < 0 || s->x + inlink->w > s->w)
        s->x = var_values[VAR_X] = (s->w - inlink->w) / 2;
    if (s->y < 0 || s->y + inlink->h > s->h)
        s->y = var_values[VAR_Y] = (s->h - inlink->h) / 2;

    /* sanity check params */
    if (s->w < 0 || s->h < 0) {
        av_log(ctx, AV_LOG_ERROR, "Negative values are not acceptable.\n");
        return AVERROR(EINVAL);
    }

    s->w    = ff_draw_round_to_sub(&s->draw, 0, -1, s->w);
    s->h    = ff_draw_round_to_sub(&s->draw, 1, -1, s->h);
    s->x    = ff_draw_round_to_sub(&s->draw, 0, -1, s->x);
    s->y    = ff_draw_round_to_sub(&s->draw, 1, -1, s->y);
    s->in_w = ff_draw_round_to_sub(&s->draw, 0, -1, inlink->w);
    s->in_h = ff_draw_round_to_sub(&s->draw, 1, -1, inlink->h);
    s->inlink_w = inlink->w;
    s->inlink_h = inlink->h;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X\n",
           inlink->w, inlink->h, s->w, s->h, s->x, s->y,
           s->rgba_color[0], s->rgba_color[1], s->rgba_color[2], s->rgba_color[3]);

    if (s->x <  0 || s->y <  0                      ||
        s->w <= 0 || s->h <= 0                      ||
        (unsigned)s->x + (unsigned)inlink->w > s->w ||
        (unsigned)s->y + (unsigned)inlink->h > s->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Input area %d:%d:%d:%d not within the padded area 0:0:%d:%d or zero-sized\n",
               s->x, s->y, s->x + inlink->w, s->y + inlink->h, s->w, s->h);
        return AVERROR(EINVAL);
    }

    return 0;

eval_fail:
    av_log(ctx, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'\n", expr);
    return ret;
}

static int config_output(AVFilterLink *outlink)
{
    PadContext *s = outlink->src->priv;

    outlink->w = s->w;
    outlink->h = s->h;
    return 0;
}

static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
    PadContext *s = inlink->dst->priv;
    AVFrame *frame;
    int plane;

    if (s->inlink_w <= 0)
        return NULL;

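    /* request a buffer large enough for the whole padded frame, then advance
     * the plane pointers to the input sub-rectangle so that the filter
     * upstream writes straight into place and the borders can later be
     * filled without a copy */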
    frame = ff_get_video_buffer(inlink->dst->outputs[0],
                                w + (s->w - s->in_w),
                                h + (s->h - s->in_h) + (s->x > 0));

    if (!frame)
        return NULL;

    frame->width  = w;
    frame->height = h;

    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
        int hsub = s->draw.hsub[plane];
        int vsub = s->draw.vsub[plane];
        frame->data[plane] += (s->x >> hsub) * s->draw.pixelstep[plane] +
                              (s->y >> vsub) * frame->linesize[plane];
    }

    return frame;
}

/* check whether each plane in this buffer can be padded without copying */
static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf)
{
    int planes[4] = { -1, -1, -1, -1 }, *p = planes;
    int i, j;

    /* get all planes in this buffer */
    for (i = 0; i < FF_ARRAY_ELEMS(planes) && frame->data[i]; i++) {
        if (av_frame_get_plane_buffer(frame, i) == buf)
            *p++ = i;
    }

    /* for each plane in this buffer, check that it can be padded without
     * going over buffer bounds or other planes */
    for (i = 0; i < FF_ARRAY_ELEMS(planes) && planes[i] >= 0; i++) {
        int hsub = s->draw.hsub[planes[i]];
        int vsub = s->draw.vsub[planes[i]];

        uint8_t *start = frame->data[planes[i]];
        uint8_t *end   = start + (frame->height >> vsub) *
                                 frame->linesize[planes[i]];

        /* amount of free space needed before the start and after the end
         * of the plane */
        ptrdiff_t req_start = (s->x >> hsub) * s->draw.pixelstep[planes[i]] +
                              (s->y >> vsub) * frame->linesize[planes[i]];
        ptrdiff_t req_end   = ((s->w - s->x - frame->width) >> hsub) *
                              s->draw.pixelstep[planes[i]] +
                              ((s->h - s->y - frame->height) >> vsub) * frame->linesize[planes[i]];

        if (frame->linesize[planes[i]] < (s->w >> hsub) * s->draw.pixelstep[planes[i]])
            return 1;
        if (start - buf->data < req_start ||
            (buf->data + buf->size) - end < req_end)
            return 1;

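        /* the space claimed before and after this plane must not run into any
         * other plane stored in the same buffer */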
        for (j = 0; j < FF_ARRAY_ELEMS(planes) && planes[j] >= 0; j++) {
            int vsub1 = s->draw.vsub[planes[j]];
            uint8_t *start1 = frame->data[planes[j]];
            uint8_t *end1   = start1 + (frame->height >> vsub1) *
                                       frame->linesize[planes[j]];
            if (i == j)
                continue;

            if (FFSIGN(start - end1) != FFSIGN(start - end1 - req_start) ||
                FFSIGN(end - start1) != FFSIGN(end - start1 + req_end))
                return 1;
        }
    }

    return 0;
}

static int frame_needs_copy(PadContext *s, AVFrame *frame)
{
    int i;

    if (!av_frame_is_writable(frame))
        return 1;

    for (i = 0; i < 4 && frame->buf[i]; i++)
        if (buffer_needs_copy(s, frame, frame->buf[i]))
            return 1;
    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    PadContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int needs_copy;
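    /* with eval=frame, re-evaluate the expressions whenever the incoming
     * frame properties differ from what the links were configured for */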
    if (s->eval_mode == EVAL_MODE_FRAME && (
           in->width  != s->inlink_w
        || in->height != s->inlink_h
        || in->format != outlink->format
        || in->sample_aspect_ratio.den != outlink->sample_aspect_ratio.den
        || in->sample_aspect_ratio.num != outlink->sample_aspect_ratio.num)) {
        int ret;

        inlink->dst->inputs[0]->format = in->format;
        inlink->dst->inputs[0]->w      = in->width;
        inlink->dst->inputs[0]->h      = in->height;

        inlink->dst->inputs[0]->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
        inlink->dst->inputs[0]->sample_aspect_ratio.num = in->sample_aspect_ratio.num;

        if ((ret = config_input(inlink)) < 0) {
            s->inlink_w = -1;
            return ret;
        }
        if ((ret = config_output(outlink)) < 0) {
            s->inlink_w = -1;
            return ret;
        }
    }

    needs_copy = frame_needs_copy(s, in);

    if (needs_copy) {
        av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible, allocating new frame\n");
        out = ff_get_video_buffer(inlink->dst->outputs[0],
                                  FFMAX(inlink->w, s->w),
                                  FFMAX(inlink->h, s->h));
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    } else {
        int i;

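        /* pad in place: move the plane pointers up and left to the top-left
         * corner of the padded area (the reverse of the shift applied in
         * get_video_buffer()) */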
        out = in;
        for (i = 0; i < 4 && out->data[i] && out->linesize[i]; i++) {
            int hsub = s->draw.hsub[i];
            int vsub = s->draw.vsub[i];
            out->data[i] -= (s->x >> hsub) * s->draw.pixelstep[i] +
                            (s->y >> vsub) * out->linesize[i];
        }
    }

    /* top bar */
    if (s->y) {
        ff_fill_rectangle(&s->draw, &s->color,
                          out->data, out->linesize,
                          0, 0, s->w, s->y);
    }

    /* bottom bar */
    if (s->h > s->y + s->in_h) {
        ff_fill_rectangle(&s->draw, &s->color,
                          out->data, out->linesize,
                          0, s->y + s->in_h, s->w, s->h - s->y - s->in_h);
    }

    /* left border */
    ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
                      0, s->y, s->x, in->height);

    if (needs_copy) {
        ff_copy_rectangle2(&s->draw,
                           out->data, out->linesize, in->data, in->linesize,
                           s->x, s->y, 0, 0, in->width, in->height);
    }

    /* right border */
    ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
                      s->x + s->in_w, s->y, s->w - s->x - s->in_w,
                      in->height);

    out->width  = s->w;
    out->height = s->h;

    if (in != out)
        av_frame_free(&in);
    return ff_filter_frame(inlink->dst->outputs[0], out);
}

#define OFFSET(x) offsetof(PadContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption pad_options[] = {
    { "width",  "set the pad area width expression",                        OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "w",      "set the pad area width expression",                        OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "height", "set the pad area height expression",                       OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "h",      "set the pad area height expression",                       OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "x",      "set the x offset expression for the input image position", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"},  CHAR_MIN, CHAR_MAX, FLAGS },
    { "y",      "set the y offset expression for the input image position", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"},  CHAR_MIN, CHAR_MAX, FLAGS },
    { "color",  "set the color of the padded area border",                  OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
    { "eval",   "specify when to evaluate expressions",                     OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
    { "init",   "eval expressions once during initialization",              0, AV_OPT_TYPE_CONST, {.i64 = EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
    { "frame",  "eval expressions during initialization and per-frame",     0, AV_OPT_TYPE_CONST, {.i64 = EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { "aspect", "pad to fit an aspect instead of a resolution",             OFFSET(aspect), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, 0, DBL_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(pad);

static const AVFilterPad avfilter_vf_pad_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .config_props     = config_input,
        .get_video_buffer = get_video_buffer,
        .filter_frame     = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_pad_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_pad = {
    .name          = "pad",
    .description   = NULL_IF_CONFIG_SMALL("Pad the input video."),
    .priv_size     = sizeof(PadContext),
    .priv_class    = &pad_class,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_pad_inputs,
    .outputs       = avfilter_vf_pad_outputs,
};