FFmpeg
vf_zoompan.c
/*
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libswscale/swscale.h"

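/* Names of the variables that may be used in the "zoom"/"z", "x", "y" and
 * "d" option expressions; they map 1:1 onto enum var_name below. */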
static const char *const var_names[] = {
    "in_w",   "iw",
    "in_h",   "ih",
    "out_w",  "ow",
    "out_h",  "oh",
    "in",
    "on",
    "duration",
    "pduration",
    "in_time",  "it",
    "out_time", "time", "ot",
    "frame",
    "zoom",
    "pzoom",
    "x", "px",
    "y", "py",
    "a",
    "sar",
    "dar",
    "hsub",
    "vsub",
    NULL
};

enum var_name {
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_IN,
    VAR_ON,
    VAR_DURATION,
    VAR_PDURATION,
    VAR_IN_TIME,  VAR_IT,
    VAR_OUT_TIME, VAR_TIME, VAR_OT,
    VAR_FRAME,
    VAR_ZOOM,
    VAR_PZOOM,
    VAR_X, VAR_PX,
    VAR_Y, VAR_PY,
    VAR_A,
    VAR_SAR,
    VAR_DAR,
    VAR_HSUB,
    VAR_VSUB,
    VARS_NB
};

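/* Per-instance filter state: the option strings and their parsed
 * expressions, the output geometry and frame rate, the previous pan/zoom
 * values, and the input frame currently being expanded into nb_frames
 * output frames. */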
typedef struct ZPcontext {
    const AVClass *class;
    char *zoom_expr_str;
    char *x_expr_str;
    char *y_expr_str;
    char *duration_expr_str;

    AVExpr *zoom_expr, *x_expr, *y_expr;

    int w, h;
    double x, y;
    double prev_zoom;
    int prev_nb_frames;
    struct SwsContext *sws;
    int64_t frame_count;
    const AVPixFmtDescriptor *desc;
    AVFrame *in;
    double var_values[VARS_NB];
    int nb_frames;
    int current_frame;
    int finished;
    AVRational framerate;
} ZPContext;

#define OFFSET(x) offsetof(ZPContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption zoompan_options[] = {
    { "zoom", "set the zoom expression", OFFSET(zoom_expr_str), AV_OPT_TYPE_STRING, {.str = "1" }, .flags = FLAGS },
    { "z", "set the zoom expression", OFFSET(zoom_expr_str), AV_OPT_TYPE_STRING, {.str = "1" }, .flags = FLAGS },
    { "x", "set the x expression", OFFSET(x_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags = FLAGS },
    { "y", "set the y expression", OFFSET(y_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags = FLAGS },
    { "d", "set the duration expression", OFFSET(duration_expr_str), AV_OPT_TYPE_STRING, {.str="90"}, .flags = FLAGS },
    { "s", "set the output image size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, .flags = FLAGS },
    { "fps", "set the output framerate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 0, INT_MAX, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(zoompan);

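/*
 * Illustrative usage of the options above (an example, not part of this
 * file): zoom in towards the centre up to 1.5x, spreading each input frame
 * over 700 output frames:
 *
 *   zoompan=z='min(zoom+0.0015,1.5)':d=700:x='iw/2-(iw/zoom/2)':y='ih/2-(ih/zoom/2)'
 */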
static av_cold int init(AVFilterContext *ctx)
{
    ZPContext *s = ctx->priv;

    s->prev_zoom = 1;
    return 0;
}

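/* Configure the output link from the options and compile the zoom/x/y
 * expressions once, so they can be cheaply re-evaluated per output frame. */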
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ZPContext *s = ctx->priv;
    int ret;

    outlink->w = s->w;
    outlink->h = s->h;
    outlink->time_base = av_inv_q(s->framerate);
    outlink->frame_rate = s->framerate;
    s->desc = av_pix_fmt_desc_get(outlink->format);
    s->finished = 1;

    ret = av_expr_parse(&s->zoom_expr, s->zoom_expr_str, var_names, NULL, NULL, NULL, NULL, 0, ctx);
    if (ret < 0)
        return ret;

    ret = av_expr_parse(&s->x_expr, s->x_expr_str, var_names, NULL, NULL, NULL, NULL, 0, ctx);
    if (ret < 0)
        return ret;

    ret = av_expr_parse(&s->y_expr, s->y_expr_str, var_names, NULL, NULL, NULL, NULL, 0, ctx);
    if (ret < 0)
        return ret;

    return 0;
}

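/* Produce one output frame from the cached input frame: evaluate the zoom,
 * x and y expressions, crop a (in_w/zoom) x (in_h/zoom) window aligned to
 * the chroma grid, scale it to the output size with swscale, and push it
 * downstream.  Once nb_frames outputs have been emitted for this input, the
 * input frame is released and the filter is marked as finished. */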
static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_values, int i,
                               double *zoom, double *dx, double *dy)
{
    ZPContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterLink *inlink = ctx->inputs[0];
    int64_t pts = s->frame_count;
    int k, x, y, w, h, ret = 0;
    uint8_t *input[4];
    int px[4], py[4];
    AVFrame *out;

    var_values[VAR_PX] = s->x;
    var_values[VAR_PY] = s->y;
    var_values[VAR_PZOOM] = s->prev_zoom;
    var_values[VAR_PDURATION] = s->prev_nb_frames;
    var_values[VAR_IN_TIME] = var_values[VAR_IT] = in->pts == AV_NOPTS_VALUE ?
        NAN : in->pts * av_q2d(inlink->time_base);
    var_values[VAR_OUT_TIME] = pts * av_q2d(outlink->time_base);
    var_values[VAR_TIME] = var_values[VAR_OT] = var_values[VAR_OUT_TIME];
    var_values[VAR_FRAME] = i;
    var_values[VAR_ON] = outlink->frame_count_in;

    *zoom = av_expr_eval(s->zoom_expr, var_values, NULL);

    *zoom = av_clipd(*zoom, 1, 10);
    var_values[VAR_ZOOM] = *zoom;
    w = in->width  * (1.0 / *zoom);
    h = in->height * (1.0 / *zoom);

    *dx = av_expr_eval(s->x_expr, var_values, NULL);

    x = *dx = av_clipd(*dx, 0, FFMAX(in->width - w, 0));
    var_values[VAR_X] = *dx;
    x &= ~((1 << s->desc->log2_chroma_w) - 1);

    *dy = av_expr_eval(s->y_expr, var_values, NULL);

    y = *dy = av_clipd(*dy, 0, FFMAX(in->height - h, 0));
    var_values[VAR_Y] = *dy;
    y &= ~((1 << s->desc->log2_chroma_h) - 1);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    px[1] = px[2] = AV_CEIL_RSHIFT(x, s->desc->log2_chroma_w);
    px[0] = px[3] = x;

    py[1] = py[2] = AV_CEIL_RSHIFT(y, s->desc->log2_chroma_h);
    py[0] = py[3] = y;

    s->sws = sws_alloc_context();
    if (!s->sws) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    for (k = 0; in->data[k]; k++)
        input[k] = in->data[k] + py[k] * in->linesize[k] + px[k];

    av_opt_set_int(s->sws, "srcw", w, 0);
    av_opt_set_int(s->sws, "srch", h, 0);
    av_opt_set_int(s->sws, "src_format", in->format, 0);
    av_opt_set_int(s->sws, "dstw", outlink->w, 0);
    av_opt_set_int(s->sws, "dsth", outlink->h, 0);
    av_opt_set_int(s->sws, "dst_format", outlink->format, 0);
    av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0);

    if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0)
        goto error;

    sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize, 0, h, out->data, out->linesize);

    out->pts = pts;
    s->frame_count++;

    ret = ff_filter_frame(outlink, out);
    sws_freeContext(s->sws);
    s->sws = NULL;
    s->current_frame++;

    if (s->current_frame >= s->nb_frames) {
        if (*dx != -1)
            s->x = *dx;
        if (*dy != -1)
            s->y = *dy;
        if (*zoom != -1)
            s->prev_zoom = *zoom;
        s->prev_nb_frames = s->nb_frames;
        s->nb_frames = 0;
        s->current_frame = 0;
        av_frame_free(&s->in);
        s->finished = 1;
    }
    return ret;
error:
    sws_freeContext(s->sws);
    s->sws = NULL;
    av_frame_free(&out);
    return ret;
}

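/* activate() callback: keep emitting output frames for the currently cached
 * input; when none is cached, consume a new input frame, evaluate the "d"
 * (duration) expression to decide how many output frames it should produce,
 * and emit the first one.  EOF/status is only forwarded downstream once the
 * pending input has been fully expanded. */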
static int activate(AVFilterContext *ctx)
{
    ZPContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int status, ret = 0;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (s->in && ff_outlink_frame_wanted(outlink)) {
        double zoom = -1, dx = -1, dy = -1;

        ret = output_single_frame(ctx, s->in, s->var_values, s->current_frame,
                                  &zoom, &dx, &dy);
        if (ret < 0)
            return ret;
    }

    if (!s->in && (ret = ff_inlink_consume_frame(inlink, &s->in)) > 0) {
        double zoom = -1, dx = -1, dy = -1, nb_frames;

        s->finished = 0;
        s->var_values[VAR_IN_W]  = s->var_values[VAR_IW] = s->in->width;
        s->var_values[VAR_IN_H]  = s->var_values[VAR_IH] = s->in->height;
        s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = s->w;
        s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = s->h;
        s->var_values[VAR_IN]    = inlink->frame_count_out - 1;
        s->var_values[VAR_ON]    = outlink->frame_count_in;
        s->var_values[VAR_PX]    = s->x;
        s->var_values[VAR_PY]    = s->y;
        s->var_values[VAR_X]     = 0;
        s->var_values[VAR_Y]     = 0;
        s->var_values[VAR_PZOOM] = s->prev_zoom;
        s->var_values[VAR_ZOOM]  = 1;
        s->var_values[VAR_PDURATION] = s->prev_nb_frames;
        s->var_values[VAR_A] = (double) s->in->width / s->in->height;
        s->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
            (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
        s->var_values[VAR_DAR] = s->var_values[VAR_A] * s->var_values[VAR_SAR];
        s->var_values[VAR_HSUB] = 1 << s->desc->log2_chroma_w;
        s->var_values[VAR_VSUB] = 1 << s->desc->log2_chroma_h;

        if ((ret = av_expr_parse_and_eval(&nb_frames, s->duration_expr_str,
                                          var_names, s->var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
            av_frame_free(&s->in);
            return ret;
        }

        s->var_values[VAR_DURATION] = s->nb_frames = nb_frames;

        ret = output_single_frame(ctx, s->in, s->var_values, s->current_frame,
                                  &zoom, &dx, &dy);
        if (ret < 0)
            return ret;
    }
    if (ret < 0) {
        return ret;
    } else if (s->finished && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    } else {
        if (ff_outlink_frame_wanted(outlink) && s->finished)
            ff_inlink_request_frame(inlink);
        return 0;
    }
}

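/* Only planar 8-bit formats are accepted, since the crop offsets are applied
 * per plane with plain byte pointer arithmetic in output_single_frame(). */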
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
        AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

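/* Release everything init(), config_output() and activate() may have
 * allocated: the scaler context, the parsed expressions and any input frame
 * still cached. */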
static av_cold void uninit(AVFilterContext *ctx)
{
    ZPContext *s = ctx->priv;

    sws_freeContext(s->sws);
    s->sws = NULL;
    av_expr_free(s->x_expr);
    av_expr_free(s->y_expr);
    av_expr_free(s->zoom_expr);
    av_frame_free(&s->in);
}

static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_zoompan = {
    .name          = "zoompan",
    .description   = NULL_IF_CONFIG_SMALL("Apply Zoom & Pan effect."),
    .priv_size     = sizeof(ZPContext),
    .priv_class    = &zoompan_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = inputs,
    .outputs       = outputs,
};