vf_zoompan.c

/*
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libswscale/swscale.h"

static const char *const var_names[] = {
    "in_w",   "iw",
    "in_h",   "ih",
    "out_w",  "ow",
    "out_h",  "oh",
    "in",
    "on",
    "duration",
    "pduration",
    "time",
    "frame",
    "zoom",
    "pzoom",
    "x", "px",
    "y", "py",
    "a",
    "sar",
    "dar",
    "hsub",
    "vsub",
    NULL
};

enum var_name {
    VAR_IN_W,  VAR_IW, VAR_IN_H,  VAR_IH,
    VAR_OUT_W, VAR_OW, VAR_OUT_H, VAR_OH,
    VAR_IN, VAR_ON,
    VAR_DURATION, VAR_PDURATION, VAR_TIME, VAR_FRAME,
    VAR_ZOOM, VAR_PZOOM,
    VAR_X, VAR_PX, VAR_Y, VAR_PY,
    VAR_A, VAR_SAR, VAR_DAR, VAR_HSUB, VAR_VSUB,
    VARS_NB
};

typedef struct ZPContext {
    const AVClass *class;
    char *zoom_expr_str;
    char *x_expr_str;
    char *y_expr_str;
    char *duration_expr_str;
    int w, h;
    double x, y;
    double prev_zoom;
    int prev_nb_frames;
    struct SwsContext *sws;
    int64_t frame_count;
    const AVPixFmtDescriptor *desc;
    AVFrame *in;
    double var_values[VARS_NB];
    int nb_frames;
    int current_frame;
    int finished;
    AVRational framerate;
} ZPContext;

#define OFFSET(x) offsetof(ZPContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption zoompan_options[] = {
    { "zoom", "set the zoom expression",     OFFSET(zoom_expr_str),     AV_OPT_TYPE_STRING, {.str = "1" },   .flags = FLAGS },
    { "z",    "set the zoom expression",     OFFSET(zoom_expr_str),     AV_OPT_TYPE_STRING, {.str = "1" },   .flags = FLAGS },
    { "x",    "set the x expression",        OFFSET(x_expr_str),        AV_OPT_TYPE_STRING, {.str = "0" },   .flags = FLAGS },
    { "y",    "set the y expression",        OFFSET(y_expr_str),        AV_OPT_TYPE_STRING, {.str = "0" },   .flags = FLAGS },
    { "d",    "set the duration expression", OFFSET(duration_expr_str), AV_OPT_TYPE_STRING, {.str = "90" },  .flags = FLAGS },
    { "s",    "set the output image size",   OFFSET(w),                 AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720" }, .flags = FLAGS },
    { "fps",  "set the output framerate",    OFFSET(framerate),         AV_OPT_TYPE_VIDEO_RATE, {.str = "25" }, 0, INT_MAX, .flags = FLAGS },
    { NULL }
};
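
/*
 * Example usage (illustrative, based on the options and expression
 * variables defined above): zoom in towards the centre of the input,
 * producing 125 output frames per input frame:
 *
 *   ffmpeg -i in.jpg -vf \
 *     "zoompan=z='min(zoom+0.0015,1.5)':d=125:x='iw/2-(iw/zoom/2)':y='ih/2-(ih/zoom/2)':s=1280x720" \
 *     out.mp4
 */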

AVFILTER_DEFINE_CLASS(zoompan);

static av_cold int init(AVFilterContext *ctx)
{
    ZPContext *s = ctx->priv;

    s->prev_zoom = 1;
    return 0;
}

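/*
 * Configure the output link: fixed output size and frame rate, with the
 * time base set to 1/framerate so output PTS simply counts output frames.
 */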
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ZPContext *s = ctx->priv;

    outlink->w = s->w;
    outlink->h = s->h;
    outlink->time_base = av_inv_q(s->framerate);
    outlink->frame_rate = s->framerate;
    s->desc = av_pix_fmt_desc_get(outlink->format);

    return 0;
}

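/*
 * Produce one output frame from the buffered input frame: evaluate the
 * zoom, x and y expressions, clamp the results, crop an (iw/zoom) x
 * (ih/zoom) window at (x, y) aligned to the chroma subsampling, and
 * scale it to the output size with swscale.
 */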
static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_values, int i,
                               double *zoom, double *dx, double *dy)
{
    ZPContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = s->frame_count;
    int k, x, y, w, h, ret = 0;
    uint8_t *input[4];
    int px[4], py[4];
    AVFrame *out;

    var_values[VAR_PX]        = s->x;
    var_values[VAR_PY]        = s->y;
    var_values[VAR_PZOOM]     = s->prev_zoom;
    var_values[VAR_PDURATION] = s->prev_nb_frames;
    var_values[VAR_TIME]      = pts * av_q2d(outlink->time_base);
    var_values[VAR_FRAME]     = i;
    var_values[VAR_ON]        = outlink->frame_count + 1;
    if ((ret = av_expr_parse_and_eval(zoom, s->zoom_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;

    *zoom = av_clipd(*zoom, 1, 10);
    var_values[VAR_ZOOM] = *zoom;
    w = in->width  * (1.0 / *zoom);
    h = in->height * (1.0 / *zoom);

    if ((ret = av_expr_parse_and_eval(dx, s->x_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;
    x = *dx = av_clipd(*dx, 0, FFMAX(in->width - w, 0));
    var_values[VAR_X] = *dx;
    x &= ~((1 << s->desc->log2_chroma_w) - 1);

    if ((ret = av_expr_parse_and_eval(dy, s->y_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;
    y = *dy = av_clipd(*dy, 0, FFMAX(in->height - h, 0));
    var_values[VAR_Y] = *dy;
    y &= ~((1 << s->desc->log2_chroma_h) - 1);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    px[1] = px[2] = AV_CEIL_RSHIFT(x, s->desc->log2_chroma_w);
    px[0] = px[3] = x;

    py[1] = py[2] = AV_CEIL_RSHIFT(y, s->desc->log2_chroma_h);
    py[0] = py[3] = y;

    s->sws = sws_alloc_context();
    if (!s->sws) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    for (k = 0; in->data[k]; k++)
        input[k] = in->data[k] + py[k] * in->linesize[k] + px[k];

    av_opt_set_int(s->sws, "srcw", w, 0);
    av_opt_set_int(s->sws, "srch", h, 0);
    av_opt_set_int(s->sws, "src_format", in->format, 0);
    av_opt_set_int(s->sws, "dstw", outlink->w, 0);
    av_opt_set_int(s->sws, "dsth", outlink->h, 0);
    av_opt_set_int(s->sws, "dst_format", outlink->format, 0);
    av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0);

    if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0)
        return ret;

    sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize, 0, h, out->data, out->linesize);

    out->pts = pts;
    s->frame_count++;

    ret = ff_filter_frame(outlink, out);
    sws_freeContext(s->sws);
    s->sws = NULL;
    s->current_frame++;
    return ret;
}

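/*
 * Called once per input frame: store the frame, seed the expression
 * variables and evaluate the duration expression to determine how many
 * output frames should be generated from this input.
 */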
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ZPContext *s = ctx->priv;
    double nb_frames;
    int ret;

    av_assert0(s->in == NULL);

    s->finished = 0;
    s->var_values[VAR_IN_W]  = s->var_values[VAR_IW] = in->width;
    s->var_values[VAR_IN_H]  = s->var_values[VAR_IH] = in->height;
    s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = s->w;
    s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = s->h;
    s->var_values[VAR_IN]    = inlink->frame_count + 1;
    s->var_values[VAR_ON]    = outlink->frame_count + 1;
    s->var_values[VAR_PX]    = s->x;
    s->var_values[VAR_PY]    = s->y;
    s->var_values[VAR_X]     = 0;
    s->var_values[VAR_Y]     = 0;
    s->var_values[VAR_PZOOM] = s->prev_zoom;
    s->var_values[VAR_ZOOM]  = 1;
    s->var_values[VAR_PDURATION] = s->prev_nb_frames;
    s->var_values[VAR_A]     = (double) in->width / in->height;
    s->var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    s->var_values[VAR_DAR]   = s->var_values[VAR_A] * s->var_values[VAR_SAR];
    s->var_values[VAR_HSUB]  = 1 << s->desc->log2_chroma_w;
    s->var_values[VAR_VSUB]  = 1 << s->desc->log2_chroma_h;

    if ((ret = av_expr_parse_and_eval(&nb_frames, s->duration_expr_str,
                                      var_names, s->var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_frame_free(&in);
        return ret;
    }

    s->var_values[VAR_DURATION] = s->nb_frames = nb_frames;
    s->in = in;

    return 0;
}

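/*
 * Called when the next filter wants a frame: emit one output frame from
 * the buffered input; once all requested frames for this input have been
 * produced, carry the last zoom/x/y over as the "previous" values, reset
 * the per-input state and request a new input frame.
 */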
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ZPContext *s = ctx->priv;
    AVFrame *in = s->in;
    double zoom = -1, dx = -1, dy = -1;
    int ret = -1;

    if (in) {
        ret = output_single_frame(ctx, in, s->var_values, s->current_frame,
                                  &zoom, &dx, &dy);
        if (ret < 0)
            goto fail;
    }

    if (s->current_frame >= s->nb_frames) {
        if (dx != -1)
            s->x = dx;
        if (dy != -1)
            s->y = dy;
        if (zoom != -1)
            s->prev_zoom = zoom;
        s->prev_nb_frames = s->nb_frames;
        s->nb_frames = 0;
        s->current_frame = 0;
        av_frame_free(&s->in);
        s->finished = 1;
        ret = ff_request_frame(ctx->inputs[0]);
    }

fail:
    sws_freeContext(s->sws);
    s->sws = NULL;

    return ret;
}

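/* Report how many output frames can still be produced without new input. */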
static int poll_frame(AVFilterLink *link)
{
    ZPContext *s = link->src->priv;
    return s->nb_frames - s->current_frame;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
        AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ZPContext *s = ctx->priv;

    sws_freeContext(s->sws);
    s->sws = NULL;
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .needs_fifo   = 1,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .poll_frame    = poll_frame,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_zoompan = {
    .name          = "zoompan",
    .description   = NULL_IF_CONFIG_SMALL("Apply Zoom & Pan effect."),
    .priv_size     = sizeof(ZPContext),
    .priv_class    = &zoompan_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
Definition: vf_zoompan.c:85