FFmpeg
vf_libopencv.c
/*
 * Copyright (c) 2010 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * libopencv wrapper functions
 */

#include "config.h"
#if HAVE_OPENCV2_CORE_CORE_C_H
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#else
#include <opencv/cv.h>
#include <opencv/cxcore.h>
#endif
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/file.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

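/* Wrap a video AVFrame's data plane in an IplImage header (and copy the
 * resulting layout back) without duplicating pixel data: only header fields
 * such as depth, channel count, origin and row stride are filled in. */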
static void fill_iplimage_from_frame(IplImage *img, const AVFrame *frame, enum AVPixelFormat pixfmt)
{
    IplImage *tmpimg;
    int depth, channels_nb;

    if      (pixfmt == AV_PIX_FMT_GRAY8) { depth = IPL_DEPTH_8U; channels_nb = 1; }
    else if (pixfmt == AV_PIX_FMT_BGRA)  { depth = IPL_DEPTH_8U; channels_nb = 4; }
    else if (pixfmt == AV_PIX_FMT_BGR24) { depth = IPL_DEPTH_8U; channels_nb = 3; }
    else return;

    tmpimg = cvCreateImageHeader((CvSize){frame->width, frame->height}, depth, channels_nb);
    *img = *tmpimg;
    img->imageData = img->imageDataOrigin = frame->data[0];
    img->dataOrder = IPL_DATA_ORDER_PIXEL;
    img->origin    = IPL_ORIGIN_TL;
    img->widthStep = frame->linesize[0];
}

static void fill_frame_from_iplimage(AVFrame *frame, const IplImage *img, enum AVPixelFormat pixfmt)
{
    frame->linesize[0] = img->widthStep;
    frame->data[0]     = img->imageData;
}

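/* Only pixel formats that map directly onto an 8-bit IplImage layout are
 * negotiated; see fill_iplimage_from_frame() above. */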
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

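/* Per-instance state: the filter name and parameter string selected through
 * the AVOptions below, plus the callbacks and private data of the wrapped
 * libopencv filter chosen in init(). */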
typedef struct OCVContext {
    const AVClass *class;
    char *name;
    char *params;
    int (*init)(AVFilterContext *ctx, const char *args);
    void (*uninit)(AVFilterContext *ctx);
    void (*end_frame_filter)(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg);
    void *priv;
} OCVContext;

typedef struct SmoothContext {
    int type;
    int param1, param2;
    double param3, param4;
} SmoothContext;

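/* The "smooth" parameter string is "type|param1|param2|param3|param4",
 * e.g. "blur|5" or "gaussian|5|0|1.2" (illustrative values); the fields map
 * directly onto the cvSmooth() arguments, and unspecified ones keep the
 * defaults set below. */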
static av_cold int smooth_init(AVFilterContext *ctx, const char *args)
{
    OCVContext *s = ctx->priv;
    SmoothContext *smooth = s->priv;
    char type_str[128] = "gaussian";

    smooth->param1 = 3;
    smooth->param2 = 0;
    smooth->param3 = 0.0;
    smooth->param4 = 0.0;

    if (args)
        sscanf(args, "%127[^|]|%d|%d|%lf|%lf", type_str, &smooth->param1, &smooth->param2, &smooth->param3, &smooth->param4);

    if      (!strcmp(type_str, "blur"         )) smooth->type = CV_BLUR;
    else if (!strcmp(type_str, "blur_no_scale")) smooth->type = CV_BLUR_NO_SCALE;
    else if (!strcmp(type_str, "median"       )) smooth->type = CV_MEDIAN;
    else if (!strcmp(type_str, "gaussian"     )) smooth->type = CV_GAUSSIAN;
    else if (!strcmp(type_str, "bilateral"    )) smooth->type = CV_BILATERAL;
    else {
        av_log(ctx, AV_LOG_ERROR, "Smoothing type '%s' unknown.\n", type_str);
        return AVERROR(EINVAL);
    }

    if (smooth->param1 < 0 || !(smooth->param1%2)) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid value '%d' for param1, it has to be a positive odd number\n",
               smooth->param1);
        return AVERROR(EINVAL);
    }
    if ((smooth->type == CV_BLUR || smooth->type == CV_BLUR_NO_SCALE || smooth->type == CV_GAUSSIAN) &&
        (smooth->param2 < 0 || (smooth->param2 && !(smooth->param2%2)))) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid value '%d' for param2, it has to be zero or a positive odd number\n",
               smooth->param2);
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_VERBOSE, "type:%s param1:%d param2:%d param3:%f param4:%f\n",
           type_str, smooth->param1, smooth->param2, smooth->param3, smooth->param4);
    return 0;
}

static void smooth_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
    OCVContext *s = ctx->priv;
    SmoothContext *smooth = s->priv;
    cvSmooth(inimg, outimg, smooth->type, smooth->param1, smooth->param2, smooth->param3, smooth->param4);
}

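/* Read a custom structuring-element shape from a text file: each line is a
 * row of the kernel and every non-blank (av_isgraph) character marks a set
 * point, so rows counts the lines and cols the longest line. */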
static int read_shape_from_file(int *cols, int *rows, int **values, const char *filename,
                                void *log_ctx)
{
    uint8_t *buf, *p, *pend;
    size_t size;
    int ret, i, j, w;

    if ((ret = av_file_map(filename, &buf, &size, 0, log_ctx)) < 0)
        return ret;

    /* prescan file to get the number of lines and the maximum width */
    w = 0;
    for (i = 0; i < size; i++) {
        if (buf[i] == '\n') {
            if (*rows == INT_MAX) {
                av_log(log_ctx, AV_LOG_ERROR, "Overflow on the number of rows in the file\n");
                ret = AVERROR_INVALIDDATA;
                goto end;
            }
            ++(*rows);
            *cols = FFMAX(*cols, w);
            w = 0;
        } else if (w == INT_MAX) {
            av_log(log_ctx, AV_LOG_ERROR, "Overflow on the number of columns in the file\n");
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        w++;
    }
    if (*rows > (SIZE_MAX / sizeof(int) / *cols)) {
        av_log(log_ctx, AV_LOG_ERROR, "File with size %dx%d is too big\n",
               *rows, *cols);
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    if (!(*values = av_mallocz_array(sizeof(int) * *rows, *cols))) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* fill *values */
    p    = buf;
    pend = buf + size - 1;
    for (i = 0; i < *rows; i++) {
        for (j = 0;; j++) {
            if (p > pend || *p == '\n') {
                p++;
                break;
            } else
                (*values)[*cols*i + j] = !!av_isgraph(*(p++));
        }
    }

end:
    av_file_unmap(buf, size);

#ifdef DEBUG
    {
        char *line;
        if (!(line = av_malloc(*cols + 1)))
            return AVERROR(ENOMEM);
        for (i = 0; i < *rows; i++) {
            for (j = 0; j < *cols; j++)
                line[j] = (*values)[i * *cols + j] ? '@' : ' ';
            line[j] = 0;
            av_log(log_ctx, AV_LOG_DEBUG, "%3d: %s\n", i, line);
        }
        av_free(line);
    }
#endif

    return ret;
}

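/* Parse a structuring-element specification of the form
 * "COLSxROWS+ANCHOR_XxANCHOR_Y/SHAPE", where SHAPE is one of rect, cross,
 * ellipse or custom=FILENAME (the latter loads the shape through
 * read_shape_from_file() above). */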
static int parse_iplconvkernel(IplConvKernel **kernel, char *buf, void *log_ctx)
{
    char shape_filename[128] = "", shape_str[32] = "rect";
    int cols = 0, rows = 0, anchor_x = 0, anchor_y = 0, shape = CV_SHAPE_RECT;
    int *values = NULL, ret = 0;

    sscanf(buf, "%dx%d+%dx%d/%31[^=]=%127s", &cols, &rows, &anchor_x, &anchor_y, shape_str, shape_filename);

    if      (!strcmp(shape_str, "rect"   )) shape = CV_SHAPE_RECT;
    else if (!strcmp(shape_str, "cross"  )) shape = CV_SHAPE_CROSS;
    else if (!strcmp(shape_str, "ellipse")) shape = CV_SHAPE_ELLIPSE;
    else if (!strcmp(shape_str, "custom" )) {
        shape = CV_SHAPE_CUSTOM;
        if ((ret = read_shape_from_file(&cols, &rows, &values, shape_filename, log_ctx)) < 0)
            return ret;
    } else {
        av_log(log_ctx, AV_LOG_ERROR,
               "Shape unspecified or type '%s' unknown.\n", shape_str);
        ret = AVERROR(EINVAL);
        goto out;
    }

    if (rows <= 0 || cols <= 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Invalid non-positive values for shape size %dx%d\n", cols, rows);
        ret = AVERROR(EINVAL);
        goto out;
    }

    if (anchor_x < 0 || anchor_y < 0 || anchor_x >= cols || anchor_y >= rows) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Shape anchor %dx%d is not inside the rectangle with size %dx%d.\n",
               anchor_x, anchor_y, cols, rows);
        ret = AVERROR(EINVAL);
        goto out;
    }

    *kernel = cvCreateStructuringElementEx(cols, rows, anchor_x, anchor_y, shape, values);
    if (!*kernel) {
        ret = AVERROR(ENOMEM);
        goto out;
    }

    av_log(log_ctx, AV_LOG_VERBOSE, "Structuring element: w:%d h:%d x:%d y:%d shape:%s\n",
           cols, rows, anchor_x, anchor_y, shape_str);
out:
    av_freep(&values);
    return ret;
}

typedef struct DilateContext {
    int nb_iterations;
    IplConvKernel *kernel;
} DilateContext;

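/* "dilate" and "erode" share this context; the parameter string is
 * "KERNEL_SPEC|NB_ITERATIONS", e.g. "5x5+2x2/cross|2" (illustrative), with
 * the kernel defaulting to 3x3+0x0/rect and the iteration count to 1. */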
static av_cold int dilate_init(AVFilterContext *ctx, const char *args)
{
    OCVContext *s = ctx->priv;
    DilateContext *dilate = s->priv;
    char default_kernel_str[] = "3x3+0x0/rect";
    char *kernel_str = NULL;
    const char *buf = args;
    int ret;

    if (args) {
        kernel_str = av_get_token(&buf, "|");

        if (!kernel_str)
            return AVERROR(ENOMEM);
    }

    ret = parse_iplconvkernel(&dilate->kernel,
                              (!kernel_str || !*kernel_str) ? default_kernel_str
                                                            : kernel_str,
                              ctx);
    av_free(kernel_str);
    if (ret < 0)
        return ret;

    if (!buf || sscanf(buf, "|%d", &dilate->nb_iterations) != 1)
        dilate->nb_iterations = 1;
    av_log(ctx, AV_LOG_VERBOSE, "iterations_nb:%d\n", dilate->nb_iterations);
    if (dilate->nb_iterations <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid non-positive value '%d' for nb_iterations\n",
               dilate->nb_iterations);
        return AVERROR(EINVAL);
    }
    return 0;
}

static av_cold void dilate_uninit(AVFilterContext *ctx)
{
    OCVContext *s = ctx->priv;
    DilateContext *dilate = s->priv;

    cvReleaseStructuringElement(&dilate->kernel);
}

static void dilate_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
    OCVContext *s = ctx->priv;
    DilateContext *dilate = s->priv;
    cvDilate(inimg, outimg, dilate->kernel, dilate->nb_iterations);
}

static void erode_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
    OCVContext *s = ctx->priv;
    DilateContext *dilate = s->priv;
    cvErode(inimg, outimg, dilate->kernel, dilate->nb_iterations);
}

typedef struct OCVFilterEntry {
    const char *name;
    size_t priv_size;
    int (*init)(AVFilterContext *ctx, const char *args);
    void (*uninit)(AVFilterContext *ctx);
    void (*end_frame_filter)(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg);
} OCVFilterEntry;

static const OCVFilterEntry ocv_filter_entries[] = {
    { "dilate", sizeof(DilateContext), dilate_init, dilate_uninit, dilate_end_frame_filter },
    { "erode",  sizeof(DilateContext), dilate_init, dilate_uninit, erode_end_frame_filter  },
    { "smooth", sizeof(SmoothContext), smooth_init, NULL, smooth_end_frame_filter },
};

static av_cold int init(AVFilterContext *ctx)
{
    OCVContext *s = ctx->priv;
    int i;

    if (!s->name) {
        av_log(ctx, AV_LOG_ERROR, "No libopencv filter name specified\n");
        return AVERROR(EINVAL);
    }
    for (i = 0; i < FF_ARRAY_ELEMS(ocv_filter_entries); i++) {
        const OCVFilterEntry *entry = &ocv_filter_entries[i];
        if (!strcmp(s->name, entry->name)) {
            s->init             = entry->init;
            s->uninit           = entry->uninit;
            s->end_frame_filter = entry->end_frame_filter;

            if (!(s->priv = av_mallocz(entry->priv_size)))
                return AVERROR(ENOMEM);
            return s->init(ctx, s->params);
        }
    }

    av_log(ctx, AV_LOG_ERROR, "No libopencv filter named '%s'\n", s->name);
    return AVERROR(EINVAL);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    OCVContext *s = ctx->priv;

    if (s->uninit)
        s->uninit(ctx);
    av_freep(&s->priv);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    OCVContext *s = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    IplImage inimg, outimg;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    fill_iplimage_from_frame(&inimg , in , inlink->format);
    fill_iplimage_from_frame(&outimg, out, inlink->format);
    s->end_frame_filter(ctx, &inimg, &outimg);
    fill_frame_from_iplimage(out, &outimg, inlink->format);

    av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

#define OFFSET(x) offsetof(OCVContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption ocv_options[] = {
    { "filter_name",   NULL, OFFSET(name),   AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(ocv);

static const AVFilterPad avfilter_vf_ocv_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_ocv_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_ocv = {
    .name          = "ocv",
    .description   = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),
    .priv_size     = sizeof(OCVContext),
    .priv_class    = &ocv_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = avfilter_vf_ocv_inputs,
    .outputs       = avfilter_vf_ocv_outputs,
};
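
/* Illustrative filtergraph usage (assumed from the option names and the
 * parameter parsers above, not taken verbatim from this file):
 *   ocv=filter_name=dilate
 *   ocv=filter_name=smooth:filter_params=blur|5
 *   ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
 * See the FFmpeg documentation of the ocv filter for the authoritative
 * syntax. */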