FFmpeg
vf_convolution_opencl.c
/*
 * Copyright (c) 2018 Danil Iashchenko
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"


#include "avfilter.h"
#include "internal.h"
#include "opencl.h"
#include "opencl_source.h"
#include "video.h"

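/*
 * Per-instance state shared by the convolution_opencl, sobel_opencl,
 * prewitt_opencl and roberts_opencl filters: the compiled kernel and
 * command queue, the per-plane convolution matrices (as OpenCL buffers)
 * together with their element counts and dimensions, the rdiv and bias
 * factors, and the planes/scale/delta parameters used by the
 * edge-detection variants.
 */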
typedef struct ConvolutionOpenCLContext {
    OpenCLFilterContext ocf;

    int              initialised;
    cl_kernel        kernel;
    cl_command_queue command_queue;

    char *matrix_str[4];

    cl_mem   matrix[4];
    cl_int   matrix_sizes[4];
    cl_int   dims[4];
    cl_float rdivs[4];
    cl_float biases[4];

    cl_int   planes;
    cl_float scale;
    cl_float delta;

} ConvolutionOpenCLContext;

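/*
 * Build the OpenCL program, create a command queue on the device and
 * create the kernel matching the filter instance (convolution_global,
 * sobel_global, prewitt_global or roberts_global).
 */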
static int convolution_opencl_init(AVFilterContext *avctx)
{
    ConvolutionOpenCLContext *ctx = avctx->priv;
    const char *kernel_name;
    cl_int cle;
    int err;

    err = ff_opencl_filter_load_program(avctx, &ff_opencl_source_convolution, 1);
    if (err < 0)
        goto fail;

    ctx->command_queue = clCreateCommandQueue(ctx->ocf.hwctx->context,
                                              ctx->ocf.hwctx->device_id,
                                              0, &cle);
    CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to create OpenCL "
                     "command queue %d.\n", cle);

    if (!strcmp(avctx->filter->name, "convolution_opencl")) {
        kernel_name = "convolution_global";
    } else if (!strcmp(avctx->filter->name, "sobel_opencl")) {
        kernel_name = "sobel_global";
    } else if (!strcmp(avctx->filter->name, "prewitt_opencl")) {
        kernel_name = "prewitt_global";
    } else if (!strcmp(avctx->filter->name, "roberts_opencl")) {
        kernel_name = "roberts_global";
    }
    ctx->kernel = clCreateKernel(ctx->ocf.program, kernel_name, &cle);
    CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to create "
                     "kernel %d.\n", cle);

    ctx->initialised = 1;
    return 0;

fail:
    if (ctx->command_queue)
        clReleaseCommandQueue(ctx->command_queue);
    if (ctx->kernel)
        clReleaseKernel(ctx->kernel);
    return err;
}

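/*
 * Parse the per-plane matrix strings (9, 25 or 49 floats), record each
 * plane's matrix dimension, scale the biases from the 0..255 range to the
 * normalised range used by the kernel, and upload each matrix into a
 * read-only OpenCL buffer.
 */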
static int convolution_opencl_make_filter_params(AVFilterContext *avctx)
{
    ConvolutionOpenCLContext *ctx = avctx->priv;
    float *matrix = NULL;
    size_t matrix_bytes;
    cl_mem buffer;
    cl_int cle;
    int i, j;
    int sscanf_err;
    char *p, *arg, *saveptr = NULL;
    float input_matrix[4][49];

    for (i = 0; i < 4; i++) {
        ctx->biases[i] = ctx->biases[i] / 255.0;
    }

    for (i = 0; i < 4; i++) {
        p = ctx->matrix_str[i];
        while (ctx->matrix_sizes[i] < 49) {
            arg = av_strtok(p, " ", &saveptr);
            if (!arg) {
                break;
            }
            p = NULL;
            sscanf_err = sscanf(arg, "%f", &input_matrix[i][ctx->matrix_sizes[i]]);
            if (sscanf_err != 1) {
                av_log(ctx, AV_LOG_ERROR, "Matrix must be a sequence of 9, 25 or 49 signed numbers\n");
                return AVERROR(EINVAL);
            }
            ctx->matrix_sizes[i]++;
        }
        if (ctx->matrix_sizes[i] == 9) {
            ctx->dims[i] = 3;
        } else if (ctx->matrix_sizes[i] == 25) {
            ctx->dims[i] = 5;
        } else if (ctx->matrix_sizes[i] == 49) {
            ctx->dims[i] = 7;
        } else {
            av_log(ctx, AV_LOG_ERROR, "Invalid matrix size: %d\n", ctx->matrix_sizes[i]);
            return AVERROR(EINVAL);
        }
    }

    for (j = 0; j < 4; j++) {
        matrix_bytes = sizeof(float) * ctx->matrix_sizes[j];
        matrix = av_malloc(matrix_bytes);
        if (!matrix) {
            av_freep(&matrix);
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < ctx->matrix_sizes[j]; i++)
            matrix[i] = input_matrix[j][i];

        buffer = clCreateBuffer(ctx->ocf.hwctx->context,
                                CL_MEM_READ_ONLY |
                                CL_MEM_COPY_HOST_PTR |
                                CL_MEM_HOST_NO_ACCESS,
                                matrix_bytes, matrix, &cle);
        if (!buffer) {
            av_log(avctx, AV_LOG_ERROR, "Failed to create matrix buffer: "
                   "%d.\n", cle);
            av_freep(&matrix);
            return AVERROR(EIO);
        }
        ctx->matrix[j] = buffer;
        av_freep(&matrix);
    }

    return 0;
}

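/*
 * Per-frame entry point: lazily initialise the OpenCL state on the first
 * frame, then run the selected kernel over every plane of the input and
 * send the filtered frame downstream.
 */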
static int convolution_opencl_filter_frame(AVFilterLink *inlink, AVFrame *input)
{
    AVFilterContext *avctx = inlink->dst;
    AVFilterLink *outlink = avctx->outputs[0];
    ConvolutionOpenCLContext *ctx = avctx->priv;
    AVFrame *output = NULL;
    cl_int cle;
    size_t global_work[2];
    cl_mem src, dst;
    int err, p;
    size_t origin[3] = {0, 0, 0};
    size_t region[3] = {0, 0, 1};

    av_log(ctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(input->format),
           input->width, input->height, input->pts);

    if (!input->hw_frames_ctx)
        return AVERROR(EINVAL);

    if (!ctx->initialised) {
        err = convolution_opencl_init(avctx);
        if (err < 0)
            goto fail;

        if (!strcmp(avctx->filter->name, "convolution_opencl")) {
            err = convolution_opencl_make_filter_params(avctx);
            if (err < 0)
                goto fail;
        } else {
            ctx->delta /= 255.0;
        }

    }

    output = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!output) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

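    /*
     * Process each plane. convolution_opencl passes that plane's matrix,
     * rdiv and bias to the kernel; the edge-detection filters either copy
     * the plane unchanged (when it is not selected in "planes") or run
     * their kernel with the shared scale and delta parameters.
     */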
    for (p = 0; p < FF_ARRAY_ELEMS(output->data); p++) {
        src = (cl_mem) input->data[p];
        dst = (cl_mem)output->data[p];

        if (!dst)
            break;

        if (!strcmp(avctx->filter->name, "convolution_opencl")) {
            CL_SET_KERNEL_ARG(ctx->kernel, 0, cl_mem,   &dst);
            CL_SET_KERNEL_ARG(ctx->kernel, 1, cl_mem,   &src);
            CL_SET_KERNEL_ARG(ctx->kernel, 2, cl_int,   &ctx->dims[p]);
            CL_SET_KERNEL_ARG(ctx->kernel, 3, cl_mem,   &ctx->matrix[p]);
            CL_SET_KERNEL_ARG(ctx->kernel, 4, cl_float, &ctx->rdivs[p]);
            CL_SET_KERNEL_ARG(ctx->kernel, 5, cl_float, &ctx->biases[p]);

            err = ff_opencl_filter_work_size_from_image(avctx, global_work, output, p, 0);
            if (err < 0)
                goto fail;

            av_log(avctx, AV_LOG_DEBUG, "Run kernel on plane %d "
                   "(%"SIZE_SPECIFIER"x%"SIZE_SPECIFIER").\n",
                   p, global_work[0], global_work[1]);

            cle = clEnqueueNDRangeKernel(ctx->command_queue, ctx->kernel, 2, NULL,
                                         global_work, NULL,
                                         0, NULL, NULL);
            CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to enqueue "
                             "kernel: %d.\n", cle);
        } else {
            if (!(ctx->planes & (1 << p))) {
                err = ff_opencl_filter_work_size_from_image(avctx, region, output, p, 0);
                if (err < 0)
                    goto fail;

                cle = clEnqueueCopyImage(ctx->command_queue, src, dst,
                                         origin, origin, region, 0, NULL, NULL);
                CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to copy plane %d: %d.\n",
                                 p, cle);
            } else {
                CL_SET_KERNEL_ARG(ctx->kernel, 0, cl_mem,   &dst);
                CL_SET_KERNEL_ARG(ctx->kernel, 1, cl_mem,   &src);
                CL_SET_KERNEL_ARG(ctx->kernel, 2, cl_float, &ctx->scale);
                CL_SET_KERNEL_ARG(ctx->kernel, 3, cl_float, &ctx->delta);

                err = ff_opencl_filter_work_size_from_image(avctx, global_work, output, p, 0);
                if (err < 0)
                    goto fail;

                av_log(avctx, AV_LOG_DEBUG, "Run kernel on plane %d "
                       "(%"SIZE_SPECIFIER"x%"SIZE_SPECIFIER").\n",
                       p, global_work[0], global_work[1]);

                cle = clEnqueueNDRangeKernel(ctx->command_queue, ctx->kernel, 2, NULL,
                                             global_work, NULL,
                                             0, NULL, NULL);
                CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to enqueue "
                                 "kernel: %d.\n", cle);
            }
        }
    }

    cle = clFinish(ctx->command_queue);
    CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to finish command queue: %d.\n", cle);

    err = av_frame_copy_props(output, input);
    if (err < 0)
        goto fail;

    av_frame_free(&input);

    av_log(ctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(output->format),
           output->width, output->height, output->pts);

    return ff_filter_frame(outlink, output);

fail:
    clFinish(ctx->command_queue);
    av_frame_free(&input);
    av_frame_free(&output);
    return err;
}

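/*
 * Release the per-plane matrix buffers, the kernel and the command queue,
 * then let the common OpenCL filter code release its own resources.
 */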
static av_cold void convolution_opencl_uninit(AVFilterContext *avctx)
{
    ConvolutionOpenCLContext *ctx = avctx->priv;
    cl_int cle;
    int i;

    for (i = 0; i < 4; i++) {
        clReleaseMemObject(ctx->matrix[i]);
    }

    if (ctx->kernel) {
        cle = clReleaseKernel(ctx->kernel);
        if (cle != CL_SUCCESS)
            av_log(avctx, AV_LOG_ERROR, "Failed to release "
                   "kernel: %d.\n", cle);
    }

    if (ctx->command_queue) {
        cle = clReleaseCommandQueue(ctx->command_queue);
        if (cle != CL_SUCCESS)
            av_log(avctx, AV_LOG_ERROR, "Failed to release "
                   "command queue: %d.\n", cle);
    }

    ff_opencl_filter_uninit(avctx);
}

static const AVFilterPad convolution_opencl_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = &convolution_opencl_filter_frame,
        .config_props = &ff_opencl_filter_config_input,
    },
    { NULL }
};

static const AVFilterPad convolution_opencl_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = &ff_opencl_filter_config_output,
    },
    { NULL }
};

#define OFFSET(x) offsetof(ConvolutionOpenCLContext, x)
#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)

#if CONFIG_CONVOLUTION_OPENCL_FILTER

static const AVOption convolution_opencl_options[] = {
    { "0m",    "set matrix for 1st plane", OFFSET(matrix_str[0]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "1m",    "set matrix for 2nd plane", OFFSET(matrix_str[1]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "2m",    "set matrix for 3rd plane", OFFSET(matrix_str[2]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "3m",    "set matrix for 4th plane", OFFSET(matrix_str[3]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "0rdiv", "set rdiv for 1st plane",   OFFSET(rdivs[0]),      AV_OPT_TYPE_FLOAT,  {.dbl=1.0}, 0.0, INT_MAX, FLAGS },
    { "1rdiv", "set rdiv for 2nd plane",   OFFSET(rdivs[1]),      AV_OPT_TYPE_FLOAT,  {.dbl=1.0}, 0.0, INT_MAX, FLAGS },
    { "2rdiv", "set rdiv for 3rd plane",   OFFSET(rdivs[2]),      AV_OPT_TYPE_FLOAT,  {.dbl=1.0}, 0.0, INT_MAX, FLAGS },
    { "3rdiv", "set rdiv for 4th plane",   OFFSET(rdivs[3]),      AV_OPT_TYPE_FLOAT,  {.dbl=1.0}, 0.0, INT_MAX, FLAGS },
    { "0bias", "set bias for 1st plane",   OFFSET(biases[0]),     AV_OPT_TYPE_FLOAT,  {.dbl=0.0}, 0.0, INT_MAX, FLAGS },
    { "1bias", "set bias for 2nd plane",   OFFSET(biases[1]),     AV_OPT_TYPE_FLOAT,  {.dbl=0.0}, 0.0, INT_MAX, FLAGS },
    { "2bias", "set bias for 3rd plane",   OFFSET(biases[2]),     AV_OPT_TYPE_FLOAT,  {.dbl=0.0}, 0.0, INT_MAX, FLAGS },
    { "3bias", "set bias for 4th plane",   OFFSET(biases[3]),     AV_OPT_TYPE_FLOAT,  {.dbl=0.0}, 0.0, INT_MAX, FLAGS },
    { NULL }
};
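/*
 * As an illustration only (the exact device setup depends on the system),
 * a typical invocation uploads frames to OpenCL, runs the filter and
 * downloads them again, along the lines of:
 *
 *   ffmpeg -init_hw_device opencl=ocl -filter_hw_device ocl -i in.mp4 \
 *          -vf "format=yuv420p,hwupload,convolution_opencl=0m='0 -1 0 -1 5 -1 0 -1 0',hwdownload,format=yuv420p" \
 *          out.mp4
 *
 * which applies a 3x3 sharpen matrix to the first plane while the other
 * planes keep the default identity matrix.
 */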

AVFILTER_DEFINE_CLASS(convolution_opencl);

AVFilter ff_vf_convolution_opencl = {
    .name           = "convolution_opencl",
    .description    = NULL_IF_CONFIG_SMALL("Apply convolution mask to input video"),
    .priv_size      = sizeof(ConvolutionOpenCLContext),
    .priv_class     = &convolution_opencl_class,
    .init           = &ff_opencl_filter_init,
    .uninit         = &convolution_opencl_uninit,
    .query_formats  = &ff_opencl_filter_query_formats,
    .inputs         = convolution_opencl_inputs,
    .outputs        = convolution_opencl_outputs,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};

#endif /* CONFIG_CONVOLUTION_OPENCL_FILTER */

#if CONFIG_SOBEL_OPENCL_FILTER

static const AVOption sobel_opencl_options[] = {
    { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT,   {.i64=15},  0,      15,    FLAGS },
    { "scale",  "set scale",            OFFSET(scale),  AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0,    65535, FLAGS },
    { "delta",  "set delta",            OFFSET(delta),  AV_OPT_TYPE_FLOAT, {.dbl=0},   -65535, 65535, FLAGS },
    { NULL }
};
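/*
 * The sobel_opencl, prewitt_opencl and roberts_opencl filters share these
 * options: "planes" is a bitmask selecting which planes are filtered
 * (unselected planes are copied through), and the result of each filtered
 * plane is scaled by "scale" and offset by "delta". As an illustrative
 * sketch, something like
 *
 *   -vf "format=yuv420p,hwupload,sobel_opencl=planes=1:scale=2,hwdownload,format=yuv420p"
 *
 * would run the Sobel operator on the luma plane only; the hardware device
 * setup follows the same pattern as for convolution_opencl above.
 */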

AVFILTER_DEFINE_CLASS(sobel_opencl);

AVFilter ff_vf_sobel_opencl = {
    .name           = "sobel_opencl",
    .description    = NULL_IF_CONFIG_SMALL("Apply sobel operator"),
    .priv_size      = sizeof(ConvolutionOpenCLContext),
    .priv_class     = &sobel_opencl_class,
    .init           = &ff_opencl_filter_init,
    .uninit         = &convolution_opencl_uninit,
    .query_formats  = &ff_opencl_filter_query_formats,
    .inputs         = convolution_opencl_inputs,
    .outputs        = convolution_opencl_outputs,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};

#endif /* CONFIG_SOBEL_OPENCL_FILTER */

#if CONFIG_PREWITT_OPENCL_FILTER

static const AVOption prewitt_opencl_options[] = {
    { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT,   {.i64=15},  0,      15,    FLAGS },
    { "scale",  "set scale",            OFFSET(scale),  AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0,    65535, FLAGS },
    { "delta",  "set delta",            OFFSET(delta),  AV_OPT_TYPE_FLOAT, {.dbl=0},   -65535, 65535, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(prewitt_opencl);

AVFilter ff_vf_prewitt_opencl = {
    .name           = "prewitt_opencl",
    .description    = NULL_IF_CONFIG_SMALL("Apply prewitt operator"),
    .priv_size      = sizeof(ConvolutionOpenCLContext),
    .priv_class     = &prewitt_opencl_class,
    .init           = &ff_opencl_filter_init,
    .uninit         = &convolution_opencl_uninit,
    .query_formats  = &ff_opencl_filter_query_formats,
    .inputs         = convolution_opencl_inputs,
    .outputs        = convolution_opencl_outputs,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};

#endif /* CONFIG_PREWITT_OPENCL_FILTER */

#if CONFIG_ROBERTS_OPENCL_FILTER

static const AVOption roberts_opencl_options[] = {
    { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT,   {.i64=15},  0,      15,    FLAGS },
    { "scale",  "set scale",            OFFSET(scale),  AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0,    65535, FLAGS },
    { "delta",  "set delta",            OFFSET(delta),  AV_OPT_TYPE_FLOAT, {.dbl=0},   -65535, 65535, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(roberts_opencl);

AVFilter ff_vf_roberts_opencl = {
    .name           = "roberts_opencl",
    .description    = NULL_IF_CONFIG_SMALL("Apply roberts operator"),
    .priv_size      = sizeof(ConvolutionOpenCLContext),
    .priv_class     = &roberts_opencl_class,
    .init           = &ff_opencl_filter_init,
    .uninit         = &convolution_opencl_uninit,
    .query_formats  = &ff_opencl_filter_query_formats,
    .inputs         = convolution_opencl_inputs,
    .outputs        = convolution_opencl_outputs,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};

#endif /* CONFIG_ROBERTS_OPENCL_FILTER */