FFmpeg
vf_tonemap_opencl.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <float.h>

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "internal.h"
#include "opencl.h"
#include "opencl_source.h"
#include "video.h"
#include "colorspace.h"

// TODO:
// - separate peak detection from the tone-mapping kernel to solve
//   the one-frame-delay issue.
// - more format support

#define DETECTION_FRAMES 63

enum TonemapAlgorithm {
    TONEMAP_NONE,
    TONEMAP_LINEAR,
    TONEMAP_GAMMA,
    TONEMAP_CLIP,
    TONEMAP_REINHARD,
    TONEMAP_HABLE,
    TONEMAP_MOBIUS,
    TONEMAP_MAX,
};

typedef struct TonemapOpenCLContext {
    OpenCLFilterContext   ocf;

    enum AVColorSpace     colorspace, colorspace_in, colorspace_out;
    enum AVColorTransferCharacteristic trc, trc_in, trc_out;
    enum AVColorPrimaries primaries, primaries_in, primaries_out;
    enum AVColorRange     range, range_in, range_out;
    enum AVChromaLocation chroma_loc;

    enum TonemapAlgorithm tonemap;
    enum AVPixelFormat    format;
    double                peak;
    double                param;
    double                desat_param;
    double                target_peak;
    double                scene_threshold;
    int                   initialised;
    cl_kernel             kernel;
    cl_command_queue      command_queue;
    cl_mem                util_mem;
} TonemapOpenCLContext;

static const char *linearize_funcs[AVCOL_TRC_NB] = {
    [AVCOL_TRC_SMPTE2084]    = "eotf_st2084",
    [AVCOL_TRC_ARIB_STD_B67] = "inverse_oetf_hlg",
};

static const char *delinearize_funcs[AVCOL_TRC_NB] = {
    [AVCOL_TRC_BT709]     = "inverse_eotf_bt1886",
    [AVCOL_TRC_BT2020_10] = "inverse_eotf_bt1886",
};

static const struct PrimaryCoefficients primaries_table[AVCOL_PRI_NB] = {
    [AVCOL_PRI_BT709]  = { 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 },
    [AVCOL_PRI_BT2020] = { 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 },
};

static const struct WhitepointCoefficients whitepoint_table[AVCOL_PRI_NB] = {
    [AVCOL_PRI_BT709]  = { 0.3127, 0.3290 },
    [AVCOL_PRI_BT2020] = { 0.3127, 0.3290 },
};

static const char *tonemap_func[TONEMAP_MAX] = {
    [TONEMAP_NONE]     = "direct",
    [TONEMAP_LINEAR]   = "linear",
    [TONEMAP_GAMMA]    = "gamma",
    [TONEMAP_CLIP]     = "clip",
    [TONEMAP_REINHARD] = "reinhard",
    [TONEMAP_HABLE]    = "hable",
    [TONEMAP_MOBIUS]   = "mobius",
};

static void get_rgb2rgb_matrix(enum AVColorPrimaries in, enum AVColorPrimaries out,
                               double rgb2rgb[3][3]) {
    double rgb2xyz[3][3], xyz2rgb[3][3];

    ff_fill_rgb2xyz_table(&primaries_table[out], &whitepoint_table[out], rgb2xyz);
    ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
    ff_fill_rgb2xyz_table(&primaries_table[in], &whitepoint_table[in], rgb2xyz);
    ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
}
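
/*
 * Editorial note (not in the original source): the conversion above goes
 * through CIE XYZ, i.e. conceptually rgb2rgb = xyz2rgb(primaries_out) *
 * rgb2xyz(primaries_in), so a colour expressed in the input primaries is
 * first taken to XYZ and then into the output primaries; the exact operand
 * order relies on how ff_matrix_mul_3x3() composes its arguments. For the
 * common BT.2020 -> BT.709 case this yields the familiar wide-to-narrow
 * gamut matrix with negative off-diagonal coefficients.
 */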

#define OPENCL_SOURCE_NB 3
// Average light level for SDR signals. This is equal to a signal level of 0.5
// under a typical presentation gamma of about 2.0.
static const float sdr_avg = 0.25f;
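/*
 * Editorial note (not in the original source): with a presentation gamma of
 * about 2.0, a signal level of 0.5 corresponds to a linear light level of
 * 0.5^2.0 = 0.25, which is where the constant above comes from.
 */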

static int tonemap_opencl_init(AVFilterContext *avctx)
{
    TonemapOpenCLContext *ctx = avctx->priv;
    int rgb2rgb_passthrough = 1;
    double rgb2rgb[3][3], rgb2yuv[3][3], yuv2rgb[3][3];
    const struct LumaCoefficients *luma_src, *luma_dst;
    cl_int cle;
    int err;
    AVBPrint header;
    const char *opencl_sources[OPENCL_SOURCE_NB];

    av_bprint_init(&header, 1024, AV_BPRINT_SIZE_AUTOMATIC);

    switch(ctx->tonemap) {
    case TONEMAP_GAMMA:
        if (isnan(ctx->param))
            ctx->param = 1.8f;
        break;
    case TONEMAP_REINHARD:
        if (!isnan(ctx->param))
            ctx->param = (1.0f - ctx->param) / ctx->param;
        break;
    case TONEMAP_MOBIUS:
        if (isnan(ctx->param))
            ctx->param = 0.3f;
        break;
    }

    if (isnan(ctx->param))
        ctx->param = 1.0f;

    // SDR peak is 1.0f
    ctx->target_peak = 1.0f;
    av_log(ctx, AV_LOG_DEBUG, "tone mapping transfer from %s to %s\n",
           av_color_transfer_name(ctx->trc_in),
           av_color_transfer_name(ctx->trc_out));
    av_log(ctx, AV_LOG_DEBUG, "mapping colorspace from %s to %s\n",
           av_color_space_name(ctx->colorspace_in),
           av_color_space_name(ctx->colorspace_out));
    av_log(ctx, AV_LOG_DEBUG, "mapping primaries from %s to %s\n",
           av_color_primaries_name(ctx->primaries_in),
           av_color_primaries_name(ctx->primaries_out));
    av_log(ctx, AV_LOG_DEBUG, "mapping range from %s to %s\n",
           av_color_range_name(ctx->range_in),
           av_color_range_name(ctx->range_out));
    // Only a limited set of parameter values is implemented so far;
    // remove these checks once more functionality is added.
    av_assert0(ctx->trc_out == AVCOL_TRC_BT709 ||
               ctx->trc_out == AVCOL_TRC_BT2020_10);
    av_assert0(ctx->trc_in == AVCOL_TRC_SMPTE2084 ||
               ctx->trc_in == AVCOL_TRC_ARIB_STD_B67);
    av_assert0(ctx->colorspace_in == AVCOL_SPC_BT2020_NCL ||
               ctx->colorspace_in == AVCOL_SPC_BT709);
    av_assert0(ctx->primaries_in == AVCOL_PRI_BT2020 ||
               ctx->primaries_in == AVCOL_PRI_BT709);

    av_bprintf(&header, "__constant const float tone_param = %.4ff;\n",
               ctx->param);
    av_bprintf(&header, "__constant const float desat_param = %.4ff;\n",
               ctx->desat_param);
    av_bprintf(&header, "__constant const float target_peak = %.4ff;\n",
               ctx->target_peak);
    av_bprintf(&header, "__constant const float sdr_avg = %.4ff;\n", sdr_avg);
    av_bprintf(&header, "__constant const float scene_threshold = %.4ff;\n",
               ctx->scene_threshold);
    av_bprintf(&header, "#define TONE_FUNC %s\n", tonemap_func[ctx->tonemap]);
    av_bprintf(&header, "#define DETECTION_FRAMES %d\n", DETECTION_FRAMES);

    if (ctx->primaries_out != ctx->primaries_in) {
        get_rgb2rgb_matrix(ctx->primaries_in, ctx->primaries_out, rgb2rgb);
        rgb2rgb_passthrough = 0;
    }
    if (ctx->range_in == AVCOL_RANGE_JPEG)
        av_bprintf(&header, "#define FULL_RANGE_IN\n");

    if (ctx->range_out == AVCOL_RANGE_JPEG)
        av_bprintf(&header, "#define FULL_RANGE_OUT\n");

    av_bprintf(&header, "#define chroma_loc %d\n", (int)ctx->chroma_loc);

    if (rgb2rgb_passthrough)
        av_bprintf(&header, "#define RGB2RGB_PASSTHROUGH\n");
    else
        ff_opencl_print_const_matrix_3x3(&header, "rgb2rgb", rgb2rgb);


    luma_src = ff_get_luma_coefficients(ctx->colorspace_in);
    if (!luma_src) {
        err = AVERROR(EINVAL);
        av_log(avctx, AV_LOG_ERROR, "unsupported input colorspace %d (%s)\n",
               ctx->colorspace_in, av_color_space_name(ctx->colorspace_in));
        goto fail;
    }

    luma_dst = ff_get_luma_coefficients(ctx->colorspace_out);
    if (!luma_dst) {
        err = AVERROR(EINVAL);
        av_log(avctx, AV_LOG_ERROR, "unsupported output colorspace %d (%s)\n",
               ctx->colorspace_out, av_color_space_name(ctx->colorspace_out));
        goto fail;
    }

    ff_fill_rgb2yuv_table(luma_dst, rgb2yuv);
    ff_opencl_print_const_matrix_3x3(&header, "yuv_matrix", rgb2yuv);

    ff_fill_rgb2yuv_table(luma_src, rgb2yuv);
    ff_matrix_invert_3x3(rgb2yuv, yuv2rgb);
    ff_opencl_print_const_matrix_3x3(&header, "rgb_matrix", yuv2rgb);

    av_bprintf(&header, "constant float3 luma_src = {%.4ff, %.4ff, %.4ff};\n",
               luma_src->cr, luma_src->cg, luma_src->cb);
    av_bprintf(&header, "constant float3 luma_dst = {%.4ff, %.4ff, %.4ff};\n",
               luma_dst->cr, luma_dst->cg, luma_dst->cb);

    av_bprintf(&header, "#define linearize %s\n", linearize_funcs[ctx->trc_in]);
    av_bprintf(&header, "#define delinearize %s\n",
               delinearize_funcs[ctx->trc_out]);

    if (ctx->trc_in == AVCOL_TRC_ARIB_STD_B67)
        av_bprintf(&header, "#define ootf_impl ootf_hlg\n");

    if (ctx->trc_out == AVCOL_TRC_ARIB_STD_B67)
        av_bprintf(&header, "#define inverse_ootf_impl inverse_ootf_hlg\n");

    av_log(avctx, AV_LOG_DEBUG, "Generated OpenCL header:\n%s\n", header.str);
    opencl_sources[0] = header.str;
    opencl_sources[1] = ff_opencl_source_tonemap;
    opencl_sources[2] = ff_opencl_source_colorspace_common;
    err = ff_opencl_filter_load_program(avctx, opencl_sources, OPENCL_SOURCE_NB);

    av_bprint_finalize(&header, NULL);
    if (err < 0)
        goto fail;

    ctx->command_queue = clCreateCommandQueue(ctx->ocf.hwctx->context,
                                              ctx->ocf.hwctx->device_id,
                                              0, &cle);
    CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to create OpenCL "
                     "command queue %d.\n", cle);

    ctx->kernel = clCreateKernel(ctx->ocf.program, "tonemap", &cle);
    CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to create kernel %d.\n", cle);

    ctx->util_mem =
        clCreateBuffer(ctx->ocf.hwctx->context, 0,
                       (2 * DETECTION_FRAMES + 7) * sizeof(unsigned),
                       NULL, &cle);
    CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to create util buffer: %d.\n", cle);

    ctx->initialised = 1;
    return 0;

fail:
    av_bprint_finalize(&header, NULL);
    if (ctx->util_mem)
        clReleaseMemObject(ctx->util_mem);
    if (ctx->command_queue)
        clReleaseCommandQueue(ctx->command_queue);
    if (ctx->kernel)
        clReleaseKernel(ctx->kernel);
    return err;
}

static int tonemap_opencl_config_output(AVFilterLink *outlink)
{
    AVFilterContext *avctx = outlink->src;
    TonemapOpenCLContext *s = avctx->priv;
    int ret;
    if (s->format == AV_PIX_FMT_NONE)
        av_log(avctx, AV_LOG_WARNING, "output format not set, using the default format nv12\n");
    else {
        if (s->format != AV_PIX_FMT_P010 &&
            s->format != AV_PIX_FMT_NV12) {
            av_log(avctx, AV_LOG_ERROR, "unsupported output format, "
                   "only p010/nv12 are supported now\n");
            return AVERROR(EINVAL);
        }
    }

    s->ocf.output_format = s->format == AV_PIX_FMT_NONE ? AV_PIX_FMT_NV12 : s->format;
    ret = ff_opencl_filter_config_output(outlink);
    if (ret < 0)
        return ret;

    return 0;
}

static int launch_kernel(AVFilterContext *avctx, cl_kernel kernel,
                         AVFrame *output, AVFrame *input, float peak) {
    TonemapOpenCLContext *ctx = avctx->priv;
    int err = AVERROR(ENOSYS);
    size_t global_work[2];
    size_t local_work[2];
    cl_int cle;

    CL_SET_KERNEL_ARG(kernel, 0, cl_mem, &output->data[0]);
    CL_SET_KERNEL_ARG(kernel, 1, cl_mem, &input->data[0]);
    CL_SET_KERNEL_ARG(kernel, 2, cl_mem, &output->data[1]);
    CL_SET_KERNEL_ARG(kernel, 3, cl_mem, &input->data[1]);
    CL_SET_KERNEL_ARG(kernel, 4, cl_mem, &ctx->util_mem);
    CL_SET_KERNEL_ARG(kernel, 5, cl_float, &peak);

    local_work[0]  = 16;
    local_work[1]  = 16;
    // Note that the work size is based on the UV plane, as each work-item
    // processes one 2x2 quad of pixels.
    err = ff_opencl_filter_work_size_from_image(avctx, global_work, output,
                                                1, 16);
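    /*
     * Editorial note (not in the original source): plane 1 is the interleaved
     * UV plane, which for NV12/P010 is half the luma resolution in both
     * dimensions, aligned here to the 16x16 local size. Assuming the helper
     * simply rounds each dimension up, a 1920x1080 output yields a 960x540
     * UV plane and a 960x544 global work size, i.e. one work-item per
     * 2x2 luma quad.
     */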
    if (err < 0)
        return err;

    cle = clEnqueueNDRangeKernel(ctx->command_queue, kernel, 2, NULL,
                                 global_work, local_work,
                                 0, NULL, NULL);
    CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to enqueue kernel: %d.\n", cle);
    return 0;
fail:
    return err;
}

static int tonemap_opencl_filter_frame(AVFilterLink *inlink, AVFrame *input)
{
    AVFilterContext *avctx = inlink->dst;
    AVFilterLink *outlink = avctx->outputs[0];
    TonemapOpenCLContext *ctx = avctx->priv;
    AVFrame *output = NULL;
    cl_int cle;
    int err;
    double peak = ctx->peak;

    AVHWFramesContext *input_frames_ctx =
        (AVHWFramesContext*)input->hw_frames_ctx->data;

    av_log(ctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(input->format),
           input->width, input->height, input->pts);

    if (!input->hw_frames_ctx)
        return AVERROR(EINVAL);

    output = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!output) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    err = av_frame_copy_props(output, input);
    if (err < 0)
        goto fail;

    if (!peak)
        peak = ff_determine_signal_peak(input);

    if (ctx->trc != -1)
        output->color_trc = ctx->trc;
    if (ctx->primaries != -1)
        output->color_primaries = ctx->primaries;
    if (ctx->colorspace != -1)
        output->colorspace = ctx->colorspace;
    if (ctx->range != -1)
        output->color_range = ctx->range;

    ctx->trc_in = input->color_trc;
    ctx->trc_out = output->color_trc;
    ctx->colorspace_in = input->colorspace;
    ctx->colorspace_out = output->colorspace;
    ctx->primaries_in = input->color_primaries;
    ctx->primaries_out = output->color_primaries;
    ctx->range_in = input->color_range;
    ctx->range_out = output->color_range;
    ctx->chroma_loc = output->chroma_location;

    if (!ctx->initialised) {
        if (!(input->color_trc == AVCOL_TRC_SMPTE2084 ||
              input->color_trc == AVCOL_TRC_ARIB_STD_B67)) {
            av_log(ctx, AV_LOG_ERROR, "unsupported transfer function characteristic.\n");
            err = AVERROR(ENOSYS);
            goto fail;
        }

        if (input_frames_ctx->sw_format != AV_PIX_FMT_P010) {
            av_log(ctx, AV_LOG_ERROR, "unsupported format in tonemap_opencl.\n");
            err = AVERROR(ENOSYS);
            goto fail;
        }

        err = tonemap_opencl_init(avctx);
        if (err < 0)
            goto fail;
    }

    switch(input_frames_ctx->sw_format) {
    case AV_PIX_FMT_P010:
        err = launch_kernel(avctx, ctx->kernel, output, input, peak);
        if (err < 0) goto fail;
        break;
    default:
        err = AVERROR(ENOSYS);
        goto fail;
    }

    cle = clFinish(ctx->command_queue);
    CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to finish command queue: %d.\n", cle);

    av_frame_free(&input);

    ff_update_hdr_metadata(output, ctx->target_peak);

    av_log(ctx, AV_LOG_DEBUG, "Tone-mapping output: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(output->format),
           output->width, output->height, output->pts);
#ifndef NDEBUG
    {
        uint32_t *ptr, *max_total_p, *avg_total_p, *frame_number_p;
        float peak_detected, avg_detected;
        unsigned map_size = (2 * DETECTION_FRAMES + 7) * sizeof(unsigned);
        ptr = (void *)clEnqueueMapBuffer(ctx->command_queue, ctx->util_mem,
                                         CL_TRUE, CL_MAP_READ, 0, map_size,
                                         0, NULL, NULL, &cle);
        // For the layout of the util buffer, refer to tonemap.cl.
        if (ptr) {
            max_total_p = ptr + 2 * (DETECTION_FRAMES + 1) + 1;
            avg_total_p = max_total_p + 1;
            frame_number_p = avg_total_p + 2;
            peak_detected = (float)*max_total_p / (REFERENCE_WHITE * (*frame_number_p));
            avg_detected  = (float)*avg_total_p / (REFERENCE_WHITE * (*frame_number_p));
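            /*
             * Editorial note (not in the original source): REFERENCE_WHITE is
             * the 100 nit SDR reference level, so the accumulated totals are
             * converted into multiples of reference white per frame. For
             * example, a running max total of 48000 over 60 frames gives
             * 48000 / (100 * 60) = 8.0, i.e. a detected peak of roughly
             * 800 nits.
             */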
            av_log(ctx, AV_LOG_DEBUG, "peak %f, avg %f will be used for next frame\n",
                   peak_detected, avg_detected);
            clEnqueueUnmapMemObject(ctx->command_queue, ctx->util_mem, ptr, 0,
                                    NULL, NULL);
        }
    }
#endif

    return ff_filter_frame(outlink, output);

fail:
    clFinish(ctx->command_queue);
    av_frame_free(&input);
    av_frame_free(&output);
    return err;
}

static av_cold void tonemap_opencl_uninit(AVFilterContext *avctx)
{
    TonemapOpenCLContext *ctx = avctx->priv;
    cl_int cle;

    if (ctx->util_mem)
        clReleaseMemObject(ctx->util_mem);
    if (ctx->kernel) {
        cle = clReleaseKernel(ctx->kernel);
        if (cle != CL_SUCCESS)
            av_log(avctx, AV_LOG_ERROR, "Failed to release "
                   "kernel: %d.\n", cle);
    }

    if (ctx->command_queue) {
        cle = clReleaseCommandQueue(ctx->command_queue);
        if (cle != CL_SUCCESS)
            av_log(avctx, AV_LOG_ERROR, "Failed to release "
                   "command queue: %d.\n", cle);
    }

    ff_opencl_filter_uninit(avctx);
}

#define OFFSET(x) offsetof(TonemapOpenCLContext, x)
#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption tonemap_opencl_options[] = {
    { "tonemap",   "tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = TONEMAP_NONE}, TONEMAP_NONE, TONEMAP_MAX - 1, FLAGS, "tonemap" },
    { "none",      0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_NONE},     0, 0, FLAGS, "tonemap" },
    { "linear",    0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_LINEAR},   0, 0, FLAGS, "tonemap" },
    { "gamma",     0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_GAMMA},    0, 0, FLAGS, "tonemap" },
    { "clip",      0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_CLIP},     0, 0, FLAGS, "tonemap" },
    { "reinhard",  0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_REINHARD}, 0, 0, FLAGS, "tonemap" },
    { "hable",     0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_HABLE},    0, 0, FLAGS, "tonemap" },
    { "mobius",    0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MOBIUS},   0, 0, FLAGS, "tonemap" },
    { "transfer",  "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, "transfer" },
    { "t",         "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, "transfer" },
    { "bt709",     0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709},     0, 0, FLAGS, "transfer" },
    { "bt2020",    0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10}, 0, 0, FLAGS, "transfer" },
    { "matrix",    "set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "matrix" },
    { "m",         "set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "matrix" },
    { "bt709",     0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT709},      0, 0, FLAGS, "matrix" },
    { "bt2020",    0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL}, 0, 0, FLAGS, "matrix" },
    { "primaries", "set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "primaries" },
    { "p",         "set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "primaries" },
    { "bt709",     0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT709},  0, 0, FLAGS, "primaries" },
    { "bt2020",    0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT2020}, 0, 0, FLAGS, "primaries" },
    { "range",     "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "range" },
    { "r",         "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "range" },
    { "tv",        0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "pc",        0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "limited",   0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "full",      0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "format",    "output pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_NONE}, AV_PIX_FMT_NONE, INT_MAX, FLAGS, "fmt" },
    { "peak",      "signal peak override", OFFSET(peak), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, DBL_MAX, FLAGS },
    { "param",     "tonemap parameter", OFFSET(param), AV_OPT_TYPE_DOUBLE, {.dbl = NAN}, DBL_MIN, DBL_MAX, FLAGS },
    { "desat",     "desaturation parameter", OFFSET(desat_param), AV_OPT_TYPE_DOUBLE, {.dbl = 0.5}, 0, DBL_MAX, FLAGS },
    { "threshold", "scene detection threshold", OFFSET(scene_threshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.2}, 0, DBL_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(tonemap_opencl);

static const AVFilterPad tonemap_opencl_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = &tonemap_opencl_filter_frame,
        .config_props = &ff_opencl_filter_config_input,
    },
    { NULL }
};

static const AVFilterPad tonemap_opencl_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = &tonemap_opencl_config_output,
    },
    { NULL }
};

AVFilter ff_vf_tonemap_opencl = {
    .name           = "tonemap_opencl",
    .description    = NULL_IF_CONFIG_SMALL("perform HDR to SDR conversion with tonemapping"),
    .priv_size      = sizeof(TonemapOpenCLContext),
    .priv_class     = &tonemap_opencl_class,
    .init           = &ff_opencl_filter_init,
    .uninit         = &tonemap_opencl_uninit,
    .query_formats  = &ff_opencl_filter_query_formats,
    .inputs         = tonemap_opencl_inputs,
    .outputs        = tonemap_opencl_outputs,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
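
/*
 * Usage sketch (added for illustration; see the FFmpeg filter documentation
 * for the authoritative description). Frames must reach the filter as P010
 * OpenCL frames, and the output (nv12 or p010) is typically downloaded again
 * for software encoding. A typical HDR10 -> SDR invocation might look like:
 *
 *   ffmpeg -init_hw_device opencl=ocl -filter_hw_device ocl -i hdr10_input.mkv \
 *       -vf "format=p010,hwupload,tonemap_opencl=tonemap=hable:desat=0.5:format=nv12,hwdownload,format=nv12" \
 *       output.mkv
 *
 * File names and parameter choices here are illustrative only.
 */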