FFmpeg
vf_tonemap.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Vittorio Giovara <vittorio.giovara@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * tonemap algorithms
24  */
25 
26 #include <float.h>
27 #include <stdio.h>
28 
29 #include "libavutil/csp.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/intreadwrite.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/pixdesc.h"
35 
36 #include "avfilter.h"
37 #include "colorspace.h"
38 #include "internal.h"
39 #include "video.h"
40 
/* Supported tonemapping algorithms. TONEMAP_MAX is a count sentinel used as
 * the AVOption range bound, not a selectable mode. */
enum TonemapAlgorithm {
    TONEMAP_NONE,
    TONEMAP_LINEAR,
    TONEMAP_GAMMA,
    TONEMAP_CLIP,
    TONEMAP_REINHARD,
    TONEMAP_HABLE,
    TONEMAP_MOBIUS,
    TONEMAP_MAX,
};
51 
52 typedef struct TonemapContext {
53  const AVClass *class;
54 
56  double param;
57  double desat;
58  double peak;
59 
62 
64 {
65  TonemapContext *s = ctx->priv;
66 
67  switch(s->tonemap) {
68  case TONEMAP_GAMMA:
69  if (isnan(s->param))
70  s->param = 1.8f;
71  break;
72  case TONEMAP_REINHARD:
73  if (!isnan(s->param))
74  s->param = (1.0f - s->param) / s->param;
75  break;
76  case TONEMAP_MOBIUS:
77  if (isnan(s->param))
78  s->param = 0.3f;
79  break;
80  }
81 
82  if (isnan(s->param))
83  s->param = 1.0f;
84 
85  return 0;
86 }
87 
/* Hable ("Uncharted 2") filmic tonemapping curve, evaluated at a single
 * intensity. Constants are the canonical shoulder/toe parameters. */
static float hable(float in)
{
    const float A = 0.15f, B = 0.50f, C = 0.10f, D = 0.20f, E = 0.02f, F = 0.30f;
    float num = in * (in * A + B * C) + D * E;
    float den = in * (in * A + B) + D * F;

    return num / den - E / F;
}
93 
/* Mobius tonemapping: linear below the knee j, then a Mobius transform that
 * compresses [j, peak] into [j, 1] while staying continuous at the knee. */
static float mobius(float in, float j, double peak)
{
    float offset, scale;

    /* linear section: values up to the knee pass through untouched */
    if (in <= j)
        return in;

    offset = -j * j * (peak - 1.0f) / (j * j - 2.0f * j + peak);
    scale  = (j * j - 2.0f * j * peak + peak) /
             ((peak - 1.0f) > (1e-6) ? (peak - 1.0f) : (1e-6));

    return (scale * scale + 2.0f * scale * j + j * j) / (scale - offset) *
           (in + offset) / (in + scale);
}
106 
107 #define MIX(x,y,a) (x) * (1 - (a)) + (y) * (a)
108 static void tonemap(TonemapContext *s, AVFrame *out, const AVFrame *in,
109  const AVPixFmtDescriptor *desc, int x, int y, double peak)
110 {
111  int map[3] = { desc->comp[0].plane, desc->comp[1].plane, desc->comp[2].plane };
112  const float *r_in = (const float *)(in->data[map[0]] + x * desc->comp[map[0]].step + y * in->linesize[map[0]]);
113  const float *g_in = (const float *)(in->data[map[1]] + x * desc->comp[map[1]].step + y * in->linesize[map[1]]);
114  const float *b_in = (const float *)(in->data[map[2]] + x * desc->comp[map[2]].step + y * in->linesize[map[2]]);
115  float *r_out = (float *)(out->data[map[0]] + x * desc->comp[map[0]].step + y * out->linesize[map[0]]);
116  float *g_out = (float *)(out->data[map[1]] + x * desc->comp[map[1]].step + y * out->linesize[map[1]]);
117  float *b_out = (float *)(out->data[map[2]] + x * desc->comp[map[2]].step + y * out->linesize[map[2]]);
118  float sig, sig_orig;
119 
120  /* load values */
121  *r_out = *r_in;
122  *g_out = *g_in;
123  *b_out = *b_in;
124 
125  /* desaturate to prevent unnatural colors */
126  if (s->desat > 0) {
127  float luma = av_q2d(s->coeffs->cr) * *r_in + av_q2d(s->coeffs->cg) * *g_in + av_q2d(s->coeffs->cb) * *b_in;
128  float overbright = FFMAX(luma - s->desat, 1e-6) / FFMAX(luma, 1e-6);
129  *r_out = MIX(*r_in, luma, overbright);
130  *g_out = MIX(*g_in, luma, overbright);
131  *b_out = MIX(*b_in, luma, overbright);
132  }
133 
134  /* pick the brightest component, reducing the value range as necessary
135  * to keep the entire signal in range and preventing discoloration due to
136  * out-of-bounds clipping */
137  sig = FFMAX(FFMAX3(*r_out, *g_out, *b_out), 1e-6);
138  sig_orig = sig;
139 
140  switch(s->tonemap) {
141  default:
142  case TONEMAP_NONE:
143  // do nothing
144  break;
145  case TONEMAP_LINEAR:
146  sig = sig * s->param / peak;
147  break;
148  case TONEMAP_GAMMA:
149  sig = sig > 0.05f ? pow(sig / peak, 1.0f / s->param)
150  : sig * pow(0.05f / peak, 1.0f / s->param) / 0.05f;
151  break;
152  case TONEMAP_CLIP:
153  sig = av_clipf(sig * s->param, 0, 1.0f);
154  break;
155  case TONEMAP_HABLE:
156  sig = hable(sig) / hable(peak);
157  break;
158  case TONEMAP_REINHARD:
159  sig = sig / (sig + s->param) * (peak + s->param) / peak;
160  break;
161  case TONEMAP_MOBIUS:
162  sig = mobius(sig, s->param, peak);
163  break;
164  }
165 
166  /* apply the computed scale factor to the color,
167  * linearly to prevent discoloration */
168  *r_out *= sig / sig_orig;
169  *g_out *= sig / sig_orig;
170  *b_out *= sig / sig_orig;
171 }
172 
173 typedef struct ThreadData {
174  AVFrame *in, *out;
176  double peak;
177 } ThreadData;
178 
179 static int tonemap_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
180 {
181  TonemapContext *s = ctx->priv;
182  ThreadData *td = arg;
183  AVFrame *in = td->in;
184  AVFrame *out = td->out;
185  const AVPixFmtDescriptor *desc = td->desc;
186  const int slice_start = (in->height * jobnr) / nb_jobs;
187  const int slice_end = (in->height * (jobnr+1)) / nb_jobs;
188  double peak = td->peak;
189 
190  for (int y = slice_start; y < slice_end; y++)
191  for (int x = 0; x < out->width; x++)
192  tonemap(s, out, in, desc, x, y, peak);
193 
194  return 0;
195 }
196 
198 {
199  AVFilterContext *ctx = link->dst;
200  TonemapContext *s = ctx->priv;
201  AVFilterLink *outlink = ctx->outputs[0];
202  ThreadData td;
203  AVFrame *out;
205  const AVPixFmtDescriptor *odesc = av_pix_fmt_desc_get(outlink->format);
206  int ret, x, y;
207  double peak = s->peak;
208 
209  if (!desc || !odesc) {
210  av_frame_free(&in);
211  return AVERROR_BUG;
212  }
213 
214  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
215  if (!out) {
216  av_frame_free(&in);
217  return AVERROR(ENOMEM);
218  }
219 
220  ret = av_frame_copy_props(out, in);
221  if (ret < 0) {
222  av_frame_free(&in);
223  av_frame_free(&out);
224  return ret;
225  }
226 
227  /* input and output transfer will be linear */
228  if (in->color_trc == AVCOL_TRC_UNSPECIFIED) {
229  av_log(s, AV_LOG_WARNING, "Untagged transfer, assuming linear light\n");
230  out->color_trc = AVCOL_TRC_LINEAR;
231  } else if (in->color_trc != AVCOL_TRC_LINEAR)
232  av_log(s, AV_LOG_WARNING, "Tonemapping works on linear light only\n");
233 
234  /* read peak from side data if not passed in */
235  if (!peak) {
236  peak = ff_determine_signal_peak(in);
237  av_log(s, AV_LOG_DEBUG, "Computed signal peak: %f\n", peak);
238  }
239 
240  /* load original color space even if pixel format is RGB to compute overbrights */
242  if (s->desat > 0 && (in->colorspace == AVCOL_SPC_UNSPECIFIED || !s->coeffs)) {
244  av_log(s, AV_LOG_WARNING, "Missing color space information, ");
245  else if (!s->coeffs)
246  av_log(s, AV_LOG_WARNING, "Unsupported color space '%s', ",
248  av_log(s, AV_LOG_WARNING, "desaturation is disabled\n");
249  s->desat = 0;
250  }
251 
252  /* do the tone map */
253  td.out = out;
254  td.in = in;
255  td.desc = desc;
256  td.peak = peak;
259 
260  /* copy/generate alpha if needed */
261  if (desc->flags & AV_PIX_FMT_FLAG_ALPHA && odesc->flags & AV_PIX_FMT_FLAG_ALPHA) {
262  av_image_copy_plane(out->data[3], out->linesize[3],
263  in->data[3], in->linesize[3],
264  out->linesize[3], outlink->h);
265  } else if (odesc->flags & AV_PIX_FMT_FLAG_ALPHA) {
266  for (y = 0; y < out->height; y++) {
267  for (x = 0; x < out->width; x++) {
268  AV_WN32(out->data[3] + x * odesc->comp[3].step + y * out->linesize[3],
269  av_float2int(1.0f));
270  }
271  }
272  }
273 
274  av_frame_free(&in);
275 
277 
278  return ff_filter_frame(outlink, out);
279 }
280 
281 #define OFFSET(x) offsetof(TonemapContext, x)
282 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
283 static const AVOption tonemap_options[] = {
284  { "tonemap", "tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = TONEMAP_NONE}, TONEMAP_NONE, TONEMAP_MAX - 1, FLAGS, .unit = "tonemap" },
285  { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_NONE}, 0, 0, FLAGS, .unit = "tonemap" },
286  { "linear", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_LINEAR}, 0, 0, FLAGS, .unit = "tonemap" },
287  { "gamma", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_GAMMA}, 0, 0, FLAGS, .unit = "tonemap" },
288  { "clip", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_CLIP}, 0, 0, FLAGS, .unit = "tonemap" },
289  { "reinhard", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_REINHARD}, 0, 0, FLAGS, .unit = "tonemap" },
290  { "hable", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_HABLE}, 0, 0, FLAGS, .unit = "tonemap" },
291  { "mobius", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MOBIUS}, 0, 0, FLAGS, .unit = "tonemap" },
292  { "param", "tonemap parameter", OFFSET(param), AV_OPT_TYPE_DOUBLE, {.dbl = NAN}, DBL_MIN, DBL_MAX, FLAGS },
293  { "desat", "desaturation strength", OFFSET(desat), AV_OPT_TYPE_DOUBLE, {.dbl = 2}, 0, DBL_MAX, FLAGS },
294  { "peak", "signal peak override", OFFSET(peak), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, DBL_MAX, FLAGS },
295  { NULL }
296 };
297 
299 
/* Single video input pad; every incoming frame goes through filter_frame(). */
static const AVFilterPad tonemap_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
};
307 
309  .name = "tonemap",
310  .description = NULL_IF_CONFIG_SMALL("Conversion to/from different dynamic ranges."),
311  .init = init,
312  .priv_size = sizeof(TonemapContext),
313  .priv_class = &tonemap_class,
318 };
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:112
AVFrame::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:657
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
td
#define td
Definition: regdef.h:70
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
out
FILE * out
Definition: movenc.c:55
TONEMAP_GAMMA
@ TONEMAP_GAMMA
Definition: vf_tonemap.c:44
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1015
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
AVCOL_TRC_LINEAR
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
Definition: pixfmt.h:589
hable
static float hable(float in)
Definition: vf_tonemap.c:88
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:664
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
pixdesc.h
av_csp_luma_coeffs_from_avcsp
const struct AVLumaCoefficients * av_csp_luma_coeffs_from_avcsp(enum AVColorSpace csp)
Retrieves the Luma coefficients necessary to construct a conversion matrix from an enum constant desc...
Definition: csp.c:58
AVOption
AVOption.
Definition: opt.h:357
b
#define b
Definition: input.c:41
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:583
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
tonemap_options
static const AVOption tonemap_options[]
Definition: vf_tonemap.c:283
ff_determine_signal_peak
double ff_determine_signal_peak(AVFrame *in)
Definition: colorspace.c:153
ThreadData::desc
const AVPixFmtDescriptor * desc
Definition: vf_tonemap.c:175
float.h
FLAGS
#define FLAGS
Definition: vf_tonemap.c:282
AVLumaCoefficients
Struct containing luma coefficients to be used for RGB to YUV/YCoCg, or similar calculations.
Definition: csp.h:48
ThreadData::peak
double peak
Definition: vf_tonemap.c:176
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
tonemap_inputs
static const AVFilterPad tonemap_inputs[]
Definition: vf_tonemap.c:300
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
av_float2int
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
Definition: intfloat.h:50
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:527
video.h
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:154
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:395
av_image_copy_plane
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
Definition: imgutils.c:374
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3341
colorspace.h
tonemap
static void tonemap(TonemapContext *s, AVFrame *out, const AVFrame *in, const AVPixFmtDescriptor *desc, int x, int y, double peak)
Definition: vf_tonemap.c:108
TonemapContext::desat
double desat
Definition: vf_tonemap.c:57
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
av_cold
#define av_cold
Definition: attributes.h:90
ff_video_default_filterpad
const AVFilterPad ff_video_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_VIDEO.
Definition: video.c:37
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:247
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:1730
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
tonemap_slice
static int tonemap_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_tonemap.c:179
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:147
ctx
AVFormatContext * ctx
Definition: movenc.c:49
TonemapContext::peak
double peak
Definition: vf_tonemap.c:58
NAN
#define NAN
Definition: mathematics.h:115
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_tonemap.c:63
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
MIX
#define MIX(x, y, a)
Definition: vf_tonemap.c:107
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:709
isnan
#define isnan(x)
Definition: libm.h:340
av_clipf
av_clipf
Definition: af_crystalizer.c:121
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:94
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
TonemapAlgorithm
TonemapAlgorithm
Definition: vf_tonemap.c:41
f
f
Definition: af_crystalizer.c:121
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:374
FILTER_PIXFMTS
#define FILTER_PIXFMTS(...)
Definition: internal.h:168
AV_PIX_FMT_GBRPF32
#define AV_PIX_FMT_GBRPF32
Definition: pixfmt.h:508
ff_update_hdr_metadata
void ff_update_hdr_metadata(AVFrame *in, double peak)
Definition: colorspace.c:178
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:461
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
csp.h
internal.h
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(tonemap)
internal.h
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:827
ThreadData
Used for passing data between threads.
Definition: dsddec.c:71
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
TONEMAP_CLIP
@ TONEMAP_CLIP
Definition: vf_tonemap.c:45
AVFilter
Filter definition.
Definition: avfilter.h:166
TonemapContext
Definition: vf_tonemap.c:52
ret
ret
Definition: filter_design.txt:187
mobius
static float mobius(float in, float j, double peak)
Definition: vf_tonemap.c:94
ff_vf_tonemap
const AVFilter ff_vf_tonemap
Definition: vf_tonemap.c:308
TONEMAP_HABLE
@ TONEMAP_HABLE
Definition: vf_tonemap.c:47
TonemapContext::param
double param
Definition: vf_tonemap.c:56
AVFrame::height
int height
Definition: frame.h:446
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:245
avfilter.h
AV_PIX_FMT_GBRAPF32
#define AV_PIX_FMT_GBRAPF32
Definition: pixfmt.h:509
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: dec.c:699
TONEMAP_MAX
@ TONEMAP_MAX
Definition: vf_tonemap.c:49
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
OFFSET
#define OFFSET(x)
Definition: vf_tonemap.c:281
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
TONEMAP_NONE
@ TONEMAP_NONE
Definition: vf_tonemap.c:42
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:71
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:183
TonemapContext::tonemap
enum TonemapAlgorithm tonemap
Definition: vf_tonemap.c:55
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
d
d
Definition: ffmpeg_filter.c:424
imgutils.h
TonemapContext::coeffs
const AVLumaCoefficients * coeffs
Definition: vf_tonemap.c:60
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:419
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_tonemap.c:197
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:134
TONEMAP_MOBIUS
@ TONEMAP_MOBIUS
Definition: vf_tonemap.c:48
TONEMAP_LINEAR
@ TONEMAP_LINEAR
Definition: vf_tonemap.c:43
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:254
TONEMAP_REINHARD
@ TONEMAP_REINHARD
Definition: vf_tonemap.c:46