#define OFFSET(x) offsetof(RemoveGrainContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

#define REMOVE_GRAIN_SORT_AXIS     \
    const int ma1 = FFMAX(a1, a8); \
    const int mi1 = FFMIN(a1, a8); \
    const int ma2 = FFMAX(a2, a7); \
    const int mi2 = FFMIN(a2, a7); \
    const int ma3 = FFMAX(a3, a6); \
    const int mi3 = FFMIN(a3, a6); \
    const int ma4 = FFMAX(a4, a5); \
    const int mi4 = FFMIN(a4, a5);

static int cmp_int(const void *p1, const void *p2)
{
    int left  = *(const int *)p1;
    int right = *(const int *)p2;
    return FFDIFFSIGN(left, right);
}
    /* mode02/mode03/mode04 (excerpts): clip c against the sorted neighbour values */
    return av_clip(c, a[2 - 1], a[7 - 1]); /* mode02 */
    return av_clip(c, a[3 - 1], a[6 - 1]); /* mode03 */
    return av_clip(c, a[4 - 1], a[5 - 1]); /* mode04 */
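/* A minimal sketch (not the verbatim source) of how modes 2-4 obtain the
 * sorted neighbour array used by the returns above, assuming the AV_QSORT
 * macro and the cmp_int comparator declared in this file: */
static int mode02_sketch(int c, int a1, int a2, int a3, int a4,
                         int a5, int a6, int a7, int a8)
{
    int a[8] = { a1, a2, a3, a4, a5, a6, a7, a8 };

    AV_QSORT(a, 8, int, cmp_int);           /* sort the 8 neighbours ascending */
    return av_clip(c, a[2 - 1], a[7 - 1]);  /* clamp c between the 2nd and 7th value */
}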
    /* mode05 (excerpt): selection between the four axis clips */
    } else if (mindiff == c2) {
        /* ... */
    } else if (mindiff == c3) {
        /* ... */
    /* mode06 (excerpt) */
    const int d1 = ma1 - mi1;
    const int d2 = ma2 - mi2;
    const int d3 = ma3 - mi3;
    const int d4 = ma4 - mi4;

    const int cli1 = av_clip(c, mi1, ma1);
    const int cli2 = av_clip(c, mi2, ma2);
    const int cli3 = av_clip(c, mi3, ma3);
    const int cli4 = av_clip(c, mi4, ma4);

    /* ... */
    } else if (mindiff == c2) {
        /* ... */
    } else if (mindiff == c3) {
        /* ... */
    /* mode07 (excerpt) */
    const int d1 = ma1 - mi1;
    const int d2 = ma2 - mi2;
    const int d3 = ma3 - mi3;
    const int d4 = ma4 - mi4;

    const int cli1 = av_clip(c, mi1, ma1);
    const int cli2 = av_clip(c, mi2, ma2);
    const int cli3 = av_clip(c, mi3, ma3);
    const int cli4 = av_clip(c, mi4, ma4);

    const int c1 = FFABS(c - cli1) + d1;
    const int c2 = FFABS(c - cli2) + d2;
    const int c3 = FFABS(c - cli3) + d3;
    const int c4 = FFABS(c - cli4) + d4;

    /* ... */
    } else if (mindiff == c2) {
        /* ... */
    } else if (mindiff == c3) {
        /* ... */
    /* mode08 (excerpt) */
    const int d1 = ma1 - mi1;
    const int d2 = ma2 - mi2;
    const int d3 = ma3 - mi3;
    const int d4 = ma4 - mi4;

    const int cli1 = av_clip(c, mi1, ma1);
    const int cli2 = av_clip(c, mi2, ma2);
    const int cli3 = av_clip(c, mi3, ma3);
    const int cli4 = av_clip(c, mi4, ma4);

    /* ... */
    } else if (mindiff == c2) {
        /* ... */
    } else if (mindiff == c3) {
        /* ... */
    /* mode09 (excerpt) */
    const int d1 = ma1 - mi1;
    const int d2 = ma2 - mi2;
    const int d3 = ma3 - mi3;
    const int d4 = ma4 - mi4;

    /* ... */
    } else if (mindiff == d2) {
        /* ... */
    } else if (mindiff == d3) {
        /* ... */
    /* mode10 (excerpt) */
    const int d1 = FFABS(c - a1);
    const int d2 = FFABS(c - a2);
    const int d3 = FFABS(c - a3);
    const int d4 = FFABS(c - a4);
    const int d5 = FFABS(c - a5);
    const int d6 = FFABS(c - a6);
    const int d7 = FFABS(c - a7);
    const int d8 = FFABS(c - a8);

    if (mindiff == d7) return a7;
    if (mindiff == d8) return a8;
    if (mindiff == d6) return a6;
    if (mindiff == d2) return a2;
    if (mindiff == d3) return a3;
    if (mindiff == d1) return a1;
    if (mindiff == d5) return a5;
    const int sum = 4 * c + 2 * (a2 + a4 + a5 + a7) + a1 + a3 + a6 + a8;
    const int val = (sum + 8) >> 4;
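    /* mode11/mode12: the weights above form the 3x3 binomial kernel
     * 1-2-1 / 2-4-2 / 1-2-1 (sum 16), so (sum + 8) >> 4 divides by 16
     * with rounding. */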
    /* mode13/mode14 (excerpt) */
    const int d1 = FFABS(a1 - a8);
    const int d2 = FFABS(a2 - a7);
    const int d3 = FFABS(a3 - a6);

    return (a2 + a7 + 1) >> 1; /* when the a2/a7 axis differs least */
    return (a3 + a6 + 1) >> 1; /* when the a3/a6 axis differs least */
    return (a1 + a8 + 1) >> 1; /* otherwise: the a1/a8 axis */
    /* mode15/mode16 (excerpt) */
    const int d1 = FFABS(a1 - a8);
    const int d2 = FFABS(a2 - a7);
    const int d3 = FFABS(a3 - a6);

    const int average = (2 * (a2 + a7) + a1 + a3 + a6 + a8 + 4) >> 3;
    /* mode19 (excerpt) */
    const int sum = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
    const int val = (sum + 4) >> 3;

    /* mode20 (excerpt) */
    const int sum = a1 + a2 + a3 + a4 + c + a5 + a6 + a7 + a8;
    const int val = (sum + 4) / 9;
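    /* mode19 is the rounded average of the eight neighbours ((sum + 4) >> 3);
     * mode20 averages all nine samples including the centre, the +4 bias
     * rounding the division by 9. */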
    /* mode21 (excerpt) */
    const int l1l = (a1 + a8) >> 1;     /* truncated average of each axis */
    const int l2l = (a2 + a7) >> 1;
    const int l3l = (a3 + a6) >> 1;
    const int l4l = (a4 + a5) >> 1;

    const int l1h = (a1 + a8 + 1) >> 1; /* rounded-up average of each axis */
    const int l2h = (a2 + a7 + 1) >> 1;
    const int l3h = (a3 + a6 + 1) >> 1;
    const int l4h = (a4 + a5 + 1) >> 1;
    /* mode22 (excerpt) */
    const int l1 = (a1 + a8 + 1) >> 1;
    const int l2 = (a2 + a7 + 1) >> 1;
    const int l3 = (a3 + a6 + 1) >> 1;
    const int l4 = (a4 + a5 + 1) >> 1;
    /* mode23 (excerpt) */
    const int linediff1 = ma1 - mi1;
    const int linediff2 = ma2 - mi2;
    const int linediff3 = ma3 - mi3;
    const int linediff4 = ma4 - mi4;

    const int u1 = FFMIN(c - ma1, linediff1);
    const int u2 = FFMIN(c - ma2, linediff2);
    const int u3 = FFMIN(c - ma3, linediff3);
    const int u4 = FFMIN(c - ma4, linediff4);

    const int d1 = FFMIN(mi1 - c, linediff1);
    const int d2 = FFMIN(mi2 - c, linediff2);
    const int d3 = FFMIN(mi3 - c, linediff3);
    const int d4 = FFMIN(mi4 - c, linediff4);
    /* mode24 (excerpt) */
    const int linediff1 = ma1 - mi1;
    const int linediff2 = ma2 - mi2;
    const int linediff3 = ma3 - mi3;
    const int linediff4 = ma4 - mi4;

    const int tu1 = c - ma1;
    const int tu2 = c - ma2;
    const int tu3 = c - ma3;
    const int tu4 = c - ma4;

    const int u1 = FFMIN(tu1, linediff1 - tu1);
    const int u2 = FFMIN(tu2, linediff2 - tu2);
    const int u3 = FFMIN(tu3, linediff3 - tu3);
    const int u4 = FFMIN(tu4, linediff4 - tu4);

    const int td1 = mi1 - c;
    const int td2 = mi2 - c;
    const int td3 = mi3 - c;
    const int td4 = mi4 - c;

    const int d1 = FFMIN(td1, linediff1 - td1);
    const int d2 = FFMIN(td2, linediff2 - td2);
    const int d3 = FFMIN(td3, linediff3 - td3);
    const int d4 = FFMIN(td4, linediff4 - td4);
    /* config_input (excerpt): select the per-plane kernel from the mode option */
    switch (s->mode[i]) {
    /* filter_slice (excerpt): split the plane's rows across the slice jobs */
    int start = (height *  jobnr     ) / nb_jobs;
    int end   = (height * (jobnr + 1)) / nb_jobs;

    start = FFMAX(1, start);
    end   = FFMIN(height - 1, end);
    for (y = start; y < end; y++) {
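    /* For example, with height = 480 and nb_jobs = 4, the four jobs cover rows
     * [1,120), [120,240), [240,360) and [360,479); row 0 and the last row are
     * excluded because the 3x3 neighbourhood would read outside the plane. */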
            /* 3x3 neighbourhood around the centre pixel c
             * (op = linesize + 1, o0 = linesize, om = linesize - 1):
             *     a1 a2 a3
             *     a4  c a5
             *     a6 a7 a8 */
            const int a1 = src[-op];
            const int a2 = src[-o0];
            const int a3 = src[-om];
            const int a4 = src[-1 ];
            const int c  = src[ 0 ];
            const int a5 = src[ 1 ];
            const int a6 = src[ om];
            const int a7 = src[ o0];
            const int a8 = src[ op];
    /* filter_frame (excerpt): mode 0 passes the plane through unchanged */
    if (s->mode[i] == 0) {
AVFilter ff_vf_removegrain = {
    .name        = "removegrain",
    /* ... */
    .inputs      = removegrain_inputs,
    .outputs     = removegrain_outputs,
    .priv_class  = &removegrain_class,
    /* ... */
};
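/* Example invocation (the per-plane mode options m0-m3 come from the filter's
 * option table; shown only as an illustration):
 *   ffmpeg -i in.mp4 -vf removegrain=m0=1 out.mp4 */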
void ff_removegrain_init_x86(RemoveGrainContext *rg)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
AVFrame
This structure describes decoded (raw) audio or video data.
static int mode08(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
avfilter.h
Main libavfilter public API header.
int h
agreed upon image height
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
AVFilter ff_vf_removegrain
int(* rg[4])(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
static int mode24(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
const char * name
Pad name.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
static const AVOption removegrain_options[]
static int mode19(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range...
AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
static int mode23(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
static int mode04(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
static int query_formats(AVFilterContext *ctx)
AVFilterPad
A filter pad used for either input or output.
AVFilterLink
A link between two filters.
AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
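As a sketch of how these descriptor fields are typically used when configuring the input, assuming illustrative planewidth/planeheight arrays in the filter context (not necessarily the exact names used in this file):

    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;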
static int mode20(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
void * priv
private data for use by the filter
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
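A minimal sketch of how a slice-threaded filter dispatches filter_slice from filter_frame, assuming a ThreadData holding the frames and the plane index plus an illustrative planeheight array (field names are not necessarily the exact ones in this file):

    typedef struct ThreadData {
        AVFrame *in, *out;
        int plane;
    } ThreadData;

    /* inside filter_frame(), once per plane i */
    ThreadData td = { .in = in, .out = out, .plane = i };
    ctx->internal->execute(ctx, filter_slice, &td, NULL,
                           FFMIN(s->planeheight[i], ff_filter_get_nb_threads(ctx)));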
static int mode1112(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
int w
agreed upon image width
#define FFDIFFSIGN(x, y)
Comparator.
static int mode1314(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
static int cmp_int(const void *p1, const void *p2)
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
static int mode09(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
#define REMOVE_GRAIN_SORT_AXIS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
static int mode07(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
static const AVFilterPad removegrain_inputs[]
static const AVFilterPad outputs[]
int format
agreed upon media format
static int mode01(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
static int config_input(AVFilterLink *inlink)
ThreadData
Used for passing data between threads.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static int mode21(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
static const AVFilterPad removegrain_outputs[]
const char * name
Filter name.
AVFilterLink ** outputs
array of pointers to output links
static enum AVPixelFormat pix_fmts[]
static int mode06(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
static int mode18(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static int mode10(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static int mode17(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
static int mode03(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
avfilter_execute_func * execute
static int mode1516(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AVFilterContext * dst
dest filter
AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
static int mode22(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
static int mode02(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AVFILTER_DEFINE_CLASS(removegrain)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
static int mode05(int c, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8)
AVPixelFormat
Pixel format.
int mode[4]
filtering mode for each plane (0 leaves the plane untouched)
#define AV_QSORT(p, num, type, cmp)
Quicksort This sort is fast, and fully inplace but not stable and it is possible to construct input t...
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
void(* fl[4])(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int pixels)
#define AV_CEIL_RSHIFT(a, b)