FFmpeg
mathops.h
/*
 * simple math operations
 * Copyright (c) 2001, 2002 Fabrice Bellard
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef AVCODEC_MATHOPS_H
#define AVCODEC_MATHOPS_H

#include <stdint.h>

#include "libavutil/common.h"
#include "libavutil/reverse.h"
#include "config.h"

#define MAX_NEG_CROP 1024

extern const uint32_t ff_inverse[257];
extern const uint8_t  ff_sqrt_tab[256];
extern const uint8_t  ff_crop_tab[256 + 2 * MAX_NEG_CROP];
extern const uint8_t  ff_zigzag_direct[64];
extern const uint8_t  ff_zigzag_scan[16 + 1];

#if ARCH_ARM
# include "arm/mathops.h"
#elif ARCH_AVR32
# include "avr32/mathops.h"
#elif ARCH_MIPS
# include "mips/mathops.h"
#elif ARCH_PPC
# include "ppc/mathops.h"
#elif ARCH_X86
# include "x86/mathops.h"
#endif

/* generic implementation */

#ifndef MUL64
# define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
#endif

#ifndef MULL
# define MULL(a,b,s) (MUL64(a, b) >> (s))
#endif

#ifndef MULH
static av_always_inline int MULH(int a, int b){
    return MUL64(a, b) >> 32;
}
#endif

#ifndef UMULH
static av_always_inline unsigned UMULH(unsigned a, unsigned b){
    return ((uint64_t)(a) * (uint64_t)(b)) >> 32;
}
#endif
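
/* Usage sketch (illustrative only, not part of the original header): MULL
 * keeps a 64-bit intermediate so a fixed-point product cannot overflow before
 * the shift, while MULH/UMULH return only the high 32 bits of a 32x32-bit
 * multiply. 'sample' and 'coeff' below are hypothetical Q15 operands.
 *
 *     int scaled = MULL(sample, coeff, 15);   // (sample * coeff) >> 15
 *     int hi     = MULH(a, b);                // high 32 bits of the signed product
 */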

#ifndef MAC64
# define MAC64(d, a, b) ((d) += MUL64(a, b))
#endif

#ifndef MLS64
# define MLS64(d, a, b) ((d) -= MUL64(a, b))
#endif

/* signed 16x16 -> 32 multiply-accumulate */
#ifndef MAC16
# define MAC16(rt, ra, rb) rt += (ra) * (rb)
#endif

/* signed 16x16 -> 32 multiply */
#ifndef MUL16
# define MUL16(ra, rb) ((ra) * (rb))
#endif

#ifndef MLS16
# define MLS16(rt, ra, rb) ((rt) -= (ra) * (rb))
#endif
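
/* Usage sketch (illustrative): the 16-bit MAC/MLS macros exist so that a dot
 * product accumulated in 32 bits can map to a single instruction where the
 * architecture provides one. 'x', 'y' and 'n' are hypothetical.
 *
 *     int32_t acc = 0;
 *     for (int i = 0; i < n; i++)
 *         MAC16(acc, x[i], y[i]);             // acc += x[i] * y[i]
 */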

/* median of 3 */
#ifndef mid_pred
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    if (a > b) {
        if (c > b) {
            if (c > a) b = a;
            else       b = c;
        }
    } else {
        if (b > c) {
            if (c > a) b = c;
            else       b = a;
        }
    }
    return b;
}
#endif

#ifndef median4
#define median4 median4
static inline av_const int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else       return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else       return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
#endif
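
/* Usage sketch (illustrative): mid_pred() is the median-of-three predictor
 * used e.g. for motion vectors, median4() the mean of the two middle values
 * of four. The neighbour names below are hypothetical.
 *
 *     int pred_x = mid_pred(left_mv_x, top_mv_x, topright_mv_x);
 *     int m      = median4(a, b, c, d);
 */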

/* Sign-extend the 'bits'-bit value in the low bits of 'val'; the union
 * type-pun avoids the implementation-defined conversion of an out-of-range
 * unsigned value back to int. */
#ifndef sign_extend
static inline av_const int sign_extend(int val, unsigned bits)
{
    unsigned shift = 8 * sizeof(int) - bits;
    union { unsigned u; int s; } v = { (unsigned) val << shift };
    return v.s >> shift;
}
#endif

/* Keep only the low 'bits' bits of 'val'. */
#ifndef zero_extend
static inline av_const unsigned zero_extend(unsigned val, unsigned bits)
{
    return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits);
}
#endif
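
/* Usage sketch (illustrative): typical use is widening a raw bitfield read
 * from a bitstream; 'raw' is a hypothetical value holding the field in its
 * low bits.
 *
 *     int      delta  = sign_extend(raw, 5);  // 5-bit two's complement, -16..15
 *     unsigned masked = zero_extend(raw, 5);  // keep only the low 5 bits
 */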

#ifndef COPY3_IF_LT
#define COPY3_IF_LT(x, y, a, b, c, d)\
if ((y) < (x)) {\
    (x) = (y);\
    (a) = (b);\
    (c) = (d);\
}
#endif

#ifndef MASK_ABS
#define MASK_ABS(mask, level) do {       \
        mask  = level >> 31;             \
        level = (level ^ mask) - mask;   \
    } while (0)
#endif
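
/* Usage sketch (illustrative): MASK_ABS leaves |level| in level and the sign
 * mask (0 or -1) in mask, so the sign can later be restored branchlessly with
 * the same xor/subtract pattern. 'coeff' is hypothetical.
 *
 *     int mask, level = coeff;
 *     MASK_ABS(mask, level);                  // level = |coeff|, mask = coeff >> 31
 *     ...
 *     level = (level ^ mask) - mask;          // reapply the original sign
 */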

#ifndef NEG_SSR32
# define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s)))
#endif

#ifndef NEG_USR32
# define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s)))
#endif
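
/* Usage sketch (illustrative): NEG_SSR32/NEG_USR32 extract the top s bits of
 * a 32-bit word, the usual bitstream-reader pattern; s must not be 0, since
 * that would be a shift by 32. 'cache' and 'n' are hypothetical.
 *
 *     unsigned code = NEG_USR32(cache, n);    // top n bits, zero-filled
 *     int      val  = NEG_SSR32(cache, n);    // top n bits, sign-extended
 */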

#if HAVE_BIGENDIAN
# ifndef PACK_2U8
#  define PACK_2U8(a,b)      (((a) <<  8) | (b))
# endif
# ifndef PACK_4U8
#  define PACK_4U8(a,b,c,d)  (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
# endif
# ifndef PACK_2U16
#  define PACK_2U16(a,b)     (((a) << 16) | (b))
# endif
#else
# ifndef PACK_2U8
#  define PACK_2U8(a,b)      (((b) <<  8) | (a))
# endif
# ifndef PACK_4U8
#  define PACK_4U8(a,b,c,d)  (((d) << 24) | ((c) << 16) | ((b) << 8) | (a))
# endif
# ifndef PACK_2U16
#  define PACK_2U16(a,b)     (((b) << 16) | (a))
# endif
#endif

#ifndef PACK_2S8
# define PACK_2S8(a,b)       PACK_2U8((a)&255, (b)&255)
#endif
#ifndef PACK_4S8
# define PACK_4S8(a,b,c,d)   PACK_4U8((a)&255, (b)&255, (c)&255, (d)&255)
#endif
#ifndef PACK_2S16
# define PACK_2S16(a,b)      PACK_2U16((a)&0xffff, (b)&0xffff)
#endif
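
/* Usage sketch (illustrative): the PACK_* macros order their arguments so
 * that a native 32-bit store writes them to increasing memory addresses on
 * either endianness. AV_WN32A() is from libavutil/intreadwrite.h and would
 * need that header; 'dst' and p0..p3 are hypothetical.
 *
 *     AV_WN32A(dst, PACK_4U8(p0, p1, p2, p3));  // dst[0..3] = p0, p1, p2, p3
 */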

#ifndef FASTDIV
# define FASTDIV(a,b) ((uint32_t)((((uint64_t)a) * ff_inverse[b]) >> 32))
#endif /* FASTDIV */
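
/* Usage sketch (illustrative): FASTDIV replaces a division by a multiply with
 * a precomputed 2^32-scaled reciprocal from ff_inverse[], so the divisor must
 * stay within the range the table covers (and must not be 0).
 *
 *     unsigned q = FASTDIV(num, den);         // num / den, 'num'/'den' hypothetical
 */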

#ifndef ff_sqrt
#define ff_sqrt ff_sqrt
static inline av_const unsigned int ff_sqrt(unsigned int a)
{
    unsigned int b;

    if (a < 255) return (ff_sqrt_tab[a + 1] - 1) >> 4;
    else if (a < (1 << 12)) b = ff_sqrt_tab[a >> 4] >> 2;
#if !CONFIG_SMALL
    else if (a < (1 << 14)) b = ff_sqrt_tab[a >> 6] >> 1;
    else if (a < (1 << 16)) b = ff_sqrt_tab[a >> 8];
#endif
    else {
        int s = av_log2_16bit(a >> 16) >> 1;
        unsigned int c = a >> (s + 2);
        b = ff_sqrt_tab[c >> (s + 8)];
        b = FASTDIV(c, b) + (b << s);
    }

    return b - (a < b * b);
}
#endif
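
/* Usage sketch (illustrative): ff_sqrt() is intended to return the integer
 * square root, floor(sqrt(a)), using ff_sqrt_tab for small inputs and a
 * FASTDIV refinement step for large ones.
 *
 *     unsigned r = ff_sqrt(1000);             // expected 31, since 31*31 <= 1000 < 32*32
 */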

/* square of a */
static inline av_const float ff_sqrf(float a)
{
    return a * a;
}

/* Reinterpret a uint8_t bit pattern as int8_t; the union avoids the
 * implementation-defined behavior of an out-of-range integer conversion. */
static inline int8_t ff_u8_to_s8(uint8_t a)
{
    union {
        uint8_t u8;
        int8_t  s8;
    } b;
    b.u8 = a;
    return b.s8;
}

static av_always_inline uint32_t bitswap_32(uint32_t x)
{
    return (uint32_t)ff_reverse[ x        & 0xFF] << 24 |
           (uint32_t)ff_reverse[(x >>  8) & 0xFF] << 16 |
           (uint32_t)ff_reverse[(x >> 16) & 0xFF] <<  8 |
           (uint32_t)ff_reverse[ x >> 24];
}
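
/* Usage sketch (illustrative): bitswap_32() mirrors the bit order of a 32-bit
 * word using the per-byte reverse table ff_reverse, so bit 0 moves to bit 31.
 *
 *     uint32_t r = bitswap_32(1);             // 0x80000000
 */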

#endif /* AVCODEC_MATHOPS_H */