FFmpeg
mathops.h
/*
 * simple math operations
 * Copyright (c) 2001, 2002 Fabrice Bellard
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef AVCODEC_MATHOPS_H
#define AVCODEC_MATHOPS_H

#include <stdint.h>

#include "libavutil/common.h"
#include "config.h"

#define MAX_NEG_CROP 1024

extern const uint32_t ff_inverse[257];
extern const uint8_t  ff_reverse[256];
extern const uint8_t  ff_sqrt_tab[256];
extern const uint8_t  ff_crop_tab[256 + 2 * MAX_NEG_CROP];
extern const uint8_t  ff_zigzag_direct[64];
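
/* The tables above are defined elsewhere in libavcodec: ff_inverse holds the
 * precomputed reciprocals used by FASTDIV() below, ff_reverse[i] is the byte
 * i with its bits reversed, ff_sqrt_tab seeds ff_sqrt() below, ff_crop_tab is
 * a clip-to-[0,255] table indexed with an offset of MAX_NEG_CROP, and
 * ff_zigzag_direct is the standard 8x8 zigzag scan order. */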

#if ARCH_ARM
#   include "arm/mathops.h"
#elif ARCH_AVR32
#   include "avr32/mathops.h"
#elif ARCH_BFIN
#   include "bfin/mathops.h"
#elif ARCH_MIPS
#   include "mips/mathops.h"
#elif ARCH_PPC
#   include "ppc/mathops.h"
#elif ARCH_X86
#   include "x86/mathops.h"
#endif

/* Generic implementations; each is wrapped in #ifndef so that the
 * per-architecture headers included above can override any of them with
 * optimized versions. */

#ifndef MUL64
#   define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
#endif

#ifndef MULL
#   define MULL(a,b,s) (MUL64(a, b) >> (s))
#endif
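
/* MUL64() widens both operands to 64 bits before multiplying, and
 * MULL(a,b,s) discards the low s bits of that 64-bit product, i.e. a
 * fixed-point multiply: for example MULL(3 << 16, 5 << 16, 16) == 15 << 16. */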

#ifndef MULH
static av_always_inline int MULH(int a, int b){
    return MUL64(a, b) >> 32;
}
#endif

#ifndef UMULH
static av_always_inline unsigned UMULH(unsigned a, unsigned b){
    return ((uint64_t)(a) * (uint64_t)(b)) >> 32;
}
#endif
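
/* MULH() and UMULH() return the high 32 bits of the 64-bit signed resp.
 * unsigned product, e.g. MULH(0x40000000, 0x40000000) == 0x10000000. */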

#ifndef MAC64
#   define MAC64(d, a, b) ((d) += MUL64(a, b))
#endif

#ifndef MLS64
#   define MLS64(d, a, b) ((d) -= MUL64(a, b))
#endif

/* signed 16x16 -> 32 multiply add accumulate */
#ifndef MAC16
#   define MAC16(rt, ra, rb) rt += (ra) * (rb)
#endif

/* signed 16x16 -> 32 multiply */
#ifndef MUL16
#   define MUL16(ra, rb) ((ra) * (rb))
#endif

#ifndef MLS16
#   define MLS16(rt, ra, rb) ((rt) -= (ra) * (rb))
#endif
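
/* MAC16(), MUL16() and MLS16() take operands known to fit in 16 bits, so the
 * 32-bit product cannot overflow; architectures with dedicated 16x16
 * multiply(-accumulate) instructions typically override them. */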

/* median of 3 */
#ifndef mid_pred
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
#if 0
    int t= (a-b)&((a-b)>>31);
    a-=t;
    b+=t;
    b-= (b-c)&((b-c)>>31);
    b+= (a-b)&((a-b)>>31);

    return b;
#else
    if(a>b){
        if(c>b){
            if(c>a) b=a;
            else    b=c;
        }
    }else{
        if(b>c){
            if(c>a) b=c;
            else    b=a;
        }
    }
    return b;
#endif
}
#endif
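
/* mid_pred() returns the median of its three arguments, e.g.
 * mid_pred(1, 5, 3) == 3; it is used for things like median motion vector
 * prediction. The #if 0 block above is a disabled branchless alternative. */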

#ifndef sign_extend
static inline av_const int sign_extend(int val, unsigned bits)
{
    unsigned shift = 8 * sizeof(int) - bits;
    union { unsigned u; int s; } v = { (unsigned) val << shift };
    return v.s >> shift;
}
#endif

#ifndef zero_extend
static inline av_const unsigned zero_extend(unsigned val, unsigned bits)
{
    return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits);
}
#endif
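
/* sign_extend() and zero_extend() interpret the low 'bits' bits of val as a
 * signed resp. unsigned value, e.g. sign_extend(0xFF, 8) == -1 and
 * zero_extend(0x1FF, 8) == 0xFF. In sign_extend() the cast to unsigned avoids
 * undefined behaviour in the left shift, the union reinterprets the result as
 * signed without an implementation-defined conversion, and the final right
 * shift relies on the compiler shifting negative values arithmetically. */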

#ifndef COPY3_IF_LT
#define COPY3_IF_LT(x, y, a, b, c, d)\
if ((y) < (x)) {\
    (x) = (y);\
    (a) = (b);\
    (c) = (d);\
}
#endif
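
/* COPY3_IF_LT() performs three assignments if y < x; it is typically used in
 * search loops (e.g. motion estimation) to update the best score together
 * with the data associated with it. */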

#ifndef MASK_ABS
#define MASK_ABS(mask, level) do {      \
        mask  = level >> 31;            \
        level = (level ^ mask) - mask;  \
    } while (0)
#endif
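
/* MASK_ABS() is a branchless absolute value: mask becomes level >> 31
 * (-1 for negative input, 0 otherwise) and level becomes |level|; e.g. with
 * level == -5 it leaves mask == -1 and level == 5. The original sign is
 * preserved in mask so it can be reapplied later. */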

#ifndef NEG_SSR32
#   define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s)))
#endif

#ifndef NEG_USR32
#   define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s)))
#endif
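
/* NEG_SSR32()/NEG_USR32() shift right by (32 - s), i.e. they extract the top
 * s bits of a 32-bit value, sign-extended resp. zero-extended; for example
 * NEG_USR32(0xA0000000, 4) == 0xA. They are used heavily by the bitstream
 * reader. */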

#if HAVE_BIGENDIAN
# ifndef PACK_2U8
#   define PACK_2U8(a,b)     (((a) <<  8) | (b))
# endif
# ifndef PACK_4U8
#   define PACK_4U8(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) <<  8) | (d))
# endif
# ifndef PACK_2U16
#   define PACK_2U16(a,b)    (((a) << 16) | (b))
# endif
#else
# ifndef PACK_2U8
#   define PACK_2U8(a,b)     (((b) <<  8) | (a))
# endif
# ifndef PACK_4U8
#   define PACK_4U8(a,b,c,d) (((d) << 24) | ((c) << 16) | ((b) <<  8) | (a))
# endif
# ifndef PACK_2U16
#   define PACK_2U16(a,b)    (((b) << 16) | (a))
# endif
#endif

#ifndef PACK_2S8
#   define PACK_2S8(a,b)     PACK_2U8((a)&255, (b)&255)
#endif
#ifndef PACK_4S8
#   define PACK_4S8(a,b,c,d) PACK_4U8((a)&255, (b)&255, (c)&255, (d)&255)
#endif
#ifndef PACK_2S16
#   define PACK_2S16(a,b)    PACK_2U16((a)&0xffff, (b)&0xffff)
#endif
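
/* The PACK_* macros combine 2 or 4 small values into one integer such that,
 * when the result is stored to memory, the arguments appear in argument
 * order regardless of host endianness; the *_S* variants simply mask signed
 * inputs down to their unsigned bit width first. */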

#ifndef FASTDIV
#   define FASTDIV(a,b) ((uint32_t)((((uint64_t)a) * ff_inverse[b]) >> 32))
#endif /* FASTDIV */
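
/* FASTDIV(a,b) replaces an unsigned division by b (with b inside the range of
 * the ff_inverse table) with a multiplication by the precomputed reciprocal
 * ff_inverse[b] and a 32-bit shift, e.g. FASTDIV(1000, 3) == 333. It is not a
 * general-purpose division; it is only exact for the operand ranges it is
 * used with. */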

static inline av_const unsigned int ff_sqrt(unsigned int a)
{
    unsigned int b;

    if (a < 255) return (ff_sqrt_tab[a + 1] - 1) >> 4;
    else if (a < (1 << 12)) b = ff_sqrt_tab[a >> 4] >> 2;
#if !CONFIG_SMALL
    else if (a < (1 << 14)) b = ff_sqrt_tab[a >> 6] >> 1;
    else if (a < (1 << 16)) b = ff_sqrt_tab[a >> 8]     ;
#endif
    else {
        int s = av_log2_16bit(a >> 16) >> 1;
        unsigned int c = a >> (s + 2);
        b = ff_sqrt_tab[c >> (s + 8)];
        b = FASTDIV(c,b) + (b << s);
    }

    return b - (a < b * b);
}
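
/* ff_sqrt() computes the integer square root of a: small inputs come almost
 * directly from ff_sqrt_tab, larger ones start from a table-based estimate
 * that is refined with one FASTDIV() step, and the final line corrects an
 * estimate that is off by one (floor(sqrt(1000)) == 31, for instance, since
 * 31*31 = 961 <= 1000 < 1024 = 32*32). */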

static inline int8_t ff_u8_to_s8(uint8_t a)
{
    union {
        uint8_t u8;
        int8_t  s8;
    } b;
    b.u8 = a;
    return b.s8;
}
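
/* ff_u8_to_s8() reinterprets the bit pattern of a byte as a signed value via
 * a union (so 0xFF maps to -1), instead of a direct cast whose result for
 * values above 127 is implementation-defined in C. */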

#endif /* AVCODEC_MATHOPS_H */