/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H

#include "config.h"
#include "libavutil/common.h"
#include "libavutil/x86/asm.h"

#if HAVE_INLINE_ASM

#if ARCH_X86_32

#define MULL MULL
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
{
    int rt, dummy;
    __asm__ (
        "imull %3               \n\t"
        "shrdl %4, %%edx, %%eax \n\t"
        :"=a"(rt), "=d"(dummy)
        :"a"(a), "rm"(b), "ci"((uint8_t)shift)
    );
    return rt;
}
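
/*
 * Reference semantics, as a sketch (the _ref name is illustrative only):
 * MULL is a 32x32->64 signed multiply whose product is shifted right by
 * `shift` (0 < shift < 32), keeping the low 32 bits.  The `#define MULL
 * MULL` idiom above lets the generic libavcodec/mathops.h detect the
 * override and skip its C fallback.
 */
#if 0
static av_always_inline av_const int MULL_ref(int a, int b, unsigned shift)
{
    /* imull produces the full 64-bit product in %edx:%eax; shrdl then
     * funnels bits [shift, shift+31] of it into a single register */
    return (int)(((int64_t)a * b) >> shift);
}
#endif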

#define MULH MULH
static av_always_inline av_const int MULH(int a, int b)
{
    int rt, dummy;
    __asm__ (
        "imull %3"
        :"=d"(rt), "=a"(dummy)
        :"a"(a), "rm"(b)
    );
    return rt;
}
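
/*
 * Reference semantics, as a sketch (the _ref name is illustrative only):
 * MULH returns the high 32 bits of the 64-bit signed product, which
 * imull leaves in %edx.
 */
#if 0
static av_always_inline av_const int MULH_ref(int a, int b)
{
    return (int)(((int64_t)a * b) >> 32);
}
#endif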

#define MUL64 MUL64
static av_always_inline av_const int64_t MUL64(int a, int b)
{
    int64_t rt;
    __asm__ (
        "imull %2"
        :"=A"(rt)
        :"a"(a), "rm"(b)
    );
    return rt;
}
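
/*
 * Reference semantics, as a sketch (the _ref name is illustrative only):
 * MUL64 is a full 32x32->64 signed multiply; the "A" constraint binds
 * the result to the %edx:%eax pair that imull writes.
 */
#if 0
static av_always_inline av_const int64_t MUL64_ref(int a, int b)
{
    return (int64_t)a * b;
}
#endif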

#endif /* ARCH_X86_32 */

#if HAVE_I686
/* median of 3 */
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int i = b;
    __asm__ (
        "cmp    %2, %1 \n\t"
        "cmovg  %1, %0 \n\t" /* i = max(a, b)                       */
        "cmovg  %2, %1 \n\t" /* a = min(a, b)                       */
        "cmp    %3, %1 \n\t"
        "cmovl  %3, %1 \n\t" /* a = max(min(a, b), c)               */
        "cmp    %1, %0 \n\t"
        "cmovg  %1, %0 \n\t" /* i = min(max(a, b), max(min(a,b),c)) */
        :"+&r"(i), "+&r"(a)
        :"r"(b), "r"(c)
    );
    return i;
}
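
/*
 * Portable sketch of the same median-of-3 (illustrative only; the _ref
 * name is not an FFmpeg identifier, and FFMIN/FFMAX come from
 * libavutil/common.h, which is already included above):
 */
#if 0
static inline av_const int mid_pred_ref(int a, int b, int c)
{
    /* median = min(max(a, b), max(min(a, b), c)) */
    return FFMIN(FFMAX(a, b), FFMAX(FFMIN(a, b), c));
}
#endif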

#if HAVE_6REGS
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
    "cmpl  %0, %3 \n\t"\
    "cmovl %3, %0 \n\t"\
    "cmovl %4, %1 \n\t"\
    "cmovl %5, %2 \n\t"\
    : "+&r" (x), "+&r" (a), "+r" (c)\
    : "r" (y), "r" (b), "r" (d)\
);
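
/*
 * What COPY3_IF_LT does, expressed as plain C (a sketch; the cmovl
 * version above performs the same three conditional moves branchlessly):
 *
 *     if ((y) < (x)) {
 *         (x) = (y);
 *         (a) = (b);
 *         (c) = (d);
 *     }
 */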
#endif /* HAVE_6REGS */
#endif /* HAVE_I686 */

#define MASK_ABS(mask, level) \
    __asm__ ("cdq          \n\t" \
             "xorl %1, %0  \n\t" \
             "subl %1, %0  \n\t" \
             : "+a"(level), "=&d"(mask))
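
/*
 * MASK_ABS as plain C, a sketch: cdq broadcasts the sign bit of `level`
 * into `mask`, then the xor/sub pair computes the absolute value.  Note
 * this relies on arithmetic right shift of negative values, as FFmpeg
 * assumes throughout:
 *
 *     mask  = level >> 31;            // 0 or -1
 *     level = (level ^ mask) - mask;  // |level|
 */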

// Shift right by 32-s without computing "32-s": x86 masks shift counts
// to 5 bits, so a count of -s behaves like 32-s (gcc should do this
// optimization itself ...)
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32(int32_t a, int8_t s)
{
    __asm__ ("sarl %1, %0\n\t"
             : "+r" (a)
             : "ic" ((uint8_t)(-s))
    );
    return a;
}

#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s)
{
    __asm__ ("shrl %1, %0\n\t"
             : "+r" (a)
             : "ic" ((uint8_t)(-s))
    );
    return a;
}
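
/*
 * Reference semantics for both helpers, as a sketch (the _ref names are
 * illustrative only): shift right by (32 - s), arithmetic for the signed
 * variant and logical for the unsigned one.
 */
#if 0
static inline int32_t NEG_SSR32_ref(int32_t a, int8_t s)
{
    return a >> (32 - s);   /* arithmetic (sign-extending) shift */
}

static inline uint32_t NEG_USR32_ref(uint32_t a, int8_t s)
{
    return a >> (32 - s);   /* logical (zero-filling) shift */
}
#endif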

#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_X86_MATHOPS_H */