atomic.h
#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

//#include <linux/config.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

#define CONFIG_SMP /* Tonko added this - most machines DAQ has _are_ SMP */

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

/* ATOMIC_INIT - static initializer for an atomic_t */
#define ATOMIC_INIT(i) { (i) }

/* atomic_read - read the value of atomic variable v */
#define atomic_read(v) ((v)->counter)

/* atomic_set - set atomic variable v to value i (plain store) */
#define atomic_set(v,i) (((v)->counter) = (i))

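/*
 * Usage sketch, not part of the original header: a counter would
 * typically be declared as "static atomic_t nevt = ATOMIC_INIT(0);"
 * and then inspected or reset with the macros above.  The names "nevt"
 * and "example_read_and_reset" are hypothetical, for illustration only.
 */
static __inline__ int example_read_and_reset(atomic_t *ctr)
{
        int old = atomic_read(ctr);   /* plain load of the counter         */
        atomic_set(ctr, 0);           /* plain store; the load+store pair  */
                                      /* is not one atomic step            */
        return old;
}
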
/* atomic_add - atomically add i to v; no return value. */
static __inline__ void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "addl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}

/* atomic_sub - atomically subtract i from v; no return value. */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "subl %1,%0"
                :"=m" (v->counter)
                :"ir" (i), "m" (v->counter));
}

/*
 * atomic_sub_and_test - atomically subtract i from v and return
 * true if the result is zero, false otherwise.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "subl %2,%0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"ir" (i), "m" (v->counter) : "memory");
        return c;
}

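/*
 * Usage sketch, not part of the original header: consuming n tokens from
 * a shared budget and detecting the moment it is exhausted.  The helper
 * name and the token-budget idea are hypothetical illustrations.
 */
static __inline__ int example_consume_tokens(atomic_t *budget, int n)
{
        /* Returns 1 exactly when this subtraction brings the budget to zero. */
        return atomic_sub_and_test(n, budget);
}
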
/* atomic_inc - atomically increment v by 1. */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

/* atomic_dec - atomically decrement v by 1. */
static __inline__ void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "decl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

/*
 * atomic_dec_and_test - atomically decrement v by 1 and return
 * true if the result is zero, false otherwise.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "decl %0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"m" (v->counter) : "memory");
        return c != 0;
}

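/*
 * Usage sketch, not part of the original header: the classic
 * reference-count "put" built on atomic_dec_and_test().  The helper
 * name and the destructor callback are hypothetical.
 */
static __inline__ void example_refcount_put(atomic_t *refs, void *obj,
                                            void (*destroy)(void *))
{
        /* Only the thread that drops the count to zero frees the object. */
        if (atomic_dec_and_test(refs))
                destroy(obj);
}
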
/*
 * atomic_inc_and_test - atomically increment v by 1 and return
 * true if the result is zero, false otherwise.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "incl %0; sete %1"
                :"=m" (v->counter), "=qm" (c)
                :"m" (v->counter) : "memory");
        return c != 0;
}

/*
 * atomic_add_negative - atomically add i to v and return
 * true if the result is negative, false otherwise.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
        unsigned char c;

        __asm__ __volatile__(
                LOCK "addl %2,%0; sets %1"
                :"=m" (v->counter), "=qm" (c)
                :"ir" (i), "m" (v->counter) : "memory");
        return c;
}

/*
 * atomic_inc_if_nonzero - atomically increment v only if it is not
 * already zero.  Returns true if the inc succeeded.
 */
static inline int atomic_inc_if_nonzero(atomic_t *v)
{
        int val = atomic_read(v);

        while (val) {
                unsigned char c;
                __asm__ __volatile__(
                        LOCK "cmpxchgl %3,%1 ; setz %2"
                        : "=a" (val), "=m" (v->counter), "=qm" (c)
                        : "r" (val + 1), "0" (val), "m" (v->counter)
                        : "memory");
                if (c != 0)
                        return 1;
        }
        return 0;
}

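/*
 * Usage sketch, not part of the original header: taking a reference on
 * an object whose count may already have reached zero (i.e. the object
 * may be mid-teardown).  The helper name is hypothetical.
 */
static inline void *example_try_get(atomic_t *refs, void *obj)
{
        /* Succeeds only while at least one other reference is still held. */
        return atomic_inc_if_nonzero(refs) ? obj : (void *)0;
}
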
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" (mask),"m" (*addr) : "memory")

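/*
 * Usage sketch, not part of the original header: setting and clearing
 * individual status bits in a plain flag word with the macros above.
 * The function name and the bit values are hypothetical.
 */
static __inline__ void example_update_flags(unsigned long *flags)
{
        atomic_set_mask(0x1, flags);   /* locked "orl": set bit 0    */
        atomic_clear_mask(0x2, flags); /* locked "andl": clear bit 1 */
}
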
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

#endif /* __ARCH_I386_ATOMIC__ */