
core, lib, modules: restructured source code tree

- new folder src/ to hold the source code for main project applications
- main.c is in src/
- all core files and subfolders are in src/core/
- modules are in src/modules/
- libs are in src/lib/
- application Makefiles are in src/
- application binary is built in src/ (src/kamailio)

Daniel-Constantin Mierla authored on 07/12/2016 11:03:51
deleted file mode 100644
@@ -1,381 +0,0 @@
-/* 
- * Copyright (C) 2006 iptelorg GmbH
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/**
- * @file
- * @brief Atomic operations and memory barriers (x86 and x86_64/amd64 specific)
- * 
- * Atomic operations and memory barriers (x86 and x86_64/amd64 specific)
- * \warning atomic ops do not include memory barriers, see atomic_ops.h for more
- * details.
- *
- * Config defines:
- * - NOSMP
- * - X86_OOSTORE (out of order store, defined by default)
- * - X86_64_OOSTORE, like X86_OOSTORE, but for x86_64 CPUs, default off
- * - __CPU_x86_64 (64 bit mode, long and void* is 64 bit and the CPU has all
- *   of the mfence, lfence and sfence instructions)
- * - __CPU_i386  (486+, 32 bit)
- * @ingroup atomic
- */
-
-#ifndef _atomic_x86_h
-#define _atomic_x86_h
-
-#define HAVE_ASM_INLINE_ATOMIC_OPS
-#define HAVE_ASM_INLINE_MEMBAR
-
-#ifdef NOSMP
-#define __LOCK_PREF 
-#else
-#define __LOCK_PREF "lock ;"
-#endif
-
-
-/* memory barriers */
-
-#ifdef NOSMP
-
-#define membar()	asm volatile ("" : : : "memory")
-#define membar_read()	membar()
-#define membar_write()	membar()
-#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
-/* lock barrriers: empty, not needed for NOSMP; the lock/unlock should already
- * contain gcc barriers*/
-#define membar_enter_lock() do {} while(0)
-#define membar_leave_lock() do {} while(0)
-/* membars after or before atomic_ops or atomic_setget -> use these or
- *  mb_<atomic_op_name>() if you need a memory barrier in one of these
- *  situations (on some archs where the atomic operations imply memory
- *   barriers is better to use atomic_op_x(); membar_atomic_op() then
- *    atomic_op_x(); membar()) */
-#define membar_atomic_op()				do {} while(0)
-#define membar_atomic_setget()			membar()
-#define membar_write_atomic_op()		do {} while(0)
-#define membar_write_atomic_setget()	membar_write()
-#define membar_read_atomic_op()			do {} while(0)
-#define membar_read_atomic_setget()		membar_read()
-
-#else
-
-/* although most x86 do stores in order, we're playing it safe and use
- *  oostore ready write barriers */
-#define X86_OOSTORE 
-
-#ifdef __CPU_x86_64
-/*
-#define membar() \
-	asm volatile( \
-					" lock; addq $0, 0(%%rsp) \n\t " \
-					: : : "memory" \
-				) 
-*/
-#define membar() 		asm volatile( " mfence \n\t " : : : "memory" )
-#define membar_read()	asm volatile( " lfence \n\t " : : : "memory" )
-#ifdef X86_64_OOSTORE
-#define membar_write()	asm volatile( " sfence \n\t " : : : "memory" )
-#else
-#define membar_write()	asm volatile ("" : : : "memory") /* gcc don't cache*/
-#endif /* X86_OOSTORE */
-
-#else /* ! __CPU_x86_64  => __CPU_i386*/
-/* membar: lfence, mfence, sfence available only on newer cpus, so for now
- * stick to lock addl */
-#define membar() asm volatile(" lock; addl $0, 0(%%esp) \n\t " : : : "memory" )
-#define membar_read()	membar()
-#ifdef X86_OOSTORE
-/* out of order store version */
-#define membar_write()	membar()
-#else
-/* no oostore, most x86 cpus => do nothing, just a gcc do_not_cache barrier*/
-#define membar_write()	asm volatile ("" : : : "memory")
-#endif /* X86_OOSTORE */
-
-#endif /* __CPU_x86_64 */
-
-#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
-/* lock barrriers: empty, not needed on x86 or x86_64 (atomic ops already
- *  force the barriers if needed); the lock/unlock should already contain the 
- *  gcc do_not_cache barriers*/
-#define membar_enter_lock() do {} while(0)
-#define membar_leave_lock() do {} while(0)
-/* membars after or before atomic_ops or atomic_setget -> use these or
- *  mb_<atomic_op_name>() if you need a memory barrier in one of these
- *  situations (on some archs where the atomic operations imply memory
- *   barriers is better to use atomic_op_x(); membar_atomic_op() then
- *    atomic_op_x(); membar()) */
-#define membar_atomic_op()				do {} while(0)
-#define membar_atomic_setget()			membar()
-#define membar_write_atomic_op()		do {} while(0)
-#define membar_write_atomic_setget()	membar_write()
-#define membar_read_atomic_op()			do {} while(0)
-#define membar_read_atomic_setget()		membar_read()
-
-
-#endif /* NOSMP */
-
-/* 1 param atomic f */
-#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE) \
-	inline static void atomic_##NAME##_##P_TYPE (volatile P_TYPE* var) \
-	{ \
-		asm volatile( \
-				__LOCK_PREF " " OP " \n\t" \
-				: "=m"(*var) : "m"(*var) : "cc", "memory" \
-				); \
-	}
-
-/* 2 params atomic f */
-#define ATOMIC_FUNC_DECL2(NAME, OP, P_TYPE) \
-	inline static void atomic_##NAME##_##P_TYPE (volatile P_TYPE* var, \
-			                                    P_TYPE v) \
-	{ \
-		asm volatile( \
-				__LOCK_PREF " " OP " \n\t" \
-				: "=m"(*var) : "ri" (v), "m"(*var) : "cc", "memory" \
-				); \
-	}
-
-#if defined __GNUC__ &&  __GNUC__ < 3 && __GNUC_MINOR__ < 9
-/* gcc version < 2.9 */
-#define ATOMIC_FUNC_XCHG(NAME, OP, TYPE) \
-	inline static TYPE atomic_##NAME##_##TYPE(volatile TYPE* var, TYPE v) \
-{ \
-	asm volatile( \
-			OP " \n\t" \
-			: "=q"(v), "=m"(*var) :"m"(*var), "0"(v) : "memory" \
-			); \
-	return v; \
-}
-#else
-#define ATOMIC_FUNC_XCHG(NAME, OP, TYPE) \
-	inline static TYPE atomic_##NAME##_##TYPE(volatile TYPE* var, TYPE v) \
-{ \
-	asm volatile( \
-			OP " \n\t" \
-			: "+q"(v), "=m"(*var) : "m"(*var) : "memory" \
-			); \
-	return v; \
-}
-#endif /* gcc & gcc version < 2.9 */
-
-/* returns a value, 1 param */
-#define ATOMIC_FUNC_TEST(NAME, OP, P_TYPE, RET_TYPE) \
-	inline static RET_TYPE atomic_##NAME##_##P_TYPE(volatile P_TYPE* var) \
-	{ \
-		char ret; \
-		asm volatile( \
-				__LOCK_PREF " " OP "\n\t" \
-				"setz %1 \n\t" \
-				: "=m" (*var), "=qm"(ret) : "m"(*var) : "cc", "memory" \
-				); \
-		return ret; \
-	}
-
-/* returns a value, 3 params (var, old, new)
- * The returned value is the value before the xchg:
- *  if ret!=old => cmpxchg failed and ret is var's value
- *  else  => success and new_v is var's new value */
-#define ATOMIC_FUNC_CMPXCHG(NAME, OP, P_TYPE, RET_TYPE) \
-	inline static RET_TYPE atomic_##NAME##_##P_TYPE(volatile P_TYPE* var, \
-													P_TYPE old, P_TYPE new_v)\
-	{ \
-		P_TYPE ret; \
-		asm volatile( \
-				__LOCK_PREF " " OP "\n\t" \
-				: "=a"(ret), "=m" (*var) :\
-					"r"(new_v), "m"(*var), "0"(old):\
-					"cc", "memory" \
-				); \
-		return ret; \
-	}
-
-/* similar w/ XCHG but with LOCK prefix, relaxed constraints & diff. return */
-#define ATOMIC_FUNC_XADD(NAME, OP, TYPE) \
-	inline static TYPE atomic_##NAME##_##TYPE(volatile TYPE* var, TYPE v) \
-{ \
-	TYPE ret; \
-	asm volatile( \
-			__LOCK_PREF " " OP " \n\t" \
-			: "=r"(ret), "=m"(*var) :"m"(*var), "0"(v) : "cc", "memory" \
-			); \
-	return ret+v; \
-}
-
-ATOMIC_FUNC_DECL1(inc, "incl %0", int)
-ATOMIC_FUNC_DECL1(dec, "decl %0", int)
-ATOMIC_FUNC_DECL2(and, "andl %1, %0", int)
-ATOMIC_FUNC_DECL2(or,  "orl %1, %0", int)
-ATOMIC_FUNC_TEST(inc_and_test, "incl %0", int, int)
-ATOMIC_FUNC_TEST(dec_and_test, "decl %0", int, int)
-ATOMIC_FUNC_XCHG(get_and_set,  "xchgl %1, %0", int)
-ATOMIC_FUNC_CMPXCHG(cmpxchg, "cmpxchgl %2, %1", int , int)
-ATOMIC_FUNC_XADD(add, "xaddl %0, %1", int) 
-#ifdef __CPU_x86_64
-ATOMIC_FUNC_DECL1(inc, "incq %0", long)
-ATOMIC_FUNC_DECL1(dec, "decq %0", long)
-ATOMIC_FUNC_DECL2(and, "andq %1, %0", long)
-ATOMIC_FUNC_DECL2(or,  "orq %1, %0", long)
-ATOMIC_FUNC_TEST(inc_and_test, "incq %0", long, int)
-ATOMIC_FUNC_TEST(dec_and_test, "decq %0", long, int)
-ATOMIC_FUNC_XCHG(get_and_set,  "xchgq %1, %0", long)
-ATOMIC_FUNC_CMPXCHG(cmpxchg, "cmpxchgq %2, %1", long , long)
-ATOMIC_FUNC_XADD(add, "xaddq %0, %1",long) 
-#else
-ATOMIC_FUNC_DECL1(inc, "incl %0", long)
-ATOMIC_FUNC_DECL1(dec, "decl %0", long)
-ATOMIC_FUNC_DECL2(and, "andl %1, %0", long)
-ATOMIC_FUNC_DECL2(or,  "orl %1, %0", long)
-ATOMIC_FUNC_TEST(inc_and_test, "incl %0", long, int)
-ATOMIC_FUNC_TEST(dec_and_test, "decl %0", long, int)
-ATOMIC_FUNC_XCHG(get_and_set,  "xchgl %1, %0", long)
-ATOMIC_FUNC_CMPXCHG(cmpxchg, "cmpxchgl %2, %1", long , long)
-ATOMIC_FUNC_XADD(add, "xaddl %0, %1",long) 
-#endif
-
-#define atomic_inc(var) atomic_inc_int(&(var)->val)
-#define atomic_dec(var) atomic_dec_int(&(var)->val)
-#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
-#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
-#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
-#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
-#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
-#define atomic_cmpxchg(var, old, newv) \
-		atomic_cmpxchg_int(&(var)->val, old, newv)
-#define atomic_add(var, v) atomic_add_int(&(var)->val, v)
-
-
-#ifdef NOSMP
-
-#define mb_atomic_set_int(v, i) \
-	do{ \
-		membar(); atomic_set_int(v, i); \
-	}while(0)
-
-#define mb_atomic_set_long(v, i) \
-	do{ \
-		membar(); atomic_set_long(v, i); \
-	}while(0)
-
-
-
-inline static int mb_atomic_get_int(volatile int* v)
-{
-	membar(); return atomic_get_int(v);
-}
-
-inline static long mb_atomic_get_long(volatile long* v)
-{
-	membar(); return atomic_get_long(v);
-}
-
-
-#else /* NOSMP */
-
-
-inline static void mb_atomic_set_int(volatile int* v, int i)
-{
-	asm volatile(
-			"xchgl %1, %0 \n\t"
-#if defined __GNUC__ &&  __GNUC__ < 3 && __GNUC_MINOR__ < 9
-			: "=q"(i), "=m"(*v) : "m"(*v), "0"(i) : "memory" 
-#else
-			: "+q"(i), "=m"(*v) : "m"(*v) : "memory" 
-#endif
-			);
-}
-
-
-inline static void mb_atomic_set_long(volatile long* v, long l)
-{
-	asm volatile(
-#ifdef __CPU_x86_64
-			"xchgq %1, %0 \n\t"
-#else
-			"xchgl %1, %0 \n\t"
-#endif
-#if defined __GNUC__ &&  __GNUC__ < 3 && __GNUC_MINOR__ < 9
-			: "=q"(l), "=m"(*v) : "m"(*v), "0"(l) : "memory" 
-#else
-			: "+q"(l), "=m"(*v) : "m"(*v) : "memory" 
-#endif
-			);
-}
-
-
-inline static int mb_atomic_get_int(volatile int* var)
-{
-	int ret;
-	
-	asm volatile(
-			__LOCK_PREF " cmpxchgl %0, %1 \n\t"
-			: "=a"(ret)  : "m"(*var) : "cc", "memory"
-			);
-	return ret;
-}
-
-inline static long mb_atomic_get_long(volatile long* var)
-{
-	long ret;
-	
-	asm volatile(
-#ifdef __CPU_x86_64
-			__LOCK_PREF " cmpxchgq %0, %1 \n\t"
-#else
-			__LOCK_PREF " cmpxchgl %0, %1 \n\t"
-#endif
-			: "=a"(ret)  : "m"(*var) : "cc", "memory"
-			);
-	return ret;
-}
-
-#endif /* NOSMP */
-
-
-/* on x86 atomic intructions act also as barriers */
-#define mb_atomic_inc_int(v)	atomic_inc_int(v)
-#define mb_atomic_dec_int(v)	atomic_dec_int(v)
-#define mb_atomic_or_int(v, m)	atomic_or_int(v, m)
-#define mb_atomic_and_int(v, m)	atomic_and_int(v, m)
-#define mb_atomic_inc_and_test_int(v)	atomic_inc_and_test_int(v)
-#define mb_atomic_dec_and_test_int(v)	atomic_dec_and_test_int(v)
-#define mb_atomic_get_and_set_int(v, i)	atomic_get_and_set_int(v, i)
-#define mb_atomic_cmpxchg_int(v, o, n)	atomic_cmpxchg_int(v, o, n)
-#define mb_atomic_add_int(v, a)	atomic_add_int(v, a)
-
-#define mb_atomic_inc_long(v)	atomic_inc_long(v)
-#define mb_atomic_dec_long(v)	atomic_dec_long(v)
-#define mb_atomic_or_long(v, m)	atomic_or_long(v, m)
-#define mb_atomic_and_long(v, m)	atomic_and_long(v, m)
-#define mb_atomic_inc_and_test_long(v)	atomic_inc_and_test_long(v)
-#define mb_atomic_dec_and_test_long(v)	atomic_dec_and_test_long(v)
-#define mb_atomic_get_and_set_long(v, i)	atomic_get_and_set_long(v, i)
-#define mb_atomic_cmpxchg_long(v, o, n)	atomic_cmpxchg_long(v, o, n)
-#define mb_atomic_add_long(v, a)	atomic_add_long(v, a)
-
-#define mb_atomic_inc(v)	atomic_inc(v)
-#define mb_atomic_dec(v)	atomic_dec(v)
-#define mb_atomic_or(v, m)	atomic_or(v, m)
-#define mb_atomic_and(v, m)	atomic_and(v, m)
-#define mb_atomic_inc_and_test(v)	atomic_inc_and_test(v)
-#define mb_atomic_dec_and_test(v)	atomic_dec_and_test(v)
-#define mb_atomic_get(v)	mb_atomic_get_int( &(v)->val)
-#define mb_atomic_set(v, i)	mb_atomic_set_int(&(v)->val, i)
-#define mb_atomic_get_and_set(v, i)	atomic_get_and_set_int(&(v)->val, i)
-#define mb_atomic_cmpxchg(v, o, n)	atomic_cmpxchg_int(&(v)->val, o, n)
-#define mb_atomic_add(v, a)	atomic_add_int(&(v)->val, a)
-
-
-#endif
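
For orientation, the macros in the deleted header add up to a small fetch-and-modify API over shared counters. A minimal usage sketch of the refcount pattern, assuming the atomic_t wrapper type and the atomic_set() initializer that live in the companion atomic_ops.h/atomic_common.h headers (not part of this diff); the pkt_* names are made up for the example:

#include <stdlib.h>
#include "atomic_ops.h"   /* pulls in this arch header on x86/x86_64 builds */

struct pkt {
	atomic_t refcnt;      /* atomic_t wraps a volatile int .val (atomic_ops.h) */
	char buf[512];
};

static struct pkt *pkt_new(void)
{
	struct pkt *p = malloc(sizeof(*p));
	if (p)
		atomic_set(&p->refcnt, 1);   /* plain store, no barrier needed here */
	return p;
}

static void pkt_ref(struct pkt *p)
{
	atomic_inc(&p->refcnt);          /* "lock; incl" on SMP builds */
}

static void pkt_unref(struct pkt *p)
{
	/* atomic_dec_and_test() returns non-zero only for the caller that drops
	 * the counter to 0; on x86 the locked dec also acts as a full barrier */
	if (atomic_dec_and_test(&p->refcnt))
		free(p);
}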

atomic Remove history from source code

Olle E. Johansson authored on 03/04/2016 18:09:15
@@ -31,15 +31,6 @@
  * - __CPU_i386  (486+, 32 bit)
  * @ingroup atomic
  */
-/* 
- * History:
- * --------
- *  2006-03-08  created by andrei
- *  2007-05-07  added cmpxchg (andrei)
- *  2007-05-08  added atomic_add (andrei)
- *  2007-05-29  added membar_depends(), membar_*_atomic_op and
- *                membar_*_atomic_setget (andrei)
- */
 
 #ifndef _atomic_x86_h
 #define _atomic_x86_h

doxygen: convert existing docs to use proper doxygen structures and groups, small cleanups

Henning Westerholt authored on 23/06/2011 21:39:01
@@ -1,6 +1,4 @@
 /* 
- * $Id$
- * 
  * Copyright (C) 2006 iptelorg GmbH
  *
  * Permission to use, copy, modify, and distribute this software for any
@@ -16,19 +14,22 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-/** @file @brief
- *  atomic operations and memory barriers (x86 and x86_64/amd64 specific)
- *  WARNING: atomic ops do not include memory barriers
- *  see atomic_ops.h for more details 
+/**
+ * @file
+ * @brief Atomic operations and memory barriers (x86 and x86_64/amd64 specific)
+ * 
+ * Atomic operations and memory barriers (x86 and x86_64/amd64 specific)
+ * \warning atomic ops do not include memory barriers, see atomic_ops.h for more
+ * details.
  *
- *  Config defines:   - NOSMP
- *                    - X86_OOSTORE (out of order store, defined by default)
- *                    - X86_64_OOSTORE, like X86_OOSTORE, but for x86_64 cpus,
- *                      default off
- *                    - __CPU_x86_64 (64 bit mode, long and void* is 64 bit and
- *                                    the cpu has all of the mfence, lfence
- *                                    and sfence instructions)
- *                    - __CPU_i386  (486+, 32 bit)
+ * Config defines:
+ * - NOSMP
+ * - X86_OOSTORE (out of order store, defined by default)
+ * - X86_64_OOSTORE, like X86_OOSTORE, but for x86_64 CPUs, default off
+ * - __CPU_x86_64 (64 bit mode, long and void* is 64 bit and the CPU has all
+ *   of the mfence, lfence and sfence instructions)
+ * - __CPU_i386  (486+, 32 bit)
+ * @ingroup atomic
  */
 /* 
  * History:

Doxygen updates

oej authored on 25/10/2009 19:11:28
@@ -15,7 +15,8 @@
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
-/*
+
+/** @file @brief
  *  atomic operations and memory barriers (x86 and x86_64/amd64 specific)
  *  WARNING: atomic ops do not include memory barriers
  *  see atomic_ops.h for more details 

- added membar_depends(), needed on SMP archs with separate cache banks where it's possible to get a new pointer value but the old pointer content (e.g. if the two are in different cache banks and the "content" bank is very busy processing a long invalidation queue). For now only Alpha SMP needs it; on all other archs it is a no-op (for more info see atomic_ops.h, http://lse.sourceforge.net/locking/wmbdd.html, http://www.linuxjournal.com/article/8212 or the Alpha Architecture Reference Manual, Chapter 5.6).

- added membar_atomic_op(), membar_atomic_setget(), membar_read_atomic_op(),
membar_read_atomic_setget(), membar_write_atomic_op(),
membar_write_atomic_setget() -- special case memory barriers that can be
optimized if the atomic ops already force some kind of barrier (e.g. x86),
see the description in atomic_ops.h for more info.
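
The intent of the *_atomic_op()/*_atomic_setget() variants is easiest to see side by side. A small sketch of the pattern described above (illustrative only; it uses atomic_inc_int() and the atomic_set_int() helper referenced elsewhere in this header but defined in atomic_ops.h/atomic_common.h):

#include "atomic_ops.h"     /* provides the macros used below */

extern volatile int hits;   /* shared counters, illustrative names */
extern volatile int ready;

void publish(void)
{
	/* after a read-modify-write atomic op: on x86 the locked instruction
	 * already orders memory, so membar_atomic_op() compiles to nothing */
	atomic_inc_int(&hits);
	membar_atomic_op();

	/* after a plain atomic set (just a store) the cheap variant is not
	 * enough, so membar_atomic_setget() falls back to a full membar() */
	atomic_set_int(&ready, 1);
	membar_atomic_setget();
}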

Andrei Pelinescu-Onciul authored on 29/05/2007 11:31:29
@@ -35,6 +35,8 @@
  *  2006-03-08  created by andrei
  *  2007-05-07  added cmpxchg (andrei)
  *  2007-05-08  added atomic_add (andrei)
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 #ifndef _atomic_x86_h
@@ -57,10 +59,22 @@
 #define membar()	asm volatile ("" : : : "memory")
 #define membar_read()	membar()
 #define membar_write()	membar()
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 /* lock barrriers: empty, not needed for NOSMP; the lock/unlock should already
  * contain gcc barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				do {} while(0)
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		do {} while(0)
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			do {} while(0)
+#define membar_read_atomic_setget()		membar_read()
 
 #else
 
@@ -99,15 +113,23 @@
 
 #endif /* __CPU_x86_64 */
 
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 /* lock barrriers: empty, not needed on x86 or x86_64 (atomic ops already
  *  force the barriers if needed); the lock/unlock should already contain the 
  *  gcc do_not_cache barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
-
-
-
-
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				do {} while(0)
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		do {} while(0)
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			do {} while(0)
+#define membar_read_atomic_setget()		membar_read()
 
 
 #endif /* NOSMP */

- parts of atomic_ops.h moved into atomic/atomic_common.h and atomic/atomic_native.h

- added membar_enter_lock() and membar_leave_lock() (to be used only if
creating locks using the atomic ops functions, for more info see atomic_ops.h)
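
A rough sketch of where the two new barriers are meant to sit when a lock is hand-built from the atomic ops in this family (an illustration of the comment above, not the project's actual lock code; the my_try_lock/my_unlock names and fastlock_t type are made up here):

#include "atomic_ops.h"    /* atomic_get_and_set_int() + the barrier macros */

typedef volatile int fastlock_t;   /* 0 = free, 1 = held (illustrative) */

static int my_try_lock(fastlock_t *l)
{
	if (atomic_get_and_set_int(l, 1) != 0)
		return -1;                 /* somebody else holds it */
	membar_enter_lock();           /* empty on x86: xchg already orders memory */
	return 0;
}

static void my_unlock(fastlock_t *l)
{
	membar_leave_lock();           /* empty on x86 for the same reason */
	atomic_get_and_set_int(l, 0);  /* release with xchg, safe even on OOSTORE CPUs */
}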

Andrei Pelinescu-Onciul authored on 14/05/2007 17:29:31
@@ -57,6 +57,10 @@
 #define membar()	asm volatile ("" : : : "memory")
 #define membar_read()	membar()
 #define membar_write()	membar()
+/* lock barrriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
 
 #else
 
@@ -95,6 +99,13 @@
 
 #endif /* __CPU_x86_64 */
 
+/* lock barrriers: empty, not needed on x86 or x86_64 (atomic ops already
+ *  force the barriers if needed); the lock/unlock should already contain the 
+ *  gcc do_not_cache barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
+
+
 
 
 

- atomic_add & atomic_cmpxchg added to ppc
- atomic_unknown (used when the processor does not support atomic ops or is not among the supported ones) now tries to use a "hash" of locks if GEN_LOCK_SET_T_UNLIMITED is defined => less contention on multi-CPUs
- atomic_ops.h defines *_UNLIMITED macros when the number of locks or the set size are limited only by the available memory (everything except SYSV sems)
- license changes: all the atomic* stuff and the locks are now under a BSD (OpenBSD) style license
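
The "hash of locks" idea for the fallback (atomic_unknown) case is simply: pick one lock out of a small set based on the variable's address, so unrelated counters rarely contend. A minimal sketch under that assumption (the names and sizes below are illustrative, not the real fallback implementation; gen_lock_t/lock_get()/lock_release() are assumed to be the usual locking.h API):

#include "locking.h"          /* gen_lock_t, lock_get(), lock_release() */

#define ATOMIC_HASH_SIZE 16            /* power of two, illustrative */
extern gen_lock_t *_atomic_lock_set;   /* assumed allocated & initialized elsewhere */

/* map the variable's address to one lock; >>4 drops alignment bits */
#define atomic_hash(v) \
	((((unsigned long)(void *)(v)) >> 4) & (ATOMIC_HASH_SIZE - 1))

static int fallback_atomic_add_int(volatile int *v, int i)
{
	int ret;
	gen_lock_t *l = &_atomic_lock_set[atomic_hash(v)];

	lock_get(l);          /* only variables hashing to the same slot contend */
	*v += i;
	ret = *v;
	lock_release(l);
	return ret;
}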

Andrei Pelinescu-Onciul authored on 11/05/2007 20:44:15
@@ -3,26 +3,17 @@
  * 
  * Copyright (C) 2006 iptelorg GmbH
  *
- * This file is part of ser, a free SIP server.
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
  *
- * ser is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version
- *
- * For a license to use the ser software under conditions
- * other than those described here, or to purchase support for this
- * software, please contact iptel.org by e-mail at the following addresses:
- *    info@iptel.org
- *
- * ser is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 /*
  *  atomic operations and memory barriers (x86 and x86_64/amd64 specific)
@@ -335,6 +326,8 @@ inline static long mb_atomic_get_long(volatile long* var)
 #define mb_atomic_inc_and_test_int(v)	atomic_inc_and_test_int(v)
 #define mb_atomic_dec_and_test_int(v)	atomic_dec_and_test_int(v)
 #define mb_atomic_get_and_set_int(v, i)	atomic_get_and_set_int(v, i)
+#define mb_atomic_cmpxchg_int(v, o, n)	atomic_cmpxchg_int(v, o, n)
+#define mb_atomic_add_int(v, a)	atomic_add_int(v, a)
 
 #define mb_atomic_inc_long(v)	atomic_inc_long(v)
 #define mb_atomic_dec_long(v)	atomic_dec_long(v)
@@ -343,6 +336,8 @@ inline static long mb_atomic_get_long(volatile long* var)
 #define mb_atomic_inc_and_test_long(v)	atomic_inc_and_test_long(v)
 #define mb_atomic_dec_and_test_long(v)	atomic_dec_and_test_long(v)
 #define mb_atomic_get_and_set_long(v, i)	atomic_get_and_set_long(v, i)
+#define mb_atomic_cmpxchg_long(v, o, n)	atomic_cmpxchg_long(v, o, n)
+#define mb_atomic_add_long(v, a)	atomic_add_long(v, a)
 
 #define mb_atomic_inc(v)	atomic_inc(v)
 #define mb_atomic_dec(v)	atomic_dec(v)
@@ -353,6 +348,8 @@ inline static long mb_atomic_get_long(volatile long* var)
 #define mb_atomic_get(v)	mb_atomic_get_int( &(v)->val)
 #define mb_atomic_set(v, i)	mb_atomic_set_int(&(v)->val, i)
 #define mb_atomic_get_and_set(v, i)	atomic_get_and_set_int(&(v)->val, i)
+#define mb_atomic_cmpxchg(v, o, n)	atomic_cmpxchg_int(&(v)->val, o, n)
+#define mb_atomic_add(v, a)	atomic_add_int(&(v)->val, a)
 
 
 #endif

- x86: atomic_add
- sparc64: atomic_cmpxchg, atomic_add

Andrei Pelinescu-Onciul authored on 08/05/2007 19:41:59
@@ -43,6 +43,7 @@
  * --------
  *  2006-03-08  created by andrei
  *  2007-05-07  added cmpxchg (andrei)
+ *  2007-05-08  added atomic_add (andrei)
  */
 
 #ifndef _atomic_x86_h
@@ -184,6 +185,18 @@
 		return ret; \
 	}
 
+/* similar w/ XCHG but with LOCK prefix, relaxed constraints & diff. return */
+#define ATOMIC_FUNC_XADD(NAME, OP, TYPE) \
+	inline static TYPE atomic_##NAME##_##TYPE(volatile TYPE* var, TYPE v) \
+{ \
+	TYPE ret; \
+	asm volatile( \
+			__LOCK_PREF " " OP " \n\t" \
+			: "=r"(ret), "=m"(*var) :"m"(*var), "0"(v) : "cc", "memory" \
+			); \
+	return ret+v; \
+}
+
 ATOMIC_FUNC_DECL1(inc, "incl %0", int)
 ATOMIC_FUNC_DECL1(dec, "decl %0", int)
 ATOMIC_FUNC_DECL2(and, "andl %1, %0", int)
@@ -192,6 +205,7 @@ ATOMIC_FUNC_TEST(inc_and_test, "incl %0", int, int)
 ATOMIC_FUNC_TEST(dec_and_test, "decl %0", int, int)
 ATOMIC_FUNC_XCHG(get_and_set,  "xchgl %1, %0", int)
 ATOMIC_FUNC_CMPXCHG(cmpxchg, "cmpxchgl %2, %1", int , int)
+ATOMIC_FUNC_XADD(add, "xaddl %0, %1", int) 
 #ifdef __CPU_x86_64
 ATOMIC_FUNC_DECL1(inc, "incq %0", long)
 ATOMIC_FUNC_DECL1(dec, "decq %0", long)
@@ -201,6 +215,7 @@ ATOMIC_FUNC_TEST(inc_and_test, "incq %0", long, int)
 ATOMIC_FUNC_TEST(dec_and_test, "decq %0", long, int)
 ATOMIC_FUNC_XCHG(get_and_set,  "xchgq %1, %0", long)
 ATOMIC_FUNC_CMPXCHG(cmpxchg, "cmpxchgq %2, %1", long , long)
+ATOMIC_FUNC_XADD(add, "xaddq %0, %1",long) 
 #else
 ATOMIC_FUNC_DECL1(inc, "incl %0", long)
 ATOMIC_FUNC_DECL1(dec, "decl %0", long)
@@ -210,6 +225,7 @@ ATOMIC_FUNC_TEST(inc_and_test, "incl %0", long, int)
 ATOMIC_FUNC_TEST(dec_and_test, "decl %0", long, int)
 ATOMIC_FUNC_XCHG(get_and_set,  "xchgl %1, %0", long)
 ATOMIC_FUNC_CMPXCHG(cmpxchg, "cmpxchgl %2, %1", long , long)
+ATOMIC_FUNC_XADD(add, "xaddl %0, %1",long) 
 #endif
 
 #define atomic_inc(var) atomic_inc_int(&(var)->val)
@@ -221,6 +237,7 @@ ATOMIC_FUNC_CMPXCHG(cmpxchg, "cmpxchgl %2, %1", long , long)
 #define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
 #define atomic_cmpxchg(var, old, newv) \
 		atomic_cmpxchg_int(&(var)->val, old, newv)
+#define atomic_add(var, v) atomic_add_int(&(var)->val, v)
 
 
 #ifdef NOSMP
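
Because the XADD wrapper returns ret+v, atomic_add_int() behaves as add-and-fetch: it yields the value after the addition, not the old one. A tiny usage sketch (the seq_alloc_block() name and counter are illustrative):

/* hand out blocks of increasing sequence numbers from a shared counter */
static volatile int next_seq;

int seq_alloc_block(int count)
{
	/* atomic_add_int() returns the value *after* the addition, so the
	 * caller owns the range [result - count, result - 1] */
	int end = atomic_add_int(&next_seq, count);
	return end - count;        /* first id of the reserved block */
}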

- added cmpxchg

Andrei Pelinescu-Onciul authored on 08/05/2007 08:25:37
@@ -42,6 +42,7 @@
  * History:
  * --------
  *  2006-03-08  created by andrei
+ *  2007-05-07  added cmpxchg (andrei)
  */
 
 #ifndef _atomic_x86_h
@@ -165,6 +166,24 @@
 		return ret; \
 	}
 
+/* returns a value, 3 params (var, old, new)
+ * The returned value is the value before the xchg:
+ *  if ret!=old => cmpxchg failed and ret is var's value
+ *  else  => success and new_v is var's new value */
+#define ATOMIC_FUNC_CMPXCHG(NAME, OP, P_TYPE, RET_TYPE) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE(volatile P_TYPE* var, \
+													P_TYPE old, P_TYPE new_v)\
+	{ \
+		P_TYPE ret; \
+		asm volatile( \
+				__LOCK_PREF " " OP "\n\t" \
+				: "=a"(ret), "=m" (*var) :\
+					"r"(new_v), "m"(*var), "0"(old):\
+					"cc", "memory" \
+				); \
+		return ret; \
+	}
+
 ATOMIC_FUNC_DECL1(inc, "incl %0", int)
 ATOMIC_FUNC_DECL1(dec, "decl %0", int)
 ATOMIC_FUNC_DECL2(and, "andl %1, %0", int)
@@ -172,6 +191,7 @@ ATOMIC_FUNC_DECL2(or,  "orl %1, %0", int)
 ATOMIC_FUNC_TEST(inc_and_test, "incl %0", int, int)
 ATOMIC_FUNC_TEST(dec_and_test, "decl %0", int, int)
 ATOMIC_FUNC_XCHG(get_and_set,  "xchgl %1, %0", int)
+ATOMIC_FUNC_CMPXCHG(cmpxchg, "cmpxchgl %2, %1", int , int)
 #ifdef __CPU_x86_64
 ATOMIC_FUNC_DECL1(inc, "incq %0", long)
 ATOMIC_FUNC_DECL1(dec, "decq %0", long)
@@ -180,6 +200,7 @@ ATOMIC_FUNC_DECL2(or,  "orq %1, %0", long)
 ATOMIC_FUNC_TEST(inc_and_test, "incq %0", long, int)
 ATOMIC_FUNC_TEST(dec_and_test, "decq %0", long, int)
 ATOMIC_FUNC_XCHG(get_and_set,  "xchgq %1, %0", long)
+ATOMIC_FUNC_CMPXCHG(cmpxchg, "cmpxchgq %2, %1", long , long)
 #else
 ATOMIC_FUNC_DECL1(inc, "incl %0", long)
 ATOMIC_FUNC_DECL1(dec, "decl %0", long)
@@ -188,6 +209,7 @@ ATOMIC_FUNC_DECL2(or,  "orl %1, %0", long)
 ATOMIC_FUNC_TEST(inc_and_test, "incl %0", long, int)
 ATOMIC_FUNC_TEST(dec_and_test, "decl %0", long, int)
 ATOMIC_FUNC_XCHG(get_and_set,  "xchgl %1, %0", long)
+ATOMIC_FUNC_CMPXCHG(cmpxchg, "cmpxchgl %2, %1", long , long)
 #endif
 
 #define atomic_inc(var) atomic_inc_int(&(var)->val)
@@ -197,6 +219,8 @@ ATOMIC_FUNC_XCHG(get_and_set,  "xchgl %1, %0", long)
 #define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
 #define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
 #define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
+#define atomic_cmpxchg(var, old, newv) \
+		atomic_cmpxchg_int(&(var)->val, old, newv)
 
 
 #ifdef NOSMP
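
The return convention documented in the new macro (the old value on failure, old itself on success) is what makes the usual compare-and-swap retry loop work. A minimal sketch using the int variant (the bounded_inc() name is illustrative):

/* atomically increment a shared counter, but never past 'max' */
static int bounded_inc(volatile int *counter, int max)
{
	int old, seen;

	do {
		old = *counter;
		if (old >= max)
			return -1;              /* already at the limit, give up */
		seen = atomic_cmpxchg_int(counter, old, old + 1);
		/* seen == old  => the swap happened; anything else means another
		 * CPU won the race, so reload and retry */
	} while (seen != old);
	return old + 1;
}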

- compilation fix for gcc < 2.9 (workaround for an unsupported constraint)

Andrei Pelinescu-Onciul authored on 28/02/2007 19:20:24
@@ -129,6 +129,18 @@
 				); \
 	}
 
+#if defined __GNUC__ &&  __GNUC__ < 3 && __GNUC_MINOR__ < 9
+/* gcc version < 2.9 */
+#define ATOMIC_FUNC_XCHG(NAME, OP, TYPE) \
+	inline static TYPE atomic_##NAME##_##TYPE(volatile TYPE* var, TYPE v) \
+{ \
+	asm volatile( \
+			OP " \n\t" \
+			: "=q"(v), "=m"(*var) :"m"(*var), "0"(v) : "memory" \
+			); \
+	return v; \
+}
+#else
 #define ATOMIC_FUNC_XCHG(NAME, OP, TYPE) \
 	inline static TYPE atomic_##NAME##_##TYPE(volatile TYPE* var, TYPE v) \
 { \
@@ -138,6 +150,7 @@
 			); \
 	return v; \
 }
+#endif /* gcc & gcc version < 2.9 */
 
 /* returns a value, 1 param */
 #define ATOMIC_FUNC_TEST(NAME, OP, P_TYPE, RET_TYPE) \
@@ -218,7 +231,11 @@ inline static void mb_atomic_set_int(volatile int* v, int i)
 {
 	asm volatile(
 			"xchgl %1, %0 \n\t"
+#if defined __GNUC__ &&  __GNUC__ < 3 && __GNUC_MINOR__ < 9
+			: "=q"(i), "=m"(*v) : "m"(*v), "0"(i) : "memory" 
+#else
 			: "+q"(i), "=m"(*v) : "m"(*v) : "memory" 
+#endif
 			);
 }
 
@@ -231,7 +248,11 @@ inline static void mb_atomic_set_long(volatile long* v, long l)
 #else
 			"xchgl %1, %0 \n\t"
 #endif
+#if defined __GNUC__ &&  __GNUC__ < 3 && __GNUC_MINOR__ < 9
+			: "=q"(l), "=m"(*v) : "m"(*v), "0"(l) : "memory" 
+#else
 			: "+q"(l), "=m"(*v) : "m"(*v) : "memory" 
+#endif
 			);
 }
 

- membar_write on x86_64 is by default empty (since on amd64 stores are always ordered)

- x86/x86_64 lock optimizations: spinning on a lock should now be friendlier to
the other CPUs' caches (at the extra cost of a cmp mem + jump); tried to
arrange the instructions a little better to allow for some parallel
execution.

- x86 unlocks with xchg by default (since some x86s reorder stores, so a
simple mov is unsafe)
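
The "cmp mem + jump" spin mentioned above is the classic test-and-test-and-set shape: read the lock word until it looks free, and only then pay for the locked xchg. A C-level sketch of that idea (the real optimization lives in the asm lock macros, which are not part of this file; my_spin_lock() is an illustrative name):

static void my_spin_lock(volatile int *l)
{
	for (;;) {
		/* read-only spin: the cache line stays shared instead of being
		 * bounced between CPUs by locked operations on every iteration */
		while (*l != 0)
			; /* busy wait */
		/* looked free: now try to grab it with one locked xchg */
		if (atomic_get_and_set_int(l, 1) == 0)
			return;            /* got the lock */
	}
}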

Andrei Pelinescu-Onciul authored on 03/04/2006 19:03:16
@@ -31,6 +31,8 @@
  *
  *  Config defines:   - NOSMP
  *                    - X86_OOSTORE (out of order store, defined by default)
+ *                    - X86_64_OOSTORE, like X86_OOSTORE, but for x86_64 cpus,
+ *                      default off
  *                    - __CPU_x86_64 (64 bit mode, long and void* is 64 bit and
  *                                    the cpu has all of the mfence, lfence
  *                                    and sfence instructions)
@@ -79,7 +81,7 @@
 */
 #define membar() 		asm volatile( " mfence \n\t " : : : "memory" )
 #define membar_read()	asm volatile( " lfence \n\t " : : : "memory" )
-#ifdef X86_OOSTORE
+#ifdef X86_64_OOSTORE
 #define membar_write()	asm volatile( " sfence \n\t " : : : "memory" )
 #else
 #define membar_write()	asm volatile ("" : : : "memory") /* gcc don't cache*/

- makefile:
  - compile in 64bit mode by default on sparc64
  - sparc <= v8 support
  - CC_GCC_LIKE_ASM is defined when the compiler supports gcc style inline asm (gcc and icc)

- atomic operations and memory barriers support for:
- x86
- x86_64
- mips (only in NOSMP mode and if it supports ll and sc)
- mips2 (mips32, isa >= 2)
- mips64
- powerpc
- powerpc64
- sparc <= v8 (only memory barriers, the atomic operations are implemented
using locks because there is no hardware support for them)
- sparc64 - both 32 (v8plus) and 64 bit mode
If there is no support for the compiler/arch. combination, it falls back to
locks.

The code is tested (only basic tests: it runs and the results are ok, but no
parallel tests) on x86, x86_64, mips2, powerpc, sparc64 (both modes).
The sparc version runs ok on sparc64 (so it's most likely ok).
powerpc64 and mips64 not tested due to no access to the corresponding
hardware, but they do compile ok.
For more details see the comments at the beginning of atomic_ops.h.

Andrei Pelinescu-Onciul authored on 30/03/2006 19:56:06
new file mode 100644
@@ -0,0 +1,294 @@
+/* 
+ * $Id$
+ * 
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * This file is part of ser, a free SIP server.
+ *
+ * ser is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version
+ *
+ * For a license to use the ser software under conditions
+ * other than those described here, or to purchase support for this
+ * software, please contact iptel.org by e-mail at the following addresses:
+ *    info@iptel.org
+ *
+ * ser is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ *  atomic operations and memory barriers (x86 and x86_64/amd64 specific)
+ *  WARNING: atomic ops do not include memory barriers
+ *  see atomic_ops.h for more details 
+ *
+ *  Config defines:   - NOSMP
+ *                    - X86_OOSTORE (out of order store, defined by default)
+ *                    - __CPU_x86_64 (64 bit mode, long and void* is 64 bit and
+ *                                    the cpu has all of the mfence, lfence
+ *                                    and sfence instructions)
+ *                    - __CPU_i386  (486+, 32 bit)
+ */
+/* 
+ * History:
+ * --------
+ *  2006-03-08  created by andrei
+ */
+
+#ifndef _atomic_x86_h
+#define _atomic_x86_h
+
+#define HAVE_ASM_INLINE_ATOMIC_OPS
+#define HAVE_ASM_INLINE_MEMBAR
+
+#ifdef NOSMP
+#define __LOCK_PREF 
+#else
+#define __LOCK_PREF "lock ;"
+#endif
+
+
+/* memory barriers */
+
+#ifdef NOSMP
+
+#define membar()	asm volatile ("" : : : "memory")
+#define membar_read()	membar()
+#define membar_write()	membar()
+
+#else
+
+/* although most x86 do stores in order, we're playing it safe and use
+ *  oostore ready write barriers */
+#define X86_OOSTORE 
+
+#ifdef __CPU_x86_64
+/*
+#define membar() \
+	asm volatile( \
+					" lock; addq $0, 0(%%rsp) \n\t " \
+					: : : "memory" \
+				) 
+*/
+#define membar() 		asm volatile( " mfence \n\t " : : : "memory" )
+#define membar_read()	asm volatile( " lfence \n\t " : : : "memory" )
+#ifdef X86_OOSTORE
+#define membar_write()	asm volatile( " sfence \n\t " : : : "memory" )
+#else
+#define membar_write()	asm volatile ("" : : : "memory") /* gcc don't cache*/
+#endif /* X86_OOSTORE */
+
+#else /* ! __CPU_x86_64  => __CPU_i386*/
+/* membar: lfence, mfence, sfence available only on newer cpus, so for now
+ * stick to lock addl */
+#define membar() asm volatile(" lock; addl $0, 0(%%esp) \n\t " : : : "memory" )
+#define membar_read()	membar()
+#ifdef X86_OOSTORE
+/* out of order store version */
+#define membar_write()	membar()
+#else