
- parts of atomic_ops.h moved into atomic/atomic_common.h and atomic/atomic_native.h

- added membar_enter_lock() and membar_leave_lock() (to be used only when
creating locks out of the atomic ops functions; for more info see atomic_ops.h
and the usage sketch below)

Andrei Pelinescu-Onciul authored on 14/05/2007 17:29:31
Showing 10 changed files
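Before the per-file diffs, a minimal hypothetical sketch (not code from this commit) of where the two new barriers belong when a lock is built directly on atomic ops, following the usage documented in atomic_ops.h below. GCC's __sync_lock_test_and_set() builtin stands in for the tree's own asm test-and-set primitive purely to keep the sketch self-contained; unlike the builtin, the tree's primitive is not assumed to imply any barrier, which is exactly why the membar_*_lock() calls are needed.

/* Hypothetical example only -- not part of this commit.  It shows the
 * placement of membar_enter_lock()/membar_leave_lock() around a lock
 * implemented with atomic ops, per the atomic_ops.h documentation.
 * __sync_lock_test_and_set() is a GCC builtin used here as a stand-in
 * for the tree's own asm test-and-set. */

#include "atomic_ops.h"           /* membar_enter_lock(), membar_leave_lock() */

typedef volatile int my_lock_t;   /* hypothetical lock: 0 = free, 1 = held */

static void my_lock(my_lock_t *l)
{
	while (__sync_lock_test_and_set(l, 1))
		;                         /* spin until the previous value was 0 */
	membar_enter_lock();          /* after locking, before the critical section */
}

static void my_unlock(my_lock_t *l)
{
	membar_leave_lock();          /* after the critical section, before release */
	*l = 0;                       /* the store that releases the lock */
}

On architectures where the atomic ops already imply the required ordering (x86/x86_64 in this commit) both macros expand to nothing, so the pattern costs nothing there; on Alpha, PPC and SPARC64 they expand to real barrier instructions, as the hunks below show.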
... ...
@@ -44,11 +44,18 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock()
+#define membar_leave_lock()
+
 #else
 
 #define membar()		asm volatile ("    mb \n\t" : : : "memory" )
 #define membar_read()	membar()
 #define membar_write()	asm volatile ("    wmb \n\t" : : : "memory")
+#define membar_enter_lock() asm volatile("mb \n\t" : : : "memory")
+#define membar_leave_lock() asm volatile("mb \n\t" : : : "memory")
 
 #endif /* NOSMP */
 
... ...
@@ -44,6 +44,10 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock()
+#define membar_leave_lock()
 #else /* SMP */
 #warning SMP not supported for arm atomic ops, try compiling with -DNOSMP
 /* fall back to default lock based barriers (don't define HAVE_ASM...) */
new file mode 100644
... ...
@@ -0,0 +1,54 @@
+/*
+ * $Id$
+ *
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * common part for all the atomic operations (atomic_t and common operations)
+ *  See atomic_ops.h for more info.
+ */
+/*
+ * History:
+ * --------
+ *  2006-03-08  created by andrei
+ *  2007-05-13  split from atomic_ops.h (andrei)
+ */
+#ifndef __atomic_common
+#define __atomic_common
+
+/* atomic_t defined as a struct to easily catch non atomic ops. on it,
+ * e.g.  atomic_t  foo; foo++  will generate a compile error */
+typedef struct{ volatile int val; } atomic_t;
+
+
+/* store and load operations are atomic on all cpus, note however that they
+ * don't include memory barriers so if you want to use atomic_{get,set}
+ * to implement mutexes you must use the mb_* versions or explicitly use
+ * the barriers */
+
+#define atomic_set_int(pvar, i) (*(pvar)=i)
+#define atomic_set_long(pvar, i) (*(pvar)=i)
+#define atomic_get_int(pvar) (*(pvar))
+#define atomic_get_long(pvar) (*(pvar))
+
+#define atomic_set(at_var, value)	(atomic_set_int(&((at_var)->val), (value)))
+
+inline static int atomic_get(atomic_t *v)
+{
+	return atomic_get_int(&(v->val));
+}
+
+
+#endif
... ...
@@ -52,6 +52,11 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock()
+#define membar_leave_lock()
+
 #else
 
 #define membar() \
... ...
@@ -66,6 +71,8 @@
 
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_enter_lock() membar()
+#define membar_leave_lock() membar()
 
 #endif /* NOSMP */
 
new file mode 100644
... ...
@@ -0,0 +1,83 @@
+/*
+ * $Id$
+ *
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ *  include file for native (asm) atomic operations and memory barriers
+ *  WARNING: atomic ops do not include memory barriers
+ *  See atomic_ops.h for more info.
+ *  Expects atomic_t to be defined (#include "atomic_common.h")
+ *
+ * Config defines:   CC_GCC_LIKE_ASM  - the compiler supports gcc-style
+ *                     inline asm
+ *                   NOSMP - the code will be a little faster, but not SMP
+ *                            safe
+ *                   __CPU_i386, __CPU_x86_64, X86_OOSTORE - see
+ *                       atomic_x86.h
+ *                   __CPU_mips, __CPU_mips2, __CPU_mips64, MIPS_HAS_LLSC - see
+ *                       atomic_mips2.h
+ *                   __CPU_ppc, __CPU_ppc64 - see atomic_ppc.h
+ *                   __CPU_sparc - see atomic_sparc.h
+ *                   __CPU_sparc64, SPARC64_MODE - see atomic_sparc64.h
+ *                   __CPU_arm, __CPU_arm6 - see atomic_arm.h
+ *                   __CPU_alpha - see atomic_alpha.h
+ */
+/*
+ * History:
+ * --------
+ *  2006-03-08  created by andrei
+ *  2007-05-13  split from atomic_ops.h (andrei)
+ */
+#ifndef __atomic_native
+#define __atomic_native
+
+#ifdef CC_GCC_LIKE_ASM
+
+#if defined __CPU_i386 || defined __CPU_x86_64
+
+#include "atomic_x86.h"
+
+#elif defined __CPU_mips2 || defined __CPU_mips64 || \
+	  ( defined __CPU_mips && defined MIPS_HAS_LLSC )
+
+#include "atomic_mips2.h"
+
+#elif defined __CPU_ppc || defined __CPU_ppc64
+
+#include "atomic_ppc.h"
+
+#elif defined __CPU_sparc64
+
+#include "atomic_sparc64.h"
+
+#elif defined __CPU_sparc
+
+#include "atomic_sparc.h"
+
+#elif defined __CPU_arm || defined __CPU_arm6
+
+#include "atomic_arm.h"
+
+#elif defined __CPU_alpha
+
+#include "atomic_alpha.h"
+
+#endif /* __CPU_xxx  => no known cpu */
+
+#endif /* CC_GCC_LIKE_ASM */
+
+
+#endif
... ...
@@ -48,12 +48,21 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock()
+#define membar_leave_lock()
+
 #else
 #define membar() asm volatile ("sync \n\t" : : : "memory")
 /* lwsync orders LoadLoad, LoadStore and StoreStore */
 #define membar_read() asm volatile ("lwsync \n\t" : : : "memory")
 /* on "normal" cached mem. eieio orders StoreStore */
 #define membar_write() asm volatile ("eieio \n\t" : : : "memory")
+#define membar_enter_lock() asm volatile("lwsync \n\t" : : : "memory")
+/* for unlock lwsync will work too and is faster than sync
+ *  [IBM Programming Environments Manual, D.4.2.2] */
+#define membar_leave_lock() asm volatile("lwsync \n\t" : : : "memory")
 #endif /* NOSMP */
 
 
... ...
@@ -40,10 +40,17 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock()
+#define membar_leave_lock()
 #else /* SMP */
 #define membar_write() asm volatile ("stbar \n\t" : : : "memory")
 #define membar() membar_write()
 #define membar_read() asm volatile ("" : : : "memory")
+#define membar_enter_lock()
+#define membar_leave_lock() asm volatile ("stbar \n\t" : : : "memory")
+
 #endif /* NOSMP */
 
 
... ...
@@ -52,6 +52,20 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/*  memory barriers for lock & unlock where lock & unlock are inline asm
+ *  functions that use atomic ops (and both of them use at least a store to
+ *  the lock). membar_enter_lock() is at most a StoreStore|StoreLoad barrier
+ *  and membar_leave_lock() is at most a LoadStore|StoreStore barrier
+ *  (if the atomic ops on the specific architecture imply these barriers
+ *   => these macros will be empty)
+ *   Warning: these barriers don't force LoadLoad ordering between code
+ *    before the lock/membar_enter_lock() and code
+ *    after membar_leave_lock()/unlock()
+ *
+ *  Usage: lock(); membar_enter_lock(); .... ; membar_leave_lock(); unlock()
+ */
+#define membar_enter_lock()
+#define membar_leave_lock()
 #else /* SMP */
 #define membar() \
 	asm volatile ( \
@@ -60,6 +74,10 @@
60 60
 
61 61
 #define membar_read() asm volatile ("membar #LoadLoad \n\t" : : : "memory")
62 62
 #define membar_write() asm volatile ("membar #StoreStore \n\t" : : : "memory")
63
+#define membar_enter_lock() \
64
+	asm volatile ("membar #StoreStore | #StoreLoad \n\t" : : : "memory");
65
+#define membar_leave_lock() \
66
+	asm volatile ("membar #LoadStore | #StoreStore \n\t" : : : "memory");
63 67
 #endif /* NOSMP */
64 68
 
65 69
 
... ...
@@ -57,6 +57,10 @@
 #define membar()	asm volatile ("" : : : "memory")
 #define membar_read()	membar()
 #define membar_write()	membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock()
+#define membar_leave_lock()
 
 #else
 
... ...
@@ -95,6 +99,13 @@
 
 #endif /* __CPU_x86_64 */
 
+/* lock barriers: empty, not needed on x86 or x86_64 (atomic ops already
+ *  force the barriers if needed); the lock/unlock should already contain the
+ *  gcc do_not_cache barriers*/
+#define membar_enter_lock()
+#define membar_leave_lock()
+
+
 
 
 
... ...
@@ -26,9 +26,31 @@
  *  void membar_read()   - load (read) memory barrier
  *  void membar_write()  - store (write) memory barrier
  *
- *  Note: properly using memory barriers is tricky, in general try not to 
+ *  void membar_enter_lock() - memory barrier function that should be
+ *                             called after a lock operation (where lock is
+ *                             an asm inline function that uses an atomic store
+ *                             operation on the lock var.). It is at most
+ *                             a StoreStore|StoreLoad barrier, but could also
+ *                             be empty if an atomic op implies a memory
+ *                             barrier on the specific architecture.
+ *                             Example usage:
+ *                               raw_lock(l); membar_enter_lock(); ...
+ *  void membar_leave_lock() - memory barrier function that should be called
+ *                             before an unlock operation (where unlock is an
+ *                             asm inline function that uses at least an atomic
+ *                             store on the lock var.). It is at most a
+ *                             LoadStore|StoreStore barrier (but could also be
+ *                             empty, see above).
+ *                             Example: raw_lock(l); membar_enter_lock(); ..
+ *                                      ... critical section ...
+ *                                      membar_leave_lock(); raw_unlock(l);
+ *
+ *  Note: - properly using memory barriers is tricky, in general try not to
  *        depend on them. Locks include memory barriers, so you don't need
  *        them for writes/load already protected by locks.
+ *        - membar_enter_lock() and membar_leave_lock() are needed only if
+ *        you implement your own locks using atomic ops (ser locks have the
+ *        membars included)
  *
  * atomic operations:
  * ------------------
... ...
@@ -89,70 +111,17 @@
  * History:
  * --------
  *  2006-03-08  created by andrei
+ *  2007-05-13  moved some of the decl. and includes into atomic_common.h and
+ *               atomic_native.h (andrei)
  */
 #ifndef __atomic_ops
 #define __atomic_ops
 
-/* atomic_t defined as a struct to easily catch non atomic ops. on it,
- * e.g.  atomic_t  foo; foo++  will generate a compile error */
-typedef struct{ volatile int val; } atomic_t; 
-
-
-/* store and load operations are atomic on all cpus, note however that they
- * don't include memory barriers so if you want to use atomic_{get,set} 
- * to implement mutexes you must use the mb_* versions or explicitely use
- * the barriers */
-
-#define atomic_set_int(pvar, i) (*(pvar)=i)
-#define atomic_set_long(pvar, i) (*(pvar)=i)
-#define atomic_get_int(pvar) (*(pvar))
-#define atomic_get_long(pvar) (*(pvar))
-
-#define atomic_set(at_var, value)	(atomic_set_int(&((at_var)->val), (value)))
-
-inline static int atomic_get(atomic_t *v)
-{
-	return atomic_get_int(&(v->val));
-}
-
-
-
-#ifdef CC_GCC_LIKE_ASM
-
-#if defined __CPU_i386 || defined __CPU_x86_64
-
-#include "atomic/atomic_x86.h"
-
-#elif defined __CPU_mips2 || defined __CPU_mips64 || \
-	  ( defined __CPU_mips && defined MIPS_HAS_LLSC )
-
-#include "atomic/atomic_mips2.h"
-
-#elif defined __CPU_ppc || defined __CPU_ppc64
-
-#include "atomic/atomic_ppc.h"
-
-#elif defined __CPU_sparc64
-
-#include "atomic/atomic_sparc64.h"
-
-#elif defined __CPU_sparc
-
-#include "atomic/atomic_sparc.h"
-
-#elif defined __CPU_arm || defined __CPU_arm6
-
-#include "atomic/atomic_arm.h"
-
-#elif defined __CPU_alpha
-
-#include "atomic/atomic_alpha.h"
-
-#endif /* __CPU_xxx  => no known cpu */
-
-#endif /* CC_GCC_LIKE_ASM */
+#include "atomic/atomic_common.h"
 
+#include "atomic/atomic_native.h"
 
+/* if no native operations, emulate them using locks */
 #if  ! defined HAVE_ASM_INLINE_ATOMIC_OPS || ! defined HAVE_ASM_INLINE_MEMBAR
 
 #include "atomic/atomic_unknown.h"