
- parts of atomic_ops.h moved into atomic/atomic_common.h and atomic/atomic_native.h

- added membar_enter_lock() and membar_leave_lock() (to be used only when
creating locks with the atomic ops functions; for more info see atomic_ops.h)
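A minimal sketch of where the new barriers go when rolling your own lock on top of the atomic ops (the ser locks already include the needed membars, so they do not need this). The my_lock/my_unlock names and the gcc __sync builtins are illustrative stand-ins, not part of this commit; the builtins already imply acquire/release ordering, so the explicit membar calls here only show the intended placement for hand-written asm locks:

/* illustrative spinlock built on top of the atomic ops API; only the
 * placement of membar_enter_lock()/membar_leave_lock() is the point */
#include "atomic_ops.h"

typedef volatile int my_lock_t;            /* hypothetical lock type */

static inline void my_lock(my_lock_t *l)
{
	while (__sync_lock_test_and_set(l, 1))  /* spin until we store the 1 */
		;
	membar_enter_lock();  /* at most StoreStore|StoreLoad, may be empty */
}

static inline void my_unlock(my_lock_t *l)
{
	membar_leave_lock();  /* at most LoadStore|StoreStore, may be empty */
	__sync_lock_release(l);                  /* releasing store of 0 */
}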

Andrei Pelinescu-Onciul authored on 14/05/2007 17:29:31
Showing 10 changed files
... ...
@@ -44,11 +44,18 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
+
 #else
 
 #define membar()		asm volatile ("    mb \n\t" : : : "memory" ) 
 #define membar_read()	membar()
 #define membar_write()	asm volatile ("    wmb \n\t" : : : "memory" )
+#define membar_enter_lock() asm volatile("mb \n\t" : : : "memory")
+#define membar_leave_lock() asm volatile("mb \n\t" : : : "memory")
 
 #endif /* NOSMP */
 
... ...
@@ -44,6 +44,10 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
 #else /* SMP */
 #warning SMP not supported for arm atomic ops, try compiling with -DNOSMP
 /* fall back to default lock based barriers (don't define HAVE_ASM...) */
new file mode 100644
... ...
@@ -0,0 +1,54 @@
+/* 
+ * $Id$
+ * 
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * common part for all the atomic operations (atomic_t and common operations)
+ *  See atomic_ops.h for more info.
+ */
+/* 
+ * History:
+ * --------
+ *  2006-03-08  created by andrei
+ *  2007-05-13  split from atomic_ops.h (andrei)
+ */
+#ifndef __atomic_common
+#define __atomic_common
+
+/* atomic_t defined as a struct to easily catch non atomic ops. on it,
+ * e.g.  atomic_t  foo; foo++  will generate a compile error */
+typedef struct{ volatile int val; } atomic_t; 
+
+
+/* store and load operations are atomic on all cpus, note however that they
+ * don't include memory barriers so if you want to use atomic_{get,set} 
+ * to implement mutexes you must use the mb_* versions or explicitly use
+ * the barriers */
+
+#define atomic_set_int(pvar, i) (*(pvar)=i)
+#define atomic_set_long(pvar, i) (*(pvar)=i)
+#define atomic_get_int(pvar) (*(pvar))
+#define atomic_get_long(pvar) (*(pvar))
+
+#define atomic_set(at_var, value)	(atomic_set_int(&((at_var)->val), (value)))
+
+inline static int atomic_get(atomic_t *v)
+{
+	return atomic_get_int(&(v->val));
+}
+
+
+#endif
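For illustration, a short usage sketch of the atomic_t wrapper and the plain (non-barrier) set/get operations defined in this new header; the counter name is made up:

#include "atomic/atomic_common.h"

static atomic_t active_calls;        /* illustrative shared counter */

void counters_init(void)
{
	atomic_set(&active_calls, 0);    /* plain store, no memory barrier */
}

int counters_read(void)
{
	/* plain load, no memory barrier -- for lock-like usage the mb_*
	 * variants or the explicit barriers from atomic_ops.h are needed */
	return atomic_get(&active_calls);
}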
... ...
@@ -52,6 +52,11 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
+
 #else
 
 #define membar() \
... ...
@@ -66,6 +71,8 @@
 
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_enter_lock() membar()
+#define membar_leave_lock() membar()
 
 #endif /* NOSMP */
 
new file mode 100644
... ...
@@ -0,0 +1,83 @@
+/* 
+ * $Id$
+ * 
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ *  include file for native (asm) atomic operations and memory barriers
+ *  WARNING: atomic ops do not include memory barriers
+ *  See atomic_ops.h for more info.
+ *  Expects atomic_t to be defined (#include "atomic_common.h")
+ *
+ * Config defines:   CC_GCC_LIKE_ASM  - the compiler supports gcc style
+ *                     inline asm
+ *                   NOSMP - the code will be a little faster, but not SMP
+ *                            safe
+ *                   __CPU_i386, __CPU_x86_64, X86_OOSTORE - see 
+ *                       atomic_x86.h
+ *                   __CPU_mips, __CPU_mips2, __CPU_mips64, MIPS_HAS_LLSC - see
+ *                       atomic_mips2.h
+ *                   __CPU_ppc, __CPU_ppc64 - see atomic_ppc.h
+ *                   __CPU_sparc - see atomic_sparc.h
+ *                   __CPU_sparc64, SPARC64_MODE - see atomic_sparc64.h
+ *                   __CPU_arm, __CPU_arm6 - see atomic_arm.h
+ *                   __CPU_alpha - see atomic_alpha.h
+ */
+/* 
+ * History:
+ * --------
+ *  2006-03-08  created by andrei
+ *  2007-05-13  split from atomic_ops.h (andrei)
+ */
+#ifndef __atomic_native
+#define __atomic_native
+
+#ifdef CC_GCC_LIKE_ASM
+
+#if defined __CPU_i386 || defined __CPU_x86_64
+
+#include "atomic_x86.h"
+
+#elif defined __CPU_mips2 || defined __CPU_mips64 || \
+	  ( defined __CPU_mips && defined MIPS_HAS_LLSC )
+
+#include "atomic_mips2.h"
+
+#elif defined __CPU_ppc || defined __CPU_ppc64
+
+#include "atomic_ppc.h"
+
+#elif defined __CPU_sparc64
+
+#include "atomic_sparc64.h"
+
+#elif defined __CPU_sparc
+
+#include "atomic_sparc.h"
+
+#elif defined __CPU_arm || defined __CPU_arm6
+
+#include "atomic_arm.h"
+
+#elif defined __CPU_alpha
+
+#include "atomic_alpha.h"
+
+#endif /* __CPU_xxx  => no known cpu */
+
+#endif /* CC_GCC_LIKE_ASM */
+
+
+#endif
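Since atomic_native.h expects atomic_t to already be defined, a consumer includes the common header first; a minimal sketch of the expected include order, mirroring what atomic_ops.h itself does further down in this commit:

/* common header first, so atomic_t and the plain set/get exist, then the
 * native header, which picks a per-CPU implementation from the __CPU_* /
 * CC_GCC_LIKE_ASM defines.  If no native version defines
 * HAVE_ASM_INLINE_ATOMIC_OPS / HAVE_ASM_INLINE_MEMBAR, the lock-based
 * emulation in atomic_unknown.h is used instead. */
#include "atomic/atomic_common.h"
#include "atomic/atomic_native.h"

#if ! defined HAVE_ASM_INLINE_ATOMIC_OPS || ! defined HAVE_ASM_INLINE_MEMBAR
#include "atomic/atomic_unknown.h"
#endif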
... ...
@@ -48,12 +48,21 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
+
 #else
 #define membar() asm volatile ("sync \n\t" : : : "memory") 
 /* lwsync orders LoadLoad, LoadStore and StoreStore */
 #define membar_read() asm volatile ("lwsync \n\t" : : : "memory") 
 /* on "normal" cached mem. eieio orders StoreStore */
 #define membar_write() asm volatile ("eieio \n\t" : : : "memory") 
+#define membar_enter_lock() asm volatile("lwsync \n\t" : : : "memory")
+/* for unlock lwsync will work too and is faster than sync
+ *  [IBM Programming Environments Manual, D.4.2.2] */
+#define membar_leave_lock() asm volatile("lwsync \n\t" : : : "memory")
 #endif /* NOSMP */
 
 
... ...
@@ -40,10 +40,17 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
 #else /* SMP */
 #define membar_write() asm volatile ("stbar \n\t" : : : "memory") 
 #define membar() membar_write()
 #define membar_read() asm volatile ("" : : : "memory") 
+#define membar_enter_lock() 
+#define membar_leave_lock() asm volatile ("stbar \n\t" : : : "memory") 
+
 #endif /* NOSMP */
 
 
... ...
@@ -52,6 +52,20 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/*  memory barriers for lock & unlock where lock & unlock are inline asm
+ *  functions that use atomic ops (and both of them use at least a store to
+ *  the lock). membar_enter_lock() is at most a StoreStore|StoreLoad barrier
+ *   and membar_leave_lock() is at most a LoadStore|StoreStore barrier
+ *  (if the atomic ops on the specific architecture imply these barriers
+ *   => these macros will be empty)
+ *   Warning: these barriers don't force LoadLoad ordering between code
+ *    before the lock/membar_enter_lock() and code 
+ *    after membar_leave_lock()/unlock()
+ *
+ *  Usage: lock(); membar_enter_lock(); .... ; membar_leave_lock(); unlock()
+ */
+#define membar_enter_lock()
+#define membar_leave_lock()
 #else /* SMP */
 #define membar() \
 	asm volatile ( \
... ...
@@ -60,6 +74,10 @@
 
 #define membar_read() asm volatile ("membar #LoadLoad \n\t" : : : "memory")
 #define membar_write() asm volatile ("membar #StoreStore \n\t" : : : "memory")
+#define membar_enter_lock() \
+	asm volatile ("membar #StoreStore | #StoreLoad \n\t" : : : "memory");
+#define membar_leave_lock() \
+	asm volatile ("membar #LoadStore | #StoreStore \n\t" : : : "memory");
 #endif /* NOSMP */
 
 
... ...
@@ -57,6 +57,10 @@
 #define membar()	asm volatile ("" : : : "memory")
 #define membar_read()	membar()
 #define membar_write()	membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
 
 #else
 
... ...
@@ -95,6 +99,13 @@
 
 #endif /* __CPU_x86_64 */
 
+/* lock barriers: empty, not needed on x86 or x86_64 (atomic ops already
+ *  force the barriers if needed); the lock/unlock should already contain the 
+ *  gcc do_not_cache barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
+
+
 
 
 
... ...
@@ -26,9 +26,31 @@
  *  void membar_read()   - load (read) memory barrier
  *  void membar_write()  - store (write) memory barrier
  *
- *  Note: properly using memory barriers is tricky, in general try not to 
+ *  void membar_enter_lock() - memory barrier function that should be 
+ *                             called after a lock operation (where lock is
+ *                             an asm inline function that uses an atomic store
+ *                             operation on the lock var.). It is at most
+ *                             a StoreStore|StoreLoad barrier, but could also
+ *                             be empty if an atomic op implies a memory 
+ *                             barrier on the specific architecture.
+ *                             Example usage: 
+ *                               raw_lock(l); membar_enter_lock(); ...
+ *  void membar_leave_lock() - memory barrier function that should be called 
+ *                             before an unlock operation (where unlock is an
+ *                             asm inline function that uses at least an atomic
+ *                             store on the lock var.). It is at most a 
+ *                             LoadStore|StoreStore barrier (but could also be
+ *                             empty, see above).
+ *                             Example: raw_lock(l); membar_enter_lock(); ..
+ *                                      ... critical section ...
+ *                                      membar_leave_lock(); raw_unlock(l);
+ *
+ *  Note: - properly using memory barriers is tricky, in general try not to 
  *        depend on them. Locks include memory barriers, so you don't need
  *        them for writes/load already protected by locks.
+ *        - membar_enter_lock() and membar_leave_lock() are needed only if
+ *        you implement your own locks using atomic ops (ser locks have the
+ *        membars included)
  *
  * atomic operations:
  * ------------------
... ...
@@ -89,70 +111,17 @@
  * History:
  * --------
  *  2006-03-08  created by andrei
+ *  2007-05-13  moved some of the decl. and includes into atomic_common.h and 
+ *               atomic_native.h (andrei)
  */
 #ifndef __atomic_ops
 #define __atomic_ops
 
-/* atomic_t defined as a struct to easily catch non atomic ops. on it,
- * e.g.  atomic_t  foo; foo++  will generate a compile error */
-typedef struct{ volatile int val; } atomic_t; 
-
-
-/* store and load operations are atomic on all cpus, note however that they
- * don't include memory barriers so if you want to use atomic_{get,set} 
- * to implement mutexes you must use the mb_* versions or explicitely use
- * the barriers */
-
-#define atomic_set_int(pvar, i) (*(pvar)=i)
-#define atomic_set_long(pvar, i) (*(pvar)=i)
-#define atomic_get_int(pvar) (*(pvar))
-#define atomic_get_long(pvar) (*(pvar))
-
-#define atomic_set(at_var, value)	(atomic_set_int(&((at_var)->val), (value)))
-
-inline static int atomic_get(atomic_t *v)
-{
-	return atomic_get_int(&(v->val));
-}
-
-
-
-#ifdef CC_GCC_LIKE_ASM
-
-#if defined __CPU_i386 || defined __CPU_x86_64
-
-#include "atomic/atomic_x86.h"
-
-#elif defined __CPU_mips2 || defined __CPU_mips64 || \
-	  ( defined __CPU_mips && defined MIPS_HAS_LLSC )
-
-#include "atomic/atomic_mips2.h"
-
-#elif defined __CPU_ppc || defined __CPU_ppc64
-
-#include "atomic/atomic_ppc.h"
-
-#elif defined __CPU_sparc64
-
-#include "atomic/atomic_sparc64.h"
-
-#elif defined __CPU_sparc
-
-#include "atomic/atomic_sparc.h"
-
-#elif defined __CPU_arm || defined __CPU_arm6
-
-#include "atomic/atomic_arm.h"
-
-#elif defined __CPU_alpha
-
-#include "atomic/atomic_alpha.h"
-
-#endif /* __CPU_xxx  => no known cpu */
-
-#endif /* CC_GCC_LIKE_ASM */
+#include "atomic/atomic_common.h"
 
+#include "atomic/atomic_native.h"
 
+/* if no native operations, emulate them using locks */
 #if  ! defined HAVE_ASM_INLINE_ATOMIC_OPS || ! defined HAVE_ASM_INLINE_MEMBAR
 
 #include "atomic/atomic_unknown.h"