
core, lib, modules: restructured source code tree

- new folder src/ to hold the source code for main project applications
- main.c is in src/
- all core files are in the subfolder src/core/
- modules are in src/modules/
- libs are in src/lib/
- application Makefiles are in src/
- application binary is built in src/ (src/kamailio)

Daniel-Constantin Mierla authored on 07/12/2016 11:03:51
deleted file mode 100644
@@ -1,184 +0,0 @@
-/* 
- * Copyright (C) 2006 iptelorg GmbH
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*!
- * \file
- * \brief Kamailio core :: Atomic operations and memory barriers
- * Copyright (C) 2006 iptelorg GmbH
- * \ingroup core
- * Module: \ref core
- * See \ref atomic
- */
-
-/**
- * \page atomicops  Atomic operations and memory barriers
- *
- *  WARNING: atomic ops do not include memory barriers
- *  
- *  memory barriers:
- *  ----------------
- *
- *  void membar();        - memory barrier (load & store)
- *  void membar_read()    - load (read) memory barrier
- *  void membar_write()   - store (write) memory barrier
- *  void membar_depends() - read depends memory barrier, needed before using
- *                          the contents of a pointer (for now is needed only
- *                          on Alpha so on all other CPUs it will be a no-op)
- *                          For more info see: 
- *                          http://lse.sourceforge.net/locking/wmbdd.html
- *                          http://www.linuxjournal.com/article/8212
- *
- *  void membar_enter_lock() - memory barrier function that should be 
- *                             called after a lock operation (where lock is
- *                             an asm inline function that uses atomic store
- *                             operation on the lock var.). It is at most
- *                             a StoreStore|StoreLoad barrier, but could also
- *                             be empty if an atomic op implies a memory 
- *                             barrier on the specific arhitecture.
- *                             Example usage: 
- *                               raw_lock(l); membar_enter_lock(); ...
- *  void membar_leave_lock() - memory barrier function that should be called 
- *                             before an unlock operation (where unlock is an
- *                             asm inline function that uses at least an atomic
- *                             store to on the lock var.). It is at most a 
- *                             LoadStore|StoreStore barrier (but could also be
- *                             empty, see above).
- *                             Example: raw_lock(l); membar_enter_lock(); ..
- *                                      ... critical section ...
- *                                      membar_leave_lock(); raw_unlock(l);
- *  void membar_atomic_op() - memory barrier that should be called if a memory
- *                            barrier is needed immediately after or 
- *                            immediately before an atomic operation
- *                            (for example: atomic_inc(&i); membar_atomic_op()
- *                               instead of atomic_inc(&i); membar()).
- *                            atomic_op means every atomic operation except get
- *                            and set (for them use membar_atomic_setget()).
- *                            Using membar_atomic_op() instead of membar() in
- *                            these cases will generate faster code on some
- *                            architectures (for now x86 and x86_64), where 
- *                            atomic operations act also as memory barriers.
- *                            Note that mb_atomic_<OP>(...) is equivalent to
- *                            membar_atomic_op(); atomic_<OP>(...) and in this
- *                            case the first form is preferred).
- * void membar_atomic_setget() - same as above but for atomic_set and 
- *                            atomic_get (and not for any other atomic op.,
- *                            including atomic_get_and_set, for them use
- *                            membar_atomic_op()).
- *                            Note that mb_atomic_{get,set}(&i) is equivalent 
- *                            and preferred to membar_atomic_setget(); 
- *                            atomic_{get,set}(&i) (it will generate faster
- *                            code on x86 and x86_64).
- * void membar_read_atomic_op() - like membar_atomic_op(), but acts only as
- *                             a read barrier.
- * void membar_read_atomic_setget() - like membar_atomic_setget() but acts only
- *                            as a read barrier.
- * void membar_write_atomic_op() - like membar_atomic_op(), but acts only as
- *                            a write barrier.
- * void membar_write_atomic_setget() - like membar_atomic_setget() but acts 
- *                            only as a write barrier.
- *
- *
- *  Note: - properly using memory barriers is tricky, in general try not to 
- *        depend on them. Locks include memory barriers, so you don't need
- *        them for writes/load already protected by locks.
- *        - membar_enter_lock() and membar_leave_lock() are needed only if
- *        you implement your own locks using atomic ops (ser locks have the
- *        membars included)
- *
- * atomic operations:
- * ------------------
- *  type: atomic_t
- *
- * not including memory barriers:
- *
- *  void atomic_set(atomic_t* v, int i)      - v->val=i
- *  int atomic_get(atomic_t* v)              - return v->val
- *  int atomic_get_and_set(atomic_t *v, i)   - return old v->val, v->val=i
- *  void atomic_inc(atomic_t* v)
- *  void atomic_dec(atomic_t* v)
- *  int atomic_inc_and_test(atomic_t* v)     - returns 1 if the result is 0
- *  int atomic_dec_and_test(atomic_t* v)     - returns 1 if the result is 0
- *  void atomic_or (atomic_t* v, int mask)   - v->val|=mask 
- *  void atomic_and(atomic_t* v, int mask)   - v->val&=mask
- *  int atomic_add(atomic_t* v, int i)       - v->val+=i; return v->val
- *  int atomic_cmpxchg(atomic_t* v, o, n)    - r=v->val; if (r==o) v->val=n;
- *                                             return r (old value)
- *
- * 
- * same ops, but with builtin memory barriers:
- *
- *  void mb_atomic_set(atomic_t* v, int i)      -  v->val=i
- *  int mb_atomic_get(atomic_t* v)              -  return v->val
- *  int mb_atomic_get_and_set(atomic_t *v, i)   -  return old v->val, v->val=i
- *  void mb_atomic_inc(atomic_t* v)
- *  void mb_atomic_dec(atomic_t* v)
- *  int mb_atomic_inc_and_test(atomic_t* v)  - returns 1 if the result is 0
- *  int mb_atomic_dec_and_test(atomic_t* v)  - returns 1 if the result is 0
- *  void mb_atomic_or(atomic_t* v, int mask - v->val|=mask 
- *  void mb_atomic_and(atomic_t* v, int mask)- v->val&=mask
- *  int mb_atomic_add(atomic_t* v, int i)    - v->val+=i; return v->val
- *  int mb_atomic_cmpxchg(atomic_t* v, o, n) - r=v->val; if (r==o) v->val=n;
- *                                             return r (old value)
- *
- *  Same operations are available for int and long. The functions are named
- *   after the following rules:
- *     - add an int or long  suffix to the correspondent atomic function
- *     -  volatile int* or volatile long* replace atomic_t* in the functions
- *        declarations
- *     -  long and int replace the parameter type (if the function has an extra
- *        parameter) and the return value
- *  E.g.:
- *    long atomic_get_long(volatile long* v)
- *    int atomic_get_int( volatile int* v)
- *    long atomic_get_and_set(volatile long* v, long l)
- *    int atomic_get_and_set(volatile int* v, int i)
- *
- * Config defines:   CC_GCC_LIKE_ASM  - the compiler support gcc style
- *                     inline asm
- *                   NOSMP - the code will be a little faster, but not SMP
- *                            safe
- *                   __CPU_i386, __CPU_x86_64, X86_OOSTORE - see 
- *                       atomic/atomic_x86.h
- *                   __CPU_mips, __CPU_mips2, __CPU_mips64, MIPS_HAS_LLSC - see
- *                       atomic/atomic_mip2.h
- *                   __CPU_ppc, __CPU_ppc64 - see atomic/atomic_ppc.h
- *                   __CPU_sparc - see atomic/atomic_sparc.h
- *                   __CPU_sparc64, SPARC64_MODE - see atomic/atomic_sparc64.h
- *                   __CPU_arm, __CPU_arm6 - see atomic/atomic_arm.h
- *                   __CPU_alpha - see atomic/atomic_alpha.h
- */
-/* 
- * History:
- * --------
- *  2006-03-08  created by andrei
- *  2007-05-13  moved some of the decl. and includes into atomic_common.h and 
- *               atomic_native.h (andrei)
- */
-#ifndef __atomic_ops
-#define __atomic_ops
-
-#include "atomic/atomic_common.h"
-
-#include "atomic/atomic_native.h"
-
-/*! \brief if no native operations, emulate them using locks */
-#if  ! defined HAVE_ASM_INLINE_ATOMIC_OPS || ! defined HAVE_ASM_INLINE_MEMBAR
-
-#include "atomic/atomic_unknown.h"
-
-#endif /* if HAVE_ASM_INLINE_ATOMIC_OPS */
-
-#endif
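
The API documented in the header deleted above (the file was removed here as part of the move to src/core/) is easiest to follow with a usage sketch. The snippet below is illustrative only and is not repository code: struct shm_obj, the shm_obj_* functions and destroy_obj() are hypothetical names; only atomic_set(), atomic_inc() and atomic_dec_and_test() come from atomic_ops.h. Per the WARNING in the documentation, none of these calls include memory barriers.

/* sketch only, not repository code: a reference counter on top of the
 * atomic_t API described in the header above */
struct shm_obj {
	atomic_t refcnt;
	/* ... payload ... */
};

static void shm_obj_init(struct shm_obj *o)
{
	atomic_set(&o->refcnt, 1);      /* start with one reference */
}

static void shm_obj_ref(struct shm_obj *o)
{
	atomic_inc(&o->refcnt);
}

static void shm_obj_unref(struct shm_obj *o)
{
	/* atomic_dec_and_test() returns 1 when the counter reaches 0 */
	if (atomic_dec_and_test(&o->refcnt))
		destroy_obj(o);             /* hypothetical destructor */
}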

core : Update include files - delete IDs, update doxygen, delete history

Olle E. Johansson authored on 03/01/2015 10:55:48
@@ -1,6 +1,4 @@
 /* 
- * $Id$
- * 
  * Copyright (C) 2006 iptelorg GmbH
  *
  * Permission to use, copy, modify, and distribute this software for any
@@ -18,13 +16,14 @@
 
 /*!
  * \file
- * \brief SIP-router core :: Atomic operations and memory barriers
+ * \brief Kamailio core :: Atomic operations and memory barriers
+ * Copyright (C) 2006 iptelorg GmbH
  * \ingroup core
  * Module: \ref core
  * See \ref atomic
  */
 
-/*
+/**
  * \page atomicops  Atomic operations and memory barriers
  *
  *  WARNING: atomic ops do not include memory barriers

doxygen: small bunch of fixes for doxygen documentation

Henning Westerholt authored on 23/06/2011 22:36:18
@@ -21,7 +21,7 @@
  * \brief SIP-router core :: Atomic operations and memory barriers
  * \ingroup core
  * Module: \ref core
- * See \ref atomicops
+ * See \ref atomic
  */
 
 /*

Doxygen updates

oej authored on 25/10/2009 19:11:28
@@ -175,7 +175,7 @@
 
 #include "atomic/atomic_native.h"
 
-/* if no native operations, emulate them using locks */
+/*! \brief if no native operations, emulate them using locks */
 #if  ! defined HAVE_ASM_INLINE_ATOMIC_OPS || ! defined HAVE_ASM_INLINE_MEMBAR
 
 #include "atomic/atomic_unknown.h"

- Doxygen updates on core files
- Add project name to doxygen in Makefile

oej authored on 19/10/2009 20:35:43
@@ -15,8 +15,18 @@
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
+
+/*!
+ * \file
+ * \brief SIP-router core :: Atomic operations and memory barriers
+ * \ingroup core
+ * Module: \ref core
+ * See \ref atomicops
+ */
+
 /*
- *  atomic operations and memory barriers
+ * \page atomicops  Atomic operations and memory barriers
+ *
  *  WARNING: atomic ops do not include memory barriers
  *  
  *  memory barriers:

- added membar_depends(), needed on smp archs. with separate cache banks where it's possible to get a new pointer value, but the old pointer content (e.g. if the two are in different cache banks and the "content" bank is very busy processing a long invalidations queue). For now only Alpha SMP needs it, on all other archs is a no-op (for more info see atomic_ops.h , http://lse.sourceforge.net/locking/wmbdd.html, http://www.linuxjournal.com/article/8212 or Alpha Architecture Reference Manual Chapter 5.6.

- added membar_atomic_op(), membar_atomic_setget(), membar_read_atomic_op(),
membar_read_atomic_setget(), membar_write_atomic_op(),
membar_write_atomic_setget() -- special case memory barriers that can be
optimized if the atomic ops already force some kind of barrier (e.g. x86),
see the description in atomic_ops.h for more info.

Andrei Pelinescu-Onciul authored on 29/05/2007 11:31:29
@@ -22,9 +22,15 @@
  *  memory barriers:
  *  ----------------
  *
- *  void membar();       - memory barrier (load & store)
- *  void membar_read()   - load (read) memory barrier
- *  void membar_write()  - store (write) memory barrier
+ *  void membar();        - memory barrier (load & store)
+ *  void membar_read()    - load (read) memory barrier
+ *  void membar_write()   - store (write) memory barrier
+ *  void membar_depends() - read depends memory barrier, needed before using
+ *                          the contents of a pointer (for now is needed only
+ *                          on Alpha so on all other CPUs it will be a no-op)
+ *                          For more info see: 
+ *                          http://lse.sourceforge.net/locking/wmbdd.html
+ *                          http://www.linuxjournal.com/article/8212
  *
  *  void membar_enter_lock() - memory barrier function that should be 
  *                             called after a lock operation (where lock is
@@ -44,6 +50,37 @@
  *                             Example: raw_lock(l); membar_enter_lock(); ..
  *                                      ... critical section ...
  *                                      membar_leave_lock(); raw_unlock(l);
+ *  void membar_atomic_op() - memory barrier that should be called if a memory
+ *                            barrier is needed immediately after or 
+ *                            immediately before an atomic operation
+ *                            (for example: atomic_inc(&i); membar_atomic_op()
+ *                               instead of atomic_inc(&i); membar()).
+ *                            atomic_op means every atomic operation except get
+ *                            and set (for them use membar_atomic_setget()).
+ *                            Using membar_atomic_op() instead of membar() in
+ *                            these cases will generate faster code on some
+ *                            architectures (for now x86 and x86_64), where 
+ *                            atomic operations act also as memory barriers.
+ *                            Note that mb_atomic_<OP>(...) is equivalent to
+ *                            membar_atomic_op(); atomic_<OP>(...) and in this
+ *                            case the first form is preferred).
+ * void membar_atomic_setget() - same as above but for atomic_set and 
+ *                            atomic_get (and not for any other atomic op.,
+ *                            including atomic_get_and_set, for them use
+ *                            membar_atomic_op()).
+ *                            Note that mb_atomic_{get,set}(&i) is equivalent 
+ *                            and preferred to membar_atomic_setget(); 
+ *                            atomic_{get,set}(&i) (it will generate faster
+ *                            code on x86 and x86_64).
+ * void membar_read_atomic_op() - like membar_atomic_op(), but acts only as
+ *                             a read barrier.
+ * void membar_read_atomic_setget() - like membar_atomic_setget() but acts only
+ *                            as a read barrier.
+ * void membar_write_atomic_op() - like membar_atomic_op(), but acts only as
+ *                            a write barrier.
+ * void membar_write_atomic_setget() - like membar_atomic_setget() but acts 
+ *                            only as a write barrier.
+ *
  *
  *  Note: - properly using memory barriers is tricky, in general try not to 
  *        depend on them. Locks include memory barriers, so you don't need
@@ -67,6 +104,10 @@
  *  int atomic_dec_and_test(atomic_t* v)     - returns 1 if the result is 0
  *  void atomic_or (atomic_t* v, int mask)   - v->val|=mask 
  *  void atomic_and(atomic_t* v, int mask)   - v->val&=mask
+ *  int atomic_add(atomic_t* v, int i)       - v->val+=i; return v->val
+ *  int atomic_cmpxchg(atomic_t* v, o, n)    - r=v->val; if (r==o) v->val=n;
+ *                                             return r (old value)
+ *
  * 
  * same ops, but with builtin memory barriers:
  *
@@ -79,6 +120,9 @@
  *  int mb_atomic_dec_and_test(atomic_t* v)  - returns 1 if the result is 0
  *  void mb_atomic_or(atomic_t* v, int mask - v->val|=mask 
  *  void mb_atomic_and(atomic_t* v, int mask)- v->val&=mask
+ *  int mb_atomic_add(atomic_t* v, int i)    - v->val+=i; return v->val
+ *  int mb_atomic_cmpxchg(atomic_t* v, o, n) - r=v->val; if (r==o) v->val=n;
+ *                                             return r (old value)
  *
  *  Same operations are available for int and long. The functions are named
  *   after the following rules:
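
A short sketch may help place the two additions in this commit; it is not taken from the tree, and the shared_cfg / pkt_cnt globals and function names are invented for illustration. It shows membar_depends() ordering the dependent loads after a pointer read, and membar_atomic_op() standing in for a full membar() right after an atomic operation, as the added documentation recommends.

struct cfg { int timeout; };            /* invented example type */
struct cfg *volatile shared_cfg;        /* published by one CPU, read by others */
atomic_t pkt_cnt;

/* writer: fill in the struct, then publish the pointer */
void cfg_publish(struct cfg *newcfg)
{
	newcfg->timeout = 30;
	membar_write();                 /* make the contents visible before the pointer */
	shared_cfg = newcfg;
}

/* reader: load the pointer, then order the loads that depend on it */
int cfg_read_timeout(void)
{
	struct cfg *c;

	c = shared_cfg;
	membar_depends();               /* no-op everywhere except Alpha SMP */
	return c->timeout;
}

void pkt_count(void)
{
	atomic_inc(&pkt_cnt);
	membar_atomic_op();             /* cheaper than membar() on x86/x86_64 */
}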

- parts of atomic_ops.h moved into atomic/atomic_common.h and atomic/atomic_native.h

- added membar_enter_lock() and membar_leave_lock() (to be used only if
creating locks using the atomic ops functions, for more info see atomic_ops.h)

Andrei Pelinescu-Onciul authored on 14/05/2007 17:29:31
@@ -26,9 +26,31 @@
  *  void membar_read()   - load (read) memory barrier
  *  void membar_write()  - store (write) memory barrier
  *
- *  Note: properly using memory barriers is tricky, in general try not to 
+ *  void membar_enter_lock() - memory barrier function that should be 
+ *                             called after a lock operation (where lock is
+ *                             an asm inline function that uses atomic store
+ *                             operation on the lock var.). It is at most
+ *                             a StoreStore|StoreLoad barrier, but could also
+ *                             be empty if an atomic op implies a memory 
+ *                             barrier on the specific arhitecture.
+ *                             Example usage: 
+ *                               raw_lock(l); membar_enter_lock(); ...
+ *  void membar_leave_lock() - memory barrier function that should be called 
+ *                             before an unlock operation (where unlock is an
+ *                             asm inline function that uses at least an atomic
+ *                             store to on the lock var.). It is at most a 
+ *                             LoadStore|StoreStore barrier (but could also be
+ *                             empty, see above).
+ *                             Example: raw_lock(l); membar_enter_lock(); ..
+ *                                      ... critical section ...
+ *                                      membar_leave_lock(); raw_unlock(l);
+ *
+ *  Note: - properly using memory barriers is tricky, in general try not to 
  *        depend on them. Locks include memory barriers, so you don't need
  *        them for writes/load already protected by locks.
+ *        - membar_enter_lock() and membar_leave_lock() are needed only if
+ *        you implement your own locks using atomic ops (ser locks have the
+ *        membars included)
  *
  * atomic operations:
  * ------------------
@@ -89,70 +111,17 @@
  * History:
  * --------
  *  2006-03-08  created by andrei
+ *  2007-05-13  moved some of the decl. and includes into atomic_common.h and 
+ *               atomic_native.h (andrei)
  */
 #ifndef __atomic_ops
 #define __atomic_ops
 
-/* atomic_t defined as a struct to easily catch non atomic ops. on it,
- * e.g.  atomic_t  foo; foo++  will generate a compile error */
-typedef struct{ volatile int val; } atomic_t; 
-
-
-/* store and load operations are atomic on all cpus, note however that they
- * don't include memory barriers so if you want to use atomic_{get,set} 
- * to implement mutexes you must use the mb_* versions or explicitely use
- * the barriers */
-
-#define atomic_set_int(pvar, i) (*(pvar)=i)
-#define atomic_set_long(pvar, i) (*(pvar)=i)
-#define atomic_get_int(pvar) (*(pvar))
-#define atomic_get_long(pvar) (*(pvar))
-
-#define atomic_set(at_var, value)	(atomic_set_int(&((at_var)->val), (value)))
-
-inline static int atomic_get(atomic_t *v)
-{
-	return atomic_get_int(&(v->val));
-}
-
-
-
-#ifdef CC_GCC_LIKE_ASM
-
-#if defined __CPU_i386 || defined __CPU_x86_64
-
-#include "atomic/atomic_x86.h"
-
-#elif defined __CPU_mips2 || defined __CPU_mips64 || \
-	  ( defined __CPU_mips && defined MIPS_HAS_LLSC )
-
-#include "atomic/atomic_mips2.h"
-
-#elif defined __CPU_ppc || defined __CPU_ppc64
-
-#include "atomic/atomic_ppc.h"
-
-#elif defined __CPU_sparc64
-
-#include "atomic/atomic_sparc64.h"
-
-#elif defined __CPU_sparc
-
-#include "atomic/atomic_sparc.h"
-
-#elif defined __CPU_arm || defined __CPU_arm6
-
-#include "atomic/atomic_arm.h"
-
-#elif defined __CPU_alpha
-
-#include "atomic/atomic_alpha.h"
-
-#endif /* __CPU_xxx  => no known cpu */
-
-#endif /* CC_GCC_LIKE_ASM */
+#include "atomic/atomic_common.h"
 
+#include "atomic/atomic_native.h"
 
+/* if no native operations, emulate them using locks */
 #if  ! defined HAVE_ASM_INLINE_ATOMIC_OPS || ! defined HAVE_ASM_INLINE_MEMBAR
 
 #include "atomic/atomic_unknown.h"

- atomic_add & atomic_cmpxchg added to ppc
- atomic_unknown (used when the processor does not support atomic ops or is not among the supported ones) now tries to use a "hash" of locks if GEN_LOCK_SET_T_UNLIMITED is defined => less contention on multi-CPUs
- atomic_ops.h defines *_UNLIMITED macros when the number of locks or the set size is limited only by the available memory (everything except SYSV sems)
- license changes: all the atomic* stuff and the locks are now under a BSD (OpenBSD) style license

Andrei Pelinescu-Onciul authored on 11/05/2007 20:44:15
@@ -3,26 +3,17 @@
  * 
  * Copyright (C) 2006 iptelorg GmbH
  *
- * This file is part of ser, a free SIP server.
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
  *
- * ser is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version
- *
- * For a license to use the ser software under conditions
- * other than those described here, or to purchase support for this
- * software, please contact iptel.org by e-mail at the following addresses:
- *    info@iptel.org
- *
- * ser is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 /*
  *  atomic operations and memory barriers
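
atomic_cmpxchg() is typically used in a retry loop. The sketch below is illustrative only (the function name and the cap are invented); it relies solely on the documented semantics: the old value is returned, and the store happens only when that old value matches the expected one.

/* increment a counter but never beyond max; returns 1 on success, 0 if capped */
static int counter_inc_capped(atomic_t *c, int max)
{
	int old, new_val;

	do {
		old = atomic_get(c);
		if (old >= max)
			return 0;       /* cap reached, value left unchanged */
		new_val = old + 1;
		/* retry if another CPU changed the counter in the meantime */
	} while (atomic_cmpxchg(c, old, new_val) != old);
	return 1;
}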

- added atomic ops & mem. barriers support for:
  - arm
  - arm v6 (untested, but it compiles ok)
  - alpha (untested, but it compiles ok)
- fastlock: minor fixes
- Makefile.defs: support for mips64 and armv6; various minor fixes

Andrei Pelinescu-Onciul authored on 31/03/2006 21:22:40
@@ -86,11 +86,13 @@
  *                            safe
  *                   __CPU_i386, __CPU_x86_64, X86_OOSTORE - see 
  *                       atomic/atomic_x86.h
- *                   __CPU_mips, __CPU_mip2, __CPU_mip64, MIPS_HAS_LLSC - see
+ *                   __CPU_mips, __CPU_mips2, __CPU_mips64, MIPS_HAS_LLSC - see
  *                       atomic/atomic_mip2.h
  *                   __CPU_ppc, __CPU_ppc64 - see atomic/atomic_ppc.h
  *                   __CPU_sparc - see atomic/atomic_sparc.h
  *                   __CPU_sparc64, SPARC64_MODE - see atomic/atomic_sparc64.h
+ *                   __CPU_arm, __CPU_arm6 - see atomic/atomic_arm.h
+ *                   __CPU_alpha - see atomic/atomic_alpha.h
  */
 /* 
  * History:
@@ -147,6 +149,14 @@ inline static int atomic_get(atomic_t *v)
 
 #include "atomic/atomic_sparc.h"
 
+#elif defined __CPU_arm || defined __CPU_arm6
+
+#include "atomic/atomic_arm.h"
+
+#elif defined __CPU_alpha
+
+#include "atomic/atomic_alpha.h"
+
 #endif /* __CPU_xxx  => no known cpu */
 
 #endif /* CC_GCC_LIKE_ASM */

- makefile:
  - compile in 64bit mode by default on sparc64
  - sparc <= v8 support
  - CC_GCC_LIKE_ASM is defined when the compiler supports gcc style inline asm (gcc and icc)

- atomic operations and memory barriers support for:
- x86
- x86_64
- mips (only in NOSMP mode and if it supports ll and sc)
- mips2 (mips32, isa >= 2)
- mips64
- powerpc
- powerpc64
- sparc <= v8 (only memory barriers, the atomic operations are implemented
using locks because there is no hardware support for them)
- sparc64 - both 32 (v8plus) and 64 bit mode
If there is no support for the compiler/arch. combination, it falls back to
locks.

The code is tested (only basic tests: it runs and the results are ok, but no
parallel tests) on x86, x86_64, mips2, powerpc, sparc64 (both modes).
The sparc version runs ok on sparc64 (so it's most likely ok).
powerpc64 and mips64 not tested due to no access to the corresponding
hardware, but they do compile ok.
For more details see the comments at the beginning of atomic_ops.h.

Andrei Pelinescu-Onciul authored on 30/03/2006 19:56:06
@@ -45,26 +45,52 @@
  *
  * not including memory barriers:
  *
- *  void atomic_set(atomic_t* v, long i)      -      v->val=i
- *  long atomic_get(atomic_t* v)              -       return v->val
+ *  void atomic_set(atomic_t* v, int i)      - v->val=i
+ *  int atomic_get(atomic_t* v)              - return v->val
+ *  int atomic_get_and_set(atomic_t *v, i)   - return old v->val, v->val=i
  *  void atomic_inc(atomic_t* v)
  *  void atomic_dec(atomic_t* v)
- *  long atomic_inc_and_test(atomic_t* v)     - returns 1 if the result is 0
- *  long atomic_dec_and_test(atomic_t* v)     - returns 1 if the result is 0
- *  void atomic_or (atomic_t* v, long mask)   - v->val|=mask 
- *  void atomic_and(atomic_t* v, long mask)   - v->val&=mask
+ *  int atomic_inc_and_test(atomic_t* v)     - returns 1 if the result is 0
+ *  int atomic_dec_and_test(atomic_t* v)     - returns 1 if the result is 0
+ *  void atomic_or (atomic_t* v, int mask)   - v->val|=mask 
+ *  void atomic_and(atomic_t* v, int mask)   - v->val&=mask
  * 
 * same ops, but with builtin memory barriers:
  *
- *  void mb_atomic_set(atomic_t* v, long i)      -      v->val=i
- *  long mb_atomic_get(atomic_t* v)              -       return v->val
+ *  void mb_atomic_set(atomic_t* v, int i)      -  v->val=i
+ *  int mb_atomic_get(atomic_t* v)              -  return v->val
+ *  int mb_atomic_get_and_set(atomic_t *v, i)   -  return old v->val, v->val=i
  *  void mb_atomic_inc(atomic_t* v)
  *  void mb_atomic_dec(atomic_t* v)
- *  long mb_atomic_inc_and_test(atomic_t* v)  - returns 1 if the result is 0
- *  long mb_atomic_dec_and_test(atomic_t* v)  - returns 1 if the result is 0
- *  void mb_atomic_or(atomic_t* v, long mask - v->val|=mask 
- *  void mb_atomic_and(atomic_t* v, long mask)- v->val&=mask
- * 
+ *  int mb_atomic_inc_and_test(atomic_t* v)  - returns 1 if the result is 0
+ *  int mb_atomic_dec_and_test(atomic_t* v)  - returns 1 if the result is 0
+ *  void mb_atomic_or(atomic_t* v, int mask - v->val|=mask 
+ *  void mb_atomic_and(atomic_t* v, int mask)- v->val&=mask
+ *
+ *  Same operations are available for int and long. The functions are named
+ *   after the following rules:
+ *     - add an int or long  suffix to the correspondent atomic function
+ *     -  volatile int* or volatile long* replace atomic_t* in the functions
+ *        declarations
+ *     -  long and int replace the parameter type (if the function has an extra
+ *        parameter) and the return value
+ *  E.g.:
+ *    long atomic_get_long(volatile long* v)
+ *    int atomic_get_int( volatile int* v)
+ *    long atomic_get_and_set(volatile long* v, long l)
+ *    int atomic_get_and_set(volatile int* v, int i)
+ *
+ * Config defines:   CC_GCC_LIKE_ASM  - the compiler support gcc style
+ *                     inline asm
+ *                   NOSMP - the code will be a little faster, but not SMP
+ *                            safe
+ *                   __CPU_i386, __CPU_x86_64, X86_OOSTORE - see 
+ *                       atomic/atomic_x86.h
+ *                   __CPU_mips, __CPU_mip2, __CPU_mip64, MIPS_HAS_LLSC - see
+ *                       atomic/atomic_mip2.h
+ *                   __CPU_ppc, __CPU_ppc64 - see atomic/atomic_ppc.h
+ *                   __CPU_sparc - see atomic/atomic_sparc.h
+ *                   __CPU_sparc64, SPARC64_MODE - see atomic/atomic_sparc64.h
  */
 /* 
  * History:
@@ -76,433 +102,59 @@
 
 /* atomic_t defined as a struct to easily catch non atomic ops. on it,
  * e.g.  atomic_t  foo; foo++  will generate a compile error */
-typedef struct{ volatile long val; } atomic_t; 
+typedef struct{ volatile int val; } atomic_t; 
 
 
 /* store and load operations are atomic on all cpus, note however that they
  * don't include memory barriers so if you want to use atomic_{get,set} 
- * to implement mutexes you must explicitely use the barriers */
-#define atomic_set(at_var, value)	((at_var)->val=(value))
-#define atomic_get(at_var) ((at_var)->val)
-
-/* init atomic ops */
-int atomic_ops_init();
-
-
-
-#if defined(__CPU_i386) || defined(__CPU_x86_64)
-
-#define HAVE_ASM_INLINE_ATOMIC_OPS
-
-#ifdef NOSMP
-#define __LOCK_PREF 
-#else
-#define __LOCK_PREF "lock ;"
-#endif
-
-
-/* memory barriers */
-
-#ifdef NOSMP
-
-#define membar()	asm volatile ("" : : : "memory")
-#define membar_read()	membar()
-#define membar_write()	membar()
+ * to implement mutexes you must use the mb_* versions or explicitely use
+ * the barriers */
 
-#else
+#define atomic_set_int(pvar, i) (*(pvar)=i)
+#define atomic_set_long(pvar, i) (*(pvar)=i)
+#define atomic_get_int(pvar) (*(pvar))
+#define atomic_get_long(pvar) (*(pvar))
 
-/* although most x86 do stores in order, we're playing it safe and use
- *  oostore ready write barriers */
-#define X86_OOSTORE 
+#define atomic_set(at_var, value)	(atomic_set_int(&((at_var)->val), (value)))
 
-/* membar: lfence, mfence, sfence available only on newer cpus, so for now
- * stick to lock addl */
-#define membar() \
-	asm volatile( \
-					" lock; addl $0, 0(%%esp) \n\t " \
-					: : : "memory" \
-				) 
-
-#define membar_read()	membar()
-
-#ifdef X86_OOSTORE
-/* out of order store version */
-#define membar_write()	membar()
-#else
-/* no oostore, most x86 cpus => do nothing, just a gcc do_not_cache barrier*/
-#define membar_write()	asm volatile ("" : : : "memory")
-#endif
-
-
-#endif /* NOSMP */
-
-
-#define atomic_inc(var) \
-	asm volatile( \
-			__LOCK_PREF " incl %0 \n\t"  \
-			: "=m"((var)->val) : "m"((var)->val) : "cc", "memory" \
-			) 
-
-#define atomic_dec(var) \
-	asm volatile( \
-			__LOCK_PREF " decl %0 \n\t" \
-			: "=m"((var)->val) : "m"((var)->val) : "cc", "memory" \
-			) 
-
-#define atomic_and(var, i) \
-	asm volatile( \
-			__LOCK_PREF " andl %1, %0 \n\t" \
-			: "=m"((var)->val) : "ri"((i)), "m"((var)->val) : "cc", "memory" \
-			)
-#define atomic_or(var, i) \
-	asm volatile( \
-			__LOCK_PREF " orl %1, %0 \n\t" \
-			: "=m"((var)->val) : "ri"((i)), "m"((var)->val) : "cc", "memory" \
-			)
-
-
-/* returns 1 if the result is 0 */
-inline static long atomic_inc_and_test(atomic_t* var)
+inline static int atomic_get(atomic_t *v)
 {
-	char ret;
-	
-	asm volatile(
-			__LOCK_PREF " incl %0 \n\t"
-			"setz  %1 \n\t"
-			: "=m"(var->val), "=qm"(ret) : "m" (var->val) : "cc", "memory"
-			);
-	return ret;
+	return atomic_get_int(&(v->val));
 }
 
 
-/* returns 1 if the result is 0 */
-inline static long atomic_dec_and_test(atomic_t* var)
-{
-	char ret;
-	
-	asm volatile(
-			__LOCK_PREF " decl %0 \n\t"
-			"setz  %1 \n\t"
-			: "=m"(var->val), "=qm"(ret) : "m" (var->val) : "cc", "memory"
-			);
-	return ret;
-}
-
-#ifdef NOSMP
-
-#define mb_atomic_set(v, i) \
-	do{ \
-		membar(); \
-		atomic_set(v, i); \
-	}while(0)
-
-
-inline static long mb_atomic_get(atomic_t* v)
-{
-	membar();
-	return atomic_get(v);
-}
-#else /* NOSMP */
-
-
-inline static void mb_atomic_set(atomic_t* v, long i)
-{
-	asm volatile(
-			"xchgl %1, %0 \n\t"
-			: "+q"(i), "=m"(v->val) : "m"((v)->val) : "memory" 
-			);
-}
-
-
-
-inline static long mb_atomic_get(atomic_t* var)
-{
-	long ret;
-	
-	asm volatile(
-			__LOCK_PREF " cmpxchg %0, %1 \n\t"
-			: "=a"(ret)  : "m"(var->val) : "cc", "memory"
-			);
-	return ret;
-}
-
-#endif /* NOSMP */
-
-
-/* on x86 atomic intructions act also as barriers */
-#define mb_atomic_inc(v)	atomic_inc(v)
-#define mb_atomic_dec(v)	atomic_dec(v)
-#define mb_atomic_or(v, m)	atomic_or(v, m)
-#define mb_atomic_and(v, m)	atomic_and(v, m)
-#define mb_atomic_inc_and_test(v)	atomic_inc_and_test(v)
-#define mb_atomic_dec_and_test(v)	atomic_dec_and_test(v)
-
-
-#elif defined __CPU_mips2 || ( defined __CPU_mips && defined MIPS_HAS_LLSC )
-
-#define HAVE_ASM_INLINE_ATOMIC_OPS
-
-#ifdef NOSMP
-#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
-#define membar_read()  membar()
-#define membar_write() membar()
-#else
-
-#define membar() \
-	asm volatile( \
-			".set push \n\t" \
-			".set noreorder \n\t" \
-			".set mips2 \n\t" \
-			"    sync\n\t" \
-			".set pop \n\t" \
-			: : : "memory" \
-			) 
-
-#define membar_read()  membar()
-#define membar_write() membar()
-
-#endif /* NOSMP */
-
 
+#ifdef CC_GCC_LIKE_ASM
 
-/* main asm block */
-#define ATOMIC_ASM_OP(op) \
-			".set push \n\t" \
-			".set noreorder \n\t" \
-			".set mips2 \n\t" \
-			"1:   ll %1, %0 \n\t" \
-			"     " op "\n\t" \
-			"     sc %2, %0 \n\t" \
-			"     beqz %2, 1b \n\t" \
-			"     nop \n\t" \
-			".set pop \n\t" 
+#if defined __CPU_i386 || defined __CPU_x86_64
 
+#include "atomic/atomic_x86.h"
 
-#define ATOMIC_FUNC_DECL(NAME, OP, RET_TYPE, RET_EXPR) \
-	inline static RET_TYPE atomic_##NAME (atomic_t *var) \
-	{ \
-		long ret, tmp; \
-		asm volatile( \
-			ATOMIC_ASM_OP(OP) \
-			: "=m"((var)->val), "=&r"(ret), "=&r"(tmp)  \
-			: "m"((var)->val) \
-			 \
-			); \
-		return RET_EXPR; \
-	}
+#elif defined __CPU_mips2 || defined __CPU_mips64 || \
+	  ( defined __CPU_mips && defined MIPS_HAS_LLSC )
 
+#include "atomic/atomic_mips2.h"
 
-/* same as above, but with CT in %3 */
-#define ATOMIC_FUNC_DECL_CT(NAME, OP, CT, RET_TYPE, RET_EXPR) \
-	inline static RET_TYPE atomic_##NAME (atomic_t *var) \
-	{ \
-		long ret, tmp; \
-		asm volatile( \
-			ATOMIC_ASM_OP(OP) \
-			: "=m"((var)->val), "=&r"(ret), "=&r"(tmp)  \
-			: "r"((CT)), "m"((var)->val) \
-			 \
-			); \
-		return RET_EXPR; \
-	}
+#elif defined __CPU_ppc || defined __CPU_ppc64
 
+#include "atomic/atomic_ppc.h"
 
-/* takes an extra param, i which goes in %3 */
-#define ATOMIC_FUNC_DECL1(NAME, OP, RET_TYPE, RET_EXPR) \
-	inline static RET_TYPE atomic_##NAME (atomic_t *var, long i) \
-	{ \
-		long ret, tmp; \
-		asm volatile( \
-			ATOMIC_ASM_OP(OP) \
-			: "=m"((var)->val), "=&r"(ret), "=&r"(tmp)  \
-			: "r"((i)), "m"((var)->val) \
-			 \
-			); \
-		return RET_EXPR; \
-	}
+#elif defined __CPU_sparc64
 
+#include "atomic/atomic_sparc64.h"
 
-ATOMIC_FUNC_DECL(inc,      "addiu %2, %1, 1", void, /* no return */ )
-ATOMIC_FUNC_DECL(inc_and_test, "addiu %2, %1, 1", long, (ret+1)==0 )
-
-ATOMIC_FUNC_DECL_CT(dec,   "subu %2, %1, %3", 1,  void, /* no return */ )
-ATOMIC_FUNC_DECL_CT(dec_and_test, "subu %2, %1, %3", 1, long, (ret-1)==0 )
-
-ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", void, /* no return */ )
-ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", void,  /* no return */ )
-
-
-/* with integrated membar */
-
-#define mb_atomic_set(v, i) \
-	do{ \
-		membar(); \
-		atomic_set(v, i); \
-	}while(0)
-
-
-
-inline static long mb_atomic_get(atomic_t* v)
-{
-	membar();
-	return atomic_get(v);
-}
-
-
-#define mb_atomic_inc(v) \
-	do{ \
-		membar(); \
-		atomic_inc(v); \
-	}while(0)
-
-#define mb_atomic_dec(v) \
-	do{ \
-		membar(); \
-		atomic_dec(v); \
-	}while(0)
-
-#define mb_atomic_or(v, m) \
-	do{ \
-		membar(); \
-		atomic_or(v, m); \
-	}while(0)
-
-#define mb_atomic_and(v, m) \
-	do{ \
-		membar(); \
-		atomic_and(v, m); \
-	}while(0)
-
-inline static int mb_atomic_inc_and_test(atomic_t* v)
-{
-	membar();