
core, lib, modules: restructured source code tree

- new folder src/ to hold the source code for main project applications
- main.c is in src/
- all core files and subfolders are in src/core/
- modules are in src/modules/
- libs are in src/lib/
- application Makefiles are in src/
- application binary is built in src/ (src/kamailio)

Daniel-Constantin Mierla authored on 07/12/2016 11:03:51
Showing 1 changed file
deleted file mode 100644
@@ -1,393 +0,0 @@
-/* 
- * Copyright (C) 2006 iptelorg GmbH
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/**
- * @file
- * @brief Atomic ops and memory barriers for ARM (>= v3)
- * 
- * Atomic ops and memory barriers for ARM architecture (starting from version 3)
- * see atomic_ops.h for more info.
- * 
- * Config defines:
- * - NOSMP
- * - __CPU_arm
- * - __CPU_arm6    - armv6 support (supports atomic ops via ldrex/strex)
- * @ingroup atomic
- */
-
-#ifndef _atomic_arm_h
-#define _atomic_arm_h
-
-
-
-
-#ifdef NOSMP
-#define HAVE_ASM_INLINE_MEMBAR
-#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
-#define membar_read()  membar()
-#define membar_write() membar()
-#define membar_depends()   do {} while(0) /* really empty, not even a cc bar.*/
-/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
- * contain gcc barriers*/
-#define membar_enter_lock() do {} while(0)
-#define membar_leave_lock() do {} while(0)
-/* membars after or before atomic_ops or atomic_setget -> use these or
- *  mb_<atomic_op_name>() if you need a memory barrier in one of these
- *  situations (on some archs where the atomic operations imply memory
- *   barriers is better to use atomic_op_x(); membar_atomic_op() then
- *    atomic_op_x(); membar()) */
-#define membar_atomic_op()				membar()
-#define membar_atomic_setget()			membar()
-#define membar_write_atomic_op()		membar_write()
-#define membar_write_atomic_setget()	membar_write()
-#define membar_read_atomic_op()			membar_read()
-#define membar_read_atomic_setget()		membar_read()
-#else /* SMP */
-#warning SMP not supported for arm atomic ops, try compiling with -DNOSMP
-/* fall back to default lock based barriers (don't define HAVE_ASM...) */
-#endif /* NOSMP */
-
-
-#ifdef __CPU_arm6
-
-
-#define HAVE_ASM_INLINE_ATOMIC_OPS
-
-/* hack to get some membars */
-#ifndef NOSMP
-#include "atomic_unknown.h"
-#endif
-
-/* main asm block 
- *  use %0 as input and write the output in %1*/
-#define ATOMIC_ASM_OP(op) \
-			"1:   ldrex %0, [%3] \n\t" \
-			"     " op "\n\t" \
-			"     strex %0, %1, [%3] \n\t" \
-			"     cmp %0, #0 \n\t" \
-			"     bne 1b \n\t"
-
-/* same as above but writes %4 instead of %1, and %0 will contain 
- * the prev. val*/
-#define ATOMIC_ASM_OP2(op) \
-			"1:   ldrex %0, [%3] \n\t" \
-			"     " op "\n\t" \
-			"     strex %1, %4, [%3] \n\t" \
-			"     cmp %1, #0 \n\t" \
-			"     bne 1b \n\t"
-
-/* no extra param, %0 contains *var, %1 should contain the result */
-#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
-	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
-	{ \
-		P_TYPE ret, tmp; \
-		asm volatile( \
-			ATOMIC_ASM_OP(OP) \
-			: "=&r"(tmp), "=&r"(ret), "=m"(*var) : "r"(var)  : "cc" \
-			); \
-		return RET_EXPR; \
-	}
-
-/* one extra param in %4 */
-#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
-	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
-														P_TYPE v) \
-	{ \
-		P_TYPE ret, tmp; \
-		asm volatile( \
-			ATOMIC_ASM_OP(OP) \
-			: "=&r"(tmp), "=&r"(ret), "=m"(*var) : "r"(var), "r"(v) : "cc" \
-			); \
-		return RET_EXPR; \
-	}
-
-
-/* as above, but %4 should contain the result, and %0 is returned*/
-#define ATOMIC_FUNC_DECL2(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
-	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
-														P_TYPE v) \
-	{ \
-		P_TYPE ret, tmp; \
-		asm volatile( \
-			ATOMIC_ASM_OP2(OP) \
-			: "=&r"(ret), "=&r"(tmp), "=m"(*var) : "r"(var), "r"(v) : "cc" \
-			); \
-		return RET_EXPR; \
-	}
-
-
-#define ATOMIC_XCHG_DECL(NAME, P_TYPE) \
-	inline static P_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
-														P_TYPE v ) \
-	{ \
-		P_TYPE ret; \
-		asm volatile( \
-			"     swp %0, %2, [%3] \n\t" \
-			: "=&r"(ret),  "=m"(*var) :\
-				"r"(v), "r"(var) \
-			); \
-		return ret; \
-	}
-
-
-/* cmpxchg: %5=old, %4=new_v, %3=var
- * if (*var==old) *var=new_v
- * returns the original *var (can be used to check if it succeeded: 
- *  if old==cmpxchg(var, old, new_v) -> success
- */
-#define ATOMIC_CMPXCHG_DECL(NAME, P_TYPE) \
-	inline static P_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
-														P_TYPE old, \
-														P_TYPE new_v) \
-	{ \
-		P_TYPE ret, tmp; \
-		asm volatile( \
-			"1:   ldrex %0, [%3] \n\t" \
-			"     cmp %0, %5 \n\t" \
-			"     strexeq %1, %4, [%3] \n\t" \
-			"     cmp %1, #0 \n\t" \
-			"     bne 1b \n\t" \
-			/* strexeq is exec. only if cmp was successful \
-			 * => if not successful %1 is not changed and remains 0 */ \
-			: "=&r"(ret), "=&r"(tmp), "=m"(*var) :\
-				"r"(var), "r"(new_v), "r"(old), "1"(0) : "cc" \
-			); \
-		return ret; \
-	}
-
-
-
-ATOMIC_FUNC_DECL(inc,      "add  %1, %0, #1", int, void, /* no return */ )
-ATOMIC_FUNC_DECL(dec,      "sub  %1, %0, #1", int, void, /* no return */ )
-ATOMIC_FUNC_DECL1(and,     "and  %1, %0, %4", int, void, /* no return */ )
-ATOMIC_FUNC_DECL1(or,      "orr  %1, %0, %4", int, void, /* no return */ )
-ATOMIC_FUNC_DECL(inc_and_test, "add  %1, %0, #1", int, int, ret==0 )
-ATOMIC_FUNC_DECL(dec_and_test, "sub  %1, %0, #1", int, int, ret==0 )
-//ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , int, int,  ret)
-ATOMIC_XCHG_DECL(get_and_set, int)
-ATOMIC_CMPXCHG_DECL(cmpxchg, int)
-ATOMIC_FUNC_DECL1(add,     "add  %1, %0, %4", int, int, ret )
-
-ATOMIC_FUNC_DECL(inc,      "add  %1, %0, #1", long, void, /* no return */ )
-ATOMIC_FUNC_DECL(dec,      "sub  %1, %0, #1", long, void, /* no return */ )
-ATOMIC_FUNC_DECL1(and,     "and  %1, %0, %4", long, void, /* no return */ )
-ATOMIC_FUNC_DECL1(or,      "orr  %1, %0, %4", long, void, /* no return */ )
-ATOMIC_FUNC_DECL(inc_and_test, "add  %1, %0, #1", long, long, ret==0 )
-ATOMIC_FUNC_DECL(dec_and_test, "sub  %1, %0, #1", long, long, ret==0 )
-//ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , long, long,  ret)
-ATOMIC_XCHG_DECL(get_and_set, long)
-ATOMIC_CMPXCHG_DECL(cmpxchg, long)
-ATOMIC_FUNC_DECL1(add,     "add  %1, %0, %4", long, long, ret )
-
-#define atomic_inc(var) atomic_inc_int(&(var)->val)
-#define atomic_dec(var) atomic_dec_int(&(var)->val)
-#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
-#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
-#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
-#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
-#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
-#define atomic_cmpxchg(var, old, new_v) \
-	atomic_cmpxchg_int(&(var)->val, old, new_v)
-#define atomic_add(var, v) atomic_add_int(&(var)->val, (v))
-
-
-/* with integrated membar */
-
-#define mb_atomic_set_int(v, i) \
-	do{ \
-		membar(); \
-		atomic_set_int(v, i); \
-	}while(0)
-
-
-
-inline static int mb_atomic_get_int(volatile int* v)
-{
-	membar();
-	return atomic_get_int(v);
-}
-
-
-#define mb_atomic_inc_int(v) \
-	do{ \
-		membar(); \
-		atomic_inc_int(v); \
-	}while(0)
-
-#define mb_atomic_dec_int(v) \
-	do{ \
-		membar(); \
-		atomic_dec_int(v); \
-	}while(0)
-
-#define mb_atomic_or_int(v, m) \
-	do{ \
-		membar(); \
-		atomic_or_int(v, m); \
-	}while(0)
-
-#define mb_atomic_and_int(v, m) \
-	do{ \
-		membar(); \
-		atomic_and_int(v, m); \
-	}while(0)
-
-inline static int mb_atomic_inc_and_test_int(volatile int* v)
-{
-	membar();
-	return atomic_inc_and_test_int(v);
-}
-
-inline static int mb_atomic_dec_and_test_int(volatile int* v)
-{
-	membar();
-	return atomic_dec_and_test_int(v);
-}
-
-
-inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
-{
-	membar();
-	return atomic_get_and_set_int(v, i);
-}
-
-inline static int mb_atomic_cmpxchg_int(volatile int* v, int o, int n)
-{
-	membar();
-	return atomic_cmpxchg_int(v, o, n);
-}
-
-inline static int mb_atomic_add_int(volatile int* v, int i)
-{
-	membar();
-	return atomic_add_int(v, i);
-}
-
-
-
-#define mb_atomic_set_long(v, i) \
-	do{ \
-		membar(); \
-		atomic_set_long(v, i); \
-	}while(0)
-
-
-
-inline static long mb_atomic_get_long(volatile long* v)
-{
-	membar();
-	return atomic_get_long(v);
-}
-
-
-#define mb_atomic_inc_long(v) \
-	do{ \
-		membar(); \
-		atomic_inc_long(v); \
-	}while(0)
-
-
-#define mb_atomic_dec_long(v) \
-	do{ \
-		membar(); \
-		atomic_dec_long(v); \
-	}while(0)
-
-#define mb_atomic_or_long(v, m) \
-	do{ \
-		membar(); \
-		atomic_or_long(v, m); \
-	}while(0)
-
-#define mb_atomic_and_long(v, m) \
-	do{ \
-		membar(); \
-		atomic_and_long(v, m); \
-	}while(0)
-
-inline static long mb_atomic_inc_and_test_long(volatile long* v)
-{
-	membar();
-	return atomic_inc_and_test_long(v);
-}
-
-inline static long mb_atomic_dec_and_test_long(volatile long* v)
-{
-	membar();
-	return atomic_dec_and_test_long(v);
-}
-
-
-inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
-{
-	membar();
-	return atomic_get_and_set_long(v, l);
-}
-
-inline static long mb_atomic_cmpxchg_long(volatile long* v, long o, long n)
-{
-	membar();
-	return atomic_cmpxchg_long(v, o, n);
-}
-
-inline static long mb_atomic_add_long(volatile long* v, long i)
-{
-	membar();
-	return atomic_add_long(v, i);
-}
-
-#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
-#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
-#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
-#define mb_atomic_or(var, mask)  mb_atomic_or_int(&(var)->val, (mask))
-#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
-#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
-#define mb_atomic_get(var)	mb_atomic_get_int(&(var)->val)
-#define mb_atomic_set(var, i)	mb_atomic_set_int(&(var)->val, i)
-#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
-#define mb_atomic_cmpxchg(var, o, n) mb_atomic_cmpxchg_int(&(var)->val, o, n)
-#define mb_atomic_add(var, i) mb_atomic_add_int(&(var)->val, i)
-
-
-
-#else /* ! __CPU_arm6 => __CPU_arm */
-
-/* no atomic ops for v <6 , only SWP supported
- * Atomic ops could be implemented if one bit is sacrificed and used like
- *  a spinlock, e.g:
- *          mov %r0, #0x1
- *       1: swp %r1, %r0, [&atomic_val]
- *          if (%r1 & 0x1) goto 1 # wait if first bit is 1 
- *          %r1>>=1  # restore the value (only 31 bits can be used )
- *          %r1=op (%r1, ...) 
- *          %r1<<=1   # shift back the value, such that the first bit is 0
- *          str %r1, [&atomic_val]  # write the value
- *
- * However only 31 bits could be used (=> atomic_*_int and atomic_*_long
- *  would still have to be lock based, since in these cases we guarantee all 
- *  the bits)  and I'm not sure there would be a significant performance
- *  benefit when compared with the fallback lock based version:
- *    lock(atomic_lock);
- *    atomic_val=op(*atomic_val, ...)
- *    unlock(atomic_lock);
- *
- *  -- andrei
- */
-
-#endif /* __CPU_arm6 */


-#endif
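
The cmpxchg comment in the removed header describes the success check (old == cmpxchg(var, old, new_v) means the exchange won). A minimal sketch of the retry-loop pattern a caller would build on atomic_cmpxchg_int(); the helper name and the bound logic are illustrative only, not part of the tree:

/* sketch: CAS retry loop on top of atomic_cmpxchg_int(); bounded_inc()
 * is a hypothetical helper, not part of the tree */
static int bounded_inc(volatile int *counter, int max)
{
	int old, new_v;

	do {
		old = *counter;                /* snapshot the current value */
		if (old >= max)
			return old;                /* already at the bound, give up */
		new_v = old + 1;
		/* atomic_cmpxchg_int() returns the original *counter;
		 * it equals old only if our update won */
	} while (atomic_cmpxchg_int(counter, old, new_v) != old);
	return new_v;
}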

atomic Remove history from source code

Olle E. Johansson authored on 03/04/2016 18:09:15
Showing 1 changed file
@@ -28,16 +28,6 @@
  * @ingroup atomic
  */
 
-/* 
- * History:
- * --------
- *  2006-03-31  created by andrei
- *  2007-05-10  added atomic_add and atomic_cmpxchg (andrei)
- *  2007-05-29  added membar_depends(), membar_*_atomic_op and
- *                membar_*_atomic_setget (andrei)
- */
-
-
 #ifndef _atomic_arm_h
 #define _atomic_arm_h
 

doxygen: convert existing docs to use proper doxygen structures and groups, small cleanups

Henning Westerholt authored on 23/06/2011 21:39:01
Showing 1 changed file
@@ -1,6 +1,4 @@
 /* 
- * $Id$
- * 
  * Copyright (C) 2006 iptelorg GmbH
  *
  * Permission to use, copy, modify, and distribute this software for any
@@ -16,15 +14,20 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-/** @file	@brief
- *  atomic ops and memory barriers for arm (>= v3)
- *  see atomic_ops.h for more details 
- *
- * Config defines: - NOSMP
- *                 - __CPU_arm
- *                 - __CPU_arm6    - armv6 support (supports atomic ops
- *                                    via ldrex/strex)
- */ 
+/**
+ * @file
+ * @brief Atomic ops and memory barriers for ARM (>= v3)
+ * 
+ * Atomic ops and memory barriers for ARM architecture (starting from version 3)
+ * see atomic_ops.h for more info.
+ * 
+ * Config defines:
+ * - NOSMP
+ * - __CPU_arm
+ * - __CPU_arm6    - armv6 support (supports atomic ops via ldrex/strex)
+ * @ingroup atomic
+ */
+
 /* 
  * History:
  * --------

Doxygen updates

oej authored on 25/10/2009 19:11:28
Showing 1 changed file
@@ -15,7 +15,8 @@
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
-/*
+
+/** @file	@brief
  *  atomic ops and memory barriers for arm (>= v3)
  *  see atomic_ops.h for more details 
  *

- armv6 arch fixes:
  - fixed return values for *atomic_inc_and_test* and *atomic_dec_and_test*
  - fixed *atomic_cmpxchg*: init %1 to 0 (or else if cmp fails and strexeq is
    not executed a "garbage" non zero value in %1 would cause an infinite loop)
  - removed "not tested" warning

Andrei Pelinescu-Onciul authored on 06/05/2008 16:35:35
Showing 1 changed file
@@ -39,7 +39,6 @@
 
 
 
-#warning "arm atomic operations support not tested"
 
 #ifdef NOSMP
 #define HAVE_ASM_INLINE_MEMBAR
@@ -167,8 +166,10 @@
 			"     strexeq %1, %4, [%3] \n\t" \
 			"     cmp %1, #0 \n\t" \
 			"     bne 1b \n\t" \
+			/* strexeq is exec. only if cmp was successful \
+			 * => if not successful %1 is not changed and remains 0 */ \
 			: "=&r"(ret), "=&r"(tmp), "=m"(*var) :\
-				"r"(var), "r"(new_v), "r"(old) : "cc" \
+				"r"(var), "r"(new_v), "r"(old), "1"(0) : "cc" \
 			); \
 		return ret; \
 	}
@@ -179,8 +180,8 @@ ATOMIC_FUNC_DECL(inc,      "add  %1, %0, #1", int, void, /* no return */ )
 ATOMIC_FUNC_DECL(dec,      "sub  %1, %0, #1", int, void, /* no return */ )
 ATOMIC_FUNC_DECL1(and,     "and  %1, %0, %4", int, void, /* no return */ )
 ATOMIC_FUNC_DECL1(or,      "orr  %1, %0, %4", int, void, /* no return */ )
-ATOMIC_FUNC_DECL(inc_and_test, "add  %1, %0, #1", int, int, ret )
-ATOMIC_FUNC_DECL(dec_and_test, "sub  %1, %0, #1", int, int, ret )
+ATOMIC_FUNC_DECL(inc_and_test, "add  %1, %0, #1", int, int, ret==0 )
+ATOMIC_FUNC_DECL(dec_and_test, "sub  %1, %0, #1", int, int, ret==0 )
 //ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , int, int,  ret)
 ATOMIC_XCHG_DECL(get_and_set, int)
 ATOMIC_CMPXCHG_DECL(cmpxchg, int)
@@ -190,8 +191,8 @@ ATOMIC_FUNC_DECL(inc,      "add  %1, %0, #1", long, void, /* no return */ )
 ATOMIC_FUNC_DECL(dec,      "sub  %1, %0, #1", long, void, /* no return */ )
 ATOMIC_FUNC_DECL1(and,     "and  %1, %0, %4", long, void, /* no return */ )
 ATOMIC_FUNC_DECL1(or,      "orr  %1, %0, %4", long, void, /* no return */ )
-ATOMIC_FUNC_DECL(inc_and_test, "add  %1, %0, #1", long, long, ret )
-ATOMIC_FUNC_DECL(dec_and_test, "sub  %1, %0, #1", long, long, ret )
+ATOMIC_FUNC_DECL(inc_and_test, "add  %1, %0, #1", long, long, ret==0 )
+ATOMIC_FUNC_DECL(dec_and_test, "sub  %1, %0, #1", long, long, ret==0 )
 //ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , long, long,  ret)
 ATOMIC_XCHG_DECL(get_and_set, long)
 ATOMIC_CMPXCHG_DECL(cmpxchg, long)
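
The return-value fix above matters because callers treat *_and_test() as a boolean "reached zero" test. A hedged sketch of the usual reference-counting idiom, assuming the atomic_t wrapper type from atomic_ops.h; the structure and the destructor are illustrative names only:

struct my_obj {
	atomic_t refcnt;   /* atomic_t comes from atomic_ops.h */
	/* ... payload ... */
};

static void my_obj_put(struct my_obj *o)
{
	/* with the fix, atomic_dec_and_test() is non-zero only when the
	 * counter really dropped to 0, so the destructor runs exactly once */
	if (atomic_dec_and_test(&o->refcnt))
		my_obj_destroy(o);   /* hypothetical destructor */
}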

- added membar_depends(), needed on smp archs with separate cache banks where it's possible to get a new pointer value, but the old pointer content (e.g. if the two are in different cache banks and the "content" bank is very busy processing a long invalidations queue). For now only Alpha SMP needs it, on all other archs it is a no-op (for more info see atomic_ops.h, http://lse.sourceforge.net/locking/wmbdd.html, http://www.linuxjournal.com/article/8212 or the Alpha Architecture Reference Manual Chapter 5.6).

- added membar_atomic_op(), membar_atomic_setget(), membar_read_atomic_op(),
membar_read_atomic_setget(), membar_write_atomic_op(),
membar_write_atomic_setget() -- special case memory barriers that can be
optimized if the atomic ops already force some kind of barrier (e.g. x86),
see the description in atomic_ops.h for more info.

Andrei Pelinescu-Onciul authored on 29/05/2007 11:31:29
Showing 1 changed file
@@ -29,6 +29,8 @@
  * --------
  *  2006-03-31  created by andrei
  *  2007-05-10  added atomic_add and atomic_cmpxchg (andrei)
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 
@@ -44,10 +46,22 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_depends()   do {} while(0) /* really empty, not even a cc bar.*/
 /* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
  * contain gcc barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 #else /* SMP */
 #warning SMP not supported for arm atomic ops, try compiling with -DNOSMP
 /* fall back to default lock based barriers (don't define HAVE_ASM...) */
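
A sketch of the situation membar_depends() is meant for, per the commit message above: a reader following a pointer that another CPU has just published. The writer side would do new->val = x; membar_write(); shared_ptr = new;. All names here are illustrative, not from the tree:

struct cfg { int val; };
extern struct cfg *volatile shared_ptr;   /* published by another process */

static int read_published_val(void)
{
	struct cfg *p;

	p = shared_ptr;       /* load the published pointer */
	membar_depends();     /* order the dependent load of *p; a real barrier
	                       * only on alpha SMP, a no-op everywhere else */
	return p->val;
}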

- parts of atomic_ops.h moved into atomic/atomic_common.h and atomic/atomic_native.h

- added membar_enter_lock() and membar_leave_lock() (to be used only if
creating locks using the atomic ops functions, for more info see atomic_ops.h)

Andrei Pelinescu-Onciul authored on 14/05/2007 17:29:31
Showing 1 changed file
@@ -44,6 +44,10 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
 #else /* SMP */
 #warning SMP not supported for arm atomic ops, try compiling with -DNOSMP
 /* fall back to default lock based barriers (don't define HAVE_ASM...) */
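
membar_enter_lock()/membar_leave_lock() only matter when a lock is built directly from the atomic ops, as the commit message says. A minimal sketch of such a lock using atomic_get_and_set_int(); my_lock()/my_unlock() and the lock variable are illustrative, not part of the tree:

static volatile int my_lock_var = 0;

static void my_lock(void)
{
	/* spin until the previous value was 0, i.e. we took the lock */
	while (atomic_get_and_set_int(&my_lock_var, 1) != 0)
		;
	membar_enter_lock();   /* keep protected reads/writes after the acquire */
}

static void my_unlock(void)
{
	membar_leave_lock();   /* make protected writes visible before release */
	my_lock_var = 0;
}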

- atomic_add & atomic_cmpxchg added to ppc
- atomic_unknown (used when the processor does not support atomic ops or is not among the supported ones) tries now to use a "hash" of locks if GEN_LOCK_SET_T_UNLIMITED is defined => less contention on multi-cpus
- atomic_ops.h defines *_UNLIMITED macros when the number of locks or the set size are limited only by the available memory (everything except SYSV sems)
- license changes: all the atomic* stuff and the locks are now under a BSD (OpenBSD) style license

Andrei Pelinescu-Onciul authored on 11/05/2007 20:44:15
Showing 1 changed file
@@ -3,26 +3,17 @@
  * 
  * Copyright (C) 2006 iptelorg GmbH
  *
- * This file is part of ser, a free SIP server.
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
  *
- * ser is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version
- *
- * For a license to use the ser software under conditions
- * other than those described here, or to purchase support for this
- * software, please contact iptel.org by e-mail at the following addresses:
- *    info@iptel.org
- *
- * ser is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 /*
  *  atomic ops and memory barriers for arm (>= v3)
@@ -260,6 +251,18 @@ inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
 	return atomic_get_and_set_int(v, i);
 }
 
+inline static int mb_atomic_cmpxchg_int(volatile int* v, int o, int n)
+{
+	membar();
+	return atomic_cmpxchg_int(v, o, n);
+}
+
+inline static int mb_atomic_add_int(volatile int* v, int i)
+{
+	membar();
+	return atomic_add_int(v, i);
+}
+
 
 
 #define mb_atomic_set_long(v, i) \
@@ -321,6 +324,17 @@ inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
 	return atomic_get_and_set_long(v, l);
 }
 
+inline static long mb_atomic_cmpxchg_long(volatile long* v, long o, long n)
+{
+	membar();
+	return atomic_cmpxchg_long(v, o, n);
+}
+
+inline static long mb_atomic_add_long(volatile long* v, long i)
+{
+	membar();
+	return atomic_add_long(v, i);
+}
 
 #define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
 #define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
@@ -331,6 +345,9 @@ inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
 #define mb_atomic_get(var)	mb_atomic_get_int(&(var)->val)
 #define mb_atomic_set(var, i)	mb_atomic_set_int(&(var)->val, i)
 #define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
+#define mb_atomic_cmpxchg(var, o, n) mb_atomic_cmpxchg_int(&(var)->val, o, n)
+#define mb_atomic_add(var, i) mb_atomic_add_int(&(var)->val, i)
+
 
 
 #else /* ! __CPU_arm6 => __CPU_arm */
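
For CPUs with no usable atomic instructions, the commit message above describes the atomic_unknown fallback: every operation becomes lock; op; unlock, optionally spreading variables over a small set ("hash") of locks to cut contention. A rough sketch of that idea using the ser locking API (gen_lock_t, lock_get, lock_release); the lock-set name, its size and the hashing are illustrative assumptions, not the actual atomic_unknown.h code:

#define FALLBACK_LOCKS 16                 /* assumed power of two */
static gen_lock_t *fallback_locks;        /* assumed initialized at startup */

static int fallback_atomic_add_int(volatile int *var, int v)
{
	int r;
	/* pick a lock from the address so unrelated counters rarely collide */
	gen_lock_t *l = &fallback_locks[((unsigned long)var >> 2) &
									(FALLBACK_LOCKS - 1)];

	lock_get(l);
	*var += v;
	r = *var;
	lock_release(l);
	return r;
}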

- alpha, armv6 and mips isa2+ atomic_add and atomic_cmpxchg (armv6 & alpha not tested at all due to lacking hardware or emulators)

Andrei Pelinescu-Onciul authored on 10/05/2007 18:27:07
Showing 1 changed file
@@ -37,6 +37,7 @@
  * History:
  * --------
  *  2006-03-31  created by andrei
+ *  2007-05-10  added atomic_add and atomic_cmpxchg (andrei)
  */
 
 
@@ -126,6 +127,44 @@
 	}
 
 
+#define ATOMIC_XCHG_DECL(NAME, P_TYPE) \
+	inline static P_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+														P_TYPE v ) \
+	{ \
+		P_TYPE ret; \
+		asm volatile( \
+			"     swp %0, %2, [%3] \n\t" \
+			: "=&r"(ret),  "=m"(*var) :\
+				"r"(v), "r"(var) \
+			); \
+		return ret; \
+	}
+
+
+/* cmpxchg: %5=old, %4=new_v, %3=var
+ * if (*var==old) *var=new_v
+ * returns the original *var (can be used to check if it succeeded: 
+ *  if old==cmpxchg(var, old, new_v) -> success
+ */
+#define ATOMIC_CMPXCHG_DECL(NAME, P_TYPE) \
+	inline static P_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+														P_TYPE old, \
+														P_TYPE new_v) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			"1:   ldrex %0, [%3] \n\t" \
+			"     cmp %0, %5 \n\t" \
+			"     strexeq %1, %4, [%3] \n\t" \
+			"     cmp %1, #0 \n\t" \
+			"     bne 1b \n\t" \
+			: "=&r"(ret), "=&r"(tmp), "=m"(*var) :\
+				"r"(var), "r"(new_v), "r"(old) : "cc" \
+			); \
+		return ret; \
+	}
+
+
 
 ATOMIC_FUNC_DECL(inc,      "add  %1, %0, #1", int, void, /* no return */ )
 ATOMIC_FUNC_DECL(dec,      "sub  %1, %0, #1", int, void, /* no return */ )
@@ -133,7 +172,10 @@ ATOMIC_FUNC_DECL1(and,     "and  %1, %0, %4", int, void, /* no return */ )
 ATOMIC_FUNC_DECL1(or,      "orr  %1, %0, %4", int, void, /* no return */ )
 ATOMIC_FUNC_DECL(inc_and_test, "add  %1, %0, #1", int, int, ret )
 ATOMIC_FUNC_DECL(dec_and_test, "sub  %1, %0, #1", int, int, ret )
-ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , int, int,  ret)
+//ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , int, int,  ret)
+ATOMIC_XCHG_DECL(get_and_set, int)
+ATOMIC_CMPXCHG_DECL(cmpxchg, int)
+ATOMIC_FUNC_DECL1(add,     "add  %1, %0, %4", int, int, ret )
 
 ATOMIC_FUNC_DECL(inc,      "add  %1, %0, #1", long, void, /* no return */ )
 ATOMIC_FUNC_DECL(dec,      "sub  %1, %0, #1", long, void, /* no return */ )
@@ -141,7 +183,10 @@ ATOMIC_FUNC_DECL1(and,     "and  %1, %0, %4", long, void, /* no return */ )
 ATOMIC_FUNC_DECL1(or,      "orr  %1, %0, %4", long, void, /* no return */ )
 ATOMIC_FUNC_DECL(inc_and_test, "add  %1, %0, #1", long, long, ret )
 ATOMIC_FUNC_DECL(dec_and_test, "sub  %1, %0, #1", long, long, ret )
-ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , long, long,  ret)
+//ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , long, long,  ret)
+ATOMIC_XCHG_DECL(get_and_set, long)
+ATOMIC_CMPXCHG_DECL(cmpxchg, long)
+ATOMIC_FUNC_DECL1(add,     "add  %1, %0, %4", long, long, ret )
 
 #define atomic_inc(var) atomic_inc_int(&(var)->val)
 #define atomic_dec(var) atomic_dec_int(&(var)->val)
@@ -150,6 +195,9 @@ ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , long, long,  ret)
 #define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
 #define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
 #define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
+#define atomic_cmpxchg(var, old, new_v) \
+	atomic_cmpxchg_int(&(var)->val, old, new_v)
+#define atomic_add(var, v) atomic_add_int(&(var)->val, (v))
 
 
 /* with integrated membar */
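
The wrappers added at the end of this hunk operate on the atomic_t type from atomic_ops.h. A short, illustrative usage sketch of atomic_add() (which returns the new value) and atomic_cmpxchg() (which returns the value found before the exchange); the variable and function names are made up:

atomic_t pending;   /* assumed initialized with atomic_set() elsewhere */

void example_usage(void)
{
	int now, seen;

	now = atomic_add(&pending, 2);            /* new value after the addition */
	seen = atomic_cmpxchg(&pending, now, 0);  /* reset only if still == now */
	if (seen == now) {
		/* the reset won; 'seen' holds the value that was replaced */
	}
}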

- added atomic ops & mem. barriers support for:
  - arm
  - arm v6 (untested, but it compiles ok)
  - alpha (untested, but it compiles ok)
- fastlock: minor fixes
- Makefile.defs: support for mips64 and armv6; various minor fixes

Andrei Pelinescu-Onciul authored on 31/03/2006 21:22:40
Showing 1 changed file
new file mode 100644
@@ -0,0 +1,315 @@
+/* 
+ * $Id$
+ * 
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * This file is part of ser, a free SIP server.
+ *
+ * ser is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version
+ *
+ * For a license to use the ser software under conditions
+ * other than those described here, or to purchase support for this
+ * software, please contact iptel.org by e-mail at the following addresses:
+ *    info@iptel.org
+ *
+ * ser is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ *  atomic ops and memory barriers for arm (>= v3)
+ *  see atomic_ops.h for more details 
+ *
+ * Config defines: - NOSMP
+ *                 - __CPU_arm
+ *                 - __CPU_arm6    - armv6 support (supports atomic ops
+ *                                    via ldrex/strex)
+ */ 
+/* 
+ * History:
+ * --------
+ *  2006-03-31  created by andrei
+ */
+
+
+#ifndef _atomic_arm_h
+#define _atomic_arm_h
+
+
+
+#warning "arm atomic operations support not tested"
+
+#ifdef NOSMP
+#define HAVE_ASM_INLINE_MEMBAR
+#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
+#define membar_read()  membar()
+#define membar_write() membar()
+#else /* SMP */
+#warning SMP not supported for arm atomic ops, try compiling with -DNOSMP
+/* fall back to default lock based barriers (don't define HAVE_ASM...) */
+#endif /* NOSMP */
+
+
+#ifdef __CPU_arm6
+
+
+#define HAVE_ASM_INLINE_ATOMIC_OPS
+
+/* hack to get some membars */
+#ifndef NOSMP
+#include "atomic_unknown.h"
+#endif
+
+/* main asm block 
+ *  use %0 as input and write the output in %1*/
+#define ATOMIC_ASM_OP(op) \
+			"1:   ldrex %0, [%3] \n\t" \
+			"     " op "\n\t" \
+			"     strex %0, %1, [%3] \n\t" \
+			"     cmp %0, #0 \n\t" \
+			"     bne 1b \n\t"
+
+/* same as above but writes %4 instead of %1, and %0 will contain 
+ * the prev. val*/
+#define ATOMIC_ASM_OP2(op) \
+			"1:   ldrex %0, [%3] \n\t" \
+			"     " op "\n\t" \
+			"     strex %1, %4, [%3] \n\t" \
+			"     cmp %1, #0 \n\t" \
+			"     bne 1b \n\t"
+
+/* no extra param, %0 contains *var, %1 should contain the result */
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			ATOMIC_ASM_OP(OP) \
+			: "=&r"(tmp), "=&r"(ret), "=m"(*var) : "r"(var)  : "cc" \
+			); \
+		return RET_EXPR; \
+	}
+
+/* one extra param in %4 */
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+														P_TYPE v) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			ATOMIC_ASM_OP(OP) \
+			: "=&r"(tmp), "=&r"(ret), "=m"(*var) : "r"(var), "r"(v) : "cc" \
+			); \
+		return RET_EXPR; \
+	}
+
+
+/* as above, but %4 should contain the result, and %0 is returned*/
+#define ATOMIC_FUNC_DECL2(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+														P_TYPE v) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			ATOMIC_ASM_OP2(OP) \
+			: "=&r"(ret), "=&r"(tmp), "=m"(*var) : "r"(var), "r"(v) : "cc" \
+			); \
+		return RET_EXPR; \
+	}
+
+
+
+ATOMIC_FUNC_DECL(inc,      "add  %1, %0, #1", int, void, /* no return */ )
+ATOMIC_FUNC_DECL(dec,      "sub  %1, %0, #1", int, void, /* no return */ )
+ATOMIC_FUNC_DECL1(and,     "and  %1, %0, %4", int, void, /* no return */ )
+ATOMIC_FUNC_DECL1(or,      "orr  %1, %0, %4", int, void, /* no return */ )
+ATOMIC_FUNC_DECL(inc_and_test, "add  %1, %0, #1", int, int, ret )
+ATOMIC_FUNC_DECL(dec_and_test, "sub  %1, %0, #1", int, int, ret )
+ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , int, int,  ret)
+
+ATOMIC_FUNC_DECL(inc,      "add  %1, %0, #1", long, void, /* no return */ )
+ATOMIC_FUNC_DECL(dec,      "sub  %1, %0, #1", long, void, /* no return */ )
+ATOMIC_FUNC_DECL1(and,     "and  %1, %0, %4", long, void, /* no return */ )
+ATOMIC_FUNC_DECL1(or,      "orr  %1, %0, %4", long, void, /* no return */ )
+ATOMIC_FUNC_DECL(inc_and_test, "add  %1, %0, #1", long, long, ret )
+ATOMIC_FUNC_DECL(dec_and_test, "sub  %1, %0, #1", long, long, ret )
+ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , long, long,  ret)
+
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
+#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
+
+
+/* with integrated membar */
+
+#define mb_atomic_set_int(v, i) \
+	do{ \
+		membar(); \
+		atomic_set_int(v, i); \
+	}while(0)
+
+
+
+inline static int mb_atomic_get_int(volatile int* v)
+{
+	membar();
+	return atomic_get_int(v);
+}
+
+
+#define mb_atomic_inc_int(v) \
+	do{ \
+		membar(); \
+		atomic_inc_int(v); \
+	}while(0)
+
+#define mb_atomic_dec_int(v) \