core, lib, modules: restructured source code tree

- new folder src/ to hold the source code for main project applications
- main.c is in src/
- all core files and subfolders are in src/core/
- modules are in src/modules/
- libs are in src/lib/
- application Makefiles are in src/
- application binary is built in src/ (src/kamailio)

Daniel-Constantin Mierla authored on 07/12/2016 11:03:51
deleted file mode 100644
@@ -1,410 +0,0 @@
-/* 
- * Copyright (C) 2006 iptelorg GmbH
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/** 
- * @file 
- * @brief Atomic operations and memory barriers (MIPS isa 2 and MIPS64 specific)
- * 
- * Atomic operations and memory barriers (MIPS isa 2 and MIPS64 specific)
- * \warning atomic ops do not include memory barriers, see atomic_ops.h for
- * more details.
- * \warning not tested on MIPS64 (not even a compile test)
- *
- * Config defines:
- * - NOSMP (in NOSMP mode it will also work on mips isa 1 CPUs that support
- *   LL and SC, see MIPS_HAS_LLSC in atomic_ops.h)
- * - __CPU_MIPS64 (mips64 arch., in 64 bit mode: long and void* are 64 bits)
- * - __CPU_MIPS2 or __CPU_MIPS && MIPS_HAS_LLSC && NOSMP (if __CPU_MIPS64 is not defined)
- * @ingroup atomic
- */
-
-#ifndef _atomic_mips2_h
-#define _atomic_mips2_h
-
-#define HAVE_ASM_INLINE_ATOMIC_OPS
-#define HAVE_ASM_INLINE_MEMBAR
-
-#ifdef __CPU_mips64
-#warning mips64 atomic code was not tested, please report problems to \
-		serdev@iptel.org or andrei@iptel.org
-#endif
-
-#ifdef NOSMP
-#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
-#define membar_read()  membar()
-#define membar_write() membar()
-#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
-/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
- * contain gcc barriers*/
-#define membar_enter_lock() do {} while(0)
-#define membar_leave_lock() do {} while(0)
-/* membars after or before atomic_ops or atomic_setget -> use these or
- *  mb_<atomic_op_name>() if you need a memory barrier in one of these
- *  situations (on some archs where the atomic operations imply memory
- *   barriers is better to use atomic_op_x(); membar_atomic_op() then
- *    atomic_op_x(); membar()) */
-#define membar_atomic_op()				membar()
-#define membar_atomic_setget()			membar()
-#define membar_write_atomic_op()		membar_write()
-#define membar_write_atomic_setget()	membar_write()
-#define membar_read_atomic_op()			membar_read()
-#define membar_read_atomic_setget()		membar_read()
-
-#else
-
-#define membar() \
-	asm volatile( \
-			".set push \n\t" \
-			".set noreorder \n\t" \
-			".set mips2 \n\t" \
-			"    sync\n\t" \
-			".set pop \n\t" \
-			: : : "memory" \
-			) 
-
-#define membar_read()  membar()
-#define membar_write() membar()
-#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
-#define membar_enter_lock() membar()
-#define membar_leave_lock() membar()
-/* membars after or before atomic_ops or atomic_setget -> use these or
- *  mb_<atomic_op_name>() if you need a memory barrier in one of these
- *  situations (on some archs where the atomic operations imply memory
- *   barriers is better to use atomic_op_x(); membar_atomic_op() then
- *    atomic_op_x(); membar()) */
-#define membar_atomic_op()				membar()
-#define membar_atomic_setget()			membar()
-#define membar_write_atomic_op()		membar_write()
-#define membar_write_atomic_setget()	membar_write()
-#define membar_read_atomic_op()			membar_read()
-#define membar_read_atomic_setget()		membar_read()
-
-#endif /* NOSMP */
-
-
-
-/* main asm block */
-#define ATOMIC_ASM_OP_int(op) \
-			".set push \n\t" \
-			".set noreorder \n\t" \
-			".set mips2 \n\t" \
-			"1:   ll %1, %0 \n\t" \
-			"     " op "\n\t" \
-			"     sc %2, %0 \n\t" \
-			"     beqz %2, 1b \n\t" \
-			"     nop \n\t" /* delay slot */ \
-			".set pop \n\t" 
-
-#ifdef __CPU_mips64
-#define ATOMIC_ASM_OP_long(op) \
-			".set push \n\t" \
-			".set noreorder \n\t" \
-			"1:   lld %1, %0 \n\t" \
-			"     " op "\n\t" \
-			"     scd %2, %0 \n\t" \
-			"     beqz %2, 1b \n\t" \
-			"     nop \n\t" /* delay slot */ \
-			".set pop \n\t" 
-#else /* ! __CPU_mips64 => __CPU_mips2 or __CPU_mips & MIPS_HAS_LLSC */
-#define ATOMIC_ASM_OP_long(op) ATOMIC_ASM_OP_int(op)
-#endif
-
-
-#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
-	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
-	{ \
-		P_TYPE ret, tmp; \
-		asm volatile( \
-			ATOMIC_ASM_OP_##P_TYPE(OP) \
-			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
-			: "m"(*var) \
-			 \
-			); \
-		return RET_EXPR; \
-	}
-
-
-/* same as above, but with CT in %3 */
-#define ATOMIC_FUNC_DECL_CT(NAME, OP, CT, P_TYPE, RET_TYPE, RET_EXPR) \
-	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
-	{ \
-		P_TYPE ret, tmp; \
-		asm volatile( \
-			ATOMIC_ASM_OP_##P_TYPE(OP) \
-			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
-			: "r"((CT)), "m"(*var) \
-			 \
-			); \
-		return RET_EXPR; \
-	}
-
-
-/* takes an extra param, i which goes in %3 */
-#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
-	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
-														P_TYPE i) \
-	{ \
-		P_TYPE ret, tmp; \
-		asm volatile( \
-			ATOMIC_ASM_OP_##P_TYPE(OP) \
-			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
-			: "r"((i)), "m"(*var) \
-			 \
-			); \
-		return RET_EXPR; \
-	}
-
-
-/* takes an extra param, like above, but i  goes in %2 */
-#define ATOMIC_FUNC_DECL2(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
-	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
-														P_TYPE i) \
-	{ \
-		P_TYPE ret; \
-		asm volatile( \
-			ATOMIC_ASM_OP_##P_TYPE(OP) \
-			: "=m"(*var), "=&r"(ret), "+&r"(i)  \
-			: "m"(*var) \
-			 \
-			); \
-		return RET_EXPR; \
-	}
-
-
-/* %0=var, %1=*var, %2=new, %3=old :
- * ret=*var; if *var==old  then *var=new; return ret
- * => if succesfull (changed var to new)  ret==old */
-#define ATOMIC_CMPXCHG_DECL(NAME, P_TYPE) \
-	inline static P_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
-														P_TYPE old, \
-														P_TYPE new_v) \
-	{ \
-		asm volatile( \
-			ATOMIC_ASM_OP_##P_TYPE("bne %1, %3, 2f \n\t nop") \
-			"2:    \n\t" \
-			: "=m"(*var), "=&r"(old), "=r"(new_v)  \
-			: "r"(old), "m"(*var), "2"(new_v) \
-			 \
-			); \
-		return old; \
-	}
-
-ATOMIC_FUNC_DECL(inc,      "addiu %2, %1, 1", int, void, /* no return */ )
-ATOMIC_FUNC_DECL_CT(dec,   "subu %2, %1, %3", 1,  int, void, /* no return */ )
-ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", int, void, /* no return */ )
-ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", int, void,  /* no return */ )
-ATOMIC_FUNC_DECL(inc_and_test, "addiu %2, %1, 1", int, int, (ret+1)==0 )
-ATOMIC_FUNC_DECL_CT(dec_and_test, "subu %2, %1, %3", 1, int, int, (ret-1)==0 )
-ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, int, int, ret )
-ATOMIC_CMPXCHG_DECL(cmpxchg, int)
-ATOMIC_FUNC_DECL1(add, "addu %2, %1, %3 \n\t move %1, %2", int, int, ret )
-
-#ifdef __CPU_mips64
-
-ATOMIC_FUNC_DECL(inc,      "daddiu %2, %1, 1", long, void, /* no return */ )
-ATOMIC_FUNC_DECL_CT(dec,   "dsubu %2, %1, %3", 1,  long, void, /* no return */ )
-ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", long, void, /* no return */ )
-ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", long, void,  /* no return */ )
-ATOMIC_FUNC_DECL(inc_and_test, "daddiu %2, %1, 1", long, long, (ret+1)==0 )
-ATOMIC_FUNC_DECL_CT(dec_and_test, "dsubu %2, %1, %3", 1,long, long, (ret-1)==0 )
-ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, long, long, ret )
-ATOMIC_CMPXCHG_DECL(cmpxchg, long)
-ATOMIC_FUNC_DECL1(add, "daddu %2, %1, %3 \n\t move %1, %2", long, long, ret )
-
-#else /* ! __CPU_mips64 => __CPU_mips2 or __CPU_mips */
-
-ATOMIC_FUNC_DECL(inc,      "addiu %2, %1, 1", long, void, /* no return */ )
-ATOMIC_FUNC_DECL_CT(dec,   "subu %2, %1, %3", 1,  long, void, /* no return */ )
-ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", long, void, /* no return */ )
-ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", long, void,  /* no return */ )
-ATOMIC_FUNC_DECL(inc_and_test, "addiu %2, %1, 1", long, long, (ret+1)==0 )
-ATOMIC_FUNC_DECL_CT(dec_and_test, "subu %2, %1, %3", 1,long, long, (ret-1)==0 )
-ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, long, long, ret )
-ATOMIC_CMPXCHG_DECL(cmpxchg, long)
-ATOMIC_FUNC_DECL1(add, "addu %2, %1, %3 \n\t move %1, %2", long, long, ret )
-
-#endif /* __CPU_mips64 */
-
-#define atomic_inc(var) atomic_inc_int(&(var)->val)
-#define atomic_dec(var) atomic_dec_int(&(var)->val)
-#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
-#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
-#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
-#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
-#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
-#define atomic_add(var, i) atomic_add_int(&(var)->val, i)
-#define atomic_cmpxchg(var, old, new_v)  \
-	atomic_cmpxchg_int(&(var)->val, old, new_v)
-
-
-/* with integrated membar */
-
-#define mb_atomic_set_int(v, i) \
-	do{ \
-		membar(); \
-		atomic_set_int(v, i); \
-	}while(0)
-
-
-
-inline static int mb_atomic_get_int(volatile int* v)
-{
-	membar();
-	return atomic_get_int(v);
-}
-
-
-#define mb_atomic_inc_int(v) \
-	do{ \
-		membar(); \
-		atomic_inc_int(v); \
-	}while(0)
-
-#define mb_atomic_dec_int(v) \
-	do{ \
-		membar(); \
-		atomic_dec_int(v); \
-	}while(0)
-
-#define mb_atomic_or_int(v, m) \
-	do{ \
-		membar(); \
-		atomic_or_int(v, m); \
-	}while(0)
-
-#define mb_atomic_and_int(v, m) \
-	do{ \
-		membar(); \
-		atomic_and_int(v, m); \
-	}while(0)
-
-inline static int mb_atomic_inc_and_test_int(volatile int* v)
-{
-	membar();
-	return atomic_inc_and_test_int(v);
-}
-
-inline static int mb_atomic_dec_and_test_int(volatile int* v)
-{
-	membar();
-	return atomic_dec_and_test_int(v);
-}
-
-
-inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
-{
-	membar();
-	return atomic_get_and_set_int(v, i);
-}
-
-inline static int mb_atomic_cmpxchg_int(volatile int* v, int o, int n)
-{
-	membar();
-	return atomic_cmpxchg_int(v, o, n);
-}
-
-inline static int mb_atomic_add_int(volatile int* v, int i)
-{
-	membar();
-	return atomic_add_int(v, i);
-}
-
-
-#define mb_atomic_set_long(v, i) \
-	do{ \
-		membar(); \
-		atomic_set_long(v, i); \
-	}while(0)
-
-
-
-inline static long mb_atomic_get_long(volatile long* v)
-{
-	membar();
-	return atomic_get_long(v);
-}
-
-
-#define mb_atomic_inc_long(v) \
-	do{ \
-		membar(); \
-		atomic_inc_long(v); \
-	}while(0)
-
-
-#define mb_atomic_dec_long(v) \
-	do{ \
-		membar(); \
-		atomic_dec_long(v); \
-	}while(0)
-
-#define mb_atomic_or_long(v, m) \
-	do{ \
-		membar(); \
-		atomic_or_long(v, m); \
-	}while(0)
-
-#define mb_atomic_and_long(v, m) \
-	do{ \
-		membar(); \
-		atomic_and_long(v, m); \
-	}while(0)
-
-inline static long mb_atomic_inc_and_test_long(volatile long* v)
-{
-	membar();
-	return atomic_inc_and_test_long(v);
-}
-
-inline static long mb_atomic_dec_and_test_long(volatile long* v)
-{
-	membar();
-	return atomic_dec_and_test_long(v);
-}
-
-
-inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
-{
-	membar();
-	return atomic_get_and_set_long(v, l);
-}
-
-inline static long mb_atomic_cmpxchg_long(volatile long* v, long o, long n)
-{
-	membar();
-	return atomic_cmpxchg_long(v, o, n);
-}
-
-inline static long mb_atomic_add_long(volatile long* v, long i)
-{
-	membar();
-	return atomic_add_long(v, i);
-}
-
-
-#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
-#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
-#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
-#define mb_atomic_or(var, mask)  mb_atomic_or_int(&(var)->val, (mask))
-#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
-#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
-#define mb_atomic_get(var)	mb_atomic_get_int(&(var)->val)
-#define mb_atomic_set(var, i)	mb_atomic_set_int(&(var)->val, i)
-#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
-#define mb_atomic_cmpxchg(var, o, n) mb_atomic_cmpxchg_int(&(var)->val, o, n)
-#define mb_atomic_add(var, i) mb_atomic_add_int(&(var)->val, i)
-
-#endif

atomic Remove history from source code

Olle E. Johansson authored on 03/04/2016 18:09:15
@@ -31,16 +31,6 @@
  * @ingroup atomic
  */
 
-/* 
- * History:
- * --------
- *  2006-03-08  created by andrei
- *  2007-05-10  added atomic_add & atomic_cmpxchg (andrei)
- *  2007-05-29  added membar_depends(), membar_*_atomic_op and
- *                membar_*_atomic_setget (andrei)
- */
-
-
 #ifndef _atomic_mips2_h
 #define _atomic_mips2_h
 

doxygen: convert existing docs to use proper doxygen structures and groups, small cleanups

Henning Westerholt authored on 23/06/2011 21:39:01
@@ -1,6 +1,4 @@
 /* 
- * $Id$
- * 
  * Copyright (C) 2006 iptelorg GmbH
  *
  * Permission to use, copy, modify, and distribute this software for any
@@ -16,20 +14,23 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-/** @file @brief
- *  atomic operations and memory barriers (mips isa 2 and mips64 specific)
- *  WARNING: atomic ops do not include memory barriers
- *  see atomic_ops.h for more details 
- *  WARNING: not tested on mips64 (not even a compile test)
+/** 
+ * @file 
+ * @brief Atomic operations and memory barriers (MIPS isa 2 and MIPS64 specific)
+ * 
+ * Atomic operations and memory barriers (MIPS isa 2 and MIPS64 specific)
+ * \warning atomic ops do not include memory barriers, see atomic_ops.h for
+ * more details.
+ * \warning not tested on MIPS64 (not even a compile test)
  *
- *  Config defines:  - NOSMP (in NOSMP mode it will also work on mips isa 1
- *                            cpus that support LL and SC, see MIPS_HAS_LLSC
- *                            in atomic_ops.h)
- *                   - __CPU_MIPS64 (mips64 arch., in 64 bit mode: long and
- *                                    void* are 64 bits)
- *                   - __CPU_MIPS2 or __CPU_MIPS && MIPS_HAS_LLSC && NOSMP
- *                                 (if __CPU_MIPS64 is not defined)
+ * Config defines:
+ * - NOSMP (in NOSMP mode it will also work on mips isa 1 CPUs that support
+ *   LL and SC, see MIPS_HAS_LLSC in atomic_ops.h)
+ * - __CPU_MIPS64 (mips64 arch., in 64 bit mode: long and void* are 64 bits)
+ * - __CPU_MIPS2 or __CPU_MIPS && MIPS_HAS_LLSC && NOSMP (if __CPU_MIPS64 is not defined)
+ * @ingroup atomic
  */
+
 /* 
  * History:
  * --------

Doxygen updates

oej authored on 25/10/2009 19:11:28
@@ -15,7 +15,8 @@
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
-/*
+
+/** @file @brief
  *  atomic operations and memory barriers (mips isa 2 and mips64 specific)
  *  WARNING: atomic ops do not include memory barriers
  *  see atomic_ops.h for more details 

- added membar_depends(), needed on SMP archs with separate cache banks where it's possible to get a new pointer value but the old pointer content (e.g. if the two are in different cache banks and the "content" bank is very busy processing a long invalidations queue). For now only Alpha SMP needs it; on all other archs it is a no-op (for more info see atomic_ops.h, http://lse.sourceforge.net/locking/wmbdd.html, http://www.linuxjournal.com/article/8212 or the Alpha Architecture Reference Manual, Chapter 5.6).

- added membar_atomic_op(), membar_atomic_setget(), membar_read_atomic_op(),
membar_read_atomic_setget(), membar_write_atomic_op(),
membar_write_atomic_setget() -- special case memory barriers that can be
optimized if the atomic ops already force some kind of barrier (e.g. x86),
see the description in atomic_ops.h for more info.
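
A minimal usage sketch of these barrier flavors (illustrative only: publish(), flags and ready are made-up names; atomic_or_int is from this file, atomic_set_int is assumed from the common atomic headers):

	volatile int ready = 0;

	void publish(volatile int *flags)
	{
		atomic_or_int(flags, 1);    /* atomic read-modify-write, no barrier */
		membar_atomic_op();         /* barrier tied to an atomic op: can be a
		                             * no-op on archs whose atomic ops already
		                             * imply a barrier, unlike a full membar() */
		atomic_set_int(&ready, 1);  /* atomic set ... */
		membar_atomic_setget();     /* ... and its matching barrier flavor */
	}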

Andrei Pelinescu-Onciul authored on 29/05/2007 11:31:29
@@ -34,6 +34,8 @@
  * --------
  *  2006-03-08  created by andrei
  *  2007-05-10  added atomic_add & atomic_cmpxchg (andrei)
+ *  2007-05-29  added membar_depends(), membar_*_atomic_op and
+ *                membar_*_atomic_setget (andrei)
  */
 
 
@@ -52,10 +54,22 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 /* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
  * contain gcc barriers*/
-#define membar_enter_lock() 
-#define membar_leave_lock()
+#define membar_enter_lock() do {} while(0)
+#define membar_leave_lock() do {} while(0)
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 
 #else
 
@@ -71,8 +85,20 @@
 
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_depends()  do {} while(0) /* really empty, not even a cc bar. */
 #define membar_enter_lock() membar()
 #define membar_leave_lock() membar()
+/* membars after or before atomic_ops or atomic_setget -> use these or
+ *  mb_<atomic_op_name>() if you need a memory barrier in one of these
+ *  situations (on some archs where the atomic operations imply memory
+ *   barriers is better to use atomic_op_x(); membar_atomic_op() then
+ *    atomic_op_x(); membar()) */
+#define membar_atomic_op()				membar()
+#define membar_atomic_setget()			membar()
+#define membar_write_atomic_op()		membar_write()
+#define membar_write_atomic_setget()	membar_write()
+#define membar_read_atomic_op()			membar_read()
+#define membar_read_atomic_setget()		membar_read()
 
 #endif /* NOSMP */
 

- parts of atomic_ops.h moved into atomic/atomic_common.h and atomic/atomic_native.h

- added membar_enter_lock() and membar_leave_lock() (to be used only if
creating locks using the atomic ops functions, for more info see atomic_ops.h)
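
A rough sketch of that use case (my_lock_t, my_lock and my_unlock are hypothetical names; atomic_get_and_set_int is from this tree, atomic_set_int assumed from the common atomic headers):

	typedef volatile int my_lock_t;       /* stand-in lock type */

	static void my_lock(my_lock_t *l)
	{
		while (atomic_get_and_set_int(l, 1))
			;                     /* spin until the old value was 0 */
		membar_enter_lock();      /* nothing moves above the acquire */
	}

	static void my_unlock(my_lock_t *l)
	{
		membar_leave_lock();      /* critical-section writes drain first */
		atomic_set_int(l, 0);
	}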

Andrei Pelinescu-Onciul authored on 14/05/2007 17:29:31
@@ -52,6 +52,11 @@
 #define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
 #define membar_read()  membar()
 #define membar_write() membar()
+/* lock barriers: empty, not needed for NOSMP; the lock/unlock should already
+ * contain gcc barriers*/
+#define membar_enter_lock() 
+#define membar_leave_lock()
+
 #else
 
 #define membar() \
@@ -66,6 +71,8 @@
 
 #define membar_read()  membar()
 #define membar_write() membar()
+#define membar_enter_lock() membar()
+#define membar_leave_lock() membar()
 
 #endif /* NOSMP */
 

- atomic_add & atomic_cmpxchg added to ppc
- atomic_unknown (used when the processor does not support atomic ops or is not among the supported ones) now tries to use a "hash" of locks if GEN_LOCK_SET_T_UNLIMITED is defined => less contention on multi-CPUs
- atomic_ops.h defines *_UNLIMITED macros when the number of locks or the set size is limited only by the available memory (everything except SYSV sems)
- license changes: all the atomic* stuff and the locks are now under a BSD (OpenBSD) style license
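
The "hash" of locks idea, sketched (all names hypothetical, not the actual implementation): each atomic variable's address is hashed to one lock out of a set, so operations on unrelated variables usually take different locks:

	typedef volatile int lock_t;          /* stand-in lock type */
	#define ATOMIC_LOCK_SET_SIZE 256      /* assumed power of two */
	static lock_t atomic_lock_set[ATOMIC_LOCK_SET_SIZE];

	static lock_t* atomic_hash_lock(volatile void *addr)
	{
		/* shift out the low, always-zero alignment bits,
		 * then mask the hash into the lock set */
		unsigned long h = ((unsigned long)addr >> 4) &
							(ATOMIC_LOCK_SET_SIZE - 1);
		return &atomic_lock_set[h];
	}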

Andrei Pelinescu-Onciul authored on 11/05/2007 20:44:15
@@ -3,26 +3,17 @@
  * 
  * Copyright (C) 2006 iptelorg GmbH
  *
- * This file is part of ser, a free SIP server.
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
  *
- * ser is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version
- *
- * For a license to use the ser software under conditions
- * other than those described here, or to purchase support for this
- * software, please contact iptel.org by e-mail at the following addresses:
- *    info@iptel.org
- *
- * ser is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 /*
  *  atomic operations and memory barriers (mips isa 2 and mips64 specific)
@@ -294,6 +285,17 @@ inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
 	return atomic_get_and_set_int(v, i);
 }
 
+inline static int mb_atomic_cmpxchg_int(volatile int* v, int o, int n)
+{
+	membar();
+	return atomic_cmpxchg_int(v, o, n);
+}
+
+inline static int mb_atomic_add_int(volatile int* v, int i)
+{
+	membar();
+	return atomic_add_int(v, i);
+}
 
 
 #define mb_atomic_set_long(v, i) \
@@ -355,6 +357,18 @@ inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
 	return atomic_get_and_set_long(v, l);
 }
 
+inline static long mb_atomic_cmpxchg_long(volatile long* v, long o, long n)
+{
+	membar();
+	return atomic_cmpxchg_long(v, o, n);
+}
+
+inline static long mb_atomic_add_long(volatile long* v, long i)
+{
+	membar();
+	return atomic_add_long(v, i);
+}
+
 
 #define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
 #define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
@@ -365,5 +379,7 @@ inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
 #define mb_atomic_get(var)	mb_atomic_get_int(&(var)->val)
 #define mb_atomic_set(var, i)	mb_atomic_set_int(&(var)->val, i)
 #define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
+#define mb_atomic_cmpxchg(var, o, n) mb_atomic_cmpxchg_int(&(var)->val, o, n)
+#define mb_atomic_add(var, i) mb_atomic_add_int(&(var)->val, i)
 
 #endif

- alpha, armv6 and mips isa2+ atomic_add and atomic_cmpxchg (armv6 & alpha not tested at all due to lacking hardware or emulators)
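
The cmpxchg semantics documented in the diff below (it returns the value it saw, which equals `old` on success) support the usual compare-and-swap retry loop; a hedged sketch, with bounded_inc() invented and atomic_get_int assumed from the common atomic headers:

	/* atomically increment *v but never past max; illustrative only */
	static int bounded_inc(volatile int *v, int max)
	{
		int cur, seen;

		do {
			cur = atomic_get_int(v);
			if (cur >= max)
				return cur;                    /* already at the limit */
			seen = atomic_cmpxchg_int(v, cur, cur + 1);
		} while (seen != cur);                     /* lost a race: retry */
		return cur + 1;
	}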

Andrei Pelinescu-Onciul authored on 10/05/2007 18:27:07
@@ -42,6 +42,7 @@
  * History:
  * --------
  *  2006-03-08  created by andrei
+ *  2007-05-10  added atomic_add & atomic_cmpxchg (andrei)
  */
 
 
@@ -167,6 +168,23 @@
 	}
 
 
+/* %0=var, %1=*var, %2=new, %3=old :
+ * ret=*var; if *var==old  then *var=new; return ret
+ * => if succesfull (changed var to new)  ret==old */
+#define ATOMIC_CMPXCHG_DECL(NAME, P_TYPE) \
+	inline static P_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+														P_TYPE old, \
+														P_TYPE new_v) \
+	{ \
+		asm volatile( \
+			ATOMIC_ASM_OP_##P_TYPE("bne %1, %3, 2f \n\t nop") \
+			"2:    \n\t" \
+			: "=m"(*var), "=&r"(old), "=r"(new_v)  \
+			: "r"(old), "m"(*var), "2"(new_v) \
+			 \
+			); \
+		return old; \
+	}
 
 ATOMIC_FUNC_DECL(inc,      "addiu %2, %1, 1", int, void, /* no return */ )
 ATOMIC_FUNC_DECL_CT(dec,   "subu %2, %1, %3", 1,  int, void, /* no return */ )
@@ -175,6 +193,8 @@ ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", int, void,  /* no return */ )
 ATOMIC_FUNC_DECL(inc_and_test, "addiu %2, %1, 1", int, int, (ret+1)==0 )
 ATOMIC_FUNC_DECL_CT(dec_and_test, "subu %2, %1, %3", 1, int, int, (ret-1)==0 )
 ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, int, int, ret )
+ATOMIC_CMPXCHG_DECL(cmpxchg, int)
+ATOMIC_FUNC_DECL1(add, "addu %2, %1, %3 \n\t move %1, %2", int, int, ret )
 
 #ifdef __CPU_mips64
 
@@ -185,6 +205,8 @@ ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", long, void,  /* no return */ )
 ATOMIC_FUNC_DECL(inc_and_test, "daddiu %2, %1, 1", long, long, (ret+1)==0 )
 ATOMIC_FUNC_DECL_CT(dec_and_test, "dsubu %2, %1, %3", 1,long, long, (ret-1)==0 )
 ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, long, long, ret )
+ATOMIC_CMPXCHG_DECL(cmpxchg, long)
+ATOMIC_FUNC_DECL1(add, "daddu %2, %1, %3 \n\t move %1, %2", long, long, ret )
 
 #else /* ! __CPU_mips64 => __CPU_mips2 or __CPU_mips */
 
@@ -195,6 +217,8 @@ ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", long, void,  /* no return */ )
 ATOMIC_FUNC_DECL(inc_and_test, "addiu %2, %1, 1", long, long, (ret+1)==0 )
 ATOMIC_FUNC_DECL_CT(dec_and_test, "subu %2, %1, %3", 1,long, long, (ret-1)==0 )
 ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, long, long, ret )
+ATOMIC_CMPXCHG_DECL(cmpxchg, long)
+ATOMIC_FUNC_DECL1(add, "addu %2, %1, %3 \n\t move %1, %2", long, long, ret )
 
 #endif /* __CPU_mips64 */
 
@@ -205,6 +229,9 @@ ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, long, long, ret )
 #define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
 #define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
 #define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
+#define atomic_add(var, i) atomic_add_int(&(var)->val, i)
+#define atomic_cmpxchg(var, old, new_v)  \
+	atomic_cmpxchg_int(&(var)->val, old, new_v)
 
 
 /* with integrated membar */

- makefile:
  - compile in 64bit mode by default on sparc64
  - sparc <= v8 support
  - CC_GCC_LIKE_ASM is defined when the compiler supports gcc style inline asm (gcc and icc)

- atomic operations and memory barriers support for:
- x86
- x86_64
- mips (only in NOSMP mode and if it supports ll and sc)
- mips2 (mips32, isa >= 2)
- mips64
- powerpc
- powerpc64
- sparc <= v8 (only memory barriers, the atomic operations are implemented
using locks because there is no hardware support for them)
- sparc64 - both 32 (v8plus) and 64 bit mode
If there is no support for the compiler/arch. combination, it falls back to
locks.

The code is tested (only basic tests: it runs and the results are ok, but no
parallel tests) on x86, x86_64, mips2, powerpc, sparc64 (both modes).
The sparc version runs ok on sparc64 (so it's most likely ok).
powerpc64 and mips64 not tested due to no access to the corresponding
hardware, but they do compile ok.
For more details see the comments at the beginning of atomic_ops.h.
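
A hedged illustration of the resulting API (atomic_t is the wrapper struct with a .val field used by the atomic_*/mb_atomic_* macros; the refcount scenario is invented): the plain ops deliberately include no memory barrier, while the mb_ variants issue membar() first:

	atomic_t refcnt;

	void grab(void)
	{
		atomic_inc(&refcnt);      /* atomic, but no memory barrier */
	}

	void put_and_publish(void)
	{
		mb_atomic_dec(&refcnt);   /* membar(); then the atomic dec */
	}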

Andrei Pelinescu-Onciul authored on 30/03/2006 19:56:06
new file mode 100644
@@ -0,0 +1,342 @@
+/* 
+ * $Id$
+ * 
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * This file is part of ser, a free SIP server.
+ *
+ * ser is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version
+ *
+ * For a license to use the ser software under conditions
+ * other than those described here, or to purchase support for this
+ * software, please contact iptel.org by e-mail at the following addresses:
+ *    info@iptel.org
+ *
+ * ser is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ *  atomic operations and memory barriers (mips isa 2 and mips64 specific)
+ *  WARNING: atomic ops do not include memory barriers
+ *  see atomic_ops.h for more details 
+ *  WARNING: not tested on mips64 (not even a compile test)
+ *
+ *  Config defines:  - NOSMP (in NOSMP mode it will also work on mips isa 1
+ *                            cpus that support LL and SC, see MIPS_HAS_LLSC
+ *                            in atomic_ops.h)
+ *                   - __CPU_MIPS64 (mips64 arch., in 64 bit mode: long and
+ *                                    void* are 64 bits)
+ *                   - __CPU_MIPS2 or __CPU_MIPS && MIPS_HAS_LLSC && NOSMP
+ *                                 (if __CPU_MIPS64 is not defined)
+ */
+/* 
+ * History:
+ * --------
+ *  2006-03-08  created by andrei
+ */
+
+
+#ifndef _atomic_mips2_h
+#define _atomic_mips2_h
+
+#define HAVE_ASM_INLINE_ATOMIC_OPS
+#define HAVE_ASM_INLINE_MEMBAR
+
+#ifdef __CPU_mips64
+#warning mips64 atomic code was not tested, please report problems to \
+		serdev@iptel.org or andrei@iptel.org
+#endif
+
+#ifdef NOSMP
+#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
+#define membar_read()  membar()
+#define membar_write() membar()
+#else
+
+#define membar() \
+	asm volatile( \
+			".set push \n\t" \
+			".set noreorder \n\t" \
+			".set mips2 \n\t" \
+			"    sync\n\t" \
+			".set pop \n\t" \
+			: : : "memory" \
+			) 
+
+#define membar_read()  membar()
+#define membar_write() membar()
+
+#endif /* NOSMP */
+
+
+
+/* main asm block */
+#define ATOMIC_ASM_OP_int(op) \
+			".set push \n\t" \
+			".set noreorder \n\t" \
+			".set mips2 \n\t" \
+			"1:   ll %1, %0 \n\t" \
+			"     " op "\n\t" \
+			"     sc %2, %0 \n\t" \
+			"     beqz %2, 1b \n\t" \
+			"     nop \n\t" /* delay slot */ \
+			".set pop \n\t" 
+
+#ifdef __CPU_mips64
+#define ATOMIC_ASM_OP_long(op) \
+			".set push \n\t" \
+			".set noreorder \n\t" \
+			"1:   lld %1, %0 \n\t" \
+			"     " op "\n\t" \
+			"     scd %2, %0 \n\t" \
+			"     beqz %2, 1b \n\t" \
+			"     nop \n\t" /* delay slot */ \
+			".set pop \n\t" 
+#else /* ! __CPU_mips64 => __CPU_mips2 or __CPU_mips & MIPS_HAS_LLSC */
+#define ATOMIC_ASM_OP_long(op) ATOMIC_ASM_OP_int(op)
+#endif
+
+
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
+			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
+			: "m"(*var) \
+			 \
+			); \
+		return RET_EXPR; \
+	}
+
+
+/* same as above, but with CT in %3 */
+#define ATOMIC_FUNC_DECL_CT(NAME, OP, CT, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
+			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
+			: "r"((CT)), "m"(*var) \
+			 \
+			); \
+		return RET_EXPR; \
+	}
+
+
+/* takes an extra param, i which goes in %3 */
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+														P_TYPE i) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
+			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
+			: "r"((i)), "m"(*var) \
+			 \
+			); \
+		return RET_EXPR; \
+	}
+
+
+/* takes an extra param, like above, but i  goes in %2 */
+#define ATOMIC_FUNC_DECL2(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+														P_TYPE i) \
+	{ \
+		P_TYPE ret; \
+		asm volatile( \
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
+			: "=m"(*var), "=&r"(ret), "+&r"(i)  \
+			: "m"(*var) \
+			 \
+			); \
+		return RET_EXPR; \
+	}
+
+
+
+ATOMIC_FUNC_DECL(inc,      "addiu %2, %1, 1", int, void, /* no return */ )
+ATOMIC_FUNC_DECL_CT(dec,   "subu %2, %1, %3", 1,  int, void, /* no return */ )
+ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", int, void, /* no return */ )
+ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", int, void,  /* no return */ )
+ATOMIC_FUNC_DECL(inc_and_test, "addiu %2, %1, 1", int, int, (ret+1)==0 )
+ATOMIC_FUNC_DECL_CT(dec_and_test, "subu %2, %1, %3", 1, int, int, (ret-1)==0 )
+ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, int, int, ret )
+
+#ifdef __CPU_mips64
+
+ATOMIC_FUNC_DECL(inc,      "daddiu %2, %1, 1", long, void, /* no return */ )
+ATOMIC_FUNC_DECL_CT(dec,   "dsubu %2, %1, %3", 1,  long, void, /* no return */ )
+ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", long, void, /* no return */ )
+ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", long, void,  /* no return */ )
+ATOMIC_FUNC_DECL(inc_and_test, "daddiu %2, %1, 1", long, long, (ret+1)==0 )
+ATOMIC_FUNC_DECL_CT(dec_and_test, "dsubu %2, %1, %3", 1,long, long, (ret-1)==0 )