
- makefile:
  - compile in 64bit mode by default on sparc64
  - sparc <= v8 support
  - CC_GCC_LIKE_ASM is defined when the compiler supports gcc-style inline
    asm (gcc and icc)

- atomic operations and memory barriers support for:
  - x86
  - x86_64
  - mips (only in NOSMP mode and if it supports ll and sc)
  - mips2 (mips32, isa >= 2)
  - mips64
  - powerpc
  - powerpc64
  - sparc <= v8 (only memory barriers; the atomic operations are implemented
    using locks because there is no hardware support for them)
  - sparc64 - both 32 bit (v8plus) and 64 bit mode
If there is no support for the compiler/arch. combination, it falls back to
locks.

The code is tested (only basic tests: it runs and the results are ok, but no
parallel tests) on x86, x86_64, mips2, powerpc and sparc64 (both modes).
The sparc version runs ok on sparc64, so it is most likely fine.
powerpc64 and mips64 were not tested because no corresponding hardware was
available, but they do compile ok.
For more details see the comments at the beginning of atomic_ops.h.
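
A minimal usage sketch of the resulting API (atomic_inc, atomic_dec_and_test
and the membar_* names are the ones defined by the new headers; the atomic_t
wrapper with a .val member is assumed to be declared in atomic_ops.h):

#include "atomic_ops.h"   /* picks the arch-specific header or the lock fallback */

static atomic_t counter;          /* hypothetical shared counter */

void grab_ref(void)
{
	atomic_inc(&counter);     /* NOTE: no implicit memory barrier */
}

int drop_ref(void)
{
	membar_write();                        /* order earlier stores first */
	return atomic_dec_and_test(&counter);  /* non-zero when the count hits 0 */
}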

Andrei Pelinescu-Onciul authored on 30/03/2006 19:56:06
Showing 13 changed files
... ...
@@ -45,6 +45,9 @@
45 45
 #  2005-07-25  better solaris arch detection (andrei)
46 46
 #  2005-09-12  -mallign-double removed (too many problems) (andrei)
47 47
 #  2005-10-02  distcc get gcc version hack (andrei)
48
+#  2006-03-30  64 bit mode compile by default on sparc64 (-m64), added
49
+#              CC_GCC_LIKE_ASM and SPARC64_MODE (andrei)
50
+#              sparc <= v8 support (andrei)
48 51
 
49 52
 
50 53
 # check if already included/exported
... ...
@@ -445,7 +448,7 @@ endif
445 445
 
446 446
 ifeq ($(ARCH), sparc)
447 447
 	# smp no supported on sparc32
448
-	DEFS+= -DNOSMP 
448
+	DEFS+= -DNOSMP # FIXME
449 449
 	use_fast_lock=yes
450 450
 endif
451 451
 
... ...
@@ -489,6 +492,7 @@ ifeq ($(mode), release)
489 489
 ifeq	($(ARCH), i386)
490 490
 		# if gcc 
491 491
 ifeq		($(CC_NAME), gcc)
492
+				DEFS+=-DCC_GCC_LIKE_ASM
492 493
 				#common stuff
493 494
 				CFLAGS=-g -O9 -funroll-loops  -Wcast-align $(PROFILE) \
494 495
 					-Wall  
... ...
@@ -532,6 +536,7 @@ endif			# CC_SHORTVER, 4.x
532 532
 
533 533
 else		# CC_NAME, gcc
534 534
 ifeq		($(CC_NAME), icc)
535
+			DEFS+=-DCC_GCC_LIKE_ASM
535 536
 			CFLAGS=-g -O3  -ipo -ipo_obj -unroll  $(PROFILE) \
536 537
 					 -tpp6 -xK  #-openmp  #optimize for PIII 
537 538
 				# -prefetch doesn't seem to work
... ...
@@ -549,6 +554,7 @@ endif	#ARCH, i386
549 549
 ifeq	($(ARCH), x86_64)
550 550
 		# if gcc 
551 551
 ifeq		($(CC_NAME), gcc)
552
+				DEFS+=-DCC_GCC_LIKE_ASM
552 553
 				#common stuff
553 554
 				CFLAGS=-g -O9 -funroll-loops  -Wcast-align $(PROFILE) \
554 555
 					-Wall 
... ...
@@ -592,6 +598,7 @@ endif			# CC_SHORTVER, 4.x
592 592
 
593 593
 else		# CC_NAME, gcc
594 594
 ifeq		($(CC_NAME), icc)
595
+			DEFS+=-DCC_GCC_LIKE_ASM
595 596
 			CFLAGS=-g -O3  -ipo -ipo_obj -unroll  $(PROFILE) \
596 597
 					 -tpp6 -xK  #-openmp  #optimize for PIII 
597 598
 				# -prefetch doesn't seem to work
... ...
@@ -605,15 +612,21 @@ endif		#CC_NAME, icc
605 605
 endif		#CC_NAME, gcc
606 606
 endif	#ARCH, x86_64
607 607
 
608
-	#if sparc
608
+	#if sparc64
609 609
 ifeq	($(ARCH), sparc64)
610 610
 			#if gcc
611 611
 ifeq		($(CC_NAME), gcc)
612
+				DEFS+=-DCC_GCC_LIKE_ASM -DSPARC64_MODE
612 613
 				#common stuff
613
-				CFLAGS=-g -O9 -funroll-loops  $(PROFILE) \
614
+				CFLAGS=-m64 -g -O9 -funroll-loops  $(PROFILE) \
614 615
 					-Wall\
615 616
 					#-Wcast-align \
616 617
 					#-Wmissing-prototypes 
618
+				# use -m64 to force 64 bit (but add it also to LDFLAGS and
619
+				#  don't forget to define SPARC64_MODE)
620
+				# -m32 for 32 bit (default on solaris),
621
+				# nothing for arch. default
622
+				LDFLAGS+=-m64
617 623
 				#if gcc 4.x
618 624
 ifeq			($(CC_SHORTVER), 4.x)
619 625
 					CPU ?= ultrasparc
... ...
@@ -633,9 +646,6 @@ ifeq			($(CC_SHORTVER), 3.0)
633 633
 					CPU ?= ultrasparc
634 634
 					#use 32bit for now
635 635
 					CFLAGS+= -mcpu=ultrasparc -mtune=$(CPU)   \
636
-					# use -m64 to force 64 bit (but add it also to LDFLAGS), 
637
-					# -m32 for 32 bit (default on solaris),
638
-					# nothing for arch. default
639 636
 					# -mcpu=v9 or ultrasparc? # -mtune implied by -mcpu
640 637
 					#-mno-epilogue #try to inline function exit code
641 638
 					#-mflat # omit save/restore
... ...
@@ -647,7 +657,7 @@ $(warning 			Old gcc detected ($(CC_SHORTVER)), use  gcc >= 3.1 \
647 647
 ifneq				($(OS), netbsd)
648 648
 						# on netbsd/sparc64,  gcc 2.95.3 does not compile
649 649
 						# ser with -mv8
650
-						CFLAGS+= -mv8 
650
+						CFLAGS+= -mv9 
651 651
 endif
652 652
 ifeq					($(ASTYPE), solaris)
653 653
 							CFLAGS+= -Wa,-xarch=v8plus
... ...
@@ -657,7 +667,7 @@ else			#CC_SHORTVER, 2.9x
657 657
 $(warning			You are using an old and unsupported gcc \
658 658
 					 version ($(CC_SHORTVER)), compile at your own risk!)
659 659
 					
660
-					CFLAGS+= -mv8 
660
+					CFLAGS+= -mv9 
661 661
 ifeq					($(ASTYPE), solaris)
662 662
 							CFLAGS+= -Wa,-xarch=v8plus
663 663
 endif					
... ...
@@ -666,10 +676,11 @@ endif			#CC_SHORTVER, 2.9x
666 666
 endif			#CC_SHORTVER, 3.0
667 667
 endif			#CC_SHORTVER, 3.4
668 668
 endif			#CC_SHORTVER, 4.x
669
-
669
+	
670 670
 else		#CC_NAME, gcc
671 671
 ifeq		($(CC_NAME), suncc)
672
-			CFLAGS+=-g -xO5 -fast -native -xarch=v8plusa -xCC \
672
+			DEFS+=-DSPARC64_MODE
673
+			CFLAGS+= -m64 -g -xO5 -fast -native -xarch=v9 -xCC \
673 674
 					-xc99 # C99 support
674 675
 			# -Dinline="" # add this if cc < 5.3 (define inline as null)
675 676
 else
... ...
@@ -679,10 +690,69 @@ endif		#CC_NAME, suncc
679 679
 endif		#CC_NAME, gcc
680 680
 endif	#ARCH, sparc64
681 681
 
682
+	#if sparc
683
+ifeq	($(ARCH), sparc)
684
+			#if gcc
685
+ifeq		($(CC_NAME), gcc)
686
+				DEFS+=-DCC_GCC_LIKE_ASM
687
+				#common stuff
688
+				CFLAGS=-g -O9 -funroll-loops  $(PROFILE) \
689
+					-Wall\
690
+					#-Wcast-align \
691
+					#-Wmissing-prototypes 
692
+				#if gcc 4.x
693
+ifeq			($(CC_SHORTVER), 4.x)
694
+					CPU ?= v8 
695
+					#use 32bit for now
696
+					CFLAGS+= -minline-all-stringops \
697
+							-mtune=$(CPU) \
698
+							-ftree-vectorize
699
+else
700
+				#if gcc 3.4
701
+ifeq			($(CC_SHORTVER), 3.4)
702
+					CPU ?= v8
703
+					#use 32bit for now
704
+					CFLAGS+= -mtune=$(CPU)
705
+else
706
+				#if gcc 3.0
707
+ifeq			($(CC_SHORTVER), 3.0)
708
+					CPU ?= v8 
709
+					#use 32bit for now
710
+					CFLAGS+= -mtune=$(CPU)   \
711
+					#-mno-epilogue #try to inline function exit code
712
+					#-mflat # omit save/restore
713
+					#-,faster-structs #faster non Sparc ABI structure copy ops
714
+else			# CC_SHORTVER, 3.0
715
+ifeq			($(CC_SHORTVER), 2.9x) #older gcc version (2.9[1-5])
716
+$(warning 			Old gcc detected ($(CC_SHORTVER)), use  gcc >= 3.1 \
717
+					for better results)
718
+else			#CC_SHORTVER, 2.9x
719
+				#really old version
720
+$(warning			You are using an old and unsupported gcc \
721
+					 version ($(CC_SHORTVER)), compile at your own risk!)
722
+					
723
+endif			#CC_SHORTVER, 2.9x
724
+endif			#CC_SHORTVER, 3.0
725
+endif			#CC_SHORTVER, 3.4
726
+endif			#CC_SHORTVER, 4.x
727
+	
728
+else		#CC_NAME, gcc
729
+ifeq		($(CC_NAME), suncc)
730
+			CFLAGS+= -g -xO5 -fast -native -xCC \
731
+					-xc99 # C99 support
732
+			# -Dinline="" # add this if cc < 5.3 (define inline as null)
733
+else
734
+				#other compilers
735
+$(error 			Unsupported compiler ($(CC):$(CC_NAME)), try gcc)
736
+endif		#CC_NAME, suncc
737
+endif		#CC_NAME, gcc
738
+endif	#ARCH, sparc
739
+
682 740
 	#if ipaq/netwinder
683 741
 ifeq	($(ARCH), arm)
684 742
 		# if gcc 
685 743
 ifeq		($(CC_NAME), gcc)
744
+				DEFS+=-DCC_GCC_LIKE_ASM
686 745
 				#common stuff
687 746
 				CFLAGS=-O9 -funroll-loops  -Wcast-align $(PROFILE) \
688 747
 					-Wall   
... ...
@@ -725,6 +795,7 @@ endif	#ARCH, arm
725 725
 ifeq	($(ARCH), mips)
726 726
 		# if gcc 
727 727
 ifeq		($(CC_NAME), gcc)
728
+				DEFS+=-DCC_GCC_LIKE_ASM
728 729
 				#common stuff
729 730
 				CFLAGS=-O9 -funroll-loops  $(PROFILE) \
730 731
 					-Wall 
... ...
@@ -766,6 +837,7 @@ endif	#ARCH, mips
766 766
 ifeq	($(ARCH), mips2)
767 767
 		# if gcc 
768 768
 ifeq		($(CC_NAME), gcc)
769
+				DEFS+=-DCC_GCC_LIKE_ASM
769 770
 				#common stuff
770 771
 				CFLAGS= -mips2 -O9 -funroll-loops $(PROFILE) \
771 772
 					-Wall 
... ...
@@ -806,6 +878,7 @@ endif	#ARCH, mips2
806 806
 ifeq	($(ARCH), alpha)
807 807
 		# if gcc 
808 808
 ifeq		($(CC_NAME), gcc)
809
+				DEFS+=-DCC_GCC_LIKE_ASM
809 810
 				#common stuff
810 811
 				CFLAGS= -O9 -funroll-loops $(PROFILE)  -Wall 
811 812
 			#if gcc 4.0+
... ...
@@ -844,6 +917,7 @@ endif	#ARCH, alpha
844 844
 ifeq	($(ARCH), ppc)
845 845
 		# if gcc 
846 846
 ifeq		($(CC_NAME), gcc)
847
+				DEFS+=-DCC_GCC_LIKE_ASM
847 848
 				#common stuff
848 849
 				CFLAGS= -O9 -funroll-loops $(PROFILE)  -Wall 
849 850
 			#if gcc 4.0+
... ...
@@ -884,6 +958,7 @@ endif	#ARCH, ppc
884 884
 ifeq	($(ARCH), ppc64)
885 885
 		# if gcc 
886 886
 ifeq		($(CC_NAME), gcc)
887
+				DEFS+=-DCC_GCC_LIKE_ASM
887 888
 				#common stuff
888 889
 				CFLAGS= -O9 -funroll-loops $(PROFILE)  -Wall 
889 890
 ifeq			($(CC_SHORTVER), 4.x)
... ...
@@ -950,7 +1025,9 @@ else	#mode,release
950 950
 ifeq	($(CC_NAME), gcc)
951 951
 		CFLAGS=-g -Wcast-align $(PROFILE)
952 952
 ifeq		($(ARCH), sparc64)
953
-			CFLAGS+= -mcpu=ultrasparc 
953
+			DEFS+=SPARC64_MODE
954
+			CFLAGS+= -mcpu=ultrasparc -m64
955
+			LDFLAGS+=-m64
954 956
 endif
955 957
 ifeq		($(LDTYPE), solaris)
956 958
 			#solaris ld
957 959
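
The Makefile changes above only define the feature macros; how they are
consumed is decided in the C headers. A rough, assumed sketch of the kind of
dispatch atomic_ops.h could do (the real atomic_ops.h is not part of this
diff; the "atomic/" paths and file names are only inferred from the include
guards of the new headers, and the sparc __CPU_* macro names are assumptions):

/* sketch only -- not the actual atomic_ops.h */
#ifdef CC_GCC_LIKE_ASM                  /* set by the Makefile for gcc and icc */
#if defined(__CPU_sparc64)              /* SPARC64_MODE selects 64 bit casx */
#include "atomic/atomic_sparc64.h"
#elif defined(__CPU_mips2) || defined(__CPU_mips64)
#include "atomic/atomic_mips2.h"
#elif defined(__CPU_ppc) || defined(__CPU_ppc64)
#include "atomic/atomic_ppc.h"
#elif defined(__CPU_sparc)
#include "atomic/atomic_sparc.h"        /* membars only on sparc <= v8 */
#endif
#endif /* CC_GCC_LIKE_ASM */
#include "atomic/atomic_unknown.h"      /* lock fallback for whatever is missing */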
new file mode 100644
... ...
@@ -0,0 +1,342 @@
0
+/* 
1
+ * $Id$
2
+ * 
3
+ * Copyright (C) 2006 iptelorg GmbH
4
+ *
5
+ * This file is part of ser, a free SIP server.
6
+ *
7
+ * ser is free software; you can redistribute it and/or modify
8
+ * it under the terms of the GNU General Public License as published by
9
+ * the Free Software Foundation; either version 2 of the License, or
10
+ * (at your option) any later version
11
+ *
12
+ * For a license to use the ser software under conditions
13
+ * other than those described here, or to purchase support for this
14
+ * software, please contact iptel.org by e-mail at the following addresses:
15
+ *    info@iptel.org
16
+ *
17
+ * ser is distributed in the hope that it will be useful,
18
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
19
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20
+ * GNU General Public License for more details.
21
+ *
22
+ * You should have received a copy of the GNU General Public License
23
+ * along with this program; if not, write to the Free Software
24
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25
+ */
26
+/*
27
+ *  atomic operations and memory barriers (mips isa 2 and mips64 specific)
28
+ *  WARNING: atomic ops do not include memory barriers
29
+ *  see atomic_ops.h for more details 
30
+ *  WARNING: not tested on mips64 (not even a compile test)
31
+ *
32
+ *  Config defines:  - NOSMP (in NOSMP mode it will also work on mips isa 1
33
+ *                            cpus that support LL and SC, see MIPS_HAS_LLSC
34
+ *                            in atomic_ops.h)
35
+ *                   - __CPU_MIPS64 (mips64 arch., in 64 bit mode: long and
36
+ *                                    void* are 64 bits)
37
+ *                   - __CPU_MIPS2 or __CPU_MIPS && MIPS_HAS_LLSC && NOSMP
38
+ *                                 (if __CPU_MIPS64 is not defined)
39
+ */
40
+/* 
41
+ * History:
42
+ * --------
43
+ *  2006-03-08  created by andrei
44
+ */
45
+
46
+
47
+#ifndef _atomic_mips2_h
48
+#define _atomic_mips2_h
49
+
50
+#define HAVE_ASM_INLINE_ATOMIC_OPS
51
+#define HAVE_ASM_INLINE_MEMBAR
52
+
53
+#ifdef __CPU_mips64
54
+#warning mips64 atomic code was not tested, please report problems to \
55
+		serdev@iptel.org or andrei@iptel.org
56
+#endif
57
+
58
+#ifdef NOSMP
59
+#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
60
+#define membar_read()  membar()
61
+#define membar_write() membar()
62
+#else
63
+
64
+#define membar() \
65
+	asm volatile( \
66
+			".set push \n\t" \
67
+			".set noreorder \n\t" \
68
+			".set mips2 \n\t" \
69
+			"    sync\n\t" \
70
+			".set pop \n\t" \
71
+			: : : "memory" \
72
+			) 
73
+
74
+#define membar_read()  membar()
75
+#define membar_write() membar()
76
+
77
+#endif /* NOSMP */
78
+
79
+
80
+
81
+/* main asm block */
82
+#define ATOMIC_ASM_OP_int(op) \
83
+			".set push \n\t" \
84
+			".set noreorder \n\t" \
85
+			".set mips2 \n\t" \
86
+			"1:   ll %1, %0 \n\t" \
87
+			"     " op "\n\t" \
88
+			"     sc %2, %0 \n\t" \
89
+			"     beqz %2, 1b \n\t" \
90
+			"     nop \n\t" /* delay slot */ \
91
+			".set pop \n\t" 
92
+
93
+#ifdef __CPU_mips64
94
+#define ATOMIC_ASM_OP_long(op) \
95
+			".set push \n\t" \
96
+			".set noreorder \n\t" \
97
+			"1:   lld %1, %0 \n\t" \
98
+			"     " op "\n\t" \
99
+			"     scd %2, %0 \n\t" \
100
+			"     beqz %2, 1b \n\t" \
101
+			"     nop \n\t" /* delay slot */ \
102
+			".set pop \n\t" 
103
+#else /* ! __CPU_mips64 => __CPU_mips2 or __CPU_mips & MIPS_HAS_LLSC */
104
+#define ATOMIC_ASM_OP_long(op) ATOMIC_ASM_OP_int(op)
105
+#endif
106
+
107
+
108
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
109
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
110
+	{ \
111
+		P_TYPE ret, tmp; \
112
+		asm volatile( \
113
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
114
+			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
115
+			: "m"(*var) \
116
+			 \
117
+			); \
118
+		return RET_EXPR; \
119
+	}
120
+
121
+
122
+/* same as above, but with CT in %3 */
123
+#define ATOMIC_FUNC_DECL_CT(NAME, OP, CT, P_TYPE, RET_TYPE, RET_EXPR) \
124
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
125
+	{ \
126
+		P_TYPE ret, tmp; \
127
+		asm volatile( \
128
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
129
+			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
130
+			: "r"((CT)), "m"(*var) \
131
+			 \
132
+			); \
133
+		return RET_EXPR; \
134
+	}
135
+
136
+
137
+/* takes an extra param, i which goes in %3 */
138
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
139
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
140
+														P_TYPE i) \
141
+	{ \
142
+		P_TYPE ret, tmp; \
143
+		asm volatile( \
144
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
145
+			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
146
+			: "r"((i)), "m"(*var) \
147
+			 \
148
+			); \
149
+		return RET_EXPR; \
150
+	}
151
+
152
+
153
+/* takes an extra param, like above, but i  goes in %2 */
154
+#define ATOMIC_FUNC_DECL2(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
155
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
156
+														P_TYPE i) \
157
+	{ \
158
+		P_TYPE ret; \
159
+		asm volatile( \
160
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
161
+			: "=m"(*var), "=&r"(ret), "+&r"(i)  \
162
+			: "m"(*var) \
163
+			 \
164
+			); \
165
+		return RET_EXPR; \
166
+	}
167
+
168
+
169
+
170
+ATOMIC_FUNC_DECL(inc,      "addiu %2, %1, 1", int, void, /* no return */ )
171
+ATOMIC_FUNC_DECL_CT(dec,   "subu %2, %1, %3", 1,  int, void, /* no return */ )
172
+ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", int, void, /* no return */ )
173
+ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", int, void,  /* no return */ )
174
+ATOMIC_FUNC_DECL(inc_and_test, "addiu %2, %1, 1", int, int, (ret+1)==0 )
175
+ATOMIC_FUNC_DECL_CT(dec_and_test, "subu %2, %1, %3", 1, int, int, (ret-1)==0 )
176
+ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, int, int, ret )
177
+
178
+#ifdef __CPU_mips64
179
+
180
+ATOMIC_FUNC_DECL(inc,      "daddiu %2, %1, 1", long, void, /* no return */ )
181
+ATOMIC_FUNC_DECL_CT(dec,   "dsubu %2, %1, %3", 1,  long, void, /* no return */ )
182
+ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", long, void, /* no return */ )
183
+ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", long, void,  /* no return */ )
184
+ATOMIC_FUNC_DECL(inc_and_test, "daddiu %2, %1, 1", long, long, (ret+1)==0 )
185
+ATOMIC_FUNC_DECL_CT(dec_and_test, "dsubu %2, %1, %3", 1,long, long, (ret-1)==0 )
186
+ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, long, long, ret )
187
+
188
+#else /* ! __CPU_mips64 => __CPU_mips2 or __CPU_mips */
189
+
190
+ATOMIC_FUNC_DECL(inc,      "addiu %2, %1, 1", long, void, /* no return */ )
191
+ATOMIC_FUNC_DECL_CT(dec,   "subu %2, %1, %3", 1,  long, void, /* no return */ )
192
+ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", long, void, /* no return */ )
193
+ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", long, void,  /* no return */ )
194
+ATOMIC_FUNC_DECL(inc_and_test, "addiu %2, %1, 1", long, long, (ret+1)==0 )
195
+ATOMIC_FUNC_DECL_CT(dec_and_test, "subu %2, %1, %3", 1,long, long, (ret-1)==0 )
196
+ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, long, long, ret )
197
+
198
+#endif /* __CPU_mips64 */
199
+
200
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
201
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
202
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
203
+#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
204
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
205
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
206
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
207
+
208
+
209
+/* with integrated membar */
210
+
211
+#define mb_atomic_set_int(v, i) \
212
+	do{ \
213
+		membar(); \
214
+		atomic_set_int(v, i); \
215
+	}while(0)
216
+
217
+
218
+
219
+inline static int mb_atomic_get_int(volatile int* v)
220
+{
221
+	membar();
222
+	return atomic_get_int(v);
223
+}
224
+
225
+
226
+#define mb_atomic_inc_int(v) \
227
+	do{ \
228
+		membar(); \
229
+		atomic_inc_int(v); \
230
+	}while(0)
231
+
232
+#define mb_atomic_dec_int(v) \
233
+	do{ \
234
+		membar(); \
235
+		atomic_dec_int(v); \
236
+	}while(0)
237
+
238
+#define mb_atomic_or_int(v, m) \
239
+	do{ \
240
+		membar(); \
241
+		atomic_or_int(v, m); \
242
+	}while(0)
243
+
244
+#define mb_atomic_and_int(v, m) \
245
+	do{ \
246
+		membar(); \
247
+		atomic_and_int(v, m); \
248
+	}while(0)
249
+
250
+inline static int mb_atomic_inc_and_test_int(volatile int* v)
251
+{
252
+	membar();
253
+	return atomic_inc_and_test_int(v);
254
+}
255
+
256
+inline static int mb_atomic_dec_and_test_int(volatile int* v)
257
+{
258
+	membar();
259
+	return atomic_dec_and_test_int(v);
260
+}
261
+
262
+
263
+inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
264
+{
265
+	membar();
266
+	return atomic_get_and_set_int(v, i);
267
+}
268
+
269
+
270
+
271
+#define mb_atomic_set_long(v, i) \
272
+	do{ \
273
+		membar(); \
274
+		atomic_set_long(v, i); \
275
+	}while(0)
276
+
277
+
278
+
279
+inline static long mb_atomic_get_long(volatile long* v)
280
+{
281
+	membar();
282
+	return atomic_get_long(v);
283
+}
284
+
285
+
286
+#define mb_atomic_inc_long(v) \
287
+	do{ \
288
+		membar(); \
289
+		atomic_inc_long(v); \
290
+	}while(0)
291
+
292
+
293
+#define mb_atomic_dec_long(v) \
294
+	do{ \
295
+		membar(); \
296
+		atomic_dec_long(v); \
297
+	}while(0)
298
+
299
+#define mb_atomic_or_long(v, m) \
300
+	do{ \
301
+		membar(); \
302
+		atomic_or_long(v, m); \
303
+	}while(0)
304
+
305
+#define mb_atomic_and_long(v, m) \
306
+	do{ \
307
+		membar(); \
308
+		atomic_and_long(v, m); \
309
+	}while(0)
310
+
311
+inline static long mb_atomic_inc_and_test_long(volatile long* v)
312
+{
313
+	membar();
314
+	return atomic_inc_and_test_long(v);
315
+}
316
+
317
+inline static long mb_atomic_dec_and_test_long(volatile long* v)
318
+{
319
+	membar();
320
+	return atomic_dec_and_test_long(v);
321
+}
322
+
323
+
324
+inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
325
+{
326
+	membar();
327
+	return atomic_get_and_set_long(v, l);
328
+}
329
+
330
+
331
+#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
332
+#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
333
+#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
334
+#define mb_atomic_or(var, mask)  mb_atomic_or_int(&(var)->val, (mask))
335
+#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
336
+#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
337
+#define mb_atomic_get(var)	mb_atomic_get_int(&(var)->val)
338
+#define mb_atomic_set(var, i)	mb_atomic_set_int(&(var)->val, i)
339
+#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
340
+
341
+#endif
0 342
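
For readability, here is a hand-expanded sketch of what
ATOMIC_FUNC_DECL(inc, "addiu %2, %1, 1", int, void, ...) above produces
(not part of the commit, just the macro written out):

inline static void atomic_inc_int(volatile int *var)
{
	int ret, tmp;
	asm volatile(
		".set push \n\t"
		".set noreorder \n\t"
		".set mips2 \n\t"
		"1:   ll %1, %0 \n\t"        /* ret = *var, load-linked */
		"     addiu %2, %1, 1 \n\t"  /* tmp = ret + 1 */
		"     sc %2, %0 \n\t"        /* try to store tmp; tmp = 1 on success */
		"     beqz %2, 1b \n\t"      /* lost the reservation -> retry */
		"     nop \n\t"              /* branch delay slot */
		".set pop \n\t"
		: "=m"(*var), "=&r"(ret), "=&r"(tmp)
		: "m"(*var)
		);
	/* RET_EXPR is empty: nothing is returned */
}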
new file mode 100644
... ...
@@ -0,0 +1,296 @@
0
+/* 
1
+ * $Id$
2
+ * 
3
+ * Copyright (C) 2006 iptelorg GmbH
4
+ *
5
+ * This file is part of ser, a free SIP server.
6
+ *
7
+ * ser is free software; you can redistribute it and/or modify
8
+ * it under the terms of the GNU General Public License as published by
9
+ * the Free Software Foundation; either version 2 of the License, or
10
+ * (at your option) any later version
11
+ *
12
+ * For a license to use the ser software under conditions
13
+ * other than those described here, or to purchase support for this
14
+ * software, please contact iptel.org by e-mail at the following addresses:
15
+ *    info@iptel.org
16
+ *
17
+ * ser is distributed in the hope that it will be useful,
18
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
19
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20
+ * GNU General Public License for more details.
21
+ *
22
+ * You should have received a copy of the GNU General Public License
23
+ * along with this program; if not, write to the Free Software
24
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25
+ */
26
+/*
27
+ *  atomic operations and memory barriers (powerpc and powerpc64 versions)
28
+ *  WARNING: atomic ops do not include memory barriers
29
+ *  see atomic_ops.h for more details 
30
+ *  WARNING: not tested on ppc64
31
+ *
32
+ *  Config defines:  - NOSMP
33
+ *                   - __CPU_ppc64  (powerpc64 w/ 64 bits long and void*)
34
+ *                   - __CPU_ppc    (powerpc or powerpc64 32bit mode)
35
+ */
36
+/* 
37
+ * History:
38
+ * --------
39
+ *  2006-03-24  created by andrei
40
+ */
41
+
42
+#ifndef _atomic_ppc_h
43
+#define _atomic_ppc_h
44
+
45
+#define HAVE_ASM_INLINE_ATOMIC_OPS
46
+#define HAVE_ASM_INLINE_MEMBAR
47
+
48
+#ifdef __CPU_ppc64
49
+#warning powerpc64 atomic code was not tested, please report problems to \
50
+		serdev@iptel.org or andrei@iptel.org
51
+#endif
52
+
53
+
54
+#ifdef NOSMP
55
+#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
56
+#define membar_read()  membar()
57
+#define membar_write() membar()
58
+#else
59
+#define membar() asm volatile ("sync \n\t" : : : "memory") 
60
+/* lwsync orders LoadLoad, LoadStore and StoreStore */
61
+#define membar_read() asm volatile ("lwsync \n\t" : : : "memory") 
62
+/* on "normal" cached mem. eieio orders StoreStore */
63
+#define membar_write() asm volatile ("eieio \n\t" : : : "memory") 
64
+#endif /* NOSMP */
65
+
66
+
67
+#define ATOMIC_ASM_OP0_int(op) \
68
+	"1: lwarx  %0, 0, %2 \n\t" \
69
+	"   " op " \n\t" \
70
+	"   stwcx. %0, 0, %2 \n\t" \
71
+	"   bne- 1b \n\t"
72
+
73
+#define ATOMIC_ASM_OP3_int(op) \
74
+	"1: lwarx  %0, 0, %2 \n\t" \
75
+	"   " op " \n\t" \
76
+	"   stwcx. %3, 0, %2 \n\t" \
77
+	"   bne- 1b \n\t"
78
+
79
+#ifdef __CPU_ppc64
80
+#define ATOMIC_ASM_OP0_long(op) \
81
+	"1: ldarx  %0, 0, %2 \n\t" \
82
+	"   " op " \n\t" \
83
+	"   stdcx. %0, 0, %2 \n\t" \
84
+	"   bne- 1b \n\t"
85
+
86
+#define ATOMIC_ASM_OP3_long(op) \
87
+	"1: ldarx  %0, 0, %2 \n\t" \
88
+	"   " op " \n\t" \
89
+	"   stdcx. %3, 0, %2 \n\t" \
90
+	"   bne- 1b \n\t"
91
+
92
+#else /* __CPU_ppc */
93
+#define ATOMIC_ASM_OP0_long ATOMIC_ASM_OP0_int
94
+#define ATOMIC_ASM_OP3_long ATOMIC_ASM_OP3_int
95
+#endif
96
+
97
+
98
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
99
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
100
+	{ \
101
+		P_TYPE ret; \
102
+		asm volatile( \
103
+			ATOMIC_ASM_OP0_##P_TYPE(OP) \
104
+			: "=&r"(ret), "=m"(*var) : "r"(var) : "cc" \
105
+			); \
106
+		return RET_EXPR; \
107
+	}
108
+
109
+/* same as above, but takes an extra param, v, which goes in %3 */
110
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
111
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
112
+															P_TYPE v) \
113
+	{ \
114
+		P_TYPE ret; \
115
+		asm volatile( \
116
+			ATOMIC_ASM_OP0_##P_TYPE(OP) \
117
+			: "=&r"(ret), "=m"(*var) : "r"(var), "r"(v)  : "cc" \
118
+			); \
119
+		return RET_EXPR; \
120
+	}
121
+
122
+/* same as above, but uses ATOMIC_ASM_OP3, v in %3 and %3 not changed */
123
+#define ATOMIC_FUNC_DECL3(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
124
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
125
+															P_TYPE v) \
126
+	{ \
127
+		P_TYPE ret; \
128
+		asm volatile( \
129
+			ATOMIC_ASM_OP3_##P_TYPE(OP) \
130
+			: "=&r"(ret), "=m"(*var) : "r"(var), "r"(v)  : "cc" \
131
+			); \
132
+		return RET_EXPR; \
133
+	}
134
+
135
+
136
+
137
+ATOMIC_FUNC_DECL(inc,      "addic  %0, %0,  1", int, void, /* no return */ )
138
+ATOMIC_FUNC_DECL(dec,      "addic %0, %0,  -1", int, void, /* no return */ )
139
+ATOMIC_FUNC_DECL1(and,     "and     %0, %0, %3", int, void, /* no return */ )
140
+ATOMIC_FUNC_DECL1(or,      "or     %0, %0, %3", int, void, /* no return */ )
141
+ATOMIC_FUNC_DECL(inc_and_test, "addic   %0, %0, 1", int, int, (ret==0) )
142
+ATOMIC_FUNC_DECL(dec_and_test, "addic  %0, %0, -1", int, int, (ret==0) )
143
+ATOMIC_FUNC_DECL3(get_and_set, /* no extra op needed */ , int, int,  ret)
144
+
145
+ATOMIC_FUNC_DECL(inc,      "addic  %0, %0,  1", long, void, /* no return */ )
146
+ATOMIC_FUNC_DECL(dec,      "addic %0, %0,  -1", long, void, /* no return */ )
147
+ATOMIC_FUNC_DECL1(and,     "and     %0, %0, %3",long, void, /* no return */ )
148
+ATOMIC_FUNC_DECL1(or,      "or     %0, %0, %3", long, void, /* no return */ )
149
+ATOMIC_FUNC_DECL(inc_and_test, "addic   %0, %0, 1", long, long, (ret==0) )
150
+ATOMIC_FUNC_DECL(dec_and_test, "addic  %0, %0, -1", long, long, (ret==0) )
151
+ATOMIC_FUNC_DECL3(get_and_set, /* no extra op needed */ , long, long,  ret)
152
+
153
+
154
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
155
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
156
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
157
+#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
158
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
159
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
160
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
161
+
162
+
163
+/* with integrated membar */
164
+
165
+#define mb_atomic_set_int(v, i) \
166
+	do{ \
167
+		membar(); \
168
+		atomic_set_int(v, i); \
169
+	}while(0)
170
+
171
+
172
+
173
+inline static int mb_atomic_get_int(volatile int* v)
174
+{
175
+	membar();
176
+	return atomic_get_int(v);
177
+}
178
+
179
+
180
+#define mb_atomic_inc_int(v) \
181
+	do{ \
182
+		membar(); \
183
+		atomic_inc_int(v); \
184
+	}while(0)
185
+
186
+#define mb_atomic_dec_int(v) \
187
+	do{ \
188
+		membar(); \
189
+		atomic_dec_int(v); \
190
+	}while(0)
191
+
192
+#define mb_atomic_or_int(v, m) \
193
+	do{ \
194
+		membar(); \
195
+		atomic_or_int(v, m); \
196
+	}while(0)
197
+
198
+#define mb_atomic_and_int(v, m) \
199
+	do{ \
200
+		membar(); \
201
+		atomic_and_int(v, m); \
202
+	}while(0)
203
+
204
+inline static int mb_atomic_inc_and_test_int(volatile int* v)
205
+{
206
+	membar();
207
+	return atomic_inc_and_test_int(v);
208
+}
209
+
210
+inline static int mb_atomic_dec_and_test_int(volatile int* v)
211
+{
212
+	membar();
213
+	return atomic_dec_and_test_int(v);
214
+}
215
+
216
+
217
+inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
218
+{
219
+	membar();
220
+	return atomic_get_and_set_int(v, i);
221
+}
222
+
223
+
224
+
225
+#define mb_atomic_set_long(v, i) \
226
+	do{ \
227
+		membar(); \
228
+		atomic_set_long(v, i); \
229
+	}while(0)
230
+
231
+
232
+
233
+inline static long mb_atomic_get_long(volatile long* v)
234
+{
235
+	membar();
236
+	return atomic_get_long(v);
237
+}
238
+
239
+
240
+#define mb_atomic_inc_long(v) \
241
+	do{ \
242
+		membar(); \
243
+		atomic_inc_long(v); \
244
+	}while(0)
245
+
246
+
247
+#define mb_atomic_dec_long(v) \
248
+	do{ \
249
+		membar(); \
250
+		atomic_dec_long(v); \
251
+	}while(0)
252
+
253
+#define mb_atomic_or_long(v, m) \
254
+	do{ \
255
+		membar(); \
256
+		atomic_or_long(v, m); \
257
+	}while(0)
258
+
259
+#define mb_atomic_and_long(v, m) \
260
+	do{ \
261
+		membar(); \
262
+		atomic_and_long(v, m); \
263
+	}while(0)
264
+
265
+inline static long mb_atomic_inc_and_test_long(volatile long* v)
266
+{
267
+	membar();
268
+	return atomic_inc_and_test_long(v);
269
+}
270
+
271
+inline static long mb_atomic_dec_and_test_long(volatile long* v)
272
+{
273
+	membar();
274
+	return atomic_dec_and_test_long(v);
275
+}
276
+
277
+
278
+inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
279
+{
280
+	membar();
281
+	return atomic_get_and_set_long(v, l);
282
+}
283
+
284
+
285
+#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
286
+#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
287
+#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
288
+#define mb_atomic_or(var, mask)  mb_atomic_or_int(&(var)->val, (mask))
289
+#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
290
+#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
291
+#define mb_atomic_get(var)	mb_atomic_get_int(&(var)->val)
292
+#define mb_atomic_set(var, i)	mb_atomic_set_int(&(var)->val, i)
293
+#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
294
+
295
+#endif
0 296
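
As above, a hand-expanded sketch (not in the commit) of the powerpc
ATOMIC_FUNC_DECL3(get_and_set, ...) instance, which stores the new value
from %3 while returning the old one in %0:

inline static int atomic_get_and_set_int(volatile int *var, int v)
{
	int ret;
	asm volatile(
		"1: lwarx  %0, 0, %2 \n\t"   /* ret = *var, with reservation */
		"    \n\t"                   /* OP is empty for get_and_set */
		"   stwcx. %3, 0, %2 \n\t"   /* conditionally store v */
		"   bne- 1b \n\t"            /* reservation lost -> retry */
		: "=&r"(ret), "=m"(*var)
		: "r"(var), "r"(v)
		: "cc"
		);
	return ret;
}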
new file mode 100644
... ...
@@ -0,0 +1,60 @@
0
+/* 
1
+ * $Id$
2
+ * 
3
+ * Copyright (C) 2006 iptelorg GmbH
4
+ *
5
+ * This file is part of ser, a free SIP server.
6
+ *
7
+ * ser is free software; you can redistribute it and/or modify
8
+ * it under the terms of the GNU General Public License as published by
9
+ * the Free Software Foundation; either version 2 of the License, or
10
+ * (at your option) any later version
11
+ *
12
+ * For a license to use the ser software under conditions
13
+ * other than those described here, or to purchase support for this
14
+ * software, please contact iptel.org by e-mail at the following addresses:
15
+ *    info@iptel.org
16
+ *
17
+ * ser is distributed in the hope that it will be useful,
18
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
19
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20
+ * GNU General Public License for more details.
21
+ *
22
+ * You should have received a copy of the GNU General Public License
23
+ * along with this program; if not, write to the Free Software
24
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25
+ */
26
+/*
27
+ *  memory barriers for sparc32 ( version < v 9))
28
+ *  see atomic_ops.h for more details 
29
+ *
30
+ * Config defines: NOSMP
31
+ */
32
+/* 
33
+ * History:
34
+ * --------
35
+ *  2006-03-28  created by andrei
36
+ */
37
+
38
+
39
+#ifndef _atomic_sparc_h
40
+#define _atomic_sparc_h
41
+
42
+#define HAVE_ASM_INLINE_MEMBAR
43
+
44
+
45
+#warning "sparc32 atomic operations support not tested"
46
+
47
+#ifdef NOSMP
48
+#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
49
+#define membar_read()  membar()
50
+#define membar_write() membar()
51
+#else /* SMP */
52
+#define membar_write() asm volatile ("stbar \n\t" : : : "memory") 
53
+#define membar() membar_write()
54
+#define membar_read() asm volatile ("" : : : "memory") 
55
+#endif /* NOSMP */
56
+
57
+
58
+
59
+#endif
0 60
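
This file only provides memory barriers; on sparc <= v8 the atomic operations
themselves come from the lock-based fallback (the atomic_unknown.h header
below, name inferred from its include guard). A small, assumed example of the
store-ordering use of membar_write() (the identifiers are hypothetical):

	shared->data  = value;  /* fill in the payload */
	membar_write();         /* stbar: drain earlier stores before later ones */
	shared->ready = 1;      /* publish */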
new file mode 100644
... ...
@@ -0,0 +1,286 @@
0
+/* 
1
+ * $Id$
2
+ * 
3
+ * Copyright (C) 2006 iptelorg GmbH
4
+ *
5
+ * This file is part of ser, a free SIP server.
6
+ *
7
+ * ser is free software; you can redistribute it and/or modify
8
+ * it under the terms of the GNU General Public License as published by
9
+ * the Free Software Foundation; either version 2 of the License, or
10
+ * (at your option) any later version
11
+ *
12
+ * For a license to use the ser software under conditions
13
+ * other than those described here, or to purchase support for this
14
+ * software, please contact iptel.org by e-mail at the following addresses:
15
+ *    info@iptel.org
16
+ *
17
+ * ser is distributed in the hope that it will be useful,
18
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
19
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20
+ * GNU General Public License for more details.
21
+ *
22
+ * You should have received a copy of the GNU General Public License
23
+ * along with this program; if not, write to the Free Software
24
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25
+ */
26
+/*
27
+ *  atomic operations and memory barriers (sparc64 version, 32 and 64 bit modes)
28
+ *  WARNING: atomic ops do not include memory barriers
29
+ *  see atomic_ops.h for more details 
30
+ *
31
+ *  Config defs: - SPARC64_MODE (if defined long is assumed to be 64 bits
32
+ *                               else long & void* are assumed to be 32 for
33
+ *                               sparc32plus code)
34
+ *               - NOSMP
35
+ */
36
+/* 
37
+ * History:
38
+ * --------
39
+ *  2006-03-28  created by andrei
40
+ */
41
+
42
+
43
+#ifndef _atomic_sparc64_h
44
+#define _atomic_sparc64_h
45
+
46
+#define HAVE_ASM_INLINE_ATOMIC_OPS
47
+#define HAVE_ASM_INLINE_MEMBAR
48
+
49
+
50
+
51
+/* try to guess if in SPARC64_MODE */
52
+#if ! defined SPARC64_MODE && \
53
+	(defined __LP64__ || defined _LP64 || defined __arch64__)
54
+#define SPARC64_MODE
55
+#endif
56
+
57
+
58
+#ifdef NOSMP
59
+#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
60
+#define membar_read()  membar()
61
+#define membar_write() membar()
62
+#else /* SMP */
63
+#define membar() \
64
+	asm volatile ( \
65
+			"membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad \n\t" \
66
+			: : : "memory")
67
+
68
+#define membar_read() asm volatile ("membar #LoadLoad \n\t" : : : "memory")
69
+#define membar_write() asm volatile ("membar #StoreStore \n\t" : : : "memory")
70
+#endif /* NOSMP */
71
+
72
+
73
+
74
+/* 32 bit version, op should store the result in %1, and use %0 as input,
75
+ *  both %0 and %1 are modified */
76
+#define ATOMIC_ASM_OP_int(op)\
77
+	"   ldsw [%3], %0 \n\t"  /* signed or lduw? */ \
78
+	"1: " op " \n\t" \
79
+	"   cas  [%3], %0, %1 \n\t" \
80
+	"   cmp %0, %1 \n\t" \
81
+	"   bne,a,pn  %%icc, 1b \n\t"  /* predict not taken, annul */ \
82
+	"   mov %1, %0\n\t"  /* delay slot */
83
+
84
+#ifdef SPARC64_MODE
85
+/* 64 bit version, same as above */
86
+#define ATOMIC_ASM_OP_long(op)\
87
+	"   ldx [%3], %0 \n\t" \
88
+	"1: " op " \n\t" \
89
+	"   casx  [%3], %0, %1 \n\t" \
90
+	"   cmp %0, %1 \n\t" \
91
+	"   bne,a,pn  %%xcc, 1b \n\t"  /* predict not taken, annul */ \
92
+	"   mov %1, %0\n\t"  /* delay slot */
93
+	
94
+#else /* no SPARC64_MODE => 32bit mode on a sparc64*/
95
+#define ATOMIC_ASM_OP_long(op) ATOMIC_ASM_OP_int(op)
96
+#endif
97
+
98
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
99
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
100
+	{ \
101
+		P_TYPE ret, tmp; \
102
+		asm volatile( \
103
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
104
+			: "=&r"(ret), "=&r"(tmp), "=m"(*var) : "r"(var) : "cc" \
105
+			); \
106
+		return RET_EXPR; \
107
+	}
108
+
109
+
110
+/* same as above, but takes an extra param, v, which goes in %4 */
111
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
112
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
113
+															P_TYPE v) \
114
+	{ \
115
+		P_TYPE ret, tmp; \
116
+		asm volatile( \
117
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
118
+			: "=&r"(ret), "=&r"(tmp), "=m"(*var) : "r"(var), "r"(v) : "cc" \
119
+			); \
120
+		return RET_EXPR; \
121
+	}
122
+
123
+
124
+
125
+
126
+ATOMIC_FUNC_DECL(inc,      "add  %0,  1, %1", int, void, /* no return */ )
127
+ATOMIC_FUNC_DECL(dec,      "sub  %0,  1, %1", int, void, /* no return */ )
128
+ATOMIC_FUNC_DECL1(and,     "and  %0, %4, %1", int, void, /* no return */ )
129
+ATOMIC_FUNC_DECL1(or,      "or   %0, %4, %1", int, void, /* no return */ )
130
+ATOMIC_FUNC_DECL(inc_and_test, "add   %0, 1, %1", int, int, ((ret+1)==0) )
131
+ATOMIC_FUNC_DECL(dec_and_test, "sub   %0, 1, %1", int, int, ((ret-1)==0) )
132
+ATOMIC_FUNC_DECL1(get_and_set, "mov %4, %1" , int, int,  ret)
133
+
134
+
135
+ATOMIC_FUNC_DECL(inc,      "add  %0,  1, %1", long, void, /* no return */ )
136
+ATOMIC_FUNC_DECL(dec,      "sub  %0,  1, %1", long, void, /* no return */ )
137
+ATOMIC_FUNC_DECL1(and,     "and  %0, %4, %1", long, void, /* no return */ )
138
+ATOMIC_FUNC_DECL1(or,      "or   %0, %4, %1", long, void, /* no return */ )
139
+ATOMIC_FUNC_DECL(inc_and_test, "add   %0, 1, %1", long, long, ((ret+1)==0) )
140
+ATOMIC_FUNC_DECL(dec_and_test, "sub   %0, 1, %1", long, long, ((ret-1)==0) )
141
+ATOMIC_FUNC_DECL1(get_and_set, "mov %4, %1" , long, long,  ret)
142
+
143
+
144
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
145
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
146
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
147
+#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
148
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
149
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
150
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
151
+
152
+
153
+/* with integrated membar */
154
+
155
+#define mb_atomic_set_int(v, i) \
156
+	do{ \
157
+		membar(); \
158
+		atomic_set_int(v, i); \
159
+	}while(0)
160
+
161
+
162
+
163
+inline static int mb_atomic_get_int(volatile int* v)
164
+{
165
+	membar();
166
+	return atomic_get_int(v);
167
+}
168
+
169
+
170
+#define mb_atomic_inc_int(v) \
171
+	do{ \
172
+		membar(); \
173
+		atomic_inc_int(v); \
174
+	}while(0)
175
+
176
+#define mb_atomic_dec_int(v) \
177
+	do{ \
178
+		membar(); \
179
+		atomic_dec_int(v); \
180
+	}while(0)
181
+
182
+#define mb_atomic_or_int(v, m) \
183
+	do{ \
184
+		membar(); \
185
+		atomic_or_int(v, m); \
186
+	}while(0)
187
+
188
+#define mb_atomic_and_int(v, m) \
189
+	do{ \
190
+		membar(); \
191
+		atomic_and_int(v, m); \
192
+	}while(0)
193
+
194
+inline static int mb_atomic_inc_and_test_int(volatile int* v)
195
+{
196
+	membar();
197
+	return atomic_inc_and_test_int(v);
198
+}
199
+
200
+inline static int mb_atomic_dec_and_test_int(volatile int* v)
201
+{
202
+	membar();
203
+	return atomic_dec_and_test_int(v);
204
+}
205
+
206
+
207
+inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
208
+{
209
+	membar();
210
+	return atomic_get_and_set_int(v, i);
211
+}
212
+
213
+
214
+
215
+#define mb_atomic_set_long(v, i) \
216
+	do{ \
217
+		membar(); \
218
+		atomic_set_long(v, i); \
219
+	}while(0)
220
+
221
+
222
+
223
+inline static long mb_atomic_get_long(volatile long* v)
224
+{
225
+	membar();
226
+	return atomic_get_long(v);
227
+}
228
+
229
+
230
+#define mb_atomic_inc_long(v) \
231
+	do{ \
232
+		membar(); \
233
+		atomic_inc_long(v); \
234
+	}while(0)
235
+
236
+
237
+#define mb_atomic_dec_long(v) \
238
+	do{ \
239
+		membar(); \
240
+		atomic_dec_long(v); \
241
+	}while(0)
242
+
243
+#define mb_atomic_or_long(v, m) \
244
+	do{ \
245
+		membar(); \
246
+		atomic_or_long(v, m); \
247
+	}while(0)
248
+
249
+#define mb_atomic_and_long(v, m) \
250
+	do{ \
251
+		membar(); \
252
+		atomic_and_long(v, m); \
253
+	}while(0)
254
+
255
+inline static long mb_atomic_inc_and_test_long(volatile long* v)
256
+{
257
+	membar();
258
+	return atomic_inc_and_test_long(v);
259
+}
260
+
261
+inline static long mb_atomic_dec_and_test_long(volatile long* v)
262
+{
263
+	membar();
264
+	return atomic_dec_and_test_long(v);
265
+}
266
+
267
+
268
+inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
269
+{
270
+	membar();
271
+	return atomic_get_and_set_long(v, l);
272
+}
273
+
274
+
275
+#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
276
+#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
277
+#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
278
+#define mb_atomic_or(var, mask)  mb_atomic_or_int(&(var)->val, (mask))
279
+#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
280
+#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
281
+#define mb_atomic_get(var)	mb_atomic_get_int(&(var)->val)
282
+#define mb_atomic_set(var, i)	mb_atomic_set_int(&(var)->val, i)
283
+#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
284
+
285
+#endif
0 286
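
A hand-expanded sketch (not in the commit) of the sparc64
ATOMIC_FUNC_DECL(inc, "add  %0,  1, %1", int, void, ...) instance, showing
the cas retry loop:

inline static void atomic_inc_int(volatile int *var)
{
	int ret, tmp;
	asm volatile(
		"   ldsw [%3], %0 \n\t"        /* ret = *var */
		"1: add  %0,  1, %1 \n\t"      /* tmp = ret + 1 */
		"   cas  [%3], %0, %1 \n\t"    /* if (*var==ret) swap *var<->tmp; tmp = old *var */
		"   cmp %0, %1 \n\t"
		"   bne,a,pn  %%icc, 1b \n\t"  /* raced with another cpu -> retry */
		"   mov %1, %0\n\t"            /* annulled delay slot: ret = fresh *var */
		: "=&r"(ret), "=&r"(tmp), "=m"(*var)
		: "r"(var)
		: "cc"
		);
	/* RET_EXPR is empty: nothing is returned */
}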
new file mode 100644
... ...
@@ -0,0 +1,233 @@
0
+/* 
1
+ * $Id$
2
+ * 
3
+ * Copyright (C) 2006 iptelorg GmbH
4
+ *
5
+ * This file is part of ser, a free SIP server.
6
+ *
7
+ * ser is free software; you can redistribute it and/or modify
8
+ * it under the terms of the GNU General Public License as published by
9
+ * the Free Software Foundation; either version 2 of the License, or
10
+ * (at your option) any later version
11
+ *
12
+ * For a license to use the ser software under conditions
13
+ * other than those described here, or to purchase support for this
14
+ * software, please contact iptel.org by e-mail at the following addresses:
15
+ *    info@iptel.org
16
+ *
17
+ * ser is distributed in the hope that it will be useful,
18
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
19
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20
+ * GNU General Public License for more details.
21
+ *
22
+ * You should have received a copy of the GNU General Public License
23
+ * along with this program; if not, write to the Free Software
24
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25
+ */
26
+/*
27
+ *  atomic operations and memory barriers implemented using locks
28
+ *  (for architectures not yet supported via inline asm)
29
+ *
30
+ *  WARNING: atomic ops do not include memory barriers
31
+ *  see atomic_ops.h for more details 
32
+ *
33
+ *  Config defs: - NOSMP (membars are null in this case)
34
+ *               - HAVE_ASM_INLINE_MEMBAR (membars arleady defined =>
35
+ *                                          use them)
36
+ *               - HAVE_ASM_INLINE_ATOMIC_OPS (atomic ops already defined
37
+ *                                               => don't redefine them)
38
+ *
39
+ */
40
+/* 
41
+ * History:
42
+ * --------
43
+ *  2006-03-08  created by andrei
44
+ */
45
+
46
+#ifndef _atomic_unknown_h
47
+#define _atomic_unknown_h
48
+
49
+#include "../lock_ops.h"
50
+
51
+extern gen_lock_t* _atomic_lock; /* declared and init in ../atomic.c */
52
+
53
+#define atomic_lock    lock_get(_atomic_lock)
54
+#define atomic_unlock  lock_release(_atomic_lock)
55
+
56
+
57
+#ifndef HAVE_ASM_INLINE_MEMBAR
58
+
59
+#define ATOMIC_OPS_USE_LOCK
60
+
61
+
62
+#ifdef NOSMP
63
+#define membar()
64
+#else /* SMP */
65
+
66
+#warning no native memory barrier implementations, falling back to slow lock \
67
+	       based workarround
68
+
69
+/* memory barriers 
70
+ *  not a known cpu -> fall back lock/unlock: safe but costly  (it should 
71
+ *  include a memory barrier effect) */
72
+#define membar() \
73
+	do{\
74
+		atomic_lock; \
75
+		atomic_unlock; \
76
+	} while(0)
77
+#endif /* NOSMP */
78
+
79
+
80
+#define membar_write() membar()
81
+
82
+#define membar_read()  membar()
83
+
84
+#endif /* HAVE_ASM_INLINE_MEMBAR */
85
+
86
+
87
+#ifndef HAVE_ASM_INLINE_ATOMIC_OPS
88
+
89
+#ifndef ATOMIC_OPS_USE_LOCK
90
+#define ATOMIC_OPS_USE_LOCK
91
+#endif
92
+
93
+/* atomic ops */
94
+
95
+
96
+/* OP can include var (function param), no other var. is declared */
97
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
98
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
99
+	{ \
100
+		atomic_lock; \
101
+		OP ; \
102
+		atomic_unlock; \
103
+		return RET_EXPR; \
104
+	}
105
+
106
+
107
+/* like above, but takes an extra param: v =>
108
+ *  OP can use var and v (function params) */
109
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
110
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
111
+														P_TYPE v) \
112
+	{ \
113
+		atomic_lock; \
114
+		OP ; \
115
+		atomic_unlock; \
116
+		return RET_EXPR; \
117
+	}
118
+
119
+/* OP can include var (function param), and ret (return)
120
+ *  ( like ATOMIC_FUNC_DECL, but includes ret) */
121
+#define ATOMIC_FUNC_DECL_RET(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
122
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
123
+	{ \
124
+		P_TYPE ret; \
125
+		atomic_lock; \
126
+		OP ; \
127
+		atomic_unlock; \
128
+		return RET_EXPR; \
129
+	}
130
+
131
+/* like ATOMIC_FUNC_DECL1, but declares an extra variable: P_TYPE ret */
132
+#define ATOMIC_FUNC_DECL1_RET(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
133
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
134
+														P_TYPE v) \
135
+	{ \
136
+		P_TYPE ret; \
137
+		atomic_lock; \
138
+		OP ; \
139
+		atomic_unlock; \
140
+		return RET_EXPR; \
141
+	}
142
+
143
+ATOMIC_FUNC_DECL(inc,      (*var)++, int, void, /* no return */ )
144
+ATOMIC_FUNC_DECL(dec,      (*var)--, int, void, /* no return */ )
145
+ATOMIC_FUNC_DECL1(and,     *var&=v, int, void, /* no return */ )
146
+ATOMIC_FUNC_DECL1(or,      *var|=v, int, void, /* no return */ )
147
+ATOMIC_FUNC_DECL_RET(inc_and_test, ret=++(*var), int, int, (ret==0) )
148
+ATOMIC_FUNC_DECL_RET(dec_and_test, ret=--(*var), int, int, (ret==0) )
149
+ATOMIC_FUNC_DECL1_RET(get_and_set, ret=*var;*var=v , int, int,  ret)
150
+
151
+ATOMIC_FUNC_DECL(inc,      (*var)++, long, void, /* no return */ )
152
+ATOMIC_FUNC_DECL(dec,      (*var)--, long, void, /* no return */ )
153
+ATOMIC_FUNC_DECL1(and,     *var&=v, long, void, /* no return */ )
154
+ATOMIC_FUNC_DECL1(or,      *var|=v, long, void, /* no return */ )
155
+ATOMIC_FUNC_DECL_RET(inc_and_test, ret=++(*var), long, long, (ret==0) )
156
+ATOMIC_FUNC_DECL_RET(dec_and_test, ret=--(*var), long, long, (ret==0) )
157
+ATOMIC_FUNC_DECL1_RET(get_and_set, ret=*var;*var=v , long, long,  ret)
158
+
159
+
160
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
161
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
162
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
163
+#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
164
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
165
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
166
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
167
+
168
+
169
+/* memory barrier versions, the same as "normal" versions (since the
170
+ *  locks act as membars), *  except fot * the set/get 
171
+ */
172
+
173
+/* mb_atomic_{set,get} use membar() : if we're lucky we have membars
174
+ * for the arch. (e.g. sparc32) => membar() might be cheaper then lock/unlock */
175
+#define mb_atomic_set_int(v, i) \
176
+	do{ \
177
+		membar(); \
178
+		atomic_set_int(v, i); \
179
+	}while(0)
180
+
181
+inline static int  mb_atomic_get_int(volatile int* v)
182
+{
183
+		membar();
184
+		return atomic_get_int(v);
185
+}
186
+
187
+
188
+#define mb_atomic_set_long(v, i) \