
- makefile:
- compile in 64bit mode by default on sparc64
- sparc <= v8 support
- CC_GCC_LIKE_ASM is defined when the compiler supports gcc style inline asm (gcc and icc)

- atomic operations and memory barriers support for:
- x86
- x86_64
- mips (only in NOSMP mode and if it supports ll and sc)
- mips2 (mips32, isa >= 2)
- mips64
- powerpc
- powerpc64
- sparc <= v8 (only memory barriers, the atomic operations are implemented
using locks because there is no hardware support for them)
- sparc64 - both 32 (v8plus) and 64 bit mode
If there is no support for the compiler/arch. combination, it falls back to
locks.

The code is tested (only basic tests: it runs and the results are ok, but no
parallel tests) on x86, x86_64, mips2, powerpc and sparc64 (both modes).
The sparc version runs ok on sparc64 (so it is most likely ok).
powerpc64 and mips64 were not tested due to lack of access to the corresponding
hardware, but they do compile ok.
For more details see the comments at the beginning of atomic_ops.h.
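A rough usage sketch follows (hypothetical call sites; the counter type and its
val field are assumed to come from atomic_ops.h, which is not shown here — only
the per-arch macros it relies on appear in this diff):

	#include "atomic_ops.h"  /* selects the per-arch header or the lock fallback */

	/* hypothetical shared reference counter; atomic_ops.h is assumed to
	 * define the counter type with a ->val field, as the macros expect */
	static atomic_t refcnt;

	static void take_ref(void)
	{
		atomic_inc(&refcnt);                  /* no implicit memory barrier */
	}

	static int drop_ref(void)
	{
		membar_write();                       /* order prior stores first */
		return atomic_dec_and_test(&refcnt);  /* non-zero once count hits 0 */
	}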

Andrei Pelinescu-Onciul authored on 30/03/2006 19:56:06
Showing 13 changed files
... ...
@@ -45,6 +45,9 @@
45 45
 #  2005-07-25  better solaris arch detection (andrei)
46 46
 #  2005-09-12  -mallign-double removed (too many problems) (andrei)
47 47
 #  2005-10-02  distcc get gcc version hack (andrei)
48
+#  2006-03-30  64 bit mode compile by default on sparc64 (-m64), added
49
+#              CC_GCC_LIKE_ASM and SPARC64_MODE (andrei)
50
+#              sparc <= v8 support (andrei)
48 51
 
49 52
 
50 53
 # check if already included/exported
... ...
@@ -445,7 +448,7 @@ endif
445 448
 
446 449
 ifeq ($(ARCH), sparc)
447 450
 	# smp no supported on sparc32
448
-	DEFS+= -DNOSMP 
451
+	DEFS+= -DNOSMP # FIXME
449 452
 	use_fast_lock=yes
450 453
 endif
451 454
 
... ...
@@ -489,6 +492,7 @@ ifeq ($(mode), release)
489 492
 ifeq	($(ARCH), i386)
490 493
 		# if gcc 
491 494
 ifeq		($(CC_NAME), gcc)
495
+				DEFS+=-DCC_GCC_LIKE_ASM
492 496
 				#common stuff
493 497
 				CFLAGS=-g -O9 -funroll-loops  -Wcast-align $(PROFILE) \
494 498
 					-Wall  
... ...
@@ -532,6 +536,7 @@ endif			# CC_SHORTVER, 4.x
532 536
 
533 537
 else		# CC_NAME, gcc
534 538
 ifeq		($(CC_NAME), icc)
539
+			DEFS+=-DCC_GCC_LIKE_ASM
535 540
 			CFLAGS=-g -O3  -ipo -ipo_obj -unroll  $(PROFILE) \
536 541
 					 -tpp6 -xK  #-openmp  #optimize for PIII 
537 542
 				# -prefetch doesn't seem to work
... ...
@@ -549,6 +554,7 @@ endif	#ARCH, i386
549 554
 ifeq	($(ARCH), x86_64)
550 555
 		# if gcc 
551 556
 ifeq		($(CC_NAME), gcc)
557
+				DEFS+=-DCC_GCC_LIKE_ASM
552 558
 				#common stuff
553 559
 				CFLAGS=-g -O9 -funroll-loops  -Wcast-align $(PROFILE) \
554 560
 					-Wall 
... ...
@@ -592,6 +598,7 @@ endif			# CC_SHORTVER, 4.x
592 598
 
593 599
 else		# CC_NAME, gcc
594 600
 ifeq		($(CC_NAME), icc)
601
+			DEFS+=-DCC_GCC_LIKE_ASM
595 602
 			CFLAGS=-g -O3  -ipo -ipo_obj -unroll  $(PROFILE) \
596 603
 					 -tpp6 -xK  #-openmp  #optimize for PIII 
597 604
 				# -prefetch doesn't seem to work
... ...
@@ -605,15 +612,21 @@ endif		#CC_NAME, icc
605 612
 endif		#CC_NAME, gcc
606 613
 endif	#ARCH, x86_64
607 614
 
608
-	#if sparc
615
+	#if sparc64
609 616
 ifeq	($(ARCH), sparc64)
610 617
 			#if gcc
611 618
 ifeq		($(CC_NAME), gcc)
619
+				DEFS+=-DCC_GCC_LIKE_ASM -DSPARC64_MODE
612 620
 				#common stuff
613
-				CFLAGS=-g -O9 -funroll-loops  $(PROFILE) \
621
+				CFLAGS=-m64 -g -O9 -funroll-loops  $(PROFILE) \
614 622
 					-Wall\
615 623
 					#-Wcast-align \
616 624
 					#-Wmissing-prototypes 
625
+				# use -m64 to force 64 bit (but add it also to LDFLAGS and
626
+				#  don't forget to define SPARC64_MODE)
627
+				# -m32 for 32 bit (default on solaris),
628
+				# nothing for arch. default
629
+				LDFLAGS+=-m64
617 630
 				#if gcc 4.x
618 631
 ifeq			($(CC_SHORTVER), 4.x)
619 632
 					CPU ?= ultrasparc
... ...
@@ -633,9 +646,6 @@ ifeq			($(CC_SHORTVER), 3.0)
633 646
 					CPU ?= ultrasparc
634 647
 					#use 32bit for now
635 648
 					CFLAGS+= -mcpu=ultrasparc -mtune=$(CPU)   \
636
-					# use -m64 to force 64 bit (but add it also to LDFLAGS), 
637
-					# -m32 for 32 bit (default on solaris),
638
-					# nothing for arch. default
639 649
 					# -mcpu=v9 or ultrasparc? # -mtune implied by -mcpu
640 650
 					#-mno-epilogue #try to inline function exit code
641 651
 					#-mflat # omit save/restore
... ...
@@ -647,7 +657,7 @@ $(warning 			Old gcc detected ($(CC_SHORTVER)), use  gcc >= 3.1 \
647 657
 ifneq				($(OS), netbsd)
648 658
 						# on netbsd/sparc64,  gcc 2.95.3 does not compile
649 659
 						# ser with -mv8
650
-						CFLAGS+= -mv8 
660
+						CFLAGS+= -mv9 
651 661
 endif
652 662
 ifeq					($(ASTYPE), solaris)
653 663
 							CFLAGS+= -Wa,-xarch=v8plus
... ...
@@ -657,7 +667,7 @@ else			#CC_SHORTVER, 2.9x
657 667
 $(warning			You are using an old and unsupported gcc \
658 668
 					 version ($(CC_SHORTVER)), compile at your own risk!)
659 669
 					
660
-					CFLAGS+= -mv8 
670
+					CFLAGS+= -mv9 
661 671
 ifeq					($(ASTYPE), solaris)
662 672
 							CFLAGS+= -Wa,-xarch=v8plus
663 673
 endif					
... ...
@@ -666,10 +676,11 @@ endif			#CC_SHORTVER, 2.9x
666 676
 endif			#CC_SHORTVER, 3.0
667 677
 endif			#CC_SHORTVER, 3.4
668 678
 endif			#CC_SHORTVER, 4.x
669
-
679
+	
670 680
 else		#CC_NAME, gcc
671 681
 ifeq		($(CC_NAME), suncc)
672
-			CFLAGS+=-g -xO5 -fast -native -xarch=v8plusa -xCC \
682
+			DEFS+=-DSPARC64_MODE
683
+			CFLAGS+= -m64 -g -xO5 -fast -native -xarch=v9 -xCC \
673 684
 					-xc99 # C99 support
674 685
 			# -Dinline="" # add this if cc < 5.3 (define inline as null)
675 686
 else
... ...
@@ -679,10 +690,69 @@ endif		#CC_NAME, suncc
679 690
 endif		#CC_NAME, gcc
680 691
 endif	#ARCH, sparc64
681 692
 
693
+	#if sparc
694
+ifeq	($(ARCH), sparc)
695
+			#if gcc
696
+ifeq		($(CC_NAME), gcc)
697
+				DEFS+=-DCC_GCC_LIKE_ASM
698
+				#common stuff
699
+				CFLAGS=-g -O9 -funroll-loops  $(PROFILE) \
700
+					-Wall\
701
+					#-Wcast-align \
702
+					#-Wmissing-prototypes 
703
+				#if gcc 4.x
704
+ifeq			($(CC_SHORTVER), 4.x)
705
+					CPU ?= v8 
706
+					#use 32bit for now
707
+					CFLAGS+= -minline-all-stringops \
708
+							-mtune=$(CPU) \
709
+							-ftree-vectorize
710
+else
711
+				#if gcc 3.4
712
+ifeq			($(CC_SHORTVER), 3.4)
713
+					CPU ?= v8
714
+					#use 32bit for now
715
+					CFLAGS+= -mtune=$(CPU)
716
+else
717
+				#if gcc 3.0
718
+ifeq			($(CC_SHORTVER), 3.0)
719
+					CPU ?= v8 
720
+					#use 32bit for now
721
+					CFLAGS+= -mtune=$(CPU)   \
722
+					#-mno-epilogue #try to inline function exit code
723
+					#-mflat # omit save/restore
724
+					#-,faster-structs #faster non Sparc ABI structure copy ops
725
+else			# CC_SHORTVER, 3.0
726
+ifeq			($(CC_SHORTVER), 2.9x) #older gcc version (2.9[1-5])
727
+$(warning 			Old gcc detected ($(CC_SHORTVER)), use  gcc >= 3.1 \
728
+					for better results)
729
+else			#CC_SHORTVER, 2.9x
730
+				#really old version
731
+$(warning			You are using an old and unsupported gcc \
732
+					 version ($(CC_SHORTVER)), compile at your own risk!)
733
+					
734
+endif			#CC_SHORTVER, 2.9x
735
+endif			#CC_SHORTVER, 3.0
736
+endif			#CC_SHORTVER, 3.4
737
+endif			#CC_SHORTVER, 4.x
738
+	
739
+else		#CC_NAME, gcc
740
+ifeq		($(CC_NAME), suncc)
741
+			CFLAGS+= -g -xO5 -fast -native -xCC \
742
+					-xc99 # C99 support
743
+			# -Dinline="" # add this if cc < 5.3 (define inline as null)
744
+else
745
+				#other compilers
746
+$(error 			Unsupported compiler ($(CC):$(CC_NAME)), try gcc)
747
+endif		#CC_NAME, suncc
748
+endif		#CC_NAME, gcc
749
+endif	#ARCH, sparc
750
+
682 751
 	#if ipaq/netwinder
683 752
 ifeq	($(ARCH), arm)
684 753
 		# if gcc 
685 754
 ifeq		($(CC_NAME), gcc)
755
+				DEFS+=-DCC_GCC_LIKE_ASM
686 756
 				#common stuff
687 757
 				CFLAGS=-O9 -funroll-loops  -Wcast-align $(PROFILE) \
688 758
 					-Wall   
... ...
@@ -725,6 +795,7 @@ endif	#ARCH, arm
725 795
 ifeq	($(ARCH), mips)
726 796
 		# if gcc 
727 797
 ifeq		($(CC_NAME), gcc)
798
+				DEFS+=-DCC_GCC_LIKE_ASM
728 799
 				#common stuff
729 800
 				CFLAGS=-O9 -funroll-loops  $(PROFILE) \
730 801
 					-Wall 
... ...
@@ -766,6 +837,7 @@ endif	#ARCH, mips
766 837
 ifeq	($(ARCH), mips2)
767 838
 		# if gcc 
768 839
 ifeq		($(CC_NAME), gcc)
840
+				DEFS+=-DCC_GCC_LIKE_ASM
769 841
 				#common stuff
770 842
 				CFLAGS= -mips2 -O9 -funroll-loops $(PROFILE) \
771 843
 					-Wall 
... ...
@@ -806,6 +878,7 @@ endif	#ARCH, mips2
806 878
 ifeq	($(ARCH), alpha)
807 879
 		# if gcc 
808 880
 ifeq		($(CC_NAME), gcc)
881
+				DEFS+=-DCC_GCC_LIKE_ASM
809 882
 				#common stuff
810 883
 				CFLAGS= -O9 -funroll-loops $(PROFILE)  -Wall 
811 884
 			#if gcc 4.0+
... ...
@@ -844,6 +917,7 @@ endif	#ARCH, alpha
844 917
 ifeq	($(ARCH), ppc)
845 918
 		# if gcc 
846 919
 ifeq		($(CC_NAME), gcc)
920
+				DEFS+=-DCC_GCC_LIKE_ASM
847 921
 				#common stuff
848 922
 				CFLAGS= -O9 -funroll-loops $(PROFILE)  -Wall 
849 923
 			#if gcc 4.0+
... ...
@@ -884,6 +958,7 @@ endif	#ARCH, ppc
884 958
 ifeq	($(ARCH), ppc64)
885 959
 		# if gcc 
886 960
 ifeq		($(CC_NAME), gcc)
961
+				DEFS+=-DCC_GCC_LIKE_ASM
887 962
 				#common stuff
888 963
 				CFLAGS= -O9 -funroll-loops $(PROFILE)  -Wall 
889 964
 ifeq			($(CC_SHORTVER), 4.x)
... ...
@@ -950,7 +1025,9 @@ else	#mode,release
950 1025
 ifeq	($(CC_NAME), gcc)
951 1026
 		CFLAGS=-g -Wcast-align $(PROFILE)
952 1027
 ifeq		($(ARCH), sparc64)
953
-			CFLAGS+= -mcpu=ultrasparc 
1028
+			DEFS+=-DSPARC64_MODE
1029
+			CFLAGS+= -mcpu=ultrasparc -m64
1030
+			LDFLAGS+=-m64
954 1031
 endif
955 1032
 ifeq		($(LDTYPE), solaris)
956 1033
 			#solaris ld
957 1034
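The CC_GCC_LIKE_ASM define added above is meant to be checked before any
gcc-style inline asm is used. A minimal, illustrative guard is sketched below;
the real selection logic and include paths live in atomic_ops.h and are not
shown here:

	/* illustrative only -- not the actual atomic_ops.h selection logic */
	#ifdef CC_GCC_LIKE_ASM              /* set by the Makefile for gcc and icc */
	#if defined(__CPU_ppc) || defined(__CPU_ppc64)
	#include "atomic_ppc.h"             /* hypothetical include path */
	#endif
	#endif
	/* the lock-based fallback defines only what is still missing: it checks
	 * HAVE_ASM_INLINE_ATOMIC_OPS and HAVE_ASM_INLINE_MEMBAR itself */
	#include "atomic_unknown.h"         /* hypothetical include path */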
new file mode 100644
... ...
@@ -0,0 +1,342 @@
1
+/* 
2
+ * $Id$
3
+ * 
4
+ * Copyright (C) 2006 iptelorg GmbH
5
+ *
6
+ * This file is part of ser, a free SIP server.
7
+ *
8
+ * ser is free software; you can redistribute it and/or modify
9
+ * it under the terms of the GNU General Public License as published by
10
+ * the Free Software Foundation; either version 2 of the License, or
11
+ * (at your option) any later version
12
+ *
13
+ * For a license to use the ser software under conditions
14
+ * other than those described here, or to purchase support for this
15
+ * software, please contact iptel.org by e-mail at the following addresses:
16
+ *    info@iptel.org
17
+ *
18
+ * ser is distributed in the hope that it will be useful,
19
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
20
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21
+ * GNU General Public License for more details.
22
+ *
23
+ * You should have received a copy of the GNU General Public License
24
+ * along with this program; if not, write to the Free Software
25
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26
+ */
27
+/*
28
+ *  atomic operations and memory barriers (mips isa 2 and mips64 specific)
29
+ *  WARNING: atomic ops do not include memory barriers
30
+ *  see atomic_ops.h for more details 
31
+ *  WARNING: not tested on mips64 (not even a compile test)
32
+ *
33
+ *  Config defines:  - NOSMP (in NOSMP mode it will also work on mips isa 1
34
+ *                            cpus that support LL and SC, see MIPS_HAS_LLSC
35
+ *                            in atomic_ops.h)
36
+ *                   - __CPU_MIPS64 (mips64 arch., in 64 bit mode: long and
37
+ *                                    void* are 64 bits)
38
+ *                   - __CPU_MIPS2 or __CPU_MIPS && MIPS_HAS_LLSC && NOSMP
39
+ *                                 (if __CPU_MIPS64 is not defined)
40
+ */
41
+/* 
42
+ * History:
43
+ * --------
44
+ *  2006-03-08  created by andrei
45
+ */
46
+
47
+
48
+#ifndef _atomic_mips2_h
49
+#define _atomic_mips2_h
50
+
51
+#define HAVE_ASM_INLINE_ATOMIC_OPS
52
+#define HAVE_ASM_INLINE_MEMBAR
53
+
54
+#ifdef __CPU_mips64
55
+#warning mips64 atomic code was not tested, please report problems to \
56
+		serdev@iptel.org or andrei@iptel.org
57
+#endif
58
+
59
+#ifdef NOSMP
60
+#define membar() asm volatile ("" : : : "memory") /* gcc barrier: don't cache values across it */
61
+#define membar_read()  membar()
62
+#define membar_write() membar()
63
+#else
64
+
65
+#define membar() \
66
+	asm volatile( \
67
+			".set push \n\t" \
68
+			".set noreorder \n\t" \
69
+			".set mips2 \n\t" \
70
+			"    sync\n\t" \
71
+			".set pop \n\t" \
72
+			: : : "memory" \
73
+			) 
74
+
75
+#define membar_read()  membar()
76
+#define membar_write() membar()
77
+
78
+#endif /* NOSMP */
79
+
80
+
81
+
82
+/* main asm block */
83
+#define ATOMIC_ASM_OP_int(op) \
84
+			".set push \n\t" \
85
+			".set noreorder \n\t" \
86
+			".set mips2 \n\t" \
87
+			"1:   ll %1, %0 \n\t" \
88
+			"     " op "\n\t" \
89
+			"     sc %2, %0 \n\t" \
90
+			"     beqz %2, 1b \n\t" \
91
+			"     nop \n\t" /* delay slot */ \
92
+			".set pop \n\t" 
93
+
94
+#ifdef __CPU_mips64
95
+#define ATOMIC_ASM_OP_long(op) \
96
+			".set push \n\t" \
97
+			".set noreorder \n\t" \
98
+			"1:   lld %1, %0 \n\t" \
99
+			"     " op "\n\t" \
100
+			"     scd %2, %0 \n\t" \
101
+			"     beqz %2, 1b \n\t" \
102
+			"     nop \n\t" /* delay slot */ \
103
+			".set pop \n\t" 
104
+#else /* ! __CPU_mips64 => __CPU_mips2 or __CPU_mips & MIPS_HAS_LLSC */
105
+#define ATOMIC_ASM_OP_long(op) ATOMIC_ASM_OP_int(op)
106
+#endif
107
+
108
+
109
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
110
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
111
+	{ \
112
+		P_TYPE ret, tmp; \
113
+		asm volatile( \
114
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
115
+			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
116
+			: "m"(*var) \
117
+			 \
118
+			); \
119
+		return RET_EXPR; \
120
+	}
121
+
122
+
123
+/* same as above, but with CT in %3 */
124
+#define ATOMIC_FUNC_DECL_CT(NAME, OP, CT, P_TYPE, RET_TYPE, RET_EXPR) \
125
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
126
+	{ \
127
+		P_TYPE ret, tmp; \
128
+		asm volatile( \
129
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
130
+			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
131
+			: "r"((CT)), "m"(*var) \
132
+			 \
133
+			); \
134
+		return RET_EXPR; \
135
+	}
136
+
137
+
138
+/* takes an extra param, i which goes in %3 */
139
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
140
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
141
+														P_TYPE i) \
142
+	{ \
143
+		P_TYPE ret, tmp; \
144
+		asm volatile( \
145
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
146
+			: "=m"(*var), "=&r"(ret), "=&r"(tmp)  \
147
+			: "r"((i)), "m"(*var) \
148
+			 \
149
+			); \
150
+		return RET_EXPR; \
151
+	}
152
+
153
+
154
+/* takes an extra param, like above, but i  goes in %2 */
155
+#define ATOMIC_FUNC_DECL2(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
156
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
157
+														P_TYPE i) \
158
+	{ \
159
+		P_TYPE ret; \
160
+		asm volatile( \
161
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
162
+			: "=m"(*var), "=&r"(ret), "+&r"(i)  \
163
+			: "m"(*var) \
164
+			 \
165
+			); \
166
+		return RET_EXPR; \
167
+	}
168
+
169
+
170
+
171
+ATOMIC_FUNC_DECL(inc,      "addiu %2, %1, 1", int, void, /* no return */ )
172
+ATOMIC_FUNC_DECL_CT(dec,   "subu %2, %1, %3", 1,  int, void, /* no return */ )
173
+ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", int, void, /* no return */ )
174
+ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", int, void,  /* no return */ )
175
+ATOMIC_FUNC_DECL(inc_and_test, "addiu %2, %1, 1", int, int, (ret+1)==0 )
176
+ATOMIC_FUNC_DECL_CT(dec_and_test, "subu %2, %1, %3", 1, int, int, (ret-1)==0 )
177
+ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, int, int, ret )
178
+
179
+#ifdef __CPU_mips64
180
+
181
+ATOMIC_FUNC_DECL(inc,      "daddiu %2, %1, 1", long, void, /* no return */ )
182
+ATOMIC_FUNC_DECL_CT(dec,   "dsubu %2, %1, %3", 1,  long, void, /* no return */ )
183
+ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", long, void, /* no return */ )
184
+ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", long, void,  /* no return */ )
185
+ATOMIC_FUNC_DECL(inc_and_test, "daddiu %2, %1, 1", long, long, (ret+1)==0 )
186
+ATOMIC_FUNC_DECL_CT(dec_and_test, "dsubu %2, %1, %3", 1,long, long, (ret-1)==0 )
187
+ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, long, long, ret )
188
+
189
+#else /* ! __CPU_mips64 => __CPU_mips2 or __CPU_mips */
190
+
191
+ATOMIC_FUNC_DECL(inc,      "addiu %2, %1, 1", long, void, /* no return */ )
192
+ATOMIC_FUNC_DECL_CT(dec,   "subu %2, %1, %3", 1,  long, void, /* no return */ )
193
+ATOMIC_FUNC_DECL1(and, "and %2, %1, %3", long, void, /* no return */ )
194
+ATOMIC_FUNC_DECL1(or,  "or  %2, %1, %3", long, void,  /* no return */ )
195
+ATOMIC_FUNC_DECL(inc_and_test, "addiu %2, %1, 1", long, long, (ret+1)==0 )
196
+ATOMIC_FUNC_DECL_CT(dec_and_test, "subu %2, %1, %3", 1,long, long, (ret-1)==0 )
197
+ATOMIC_FUNC_DECL2(get_and_set, "" /* nothing needed */, long, long, ret )
198
+
199
+#endif /* __CPU_mips64 */
200
+
201
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
202
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
203
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
204
+#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
205
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
206
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
207
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
208
+
209
+
210
+/* with integrated membar */
211
+
212
+#define mb_atomic_set_int(v, i) \
213
+	do{ \
214
+		membar(); \
215
+		atomic_set_int(v, i); \
216
+	}while(0)
217
+
218
+
219
+
220
+inline static int mb_atomic_get_int(volatile int* v)
221
+{
222
+	membar();
223
+	return atomic_get_int(v);
224
+}
225
+
226
+
227
+#define mb_atomic_inc_int(v) \
228
+	do{ \
229
+		membar(); \
230
+		atomic_inc_int(v); \
231
+	}while(0)
232
+
233
+#define mb_atomic_dec_int(v) \
234
+	do{ \
235
+		membar(); \
236
+		atomic_dec_int(v); \
237
+	}while(0)
238
+
239
+#define mb_atomic_or_int(v, m) \
240
+	do{ \
241
+		membar(); \
242
+		atomic_or_int(v, m); \
243
+	}while(0)
244
+
245
+#define mb_atomic_and_int(v, m) \
246
+	do{ \
247
+		membar(); \
248
+		atomic_and_int(v, m); \
249
+	}while(0)
250
+
251
+inline static int mb_atomic_inc_and_test_int(volatile int* v)
252
+{
253
+	membar();
254
+	return atomic_inc_and_test_int(v);
255
+}
256
+
257
+inline static int mb_atomic_dec_and_test_int(volatile int* v)
258
+{
259
+	membar();
260
+	return atomic_dec_and_test_int(v);
261
+}
262
+
263
+
264
+inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
265
+{
266
+	membar();
267
+	return atomic_get_and_set_int(v, i);
268
+}
269
+
270
+
271
+
272
+#define mb_atomic_set_long(v, i) \
273
+	do{ \
274
+		membar(); \
275
+		atomic_set_long(v, i); \
276
+	}while(0)
277
+
278
+
279
+
280
+inline static long mb_atomic_get_long(volatile long* v)
281
+{
282
+	membar();
283
+	return atomic_get_long(v);
284
+}
285
+
286
+
287
+#define mb_atomic_inc_long(v) \
288
+	do{ \
289
+		membar(); \
290
+		atomic_inc_long(v); \
291
+	}while(0)
292
+
293
+
294
+#define mb_atomic_dec_long(v) \
295
+	do{ \
296
+		membar(); \
297
+		atomic_dec_long(v); \
298
+	}while(0)
299
+
300
+#define mb_atomic_or_long(v, m) \
301
+	do{ \
302
+		membar(); \
303
+		atomic_or_long(v, m); \
304
+	}while(0)
305
+
306
+#define mb_atomic_and_long(v, m) \
307
+	do{ \
308
+		membar(); \
309
+		atomic_and_long(v, m); \
310
+	}while(0)
311
+
312
+inline static long mb_atomic_inc_and_test_long(volatile long* v)
313
+{
314
+	membar();
315
+	return atomic_inc_and_test_long(v);
316
+}
317
+
318
+inline static long mb_atomic_dec_and_test_long(volatile long* v)
319
+{
320
+	membar();
321
+	return atomic_dec_and_test_long(v);
322
+}
323
+
324
+
325
+inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
326
+{
327
+	membar();
328
+	return atomic_get_and_set_long(v, l);
329
+}
330
+
331
+
332
+#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
333
+#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
334
+#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
335
+#define mb_atomic_or(var, mask)  mb_atomic_or_int(&(var)->val, (mask))
336
+#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
337
+#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
338
+#define mb_atomic_get(var)	mb_atomic_get_int(&(var)->val)
339
+#define mb_atomic_set(var, i)	mb_atomic_set_int(&(var)->val, i)
340
+#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
341
+
342
+#endif
0 343
new file mode 100644
... ...
@@ -0,0 +1,296 @@
1
+/* 
2
+ * $Id$
3
+ * 
4
+ * Copyright (C) 2006 iptelorg GmbH
5
+ *
6
+ * This file is part of ser, a free SIP server.
7
+ *
8
+ * ser is free software; you can redistribute it and/or modify
9
+ * it under the terms of the GNU General Public License as published by
10
+ * the Free Software Foundation; either version 2 of the License, or
11
+ * (at your option) any later version
12
+ *
13
+ * For a license to use the ser software under conditions
14
+ * other than those described here, or to purchase support for this
15
+ * software, please contact iptel.org by e-mail at the following addresses:
16
+ *    info@iptel.org
17
+ *
18
+ * ser is distributed in the hope that it will be useful,
19
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
20
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21
+ * GNU General Public License for more details.
22
+ *
23
+ * You should have received a copy of the GNU General Public License
24
+ * along with this program; if not, write to the Free Software
25
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26
+ */
27
+/*
28
+ *  atomic operations and memory barriers (powerpc and powerpc64 versions)
29
+ *  WARNING: atomic ops do not include memory barriers
30
+ *  see atomic_ops.h for more details 
31
+ *  WARNING: not tested on ppc64
32
+ *
33
+ *  Config defines:  - NOSMP
34
+ *                   - __CPU_ppc64  (powerpc64 w/ 64 bits long and void*)
35
+ *                   - __CPU_ppc    (powerpc or powerpc64 32bit mode)
36
+ */
37
+/* 
38
+ * History:
39
+ * --------
40
+ *  2006-03-24  created by andrei
41
+ */
42
+
43
+#ifndef _atomic_ppc_h
44
+#define _atomic_ppc_h
45
+
46
+#define HAVE_ASM_INLINE_ATOMIC_OPS
47
+#define HAVE_ASM_INLINE_MEMBAR
48
+
49
+#ifdef __CPU_ppc64
50
+#warning powerpc64 atomic code was not tested, please report problems to \
51
+		serdev@iptel.org or andrei@iptel.org
52
+#endif
53
+
54
+
55
+#ifdef NOSMP
56
+#define membar() asm volatile ("" : : : "memory") /* gcc barrier: don't cache values across it */
57
+#define membar_read()  membar()
58
+#define membar_write() membar()
59
+#else
60
+#define membar() asm volatile ("sync \n\t" : : : "memory") 
61
+/* lwsync orders LoadLoad, LoadStore and StoreStore */
62
+#define membar_read() asm volatile ("lwsync \n\t" : : : "memory") 
63
+/* on "normal" cached mem. eieio orders StoreStore */
64
+#define membar_write() asm volatile ("eieio \n\t" : : : "memory") 
65
+#endif /* NOSMP */
66
+
67
+
68
+#define ATOMIC_ASM_OP0_int(op) \
69
+	"1: lwarx  %0, 0, %2 \n\t" \
70
+	"   " op " \n\t" \
71
+	"   stwcx. %0, 0, %2 \n\t" \
72
+	"   bne- 1b \n\t"
73
+
74
+#define ATOMIC_ASM_OP3_int(op) \
75
+	"1: lwarx  %0, 0, %2 \n\t" \
76
+	"   " op " \n\t" \
77
+	"   stwcx. %3, 0, %2 \n\t" \
78
+	"   bne- 1b \n\t"
79
+
80
+#ifdef __CPU_ppc64
81
+#define ATOMIC_ASM_OP0_long(op) \
82
+	"1: ldarx  %0, 0, %2 \n\t" \
83
+	"   " op " \n\t" \
84
+	"   stdcx. %0, 0, %2 \n\t" \
85
+	"   bne- 1b \n\t"
86
+
87
+#define ATOMIC_ASM_OP3_long(op) \
88
+	"1: ldarx  %0, 0, %2 \n\t" \
89
+	"   " op " \n\t" \
90
+	"   stdcx. %3, 0, %2 \n\t" \
91
+	"   bne- 1b \n\t"
92
+
93
+#else /* __CPU_ppc */
94
+#define ATOMIC_ASM_OP0_long ATOMIC_ASM_OP0_int
95
+#define ATOMIC_ASM_OP3_long ATOMIC_ASM_OP3_int
96
+#endif
97
+
98
+
99
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
100
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
101
+	{ \
102
+		P_TYPE ret; \
103
+		asm volatile( \
104
+			ATOMIC_ASM_OP0_##P_TYPE(OP) \
105
+			: "=&r"(ret), "=m"(*var) : "r"(var) : "cc" \
106
+			); \
107
+		return RET_EXPR; \
108
+	}
109
+
110
+/* same as above, but takes an extra param, v, which goes in %3 */
111
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
112
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
113
+															P_TYPE v) \
114
+	{ \
115
+		P_TYPE ret; \
116
+		asm volatile( \
117
+			ATOMIC_ASM_OP0_##P_TYPE(OP) \
118
+			: "=&r"(ret), "=m"(*var) : "r"(var), "r"(v)  : "cc" \
119
+			); \
120
+		return RET_EXPR; \
121
+	}
122
+
123
+/* same as above, but uses ATOMIC_ASM_OP3, v in %3 and %3 not changed */
124
+#define ATOMIC_FUNC_DECL3(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
125
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
126
+															P_TYPE v) \
127
+	{ \
128
+		P_TYPE ret; \
129
+		asm volatile( \
130
+			ATOMIC_ASM_OP3_##P_TYPE(OP) \
131
+			: "=&r"(ret), "=m"(*var) : "r"(var), "r"(v)  : "cc" \
132
+			); \
133
+		return RET_EXPR; \
134
+	}
135
+
136
+
137
+
138
+ATOMIC_FUNC_DECL(inc,      "addic  %0, %0,  1", int, void, /* no return */ )
139
+ATOMIC_FUNC_DECL(dec,      "addic %0, %0,  -1", int, void, /* no return */ )
140
+ATOMIC_FUNC_DECL1(and,     "and     %0, %0, %3", int, void, /* no return */ )
141
+ATOMIC_FUNC_DECL1(or,      "or     %0, %0, %3", int, void, /* no return */ )
142
+ATOMIC_FUNC_DECL(inc_and_test, "addic   %0, %0, 1", int, int, (ret==0) )
143
+ATOMIC_FUNC_DECL(dec_and_test, "addic  %0, %0, -1", int, int, (ret==0) )
144
+ATOMIC_FUNC_DECL3(get_and_set, /* no extra op needed */ , int, int,  ret)
145
+
146
+ATOMIC_FUNC_DECL(inc,      "addic  %0, %0,  1", long, void, /* no return */ )
147
+ATOMIC_FUNC_DECL(dec,      "addic %0, %0,  -1", long, void, /* no return */ )
148
+ATOMIC_FUNC_DECL1(and,     "and     %0, %0, %3",long, void, /* no return */ )
149
+ATOMIC_FUNC_DECL1(or,      "or     %0, %0, %3", long, void, /* no return */ )
150
+ATOMIC_FUNC_DECL(inc_and_test, "addic   %0, %0, 1", long, long, (ret==0) )
151
+ATOMIC_FUNC_DECL(dec_and_test, "addic  %0, %0, -1", long, long, (ret==0) )
152
+ATOMIC_FUNC_DECL3(get_and_set, /* no extra op needed */ , long, long,  ret)
153
+
154
+
155
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
156
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
157
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
158
+#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
159
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
160
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
161
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
162
+
163
+
164
+/* with integrated membar */
165
+
166
+#define mb_atomic_set_int(v, i) \
167
+	do{ \
168
+		membar(); \
169
+		atomic_set_int(v, i); \
170
+	}while(0)
171
+
172
+
173
+
174
+inline static int mb_atomic_get_int(volatile int* v)
175
+{
176
+	membar();
177
+	return atomic_get_int(v);
178
+}
179
+
180
+
181
+#define mb_atomic_inc_int(v) \
182
+	do{ \
183
+		membar(); \
184
+		atomic_inc_int(v); \
185
+	}while(0)
186
+
187
+#define mb_atomic_dec_int(v) \
188
+	do{ \
189
+		membar(); \
190
+		atomic_dec_int(v); \
191
+	}while(0)
192
+
193
+#define mb_atomic_or_int(v, m) \
194
+	do{ \
195
+		membar(); \
196
+		atomic_or_int(v, m); \
197
+	}while(0)
198
+
199
+#define mb_atomic_and_int(v, m) \
200
+	do{ \
201
+		membar(); \
202
+		atomic_and_int(v, m); \
203
+	}while(0)
204
+
205
+inline static int mb_atomic_inc_and_test_int(volatile int* v)
206
+{
207
+	membar();
208
+	return atomic_inc_and_test_int(v);
209
+}
210
+
211
+inline static int mb_atomic_dec_and_test_int(volatile int* v)
212
+{
213
+	membar();
214
+	return atomic_dec_and_test_int(v);
215
+}
216
+
217
+
218
+inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
219
+{
220
+	membar();
221
+	return atomic_get_and_set_int(v, i);
222
+}
223
+
224
+
225
+
226
+#define mb_atomic_set_long(v, i) \
227
+	do{ \
228
+		membar(); \
229
+		atomic_set_long(v, i); \
230
+	}while(0)
231
+
232
+
233
+
234
+inline static long mb_atomic_get_long(volatile long* v)
235
+{
236
+	membar();
237
+	return atomic_get_long(v);
238
+}
239
+
240
+
241
+#define mb_atomic_inc_long(v) \
242
+	do{ \
243
+		membar(); \
244
+		atomic_inc_long(v); \
245
+	}while(0)
246
+
247
+
248
+#define mb_atomic_dec_long(v) \
249
+	do{ \
250
+		membar(); \
251
+		atomic_dec_long(v); \
252
+	}while(0)
253
+
254
+#define mb_atomic_or_long(v, m) \
255
+	do{ \
256
+		membar(); \
257
+		atomic_or_long(v, m); \
258
+	}while(0)
259
+
260
+#define mb_atomic_and_long(v, m) \
261
+	do{ \
262
+		membar(); \
263
+		atomic_and_long(v, m); \
264
+	}while(0)
265
+
266
+inline static long mb_atomic_inc_and_test_long(volatile long* v)
267
+{
268
+	membar();
269
+	return atomic_inc_and_test_long(v);
270
+}
271
+
272
+inline static long mb_atomic_dec_and_test_long(volatile long* v)
273
+{
274
+	membar();
275
+	return atomic_dec_and_test_long(v);
276
+}
277
+
278
+
279
+inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
280
+{
281
+	membar();
282
+	return atomic_get_and_set_long(v, l);
283
+}
284
+
285
+
286
+#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
287
+#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
288
+#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
289
+#define mb_atomic_or(var, mask)  mb_atomic_or_int(&(var)->val, (mask))
290
+#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
291
+#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
292
+#define mb_atomic_get(var)	mb_atomic_get_int(&(var)->val)
293
+#define mb_atomic_set(var, i)	mb_atomic_set_int(&(var)->val, i)
294
+#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
295
+
296
+#endif
0 297
new file mode 100644
... ...
@@ -0,0 +1,60 @@
1
+/* 
2
+ * $Id$
3
+ * 
4
+ * Copyright (C) 2006 iptelorg GmbH
5
+ *
6
+ * This file is part of ser, a free SIP server.
7
+ *
8
+ * ser is free software; you can redistribute it and/or modify
9
+ * it under the terms of the GNU General Public License as published by
10
+ * the Free Software Foundation; either version 2 of the License, or
11
+ * (at your option) any later version
12
+ *
13
+ * For a license to use the ser software under conditions
14
+ * other than those described here, or to purchase support for this
15
+ * software, please contact iptel.org by e-mail at the following addresses:
16
+ *    info@iptel.org
17
+ *
18
+ * ser is distributed in the hope that it will be useful,
19
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
20
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21
+ * GNU General Public License for more details.
22
+ *
23
+ * You should have received a copy of the GNU General Public License
24
+ * along with this program; if not, write to the Free Software
25
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26
+ */
27
+/*
28
+ *  memory barriers for sparc32 (version < v9)
29
+ *  see atomic_ops.h for more details 
30
+ *
31
+ * Config defines: NOSMP
32
+ */
33
+/* 
34
+ * History:
35
+ * --------
36
+ *  2006-03-28  created by andrei
37
+ */
38
+
39
+
40
+#ifndef _atomic_sparc_h
41
+#define _atomic_sparc_h
42
+
43
+#define HAVE_ASM_INLINE_MEMBAR
44
+
45
+
46
+#warning "sparc32 atomic operations support not tested"
47
+
48
+#ifdef NOSMP
49
+#define membar() asm volatile ("" : : : "memory") /* gcc barrier: don't cache values across it */
50
+#define membar_read()  membar()
51
+#define membar_write() membar()
52
+#else /* SMP */
53
+#define membar_write() asm volatile ("stbar \n\t" : : : "memory") 
54
+#define membar() membar_write()
55
+#define membar_read() asm volatile ("" : : : "memory") 
56
+#endif /* NOSMP */
57
+
58
+
59
+
60
+#endif
0 61
new file mode 100644
... ...
@@ -0,0 +1,286 @@
1
+/* 
2
+ * $Id$
3
+ * 
4
+ * Copyright (C) 2006 iptelorg GmbH
5
+ *
6
+ * This file is part of ser, a free SIP server.
7
+ *
8
+ * ser is free software; you can redistribute it and/or modify
9
+ * it under the terms of the GNU General Public License as published by
10
+ * the Free Software Foundation; either version 2 of the License, or
11
+ * (at your option) any later version
12
+ *
13
+ * For a license to use the ser software under conditions
14
+ * other than those described here, or to purchase support for this
15
+ * software, please contact iptel.org by e-mail at the following addresses:
16
+ *    info@iptel.org
17
+ *
18
+ * ser is distributed in the hope that it will be useful,
19
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
20
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21
+ * GNU General Public License for more details.
22
+ *
23
+ * You should have received a copy of the GNU General Public License
24
+ * along with this program; if not, write to the Free Software
25
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26
+ */
27
+/*
28
+ *  atomic operations and memory barriers (sparc64 version, 32 and 64 bit modes)
29
+ *  WARNING: atomic ops do not include memory barriers
30
+ *  see atomic_ops.h for more details 
31
+ *
32
+ *  Config defs: - SPARC64_MODE (if defined long is assumed to be 64 bits
33
+ *                               else long & void* are assumed to be 32 for
34
+ *                               sparc32plus code)
35
+ *               - NOSMP
36
+ */
37
+/* 
38
+ * History:
39
+ * --------
40
+ *  2006-03-28  created by andrei
41
+ */
42
+
43
+
44
+#ifndef _atomic_sparc64_h
45
+#define _atomic_sparc64_h
46
+
47
+#define HAVE_ASM_INLINE_ATOMIC_OPS
48
+#define HAVE_ASM_INLINE_MEMBAR
49
+
50
+
51
+
52
+/* try to guess if in SPARC64_MODE */
53
+#if ! defined SPARC64_MODE && \
54
+	(defined __LP64__ || defined _LP64 || defined __arch64__)
55
+#define SPARC64_MODE
56
+#endif
57
+
58
+
59
+#ifdef NOSMP
60
+#define membar() asm volatile ("" : : : "memory") /* gcc barrier: don't cache values across it */
61
+#define membar_read()  membar()
62
+#define membar_write() membar()
63
+#else /* SMP */
64
+#define membar() \
65
+	asm volatile ( \
66
+			"membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad \n\t" \
67
+			: : : "memory")
68
+
69
+#define membar_read() asm volatile ("membar #LoadLoad \n\t" : : : "memory")
70
+#define membar_write() asm volatile ("membar #StoreStore \n\t" : : : "memory")
71
+#endif /* NOSMP */
72
+
73
+
74
+
75
+/* 32 bit version, op should store the result in %1, and use %0 as input,
76
+ *  both %0 and %1 are modified */
77
+#define ATOMIC_ASM_OP_int(op)\
78
+	"   ldsw [%3], %0 \n\t"  /* signed or lduw? */ \
79
+	"1: " op " \n\t" \
80
+	"   cas  [%3], %0, %1 \n\t" \
81
+	"   cmp %0, %1 \n\t" \
82
+	"   bne,a,pn  %%icc, 1b \n\t"  /* predict not taken, annul */ \
83
+	"   mov %1, %0\n\t"  /* delay slot */
84
+
85
+#ifdef SPARC64_MODE
86
+/* 64 bit version, same as above */
87
+#define ATOMIC_ASM_OP_long(op)\
88
+	"   ldx [%3], %0 \n\t" \
89
+	"1: " op " \n\t" \
90
+	"   casx  [%3], %0, %1 \n\t" \
91
+	"   cmp %0, %1 \n\t" \
92
+	"   bne,a,pn  %%xcc, 1b \n\t"  /* predict not taken, annul */ \
93
+	"   mov %1, %0\n\t"  /* delay slot */
94
+	
95
+#else /* no SPARC64_MODE => 32bit mode on a sparc64*/
96
+#define ATOMIC_ASM_OP_long(op) ATOMIC_ASM_OP_int(op)
97
+#endif
98
+
99
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
100
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
101
+	{ \
102
+		P_TYPE ret, tmp; \
103
+		asm volatile( \
104
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
105
+			: "=&r"(ret), "=&r"(tmp), "=m"(*var) : "r"(var) : "cc" \
106
+			); \
107
+		return RET_EXPR; \
108
+	}
109
+
110
+
111
+/* same as above, but takes an extra param, v, which goes in %4 */
112
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
113
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
114
+															P_TYPE v) \
115
+	{ \
116
+		P_TYPE ret, tmp; \
117
+		asm volatile( \
118
+			ATOMIC_ASM_OP_##P_TYPE(OP) \
119
+			: "=&r"(ret), "=&r"(tmp), "=m"(*var) : "r"(var), "r"(v) : "cc" \
120
+			); \
121
+		return RET_EXPR; \
122
+	}
123
+
124
+
125
+
126
+
127
+ATOMIC_FUNC_DECL(inc,      "add  %0,  1, %1", int, void, /* no return */ )
128
+ATOMIC_FUNC_DECL(dec,      "sub  %0,  1, %1", int, void, /* no return */ )
129
+ATOMIC_FUNC_DECL1(and,     "and  %0, %4, %1", int, void, /* no return */ )
130
+ATOMIC_FUNC_DECL1(or,      "or   %0, %4, %1", int, void, /* no return */ )
131
+ATOMIC_FUNC_DECL(inc_and_test, "add   %0, 1, %1", int, int, ((ret+1)==0) )
132
+ATOMIC_FUNC_DECL(dec_and_test, "sub   %0, 1, %1", int, int, ((ret-1)==0) )
133
+ATOMIC_FUNC_DECL1(get_and_set, "mov %4, %1" , int, int,  ret)
134
+
135
+
136
+ATOMIC_FUNC_DECL(inc,      "add  %0,  1, %1", long, void, /* no return */ )
137
+ATOMIC_FUNC_DECL(dec,      "sub  %0,  1, %1", long, void, /* no return */ )
138
+ATOMIC_FUNC_DECL1(and,     "and  %0, %4, %1", long, void, /* no return */ )
139
+ATOMIC_FUNC_DECL1(or,      "or   %0, %4, %1", long, void, /* no return */ )
140
+ATOMIC_FUNC_DECL(inc_and_test, "add   %0, 1, %1", long, long, ((ret+1)==0) )
141
+ATOMIC_FUNC_DECL(dec_and_test, "sub   %0, 1, %1", long, long, ((ret-1)==0) )
142
+ATOMIC_FUNC_DECL1(get_and_set, "mov %4, %1" , long, long,  ret)
143
+
144
+
145
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
146
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
147
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
148
+#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
149
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
150
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
151
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
152
+
153
+
154
+/* with integrated membar */
155
+
156
+#define mb_atomic_set_int(v, i) \
157
+	do{ \
158
+		membar(); \
159
+		atomic_set_int(v, i); \
160
+	}while(0)
161
+
162
+
163
+
164
+inline static int mb_atomic_get_int(volatile int* v)
165
+{
166
+	membar();
167
+	return atomic_get_int(v);
168
+}
169
+
170
+
171
+#define mb_atomic_inc_int(v) \
172
+	do{ \
173
+		membar(); \
174
+		atomic_inc_int(v); \
175
+	}while(0)
176
+
177
+#define mb_atomic_dec_int(v) \
178
+	do{ \
179
+		membar(); \
180
+		atomic_dec_int(v); \
181
+	}while(0)
182
+
183
+#define mb_atomic_or_int(v, m) \
184
+	do{ \
185
+		membar(); \
186
+		atomic_or_int(v, m); \
187
+	}while(0)
188
+
189
+#define mb_atomic_and_int(v, m) \
190
+	do{ \
191
+		membar(); \
192
+		atomic_and_int(v, m); \
193
+	}while(0)
194
+
195
+inline static int mb_atomic_inc_and_test_int(volatile int* v)
196
+{
197
+	membar();
198
+	return atomic_inc_and_test_int(v);
199
+}
200
+
201
+inline static int mb_atomic_dec_and_test_int(volatile int* v)
202
+{
203
+	membar();
204
+	return atomic_dec_and_test_int(v);
205
+}
206
+
207
+
208
+inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
209
+{
210
+	membar();
211
+	return atomic_get_and_set_int(v, i);
212
+}
213
+
214
+
215
+
216
+#define mb_atomic_set_long(v, i) \
217
+	do{ \
218
+		membar(); \
219
+		atomic_set_long(v, i); \
220
+	}while(0)
221
+
222
+
223
+
224
+inline static long mb_atomic_get_long(volatile long* v)
225
+{
226
+	membar();
227
+	return atomic_get_long(v);
228
+}
229
+
230
+
231
+#define mb_atomic_inc_long(v) \
232
+	do{ \
233
+		membar(); \
234
+		atomic_inc_long(v); \
235
+	}while(0)
236
+
237
+
238
+#define mb_atomic_dec_long(v) \
239
+	do{ \
240
+		membar(); \
241
+		atomic_dec_long(v); \
242
+	}while(0)
243
+
244
+#define mb_atomic_or_long(v, m) \
245
+	do{ \
246
+		membar(); \
247
+		atomic_or_long(v, m); \
248
+	}while(0)
249
+
250
+#define mb_atomic_and_long(v, m) \
251
+	do{ \
252
+		membar(); \
253
+		atomic_and_long(v, m); \
254
+	}while(0)
255
+
256
+inline static long mb_atomic_inc_and_test_long(volatile long* v)
257
+{
258
+	membar();
259
+	return atomic_inc_and_test_long(v);
260
+}
261
+
262
+inline static long mb_atomic_dec_and_test_long(volatile long* v)
263
+{
264
+	membar();
265
+	return atomic_dec_and_test_long(v);
266
+}
267
+
268
+
269
+inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
270
+{
271
+	membar();
272
+	return atomic_get_and_set_long(v, l);
273
+}
274
+
275
+
276
+#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
277
+#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
278
+#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
279
+#define mb_atomic_or(var, mask)  mb_atomic_or_int(&(var)->val, (mask))
280
+#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
281
+#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
282
+#define mb_atomic_get(var)	mb_atomic_get_int(&(var)->val)
283
+#define mb_atomic_set(var, i)	mb_atomic_set_int(&(var)->val, i)
284
+#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
285
+
286
+#endif
0 287
new file mode 100644
... ...
@@ -0,0 +1,233 @@
1
+/* 
2
+ * $Id$
3
+ * 
4
+ * Copyright (C) 2006 iptelorg GmbH
5
+ *
6
+ * This file is part of ser, a free SIP server.
7
+ *
8
+ * ser is free software; you can redistribute it and/or modify
9
+ * it under the terms of the GNU General Public License as published by
10
+ * the Free Software Foundation; either version 2 of the License, or
11
+ * (at your option) any later version
12
+ *
13
+ * For a license to use the ser software under conditions
14
+ * other than those described here, or to purchase support for this
15
+ * software, please contact iptel.org by e-mail at the following addresses:
16
+ *    info@iptel.org
17
+ *
18
+ * ser is distributed in the hope that it will be useful,
19
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
20
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21
+ * GNU General Public License for more details.
22
+ *
23
+ * You should have received a copy of the GNU General Public License
24
+ * along with this program; if not, write to the Free Software
25
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26
+ */
27
+/*
28
+ *  atomic operations and memory barriers implemented using locks
29
+ *  (for architectures not yet supported via inline asm)
30
+ *
31
+ *  WARNING: atomic ops do not include memory barriers
32
+ *  see atomic_ops.h for more details 
33
+ *
34
+ *  Config defs: - NOSMP (membars are null in this case)
35
+ *               - HAVE_ASM_INLINE_MEMBAR (membars arleady defined =>
36
+ *                                          use them)
37
+ *               - HAVE_ASM_INLINE_ATOMIC_OPS (atomic ops already defined
38
+ *                                               => don't redefine them)
39
+ *
40
+ */
41
+/* 
42
+ * History:
43
+ * --------
44
+ *  2006-03-08  created by andrei
45
+ */
46
+
47
+#ifndef _atomic_unknown_h
48
+#define _atomic_unknown_h
49
+
50
+#include "../lock_ops.h"
51
+
52
+extern gen_lock_t* _atomic_lock; /* declared and init in ../atomic.c */
53
+
54
+#define atomic_lock    lock_get(_atomic_lock)
55
+#define atomic_unlock  lock_release(_atomic_lock)
56
+
57
+
58
+#ifndef HAVE_ASM_INLINE_MEMBAR
59
+
60
+#define ATOMIC_OPS_USE_LOCK
61
+
62
+
63
+#ifdef NOSMP
64
+#define membar()
65
+#else /* SMP */
66
+
67
+#warning no native memory barrier implementations, falling back to slow lock \
68
+	       based workaround
69
+
70
+/* memory barriers 
71
+ *  not a known cpu -> fall back lock/unlock: safe but costly  (it should 
72
+ *  include a memory barrier effect) */
73
+#define membar() \
74
+	do{\
75
+		atomic_lock; \
76
+		atomic_unlock; \
77
+	} while(0)
78
+#endif /* NOSMP */
79
+
80
+
81
+#define membar_write() membar()
82
+
83
+#define membar_read()  membar()
84
+
85
+#endif /* HAVE_ASM_INLINE_MEMBAR */
86
+
87
+
88
+#ifndef HAVE_ASM_INLINE_ATOMIC_OPS
89
+
90
+#ifndef ATOMIC_OPS_USE_LOCK
91
+#define ATOMIC_OPS_USE_LOCK
92
+#endif
93
+
94
+/* atomic ops */
95
+
96
+
97
+/* OP can include var (function param), no other var. is declared */
98
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
99
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
100
+	{ \
101
+		atomic_lock; \
102
+		OP ; \
103
+		atomic_unlock; \
104
+		return RET_EXPR; \
105
+	}
106
+
107
+
108
+/* like above, but takes an extra param: v =>
109
+ *  OP can use var and v (function params) */
110
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
111
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
112
+														P_TYPE v) \
113
+	{ \
114
+		atomic_lock; \
115
+		OP ; \
116
+		atomic_unlock; \
117
+		return RET_EXPR; \
118
+	}
119
+
120
+/* OP can include var (function param), and ret (return)
121
+ *  ( like ATOMIC_FUNC_DECL, but includes ret) */
122
+#define ATOMIC_FUNC_DECL_RET(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
123
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
124
+	{ \
125
+		P_TYPE ret; \
126
+		atomic_lock; \
127
+		OP ; \
128
+		atomic_unlock; \
129
+		return RET_EXPR; \
130
+	}
131
+
132
+/* like ATOMIC_FUNC_DECL1, but declares an extra variable: P_TYPE ret */
133
+#define ATOMIC_FUNC_DECL1_RET(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
134
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
135
+														P_TYPE v) \
136
+	{ \
137
+		P_TYPE ret; \
138
+		atomic_lock; \
139
+		OP ; \
140
+		atomic_unlock; \
141
+		return RET_EXPR; \
142
+	}
143
+
144
+ATOMIC_FUNC_DECL(inc,      (*var)++, int, void, /* no return */ )
145
+ATOMIC_FUNC_DECL(dec,      (*var)--, int, void, /* no return */ )
146
+ATOMIC_FUNC_DECL1(and,     *var&=v, int, void, /* no return */ )
147
+ATOMIC_FUNC_DECL1(or,      *var|=v, int, void, /* no return */ )
148
+ATOMIC_FUNC_DECL_RET(inc_and_test, ret=++(*var), int, int, (ret==0) )
149
+ATOMIC_FUNC_DECL_RET(dec_and_test, ret=--(*var), int, int, (ret==0) )
150
+ATOMIC_FUNC_DECL1_RET(get_and_set, ret=*var;*var=v , int, int,  ret)
151
+
152
+ATOMIC_FUNC_DECL(inc,      (*var)++, long, void, /* no return */ )
153
+ATOMIC_FUNC_DECL(dec,      (*var)--, long, void, /* no return */ )
154
+ATOMIC_FUNC_DECL1(and,     *var&=v, long, void, /* no return */ )
155
+ATOMIC_FUNC_DECL1(or,      *var|=v, long, void, /* no return */ )
156
+ATOMIC_FUNC_DECL_RET(inc_and_test, ret=++(*var), long, long, (ret==0) )
157
+ATOMIC_FUNC_DECL_RET(dec_and_test, ret=--(*var), long, long, (ret==0) )
158
+ATOMIC_FUNC_DECL1_RET(get_and_set, ret=*var;*var=v , long, long,  ret)
159
+
160
+
161
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
162
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
163
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
164
+#define atomic_or(var, mask)  atomic_or_int(&(var)->val, (mask))
165
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
166
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
167
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
168
+
169
+
170
+/* memory barrier versions, the same as "normal" versions (since the
171
+ *  locks act as membars), except for the set/get
172
+ */
173
+
174
+/* mb_atomic_{set,get} use membar() : if we're lucky we have membars
175
+ * for the arch. (e.g. sparc32) => membar() might be cheaper than lock/unlock */
176
+#define mb_atomic_set_int(v, i) \
177
+	do{ \
178
+		membar(); \
179
+		atomic_set_int(v, i); \
180
+	}while(0)
181
+
182
+inline static int  mb_atomic_get_int(volatile int* v)
183
+{
184
+		membar();
185
+		return atomic_get_int(v);
186
+}
187
+
188
+