@@ -48,6 +48,8 @@
 # 2006-03-30 64 bit mode compile by default on sparc64 (-m64), added
 # CC_GCC_LIKE_ASM and SPARC64_MODE (andrei)
 # sparc <= v8 support (andrei)
+# 2006-03-31 armv6 & mips64 support added
+# mips and arm set to NOSMP by default (andrei)
 
 
 # check if already included/exported
@@ -64,7 +66,7 @@ MAIN_NAME=ser
 VERSION = 0
 PATCHLEVEL = 10
 SUBLEVEL = 99
-EXTRAVERSION = -dev34
+EXTRAVERSION = -dev35
 
 SER_VER = $(shell expr $(VERSION) \* 1000000 + $(PATCHLEVEL) \* 1000 + \
 	$(SUBLEVEL) )
@@ -78,7 +80,8 @@ else
 endif
 
 ARCH := $(shell $(GETARCH) |sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
-	-e s/armv4l/arm/ -e "s/Power Macintosh/ppc/" \
+	-e s/armv[3-5].*/arm/ -e s/armv6.*/arm6/ \
+	-e "s/Power Macintosh/ppc/" \
 	-e "s/cobalt/mips2/" \
 	-e s/amd64/x86_64/ )
 # fix sparc -> sparc64
@@ -447,13 +450,16 @@ endif
 endif
 
 ifeq ($(ARCH), sparc)
-	# smp no supported on sparc32
-	DEFS+= -DNOSMP # FIXME
 	use_fast_lock=yes
 endif
 
 ifeq ($(ARCH), arm)
 	use_fast_lock=yes
+	DEFS+=-DNOSMP # very unlikely to have an smp arm
+endif
+
+ifeq ($(ARCH), arm6)
+	use_fast_lock=yes
 endif
 
 ifeq ($(ARCH), ppc)
@@ -467,6 +473,8 @@ endif
 ifeq ($(ARCH), mips)
 	# mips1 arch. (e.g. R3000) - no hardware locking support
 	use_fast_lock=no
+	DEFS+=-DMIPS_HAS_LLSC # likely
+	DEFS+=-DNOSMP # very likely
 endif
 
 ifeq ($(ARCH), mips2)
@@ -474,6 +482,11 @@ ifeq ($(ARCH), mips2)
 	use_fast_lock=yes
 endif
 
+ifeq ($(ARCH), mips64)
+# mips2 arch and newer (mips3=R4000, mips4=R5000 a.s.o)
+	use_fast_lock=yes
+endif
+
 ifeq ($(ARCH), alpha)
 	use_fast_lock=yes
 endif
@@ -754,20 +767,20 @@ ifeq ($(ARCH), arm)
 ifeq ($(CC_NAME), gcc)
 	DEFS+=-DCC_GCC_LIKE_ASM
 	#common stuff
-	CFLAGS=-O9 -funroll-loops -Wcast-align $(PROFILE) \
+	CFLAGS=-O9 -funroll-loops $(PROFILE) \
 		-Wall
 	#if gcc 4.x+
 ifeq ($(CC_SHORTVER), 4.x)
-	CFLAGS+=-mcpu=strongarm1100 -minline-all-stringops \
-		-ftree-vectorize
+	CFLAGS+= -ftree-vectorize
+	# not supported on arm: -minline-all-stringops
 else
 	#if gcc 3.4+
 ifeq ($(CC_SHORTVER), 3.4)
-	CFLAGS+= -mcpu=strongarm1100
+	CFLAGS+=
 else
 	#if gcc 3.0
 ifeq ($(CC_SHORTVER), 3.0)
-	CFLAGS+= -mcpu=strongarm1100
+	CFLAGS+=
 	#-mcpu=athlon
 else
 ifeq ($(CC_SHORTVER), 2.9x) #older gcc version (2.9[1-5])
@@ -791,6 +804,48 @@ $(error Unsupported compiler ($(CC):$(CC_NAME)), try gcc)
 endif #CC_NAME, gcc
 endif #ARCH, arm
 
+	#if armv6 cpu
+ifeq ($(ARCH), arm6)
+	# if gcc
+ifeq ($(CC_NAME), gcc)
+	DEFS+=-DCC_GCC_LIKE_ASM
+	#common stuff
+	CFLAGS=-march=armv6 -O9 -funroll-loops \
+		$(PROFILE) -Wall
+	#if gcc 4.x+
+ifeq ($(CC_SHORTVER), 4.x)
+	CFLAGS+= -ftree-vectorize
+else
+	#if gcc 3.4+
+ifeq ($(CC_SHORTVER), 3.4)
+	CFLAGS+=
+else
+	#if gcc 3.0
+ifeq ($(CC_SHORTVER), 3.0)
+	CFLAGS+=
+	#-mcpu=athlon
+else
+ifeq ($(CC_SHORTVER), 2.9x) #older gcc version (2.9[1-5])
+$(warning Old gcc detected ($(CC_SHORTVER)), use gcc 3.0.x \
+	for better results)
+
+	CFLAGS+=
+else
+	#really old version
+$(warning You are using an old and unsupported gcc \
+	version ($(CC_SHORTVER)), compile at your own risk!)
+
+endif # CC_SHORTVER, 2.9x
+endif # CC_SHORTVER, 3.0
+endif # CC_SHORTVER, 3.4
+endif # CC_SHORTVER, 4.0
+
+else # CC_NAME, gcc
+	#other compilers
+$(error Unsupported compiler ($(CC):$(CC_NAME)), try gcc)
+endif #CC_NAME, gcc
+endif #ARCH, arm6
+
 #if mips (R3000)
 ifeq ($(ARCH), mips)
 	# if gcc
@@ -873,6 +928,45 @@ $(error Unsupported compiler ($(CC):$(CC_NAME)), try gcc)
 endif #CC_NAME, gcc
 endif #ARCH, mips2
 
+#if >=mips64
+ifeq ($(ARCH), mips64)
+	# if gcc
+ifeq ($(CC_NAME), gcc)
+	DEFS+=-DCC_GCC_LIKE_ASM
+	#common stuff
+	CFLAGS= -mips64 -O9 -funroll-loops $(PROFILE) \
+		-Wall
+	#if gcc 4.0+
+ifeq ($(CC_SHORTVER), 4.x)
+	CFLAGS+=-minline-all-stringops -ftree-vectorize
+else
+	#if gcc 3.4+
+ifeq ($(CC_SHORTVER), 3.4)
+	CFLAGS+=
+else
+	#if gcc 3.0
+ifeq ($(CC_SHORTVER), 3.0)
+	CFLAGS+=
+else
+ifeq ($(CC_SHORTVER), 2.9x) #older gcc version (2.9[1-5])
+$(warning Old gcc detected ($(CC_SHORTVER)), use gcc 3.0.x \
+	for better results)
+	CFLAGS+=
+else
+	#really old version
+$(warning You are using an old and unsupported gcc \
+	version ($(CC_SHORTVER)), compile at your own risk!)
+
+endif # CC_SHORTVER, 2.9x
+endif # CC_SHORTVER, 3.0
+endif # CC_SHORTVER, 3.4
+endif # CC_SHORTVER, 4.x
+
+else # CC_NAME, gcc
+	#other compilers
+$(error Unsupported compiler ($(CC):$(CC_NAME)), try gcc)
+endif #CC_NAME, gcc
+endif #ARCH, mips64
 
 #if alpha
 ifeq ($(ARCH), alpha)
@@ -883,7 +977,8 @@ ifeq ($(CC_NAME), gcc)
 	CFLAGS= -O9 -funroll-loops $(PROFILE) -Wall
 	#if gcc 4.0+
 ifeq ($(CC_SHORTVER), 4.x)
-	CFLAGS+=-minline-all-stringops
+	CFLAGS+=
+	# not supported: -minline-all-stringops
 else
 	#if gcc 3.4+
 ifeq ($(CC_SHORTVER), 3.4)
new file mode 100644
@@ -0,0 +1,319 @@
+/*
+ * $Id$
+ *
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * This file is part of ser, a free SIP server.
+ *
+ * ser is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version
+ *
+ * For a license to use the ser software under conditions
+ * other than those described here, or to purchase support for this
+ * software, please contact iptel.org by e-mail at the following addresses:
+ *    info@iptel.org
+ *
+ * ser is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * atomic operations and memory barriers (alpha specific)
+ * WARNING: atomic ops do not include memory barriers
+ * see atomic_ops.h for more details
+ *
+ * Config defines: - NOSMP
+ *                 - __CPU_alpha
+ */
+/*
+ * History:
+ * --------
+ * 2006-03-31 created by andrei
+ */
+
+
+#ifndef _atomic_alpha_h
+#define _atomic_alpha_h
+
+#define HAVE_ASM_INLINE_ATOMIC_OPS
+#define HAVE_ASM_INLINE_MEMBAR
+
+#warning alpha atomic code was not tested, please report problems to \
+	serdev@iptel.org or andrei@iptel.org
+
+#ifdef NOSMP
+#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
+#define membar_read() membar()
+#define membar_write() membar()
+#else
+
+#define membar() asm volatile (" mb \n\t" : : : "memory" )
+#define membar_read() membar()
+#define membar_write() asm volatile (" wmb \n\t" : : : "memory" )
+
+#endif /* NOSMP */
+
+
+
+/* main asm block
+ * if store failes, jump _forward_ (optimization, because back jumps are
+ * always predicted to happen on alpha )*/
+#define ATOMIC_ASM_OP00_int(op) \
+	"1: ldl_l %0, %2 \n\t" \
+	" " op "\n\t" \
+	" stl_c %0, %2 \n\t" \
+	" beq %0, 2f \n\t" \
+	".subsection 2 \n\t" \
+	"2: br 1b \n\t" \
+	".previous \n\t"
+
+/* as above, but output in %1 instead of %0 (%0 is not clobbered) */
+#define ATOMIC_ASM_OP01_int(op) \
+	"1: ldl_l %0, %3 \n\t" \
+	" " op "\n\t" \
+	" stl_c %1, %3 \n\t" \
+	" beq %1, 2f \n\t" \
+	".subsection 2 \n\t" \
+	"2: br 1b \n\t" \
+	".previous \n\t"
+
+#define ATOMIC_ASM_OP00_long(op) \
+	"1: ldq_l %0, %2 \n\t" \
+	" " op "\n\t" \
+	" stq_c %0, %2 \n\t" \
+	" beq %0, 2f \n\t" \
+	".subsection 2 \n\t" \
+	"2: br 1b \n\t" \
+	".previous \n\t"
+
+/* as above, but output in %1 instead of %0 (%0 is not clobbered) */
+#define ATOMIC_ASM_OP01_long(op) \
+	"1: ldq_l %0, %3 \n\t" \
+	" " op "\n\t" \
+	" stq_c %1, %3 \n\t" \
+	" beq %1, 2f \n\t" \
+	".subsection 2 \n\t" \
+	"2: br 1b \n\t" \
+	".previous \n\t"
+
+
+
+/* input in %0, output in %0 */
+#define ATOMIC_FUNC_DECL0_0(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
+	{ \
+		P_TYPE ret; \
+		asm volatile( \
+			ATOMIC_ASM_OP00_##P_TYPE(OP) : "=&r"(ret), "=m"(*var) : "m"(*var) \
+		); \
+		return RET_EXPR; \
+	}
+
+
+/* input in %0, and %1 (param), output in %1, %0 goes in ret */
+#define ATOMIC_FUNC_DECL01_1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+							P_TYPE v ) \
+	{ \
+		P_TYPE ret; \
+		asm volatile( \
+			ATOMIC_ASM_OP01_##P_TYPE(OP) \
+			: "=&r"(ret), "+r"(v), "=m"(*var) : "m"(*var) \
+		); \
+		return RET_EXPR; \
+	}
+
+
+/* input in %0, output in %1, %0 goes in ret */
+#define ATOMIC_FUNC_DECL0_1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			ATOMIC_ASM_OP01_##P_TYPE(OP) \
+			: "=&r"(ret), "=&r"(tmp), "=m"(*var) : "m"(*var) \
+		); \
+		return RET_EXPR; \
+	}
+
+
+/* input in %0 and %3 (param), output in %0 */
+#define ATOMIC_FUNC_DECL03_0(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+							P_TYPE v) \
+	{ \
+		P_TYPE ret; \
+		asm volatile( \
+			ATOMIC_ASM_OP00_##P_TYPE(OP) \
+			: "=&r"(ret), "=m"(*var) : "m"(*var), "r"(v) \
+		); \
+		return RET_EXPR; \
+	}
+
+
+ATOMIC_FUNC_DECL0_0(inc, "addl %0, 1, %0", int, void, /* no return */ )
+ATOMIC_FUNC_DECL0_0(dec, "subl %0, 1, %0", int, void, /* no return */ )
+ATOMIC_FUNC_DECL03_0(and, "and %0, %3, %0", int, void, /* no return */ )
+ATOMIC_FUNC_DECL03_0(or, "bis %0, %3, %0", int, void, /* no return */ )
+ATOMIC_FUNC_DECL0_1(inc_and_test, "addl %0, 1, %1", int, int, (ret+1)==0 )
+ATOMIC_FUNC_DECL0_1(dec_and_test, "subl %0, 1, %1", int, int, (ret-1)==0 )
+ATOMIC_FUNC_DECL01_1(get_and_set, "" /* nothing needed */, int, int, ret )
+
+ATOMIC_FUNC_DECL0_0(inc, "addq %0, 1, %0", long, void, /* no return */ )
+ATOMIC_FUNC_DECL0_0(dec, "subq %0, 1, %0", long, void, /* no return */ )
+ATOMIC_FUNC_DECL03_0(and, "and %0, %3, %0", long, void, /* no return */ )
+ATOMIC_FUNC_DECL03_0(or, "bis %0, %3, %0", long, void, /* no return */ )
+ATOMIC_FUNC_DECL0_1(inc_and_test, "addq %0, 1, %1", long, long, (ret+1)==0 )
+ATOMIC_FUNC_DECL0_1(dec_and_test, "subq %0, 1, %1", long, long, (ret-1)==0 )
+ATOMIC_FUNC_DECL01_1(get_and_set, "" /* nothing needed */, long, long, ret )
+
+
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
+#define atomic_or(var, mask) atomic_or_int(&(var)->val, (mask))
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
+
+
+/* with integrated membar */
+
+#define mb_atomic_set_int(v, i) \
+	do{ \
+		membar(); \
+		atomic_set_int(v, i); \
+	}while(0)
+
+
+
+inline static int mb_atomic_get_int(volatile int* v)
+{
+	membar();
+	return atomic_get_int(v);
+}
+
+
+#define mb_atomic_inc_int(v) \
+	do{ \
+		membar(); \
+		atomic_inc_int(v); \
+	}while(0)
+
+#define mb_atomic_dec_int(v) \
+	do{ \
+		membar(); \
+		atomic_dec_int(v); \
+	}while(0)
+
+#define mb_atomic_or_int(v, m) \
+	do{ \
+		membar(); \
+		atomic_or_int(v, m); \
+	}while(0)
+
+#define mb_atomic_and_int(v, m) \
+	do{ \
+		membar(); \
+		atomic_and_int(v, m); \
+	}while(0)
+
+inline static int mb_atomic_inc_and_test_int(volatile int* v)
+{
+	membar();
+	return atomic_inc_and_test_int(v);
+}
+
+inline static int mb_atomic_dec_and_test_int(volatile int* v)
+{
+	membar();
+	return atomic_dec_and_test_int(v);
+}
+
+
+inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
+{
+	membar();
+	return atomic_get_and_set_int(v, i);
+}
+
+
+
+#define mb_atomic_set_long(v, i) \
+	do{ \
+		membar(); \
+		atomic_set_long(v, i); \
+	}while(0)
+
+
+
+inline static long mb_atomic_get_long(volatile long* v)
+{
+	membar();
+	return atomic_get_long(v);
+}
+
+
+#define mb_atomic_inc_long(v) \
+	do{ \
+		membar(); \
+		atomic_inc_long(v); \
+	}while(0)
+
+
+#define mb_atomic_dec_long(v) \
+	do{ \
+		membar(); \
+		atomic_dec_long(v); \
+	}while(0)
+
+#define mb_atomic_or_long(v, m) \
+	do{ \
+		membar(); \
+		atomic_or_long(v, m); \
+	}while(0)
+
+#define mb_atomic_and_long(v, m) \
+	do{ \
+		membar(); \
+		atomic_and_long(v, m); \
+	}while(0)
+
+inline static long mb_atomic_inc_and_test_long(volatile long* v)
+{
+	membar();
+	return atomic_inc_and_test_long(v);
+}
+
+inline static long mb_atomic_dec_and_test_long(volatile long* v)
+{
+	membar();
+	return atomic_dec_and_test_long(v);
+}
+
+
+inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
+{
+	membar();
+	return atomic_get_and_set_long(v, l);
+}
+
+
+#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
+#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
+#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
+#define mb_atomic_or(var, mask) mb_atomic_or_int(&(var)->val, (mask))
+#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
+#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
+#define mb_atomic_get(var) mb_atomic_get_int(&(var)->val)
+#define mb_atomic_set(var, i) mb_atomic_set_int(&(var)->val, i)
+#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
+
+#endif
new file mode 100644
@@ -0,0 +1,315 @@
+/*
+ * $Id$
+ *
+ * Copyright (C) 2006 iptelorg GmbH
+ *
+ * This file is part of ser, a free SIP server.
+ *
+ * ser is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version
+ *
+ * For a license to use the ser software under conditions
+ * other than those described here, or to purchase support for this
+ * software, please contact iptel.org by e-mail at the following addresses:
+ *    info@iptel.org
+ *
+ * ser is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * atomic ops and memory barriers for arm (>= v3)
+ * see atomic_ops.h for more details
+ *
+ * Config defines: - NOSMP
+ *                 - __CPU_arm
+ *                 - __CPU_arm6 - armv6 support (supports atomic ops
+ *                                via ldrex/strex)
+ */
+/*
+ * History:
+ * --------
+ * 2006-03-31 created by andrei
+ */
+
+
+#ifndef _atomic_arm_h
+#define _atomic_arm_h
+
+
+
+#warning "arm atomic operations support not tested"
+
+#ifdef NOSMP
+#define HAVE_ASM_INLINE_MEMBAR
+#define membar() asm volatile ("" : : : "memory") /* gcc do not cache barrier*/
+#define membar_read() membar()
+#define membar_write() membar()
+#else /* SMP */
+#warning SMP not supported for arm atomic ops, try compiling with -DNOSMP
+/* fall back to default lock based barriers (don't define HAVE_ASM...) */
+#endif /* NOSMP */
+
+
+#ifdef __CPU_arm6
+
+
+#define HAVE_ASM_INLINE_ATOMIC_OPS
+
+/* hack to get some membars */
+#ifndef NOSMP
+#include "atomic_unknown.h"
+#endif
+
+/* main asm block
+ * use %0 as input and write the output in %1*/
+#define ATOMIC_ASM_OP(op) \
+	"1: ldrex %0, [%3] \n\t" \
+	" " op "\n\t" \
+	" strex %0, %1, [%3] \n\t" \
+	" cmp %0, #0 \n\t" \
+	" bne 1b \n\t"
+
+/* same as above but writes %4 instead of %1, and %0 will contain
+ * the prev. val*/
+#define ATOMIC_ASM_OP2(op) \
+	"1: ldrex %0, [%3] \n\t" \
+	" " op "\n\t" \
+	" strex %1, %4, [%3] \n\t" \
+	" cmp %1, #0 \n\t" \
+	" bne 1b \n\t"
+
+/* no extra param, %0 contains *var, %1 should contain the result */
+#define ATOMIC_FUNC_DECL(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			ATOMIC_ASM_OP(OP) \
+			: "=&r"(tmp), "=&r"(ret), "=m"(*var) : "r"(var) : "cc" \
+		); \
+		return RET_EXPR; \
+	}
+
+/* one extra param in %4 */
+#define ATOMIC_FUNC_DECL1(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+							P_TYPE v) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			ATOMIC_ASM_OP(OP) \
+			: "=&r"(tmp), "=&r"(ret), "=m"(*var) : "r"(var), "r"(v) : "cc" \
+		); \
+		return RET_EXPR; \
+	}
+
+
+/* as above, but %4 should contain the result, and %0 is returned*/
+#define ATOMIC_FUNC_DECL2(NAME, OP, P_TYPE, RET_TYPE, RET_EXPR) \
+	inline static RET_TYPE atomic_##NAME##_##P_TYPE (volatile P_TYPE *var, \
+							P_TYPE v) \
+	{ \
+		P_TYPE ret, tmp; \
+		asm volatile( \
+			ATOMIC_ASM_OP2(OP) \
+			: "=&r"(ret), "=&r"(tmp), "=m"(*var) : "r"(var), "r"(v) : "cc" \
+		); \
+		return RET_EXPR; \
+	}
+
+
+
+ATOMIC_FUNC_DECL(inc, "add %1, %0, #1", int, void, /* no return */ )
+ATOMIC_FUNC_DECL(dec, "sub %1, %0, #1", int, void, /* no return */ )
+ATOMIC_FUNC_DECL1(and, "and %1, %0, %4", int, void, /* no return */ )
+ATOMIC_FUNC_DECL1(or, "orr %1, %0, %4", int, void, /* no return */ )
+ATOMIC_FUNC_DECL(inc_and_test, "add %1, %0, #1", int, int, ret )
+ATOMIC_FUNC_DECL(dec_and_test, "sub %1, %0, #1", int, int, ret )
+ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , int, int, ret)
+
+ATOMIC_FUNC_DECL(inc, "add %1, %0, #1", long, void, /* no return */ )
+ATOMIC_FUNC_DECL(dec, "sub %1, %0, #1", long, void, /* no return */ )
+ATOMIC_FUNC_DECL1(and, "and %1, %0, %4", long, void, /* no return */ )
+ATOMIC_FUNC_DECL1(or, "orr %1, %0, %4", long, void, /* no return */ )
+ATOMIC_FUNC_DECL(inc_and_test, "add %1, %0, #1", long, long, ret )
+ATOMIC_FUNC_DECL(dec_and_test, "sub %1, %0, #1", long, long, ret )
+ATOMIC_FUNC_DECL2(get_and_set, /* no extra op needed */ , long, long, ret)
+
+#define atomic_inc(var) atomic_inc_int(&(var)->val)
+#define atomic_dec(var) atomic_dec_int(&(var)->val)
+#define atomic_and(var, mask) atomic_and_int(&(var)->val, (mask))
+#define atomic_or(var, mask) atomic_or_int(&(var)->val, (mask))
+#define atomic_dec_and_test(var) atomic_dec_and_test_int(&(var)->val)
+#define atomic_inc_and_test(var) atomic_inc_and_test_int(&(var)->val)
+#define atomic_get_and_set(var, i) atomic_get_and_set_int(&(var)->val, i)
+
+
+/* with integrated membar */
+
+#define mb_atomic_set_int(v, i) \
+	do{ \
+		membar(); \
+		atomic_set_int(v, i); \
+	}while(0)
+
+
+
+inline static int mb_atomic_get_int(volatile int* v)
+{
+	membar();
+	return atomic_get_int(v);
+}
+
+
+#define mb_atomic_inc_int(v) \
+	do{ \
+		membar(); \
+		atomic_inc_int(v); \
+	}while(0)
+
+#define mb_atomic_dec_int(v) \
+	do{ \
+		membar(); \
+		atomic_dec_int(v); \
+	}while(0)
+
+#define mb_atomic_or_int(v, m) \
+	do{ \
+		membar(); \
+		atomic_or_int(v, m); \
+	}while(0)
+
+#define mb_atomic_and_int(v, m) \
+	do{ \
+		membar(); \
+		atomic_and_int(v, m); \
+	}while(0)
+
+inline static int mb_atomic_inc_and_test_int(volatile int* v)
+{
+	membar();
+	return atomic_inc_and_test_int(v);
+}
+
+inline static int mb_atomic_dec_and_test_int(volatile int* v)
+{
+	membar();
+	return atomic_dec_and_test_int(v);
+}
+
+
+inline static int mb_atomic_get_and_set_int(volatile int* v, int i)
+{
+	membar();
+	return atomic_get_and_set_int(v, i);
+}
+
+
+
+#define mb_atomic_set_long(v, i) \
+	do{ \
+		membar(); \
+		atomic_set_long(v, i); \
+	}while(0)
+
+
+
+inline static long mb_atomic_get_long(volatile long* v)
+{
+	membar();
+	return atomic_get_long(v);
+}
+
+
+#define mb_atomic_inc_long(v) \
+	do{ \
+		membar(); \
+		atomic_inc_long(v); \
+	}while(0)
+
+
+#define mb_atomic_dec_long(v) \
+	do{ \
+		membar(); \
+		atomic_dec_long(v); \
+	}while(0)
+
+#define mb_atomic_or_long(v, m) \
+	do{ \
+		membar(); \
+		atomic_or_long(v, m); \
+	}while(0)
+
+#define mb_atomic_and_long(v, m) \
+	do{ \
+		membar(); \
+		atomic_and_long(v, m); \
+	}while(0)
+
+inline static long mb_atomic_inc_and_test_long(volatile long* v)
+{
+	membar();
+	return atomic_inc_and_test_long(v);
+}
+
+inline static long mb_atomic_dec_and_test_long(volatile long* v)
+{
+	membar();
+	return atomic_dec_and_test_long(v);
+}
+
+
+inline static long mb_atomic_get_and_set_long(volatile long* v, long l)
+{
+	membar();
+	return atomic_get_and_set_long(v, l);
+}
+
+
+#define mb_atomic_inc(var) mb_atomic_inc_int(&(var)->val)
+#define mb_atomic_dec(var) mb_atomic_dec_int(&(var)->val)
+#define mb_atomic_and(var, mask) mb_atomic_and_int(&(var)->val, (mask))
+#define mb_atomic_or(var, mask) mb_atomic_or_int(&(var)->val, (mask))
+#define mb_atomic_dec_and_test(var) mb_atomic_dec_and_test_int(&(var)->val)
+#define mb_atomic_inc_and_test(var) mb_atomic_inc_and_test_int(&(var)->val)
+#define mb_atomic_get(var) mb_atomic_get_int(&(var)->val)
+#define mb_atomic_set(var, i) mb_atomic_set_int(&(var)->val, i)
+#define mb_atomic_get_and_set(var, i) mb_atomic_get_and_set_int(&(var)->val, i)
+
+
+#else /* ! __CPU_arm6 => __CPU_arm */
+
+/* no atomic ops for v <6 , only SWP supported
+ * Atomic ops could be implemented if one bit is sacrificed and used like
+ * a spinlock, e.g:
+ *       mov %r0, #0x1
+ *    1: swp %r1, %r0, [&atomic_val]
+ *       if (%r1 & 0x1) goto 1 # wait if first bit is 1
+ *       %r1>>=1 # restore the value (only 31 bits can be used )
+ *       %r1=op (%r1, ...)
+ *       %r1<<=1 # shift back the value, such that the first bit is 0
+ *       str %r1, [&atomic_val] # write the value
+ *
+ * However only 31 bits could be used (=> atomic_*_int and atomic_*_long
+ * would still have to be lock based, since in these cases we guarantee all
+ * the bits) and I'm not sure there would be a significant performance
+ * benefit when compared with the fallback lock based version:
+ *   lock(atomic_lock);
+ *   atomic_val=op(*atomic_val, ...)
+ *   unlock(atomic_lock);
+ *
+ * -- andrei
+ */
+
+#endif /* __CPU_arm6 */
+
+
+#endif
@@ -86,11 +86,13 @@
  * safe
  * __CPU_i386, __CPU_x86_64, X86_OOSTORE - see
  *                                    atomic/atomic_x86.h
- * __CPU_mips, __CPU_mip2, __CPU_mip64, MIPS_HAS_LLSC - see
+ * __CPU_mips, __CPU_mips2, __CPU_mips64, MIPS_HAS_LLSC - see
  *                                    atomic/atomic_mip2.h
  * __CPU_ppc, __CPU_ppc64 - see atomic/atomic_ppc.h
  * __CPU_sparc - see atomic/atomic_sparc.h
  * __CPU_sparc64, SPARC64_MODE - see atomic/atomic_sparc64.h
+ * __CPU_arm, __CPU_arm6 - see atomic/atomic_arm.h
+ * __CPU_alpha - see atomic/atomic_alpha.h
  */
 /*
  * History:
@@ -147,6 +149,14 @@ inline static int atomic_get(atomic_t *v)
 
 #include "atomic/atomic_sparc.h"
 
+#elif defined __CPU_arm || defined __CPU_arm6
+
+#include "atomic/atomic_arm.h"
+
+#elif defined __CPU_alpha
+
+#include "atomic/atomic_alpha.h"
+
 #endif /* __CPU_xxx => no known cpu */
 
 #endif /* CC_GCC_LIKE_ASM */
@@ -97,7 +97,7 @@ inline static int tsl(fl_lock_t* lock)
 		: "=r"(val) : "r"(lock):"memory"
 	);
 
-#elif defined __CPU_arm
+#elif defined __CPU_arm || defined __CPU_arm6
 	asm volatile(
 			"# here \n\t"
 			"swpb %0, %1, [%2] \n\t"
@@ -121,7 +121,8 @@ inline static int tsl(fl_lock_t* lock)
 		: "r"(1), "b" (lock) :
 		"memory", "cc"
 	);
-#elif defined __CPU_mips2 || ( defined __CPU_mips && defined MIPS_HAS_LLSC )
+#elif defined __CPU_mips2 || ( defined __CPU_mips && defined MIPS_HAS_LLSC ) \
+	|| defined __CPU_mips64
 	long tmp;
 
 	asm volatile(
@@ -156,8 +157,7 @@ inline static int tsl(fl_lock_t* lock)
 		" mb \n\t"
 		"2: \n\t"
 		:"=&r" (val), "=m"(*lock), "=r"(tmp)
-		:"1"(*lock) /* warning on gcc 3.4: replace it with m or remove
-			it and use +m in the input line ? */
+		:"m"(*lock)
 		: "memory"
 	);
 #else
@@ -204,7 +204,7 @@ inline static void release_lock(fl_lock_t* lock)
 		: "r" (lock)
 		: "memory"
 	);
-#elif defined __CPU_arm
+#elif defined __CPU_arm || defined __CPU_arm6
 	asm volatile(
 		" str %0, [%1] \n\r"
 		: /*no outputs*/
@@ -223,7 +223,8 @@ inline static void release_lock(fl_lock_t* lock)
 		: "r"(0), "b" (lock)
 		: "memory"
 	);
-#elif defined __CPU_mips2 || ( defined __CPU_mips && defined MIPS_HAS_LLSC )
+#elif defined __CPU_mips2 || ( defined __CPU_mips && defined MIPS_HAS_LLSC ) \
+	|| defined __CPU_mips64
 	asm volatile(
 		".set push \n\t"
 		".set noreorder \n\t"