Browse code

- improved qm_malloc (added free lst hashtable) - quick i386 locking (in the dirty hack state)

Andrei Pelinescu-Onciul authored on 05/02/2002 01:29:27
Showing 9 changed files
... ...
@@ -53,7 +53,9 @@ ARCH = $(shell uname -s)
53 53
 
54 54
 DEFS+= -DNAME='"$(NAME)"' -DVERSION='"$(RELEASE)"' -DARCH='"$(ARCH)"' \
55 55
 	 -DDNS_IP_HACK  -DPKG_MALLOC -DSHM_MEM  -DSHM_MMAP \
56
-	-DVQ_MALLOC -DUSE_SYNONIM #-DBRUT_HACK #-DEXTRA_DEBUG #-DSTATIC_TM
56
+	 -DUSE_SYNONIM \
57
+	 -DFAST_LOCK -Di386
58
+	 #-DBRUT_HACK #-DEXTRA_DEBUG #-DSTATIC_TM
57 59
 	#-DEXTRA_DEBUG -DBRUT_HACK \
58 60
 	#-DVQ_MALLOC  -DDBG_LOCK  #-DSTATS
59 61
 	  #-DDBG_QM_MALLOC #-DVQ_MALLOC #-DNO_DEBUG
60 62
new file mode 100644
... ...
@@ -0,0 +1,66 @@
0
+/*
1
+ * fast architecture specific locking
2
+ *
3
+ * $Id$
4
+ *
5
+ * 
6
+ */
7
+
8
+
9
+
10
+#ifndef fastlock_h
11
+#define fastlock_h
12
+
13
+
14
+#include <sched.h>
15
+
16
+
17
+#ifdef i386
18
+
19
+
20
+typedef  volatile int lock_t;
21
+
22
+
23
+
24
+#define init_lock( l ) (l)=0
25
+
26
+
27
+
28
+/*test and set lock, ret 1 if lock held by someone else, 0 otherwise*/
29
+inline static int tsl(lock_t* lock)
30
+{
31
+	volatile char val;
32
+	
33
+	val=1;
34
+	asm volatile( 
35
+		" xchg %b0, %1" : "=q" (val), "=m" (*lock) : "0" (val) : "memory"
36
+	);
37
+	return val;
38
+}
39
+
40
+
41
+
42
+inline static void get_lock(lock_t* lock)
43
+{
44
+	
45
+	while(tsl(lock)){
46
+		sched_yield();
47
+	}
48
+}
49
+
50
+
51
+
52
+inline static void release_lock(lock_t* lock)
53
+{
54
+	char val;
55
+
56
+	val=0;
57
+	asm volatile(
58
+		" xchg %b0, %1" : "=q" (val), "=m" (*lock) : "0" (val) : "memory"
59
+	);
60
+}
61
+
62
+#endif
63
+
64
+
65
+#endif
... ...
@@ -26,6 +26,34 @@
26 26
 #define PREV_FRAG_END(f) \
27 27
 	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))
28 28
 
29
+
30
+#define FRAG_OVERHEAD	(sizeof(struct qm_frag)+sizeof(struct qm_frag_end))
31
+
32
+
33
+#define ROUNDUP(s)		(((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
34
+#define ROUNDDOWN(s)	(((s)%ROUNDTO)?((s)-ROUNDTO)/ROUNDTO*ROUNDTO:(s))
35
+
36
+
37
+
38
+	/* finds the hash value for s, s=ROUNDTO multiple*/
39
+#define GET_HASH(s)   ( ((s)<QM_MALLOC_OPTIMIZE)?(s)/ROUNDTO: \
40
+						QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
41
+							QM_MALLOC_OPTIMIZE_FACTOR+1 )
42
+
43
+
44
+/* computes hash number for big buckets*/
45
+inline static int big_hash_idx(int s)
46
+{
47
+	int idx;
48
+	/* s is rounded => s = k*2^n (ROUNDTO=2^n) 
49
+	 * index= i such that 2^i > s >= 2^(i-1)
50
+	 *
51
+	 * => index = number of the first non null bit in s*/
52
+	for (idx=31; !(s&0x80000000) ; s<<=1, idx--);
53
+	return idx;
54
+}
55
+
56
+
29 57
 #ifdef DBG_QM_MALLOC
30 58
 #define ST_CHECK_PATTERN   0xf0f0f0f0
31 59
 #define END_CHECK_PATTERN1 0xc0c0c0c0
... ...
@@ -60,6 +88,27 @@ static  void qm_debug_frag(struct qm_block* qm, struct qm_frag* f)
60 60
 
61 61
 
62 62
 
63
+static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
64
+{
65
+	struct qm_frag* f;
66
+	struct qm_frag* prev;
67
+	int hash;
68
+	
69
+	hash=GET_HASH(frag->size);
70
+	for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
71
+			f=f->u.nxt_free){
72
+		if (frag->size <= f->size) break;
73
+	}
74
+	/*insert it here*/
75
+	prev=FRAG_END(f)->prev_free;
76
+	prev->u.nxt_free=frag;
77
+	FRAG_END(frag)->prev_free=prev;
78
+	frag->u.nxt_free=f;
79
+	FRAG_END(f)->prev_free=frag;
80
+}
81
+
82
+
83
+
63 84
 /* init malloc and return a qm_block*/
64 85
 struct qm_block* qm_malloc_init(char* address, unsigned int size)
65 86
 {
... ...
@@ -67,17 +116,24 @@ struct qm_block* qm_malloc_init(char* address, unsigned int size)
67 67
 	char* end;
68 68
 	struct qm_block* qm;
69 69
 	unsigned int init_overhead;
70
+	int h;
70 71
 	
71 72
 	/* make address and size multiple of 8*/
72
-	start=(char*)( ((unsigned int)address%8)?((unsigned int)address+8)/8*8:
73
-			(unsigned int)address);
73
+	start=(char*)ROUNDUP((unsigned int) address);
74
+	printf("qm_malloc_init: QM_OPTIMIZE=%d, /ROUNDTO=%d\n",
75
+			QM_MALLOC_OPTIMIZE, QM_MALLOC_OPTIMIZE/ROUNDTO);
76
+	printf("qm_malloc_init: QM_HASH_SIZE=%d, qm_block size=%d\n",
77
+			QM_HASH_SIZE, sizeof(struct qm_block));
78
+	printf("qm_malloc_init(%x, %d), start=%x\n", address, size, start);
74 79
 	if (size<start-address) return 0;
75 80
 	size-=(start-address);
76
-	if (size <8) return 0;
77
-	size=(size%8)?(size-8)/8*8:size;
81
+	if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
82
+	size=ROUNDDOWN(size);
78 83
 	
79 84
 	init_overhead=sizeof(struct qm_block)+sizeof(struct qm_frag)+
80 85
 		sizeof(struct qm_frag_end);
86
+	printf("qm_malloc_init: size= %d, init_overhead=%d\n", size, init_overhead);
87
+	
81 88
 	if (size < init_overhead)
82 89
 	{
83 90
 		/* not enough mem to create our control structures !!!*/
... ...
@@ -95,42 +151,34 @@ struct qm_block* qm_malloc_init(char* address, unsigned int size)
95 95
 	qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
96 96
 	/* init initial fragment*/
97 97
 	qm->first_frag->size=size;
98
-	qm->first_frag->u.nxt_free=&(qm->free_lst);
99 98
 	qm->last_frag_end->size=size;
100
-	qm->last_frag_end->prev_free=&(qm->free_lst);
99
+	
101 100
 #ifdef DBG_QM_MALLOC
102 101
 	qm->first_frag->check=ST_CHECK_PATTERN;
103 102
 	qm->last_frag_end->check1=END_CHECK_PATTERN1;
104 103
 	qm->last_frag_end->check2=END_CHECK_PATTERN2;
105 104
 #endif
106
-	/* init free_lst* */
107
-	qm->free_lst.u.nxt_free=qm->first_frag;
108
-	qm->free_lst_end.prev_free=qm->first_frag;
109
-	qm->free_lst.size=0;
110
-	qm->free_lst_end.size=0;
105
+	/* init free_hash* */
106
+	for (h=0; h<QM_HASH_SIZE;h++){
107
+		qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
108
+		qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
109
+		qm->free_hash[h].head.size=0;
110
+		qm->free_hash[h].tail.size=0;
111
+	}
112
+	
113
+	/* link initial fragment into the free list*/
114
+	
115
+	qm_insert_free(qm, qm->first_frag);
116
+	
117
+	/*qm->first_frag->u.nxt_free=&(qm->free_lst);
118
+	  qm->last_frag_end->prev_free=&(qm->free_lst);
119
+	*/
111 120
 	
112 121
 	
113 122
 	return qm;
114 123
 }
115 124
 
116 125
 
117
-static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
118
-{
119
-	struct qm_frag* f;
120
-	struct qm_frag* prev;
121
-
122
-	for(f=qm->free_lst.u.nxt_free; f!=&(qm->free_lst); f=f->u.nxt_free){
123
-		if (frag->size < f->size) break;
124
-	}
125
-	/*insert it here*/
126
-	prev=FRAG_END(f)->prev_free;
127
-	prev->u.nxt_free=frag;
128
-	FRAG_END(frag)->prev_free=prev;
129
-	frag->u.nxt_free=f;
130
-	FRAG_END(f)->prev_free=frag;
131
-}
132
-
133
-
134 126
 
135 127
 static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
136 128
 {
... ...
@@ -148,6 +196,28 @@ static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
148 148
 
149 149
 
150 150
 
151
+static inline struct qm_frag* qm_find_free(struct qm_block* qm, 
152
+										unsigned int size)
153
+{
154
+	int hash;
155
+	struct qm_frag* f;
156
+
157
+	for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
158
+		for (f=qm->free_hash[hash].head.u.nxt_free; 
159
+					f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
160
+	#ifdef DBG_QM_MALLOC
161
+			list_cntr++;
162
+	#endif
163
+			if (f->size>=size) return f;
164
+		}
165
+	/*try in a bigger bucket*/
166
+	}
167
+	/* not found */
168
+	return 0;
169
+}
170
+
171
+
172
+
151 173
 #ifdef DBG_QM_MALLOC
152 174
 void* qm_malloc(struct qm_block* qm, unsigned int size, char* file, char* func,
153 175
 					unsigned int line)
... ...
@@ -159,7 +229,6 @@ void* qm_malloc(struct qm_block* qm, unsigned int size)
159 159
 	struct qm_frag_end* end;
160 160
 	struct qm_frag* n;
161 161
 	unsigned int rest;
162
-	unsigned int overhead;
163 162
 	
164 163
 #ifdef DBG_QM_MALLOC
165 164
 	unsigned int list_cntr;
... ...
@@ -169,69 +238,63 @@ void* qm_malloc(struct qm_block* qm, unsigned int size)
169 169
 			line);
170 170
 #endif
171 171
 	/*size must be a multiple of 8*/
172
-	size=(size%8)?(size+8)/8*8:size;
172
+	size=ROUNDUP(size);
173 173
 	if (size>(qm->size-qm->real_used)) return 0;
174
-	if (qm->free_lst.u.nxt_free==&(qm->free_lst)) return 0;
175 174
 	/*search for a suitable free frag*/
176
-	for (f=qm->free_lst.u.nxt_free; f!=&(qm->free_lst); f=f->u.nxt_free){
177
-#ifdef DBG_QM_MALLOC
178
-		list_cntr++;
179
-#endif
180
-		
181
-		if (f->size>=size){
182
-			/* we found it!*/
183
-			/*detach it from the free list*/
175
+	f=qm_find_free(qm, size);
176
+
177
+	if ((f=qm_find_free(qm, size))!=0){
178
+		/* we found it!*/
179
+		/*detach it from the free list*/
184 180
 #ifdef DBG_QM_MALLOC
185 181
 			qm_debug_frag(qm, f);
186 182
 #endif
187
-			qm_detach_free(qm, f);
188
-			/*mark it as "busy"*/
189
-			f->u.is_free=0;
190
-			
191
-			/*see if we'll use full frag, or we'll split it in 2*/
192
-			rest=f->size-size;
193
-			overhead=sizeof(struct qm_frag)+sizeof(struct qm_frag_end);
194
-			if (rest>overhead){
195
-				f->size=size;
196
-				/*split the fragment*/
197
-				end=FRAG_END(f);
198
-				end->size=size;
199
-				n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
200
-				n->size=rest-overhead;
201
-				FRAG_END(n)->size=n->size;
202
-				qm->real_used+=overhead;
183
+		qm_detach_free(qm, f);
184
+		/*mark it as "busy"*/
185
+		f->u.is_free=0;
186
+		
187
+		/*see if we'll use full frag, or we'll split it in 2*/
188
+		rest=f->size-size;
189
+		if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
190
+			f->size=size;
191
+			/*split the fragment*/
192
+			end=FRAG_END(f);
193
+			end->size=size;
194
+			n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
195
+			n->size=rest-FRAG_OVERHEAD;
196
+			FRAG_END(n)->size=n->size;
197
+			qm->real_used+=FRAG_OVERHEAD;
203 198
 #ifdef DBG_QM_MALLOC
204
-				end->check1=END_CHECK_PATTERN1;
205
-				end->check2=END_CHECK_PATTERN2;
206
-				/* frag created by malloc, mark it*/
207
-				n->file=file;
208
-				n->func="frag. from qm_malloc";
209
-				n->line=line;
210
-				n->check=ST_CHECK_PATTERN;
211
-/*				FRAG_END(n)->check1=END_CHECK_PATTERN1;
212
-				FRAG_END(n)->check2=END_CHECK_PATTERN2; */
199
+			end->check1=END_CHECK_PATTERN1;
200
+			end->check2=END_CHECK_PATTERN2;
201
+			/* frag created by malloc, mark it*/
202
+			n->file=file;
203
+			n->func="frag. from qm_malloc";
204
+			n->line=line;
205
+			n->check=ST_CHECK_PATTERN;
206
+/*			FRAG_END(n)->check1=END_CHECK_PATTERN1;
207
+			FRAG_END(n)->check2=END_CHECK_PATTERN2; */
213 208
 #endif
214
-				/* reinsert n in free list*/
215
-				qm_insert_free(qm, n);
216
-			}else{
217
-				/* we cannot split this fragment any more => alloc all of it*/
218
-			}
219
-			qm->real_used+=f->size;
220
-			qm->used+=f->size;
221
-			if (qm->max_real_used<qm->real_used)
222
-				qm->max_real_used=qm->real_used;
209
+			/* reinsert n in free list*/
210
+			qm_insert_free(qm, n);
211
+		}else{
212
+			/* we cannot split this fragment any more => alloc all of it*/
213
+		}
214
+		qm->real_used+=f->size;
215
+		qm->used+=f->size;
216
+		if (qm->max_real_used<qm->real_used)
217
+			qm->max_real_used=qm->real_used;
223 218
 #ifdef DBG_QM_MALLOC
224
-			f->file=file;
225
-			f->func=func;
226
-			f->line=line;
227
-			f->check=ST_CHECK_PATTERN;
219
+		f->file=file;
220
+		f->func=func;
221
+		f->line=line;
222
+		f->check=ST_CHECK_PATTERN;
228 223
 		/*  FRAG_END(f)->check1=END_CHECK_PATTERN1;
229 224
 			FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
230
-	DBG("qm_malloc(%x, %d) returns address %x on %d -th hit\n", qm, size,
225
+		DBG("qm_malloc(%x, %d) returns address %x on %d -th hit\n", qm, size,
231 226
 			(char*)f+sizeof(struct qm_frag), list_cntr );
232 227
 #endif
233
-			return (char*)f+sizeof(struct qm_frag);
234
-		}
228
+		return (char*)f+sizeof(struct qm_frag);
235 229
 	}
236 230
 	return 0;
237 231
 }
... ...
@@ -249,7 +312,6 @@ void qm_free(struct qm_block* qm, void* p)
249 249
 	struct qm_frag* prev;
250 250
 	struct qm_frag* next;
251 251
 	struct qm_frag_end *end;
252
-	unsigned int overhead;
253 252
 	unsigned int size;
254 253
 
255 254
 #ifdef DBG_QM_MALLOC
... ...
@@ -261,7 +323,7 @@ void qm_free(struct qm_block* qm, void* p)
261 261
 	}
262 262
 #endif
263 263
 	if (p==0) {
264
-		DBG("WARNING:qm_free: free(0) called\n");
264
+		LOG(L_WARN, "WARNING:qm_free: free(0) called\n");
265 265
 		return;
266 266
 	}
267 267
 	prev=next=0;
... ...
@@ -277,7 +339,6 @@ void qm_free(struct qm_block* qm, void* p)
277 277
 	DBG("qm_free: freeing block alloc'ed from %s: %s(%d)\n", f->file, f->func,
278 278
 			f->line);
279 279
 #endif
280
-	overhead=sizeof(struct qm_frag)+sizeof(struct qm_frag_end);
281 280
 	next=FRAG_NEXT(f);
282 281
 	size=f->size;
283 282
 	qm->used-=size;
... ...
@@ -285,11 +346,15 @@ void qm_free(struct qm_block* qm, void* p)
285 285
 #ifdef DBG_QM_MALLOC
286 286
 	qm_debug_frag(qm, f);
287 287
 #endif
288
+
289
+#ifdef QM_JOIN_FREE
290
+	/* join fragments if possible*/
291
+
288 292
 	if (((char*)next < (char*)qm->last_frag_end) &&( next->u.is_free)){
289 293
 		/* join */
290 294
 		qm_detach_free(qm, next);
291
-		size+=next->size+overhead;
292
-		qm->real_used-=overhead;
295
+		size+=next->size+FRAG_OVERHEAD;
296
+		qm->real_used-=FRAG_OVERHEAD;
293 297
 	}
294 298
 	
295 299
 	if (f > qm->first_frag){
... ...
@@ -302,13 +367,14 @@ void qm_free(struct qm_block* qm, void* p)
302 302
 		if (prev->u.is_free){
303 303
 			/*join*/
304 304
 			qm_detach_free(qm, prev);
305
-			size+=prev->size+overhead;
306
-			qm->real_used-=overhead;
305
+			size+=prev->size+FRAG_OVERHEAD;
306
+			qm->real_used-=FRAG_OVERHEAD;
307 307
 			f=prev;
308 308
 		}
309 309
 	}
310 310
 	f->size=size;
311 311
 	FRAG_END(f)->size=f->size;
312
+#endif /* QM_JOIN_FREE*/
312 313
 #ifdef DBG_QM_MALLOC
313 314
 	f->file=file;
314 315
 	f->func=func;
... ...
@@ -322,7 +388,8 @@ void qm_free(struct qm_block* qm, void* p)
322 322
 void qm_status(struct qm_block* qm)
323 323
 {
324 324
 	struct qm_frag* f;
325
-	int i;
325
+	int i,j;
326
+	int h;
326 327
 
327 328
 	LOG(L_INFO, "qm_status (%x):\n", qm);
328 329
 	if (!qm) return;
... ...
@@ -345,15 +412,19 @@ void qm_status(struct qm_block* qm)
345 345
 				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
346 346
 #endif
347 347
 	}
348
-	DBG("dumping free list:\n");
349
-	for (f=qm->free_lst.u.nxt_free,i=0; f!=&(qm->free_lst); f=f->u.nxt_free,
350
-			i++){
351
-		DBG("    %3d. %c  address=%x  size=%d\n", i, (f->u.is_free)?'a':'N',
352
-				(char*)f+sizeof(struct qm_frag), f->size);
348
+	LOG(L_INFO, "dumping free list:\n");
349
+	for(h=0,i=0;h<QM_HASH_SIZE;h++){
350
+		
351
+		for (f=qm->free_hash[h].head.u.nxt_free,j=0; 
352
+				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
353
+			LOG(L_INFO, "   %5d.[%3d:%3d] %c  address=%x  size=%d\n", i, h, j,
354
+					(f->u.is_free)?'a':'N',
355
+					(char*)f+sizeof(struct qm_frag), f->size);
353 356
 #ifdef DBG_QM_MALLOC
354
-		DBG("            %s from %s: %s(%d)\n", 
357
+			DBG("            %s from %s: %s(%d)\n", 
355 358
 				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
356 359
 #endif
360
+		}
357 361
 	}
358 362
 	LOG(L_INFO, "-----------------------------\n");
359 363
 }
... ...
@@ -7,6 +7,28 @@
7 7
 #define q_malloc_h
8 8
 
9 9
 
10
+
11
+/* defs*/
12
+
13
+#define ROUNDTO		16 /* size we round to, must be = 2^n */
14
+#define MIN_FRAG_SIZE	ROUNDTO
15
+
16
+
17
+
18
+#define QM_MALLOC_OPTIMIZE_FACTOR 10 /*used below */
19
+#define QM_MALLOC_OPTIMIZE  (1<<QM_MALLOC_OPTIMIZE_FACTOR)
20
+								/* size to optimize for,
21
+									(most allocs < this size),
22
+									must be 2^k */
23
+
24
+#define QM_HASH_SIZE (QM_MALLOC_OPTIMIZE/ROUNDTO + \
25
+		(32-QM_MALLOC_OPTIMIZE_FACTOR)+1)
26
+
27
+/* hash structure:
28
+ * 0 .... QM_MALLOC_OPTIMIZE/ROUNDTO  - small buckets, size increases with
29
+ *                            ROUNDTO from bucket to bucket
30
+ * +1 .... end -  size = 2^k, big buckets */
31
+
10 32
 struct qm_frag{
11 33
 	unsigned int size;
12 34
 	union{
... ...
@@ -31,6 +53,13 @@ struct qm_frag_end{
31 31
 };
32 32
 
33 33
 
34
+
35
+struct qm_frag_full{
36
+	struct qm_frag head;
37
+	struct qm_frag_end tail;
38
+};
39
+
40
+
34 41
 struct qm_block{
35 42
 	unsigned int size; /* total size */
36 43
 	unsigned int used; /* alloc'ed size*/
... ...
@@ -40,8 +69,8 @@ struct qm_block{
40 40
 	struct qm_frag* first_frag;
41 41
 	struct qm_frag_end* last_frag_end;
42 42
 	
43
-	struct qm_frag free_lst;
44
-	struct qm_frag_end free_lst_end;
43
+	struct qm_frag_full free_hash[QM_HASH_SIZE];
44
+	/*struct qm_frag_end free_lst_end;*/
45 45
 };
46 46
 
47 47
 
... ...
@@ -18,6 +18,10 @@
18 18
 
19 19
 #endif
20 20
 
21
+#ifdef FAST_LOCK
22
+#include "../fastlock.h"
23
+#endif
24
+
21 25
 
22 26
 /* define semun */
23 27
 #if defined(__GNU_LIBRARY__) && !defined(_SEM_SEMUN_UNDEFINED)
... ...
@@ -38,8 +42,12 @@
38 38
 static int shm_shmid=-1; /*shared memory id*/
39 39
 #endif
40 40
 
41
-
41
+#ifdef FAST_LOCK
42
+lock_t* mem_lock=0;
43
+#else
42 44
 int shm_semid=-1; /*semaphore id*/
45
+#endif
46
+
43 47
 static void* shm_mempool=(void*)-1;
44 48
 #ifdef VQ_MALLOC
45 49
 	struct vqm_block* shm_block;
... ...
@@ -153,6 +161,8 @@ int shm_mem_init()
153 153
 		shm_mem_destroy();
154 154
 		return -1;
155 155
 	}
156
+
157
+#ifndef FAST_LOCK
156 158
 	/* alloc a semaphore (for malloc)*/
157 159
 	shm_semid=semget(IPC_PRIVATE, 1, 0700);
158 160
 	if (shm_semid==-1){
... ...
@@ -170,6 +180,7 @@ int shm_mem_init()
170 170
 		shm_mem_destroy();
171 171
 		return -1;
172 172
 	}
173
+#endif
173 174
 	/* init it for malloc*/
174 175
 #	ifdef VQ_MALLOC
175 176
 		shm_block=vqm_malloc_init(shm_mempool, SHM_MEM_SIZE);
... ...
@@ -182,6 +193,11 @@ int shm_mem_init()
182 182
 		shm_mem_destroy();
183 183
 		return -1;
184 184
 	}
185
+#ifdef FAST_LOCK
186
+	mem_lock=shm_malloc_unsafe(sizeof(lock_t));
187
+	init_lock(*mem_lock);
188
+#endif
189
+	
185 190
 	DBG("shm_mem_init: success\n");
186 191
 	
187 192
 	return 0;
... ...
@@ -210,10 +226,12 @@ void shm_mem_destroy()
210 210
 		shm_shmid=-1;
211 211
 	}
212 212
 #endif
213
+#ifndef FAST_LOCK
213 214
 	if (shm_semid!=-1) {
214 215
 		semctl(shm_semid, 0, IPC_RMID, (union semun)0);
215 216
 		shm_semid=-1;
216 217
 	}
218
+#endif
217 219
 }
218 220
 
219 221
 
... ...
@@ -40,16 +40,30 @@
40 40
 #	define MY_FREE qm_free
41 41
 #	define MY_STATUS qm_status
42 42
 #endif
43
-extern int shm_semid;
43
+
44
+#ifdef FAST_LOCK
45
+#include "../fastlock.h"
46
+	
47
+	extern lock_t* mem_lock;
48
+#else
49
+extern  int shm_semid;
50
+#endif
51
+
44 52
 
45 53
 int shm_mem_init();
46 54
 void shm_mem_destroy();
47 55
 
48 56
 
57
+#ifdef FAST_LOCK
49 58
 
59
+#define shm_lock()    get_lock(mem_lock)
60
+#define shm_unlock()  release_lock(mem_lock)
61
+
62
+#else
50 63
 /* inline functions (do not move them to *.c, they won't be inlined anymore) */
51 64
 static inline void shm_lock()
52 65
 {
66
+
53 67
 	struct sembuf sop;
54 68
 	
55 69
 	sop.sem_num=0;
... ...
@@ -98,6 +112,7 @@ again:
98 98
 }
99 99
 
100 100
 /* ret -1 on error*/
101
+#endif
101 102
 
102 103
 
103 104
 
... ...
@@ -232,7 +232,6 @@ void lock_cleanup()
232 232
 
233 233
 
234 234
 
235
-
236 235
 /* lock semaphore s */
237 236
 #ifdef DBG_LOCK
238 237
 inline int _lock( ser_lock_t s , char *file, char *function, unsigned int line )
... ...
@@ -255,6 +254,7 @@ inline int _unlock( ser_lock_t s )
255 255
 #ifdef DBG_LOCK
256 256
 	DBG("DEBUG: unlock : entered from %s, %s:%d\n", file, function, line );
257 257
 #endif
258
+	
258 259
 	return change_semaphore( s, +1 );
259 260
 }
260 261
 
... ...
@@ -5,7 +5,7 @@
5 5
 #
6 6
 
7 7
 
8
-debug=1          # debug level (cmd line: -dddddddddd)
8
+debug=3          # debug level (cmd line: -dddddddddd)
9 9
 #fork=yes          # (cmd. line: -D)
10 10
 fork=no
11 11
 log_stderror=yes # (cmd line: -E)
... ...
@@ -17,8 +17,8 @@ check_via=no     # (cmd. line: -v)
17 17
 dns=on           # (cmd. line: -r)
18 18
 rev_dns=yes      # (cmd. line: -R)
19 19
 #port=5070
20
-#listen=127.0.0.1
21
-listen=192.168.57.33
20
+listen=127.0.0.1
21
+#listen=192.168.57.33
22 22
 loop_checks=0
23 23
 # for more info: sip_router -h
24 24
 
... ...
@@ -5,7 +5,7 @@
5 5
 #
6 6
 
7 7
 
8
-debug=9          # debug level (cmd line: -dddddddddd)
8
+debug=3          # debug level (cmd line: -dddddddddd)
9 9
 #fork=yes          # (cmd. line: -D)
10 10
 fork=no
11 11
 log_stderror=yes # (cmd line: -E)
... ...
@@ -27,7 +27,6 @@ loop_checks=0
27 27
 loadmodule "modules/tm/tm.so"
28 28
 loadmodule "modules/rr/rr.so"
29 29
 loadmodule "modules/maxfwd/maxfwd.so"
30
-loadmodule "modules/cpl/cpl.so"
31 30
 
32 31
 
33 32
 route{
... ...
@@ -46,24 +45,6 @@ route{
46 46
                    mf_add_maxfwd_header( "10" );
47 47
              };
48 48
 
49
-             if (method=="INVITE")
50
-             {
51
-                log("SER : runing CPL!! :)\n");
52
-                if ( !cpl_run_script() )
53
-                {
54
-                   log("SER : Error during running CPL script!\n");
55
-                }else{
56
-                   if ( cpl_is_response_reject() )
57
-                   {
58
-                       t_add_transaction();
59
-                       t_send_reply("486","I am not available!");
60
-                       drop();
61
-                   }else if ( cpl_is_response_redirect() ) {
62
-                         log("SER : redirect\n");
63
-                   };
64
-                };
65
-             };
66
-
67 49
              #if ( !rewriteFromRoute() )
68 50
              #{
69 51
                 log( " SER : no route found!\n");