@@ -53,7 +53,9 @@ ARCH = $(shell uname -s)
 
 DEFS+= -DNAME='"$(NAME)"' -DVERSION='"$(RELEASE)"' -DARCH='"$(ARCH)"' \
	 -DDNS_IP_HACK -DPKG_MALLOC -DSHM_MEM -DSHM_MMAP \
-	 -DVQ_MALLOC -DUSE_SYNONIM #-DBRUT_HACK #-DEXTRA_DEBUG #-DSTATIC_TM
+	 -DUSE_SYNONIM \
+	 -DFAST_LOCK -Di386
+	#-DBRUT_HACK #-DEXTRA_DEBUG #-DSTATIC_TM
 #-DEXTRA_DEBUG -DBRUT_HACK \
 #-DVQ_MALLOC -DDBG_LOCK #-DSTATS
 #-DDBG_QM_MALLOC #-DVQ_MALLOC #-DNO_DEBUG
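
The build change does two things: it drops -DVQ_MALLOC (so qm_malloc, reworked below with hash buckets, becomes the shared-memory allocator) and adds -DFAST_LOCK plus -Di386. FAST_LOCK switches shared-memory locking from a SysV semaphore to the new xchg-based spinlock, and i386 gates the inline assembly in the new header. The selection pattern the rest of the patch keys off, excerpted from the shm_mem.h hunks further down:

```c
#ifdef FAST_LOCK
#include "../fastlock.h"       /* xchg-based spinlock, i386 only */
extern lock_t* mem_lock;       /* lock word, carved out of the shared pool */
#define shm_lock()    get_lock(mem_lock)
#define shm_unlock()  release_lock(mem_lock)
#else
extern int shm_semid;          /* SysV semaphore fallback */
#endif
```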
new file mode 100644
@@ -0,0 +1,66 @@
+/*
+ * fast architecture-specific locking
+ *
+ * $Id$
+ *
+ *
+ */
+
+
+
+#ifndef fastlock_h
+#define fastlock_h
+
+
+#include <sched.h>
+
+
+#ifdef i386
+
+
+typedef volatile int lock_t;
+
+
+
+#define init_lock( l ) (l)=0
+
+
+
+/* test and set: returns 1 if the lock was already held, 0 if we acquired it */
+inline static int tsl(lock_t* lock)
+{
+	volatile char val;
+
+	val=1;
+	asm volatile(
+		" xchg %b0, %1" : "=q" (val), "=m" (*lock) : "0" (val) : "memory"
+	);
+	return val;
+}
+
+
+
+inline static void get_lock(lock_t* lock)
+{
+
+	while(tsl(lock)){
+		sched_yield();
+	}
+}
+
+
+
+inline static void release_lock(lock_t* lock)
+{
+	char val;
+
+	val=0;
+	asm volatile(
+		" xchg %b0, %1" : "=q" (val), "=m" (*lock) : "0" (val) : "memory"
+	);
+}
+
+#endif /* i386 */
+
+
+#endif /* fastlock_h */
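
tsl() is a test-and-set built on the x86 `xchg` instruction, which is implicitly atomic when one operand is memory: it swaps 1 into the lock word and returns the previous value, so 0 means the caller acquired the lock and 1 means it was already held. get_lock() spins on tsl() but yields the CPU between attempts; release_lock() swaps 0 back in. A minimal stand-alone check of that contract (illustrative, not part of the patch; compile on x86 with `gcc -Di386`, mirroring the Makefile change above, with fastlock.h in the include path):

```c
#include <stdio.h>
#include "fastlock.h"

int main()
{
	lock_t l;

	init_lock(l);             /* the macro takes the lvalue, not a pointer */
	printf("%d\n", tsl(&l));  /* 0: lock was free, we now hold it */
	printf("%d\n", tsl(&l));  /* 1: already held, caller must retry */
	release_lock(&l);
	printf("%d\n", tsl(&l));  /* 0: free again after release */
	return 0;
}
```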
@@ -26,6 +26,34 @@
 #define PREV_FRAG_END(f) \
	((struct qm_frag_end*)((char*)(f)-sizeof(struct qm_frag_end)))
 
+
+#define FRAG_OVERHEAD	(sizeof(struct qm_frag)+sizeof(struct qm_frag_end))
+
+
+#define ROUNDUP(s)	(((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))
+#define ROUNDDOWN(s)	(((s)%ROUNDTO)?((s)/ROUNDTO)*ROUNDTO:(s))
+
+
+
+	/* finds the hash value for s, s=ROUNDTO multiple*/
+#define GET_HASH(s)	( ((s)<QM_MALLOC_OPTIMIZE)?(s)/ROUNDTO: \
+			QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
+				QM_MALLOC_OPTIMIZE_FACTOR+1 )
+
+
+/* computes hash number for big buckets*/
+inline static int big_hash_idx(int s)
+{
+	int idx;
+	/* s is rounded => s = k*2^n (ROUNDTO=2^n)
+	 * index = i such that 2^(i+1) > s >= 2^i
+	 *
+	 * => index = position of the highest set bit in s*/
+	for (idx=31; !(s&0x80000000) ; s<<=1, idx--);
+	return idx;
+}
+
+
 #ifdef DBG_QM_MALLOC
 #define ST_CHECK_PATTERN   0xf0f0f0f0
 #define END_CHECK_PATTERN1 0xc0c0c0c0
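
The free list becomes an array of buckets: sizes below QM_MALLOC_OPTIMIZE hash to size/ROUNDTO (exact-size buckets, 16 bytes apart), while bigger sizes share one bucket per power of two, indexed via the highest set bit. A stand-alone sketch that reproduces the mapping with the patch's default constants (ROUNDTO=16, factor 10):

```c
#include <stdio.h>

#define ROUNDTO 16
#define QM_MALLOC_OPTIMIZE_FACTOR 10
#define QM_MALLOC_OPTIMIZE (1<<QM_MALLOC_OPTIMIZE_FACTOR)

/* position of the highest set bit, as in q_malloc.c */
static int big_hash_idx(int s)
{
	int idx;
	for (idx=31; !(s&0x80000000); s<<=1, idx--);
	return idx;
}

#define GET_HASH(s) ( ((s)<QM_MALLOC_OPTIMIZE)?(s)/ROUNDTO: \
		QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
			QM_MALLOC_OPTIMIZE_FACTOR+1 )

int main()
{
	printf("%d\n", GET_HASH(16));    /* 1:  one bucket per 16 bytes below 1K */
	printf("%d\n", GET_HASH(1008));  /* 63: last exact-size bucket */
	printf("%d\n", GET_HASH(1024));  /* 65: first power-of-two bucket
	                                  * (index 64 is never produced) */
	printf("%d\n", GET_HASH(4096));  /* 67 */
	printf("%d\n", GET_HASH(1<<30)); /* 85 */
	return 0;
}
```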
@@ -60,6 +88,27 @@ static void qm_debug_frag(struct qm_block* qm, struct qm_frag* f)
 
 
 
+static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
+{
+	struct qm_frag* f;
+	struct qm_frag* prev;
+	int hash;
+
+	hash=GET_HASH(frag->size);
+	for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
+			f=f->u.nxt_free){
+		if (frag->size <= f->size) break;
+	}
+	/*insert it here*/
+	prev=FRAG_END(f)->prev_free;
+	prev->u.nxt_free=frag;
+	FRAG_END(frag)->prev_free=prev;
+	frag->u.nxt_free=f;
+	FRAG_END(f)->prev_free=frag;
+}
+
+
+
 /* init malloc and return a qm_block*/
 struct qm_block* qm_malloc_init(char* address, unsigned int size)
 {
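
Each bucket's free list is circular and doubly linked through a per-bucket sentinel (the qm_frag_full head/tail pair added in q_malloc.h), with the forward pointer stored in the fragment head and the back pointer in the fragment end. qm_insert_free keeps every bucket sorted by ascending size, which only matters for the shared power-of-two buckets. A toy model of that insertion (simplified to one node struct instead of the split head/end layout; all names hypothetical):

```c
#include <stdio.h>

struct node { unsigned size; struct node *next, *prev; };

/* insert n before the first entry with size >= n->size;
 * the sentinel terminates the scan, so the list stays sorted */
static void insert_sorted(struct node* sentinel, struct node* n)
{
	struct node* f;

	for (f=sentinel->next; f!=sentinel; f=f->next)
		if (n->size <= f->size) break;
	n->prev=f->prev;
	n->next=f;
	f->prev->next=n;
	f->prev=n;
}

int main()
{
	struct node s={0,&s,&s};            /* empty list: sentinel points at itself */
	struct node a={48,0,0}, b={16,0,0}, c={32,0,0};
	struct node* f;

	insert_sorted(&s,&a);
	insert_sorted(&s,&b);
	insert_sorted(&s,&c);
	for (f=s.next; f!=&s; f=f->next)
		printf("%u ", f->size);         /* prints: 16 32 48 */
	printf("\n");
	return 0;
}
```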
@@ -67,17 +116,24 @@ struct qm_block* qm_malloc_init(char* address, unsigned int size)
 	char* end;
 	struct qm_block* qm;
 	unsigned int init_overhead;
+	int h;
 	
 	/* make address and size multiple of 8*/
-	start=(char*)( ((unsigned int)address%8)?((unsigned int)address+8)/8*8:
-			(unsigned int)address);
+	start=(char*)ROUNDUP((unsigned int) address);
+	printf("qm_malloc_init: QM_OPTIMIZE=%d, /ROUNDTO=%d\n",
+			QM_MALLOC_OPTIMIZE, QM_MALLOC_OPTIMIZE/ROUNDTO);
+	printf("qm_malloc_init: QM_HASH_SIZE=%d, qm_block size=%d\n",
+			QM_HASH_SIZE, sizeof(struct qm_block));
+	printf("qm_malloc_init(%x, %d), start=%x\n", address, size, start);
 	if (size<start-address) return 0;
 	size-=(start-address);
-	if (size <8) return 0;
-	size=(size%8)?(size-8)/8*8:size;
+	if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
+	size=ROUNDDOWN(size);
 	
 	init_overhead=sizeof(struct qm_block)+sizeof(struct qm_frag)+
 		sizeof(struct qm_frag_end);
+	printf("qm_malloc_init: size= %d, init_overhead=%d\n", size, init_overhead);
+	
 	if (size < init_overhead)
 	{
 		/* not enough mem to create our control structures !!!*/
@@ -95,42 +151,34 @@ struct qm_block* qm_malloc_init(char* address, unsigned int size)
 	qm->last_frag_end=(struct qm_frag_end*)(end-sizeof(struct qm_frag_end));
 	/* init initial fragment*/
 	qm->first_frag->size=size;
-	qm->first_frag->u.nxt_free=&(qm->free_lst);
 	qm->last_frag_end->size=size;
-	qm->last_frag_end->prev_free=&(qm->free_lst);
+	
 #ifdef DBG_QM_MALLOC
 	qm->first_frag->check=ST_CHECK_PATTERN;
 	qm->last_frag_end->check1=END_CHECK_PATTERN1;
 	qm->last_frag_end->check2=END_CHECK_PATTERN2;
 #endif
-	/* init free_lst* */
-	qm->free_lst.u.nxt_free=qm->first_frag;
-	qm->free_lst_end.prev_free=qm->first_frag;
-	qm->free_lst.size=0;
-	qm->free_lst_end.size=0;
+	/* init free_hash* */
+	for (h=0; h<QM_HASH_SIZE;h++){
+		qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
+		qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
+		qm->free_hash[h].head.size=0;
+		qm->free_hash[h].tail.size=0;
+	}
+	
+	/* link initial fragment into the free list*/
+	
+	qm_insert_free(qm, qm->first_frag);
+	
+	/*qm->first_frag->u.nxt_free=&(qm->free_lst);
+	qm->last_frag_end->prev_free=&(qm->free_lst);
+	*/
 	
 	
 	return qm;
 }
 
 
-static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
-{
-	struct qm_frag* f;
-	struct qm_frag* prev;
-	
-	for(f=qm->free_lst.u.nxt_free; f!=&(qm->free_lst); f=f->u.nxt_free){
-		if (frag->size < f->size) break;
-	}
-	/*insert it here*/
-	prev=FRAG_END(f)->prev_free;
-	prev->u.nxt_free=frag;
-	FRAG_END(frag)->prev_free=prev;
-	frag->u.nxt_free=f;
-	FRAG_END(f)->prev_free=frag;
-}
-
-
 
 static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
 {
@@ -148,6 +196,28 @@ static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
 
 
 
+static inline struct qm_frag* qm_find_free(struct qm_block* qm,
+										unsigned int size)
+{
+	int hash;
+	struct qm_frag* f;
+
+	for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
+		for (f=qm->free_hash[hash].head.u.nxt_free;
+				f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
+	#ifdef DBG_QM_MALLOC
+			list_cntr++;
+	#endif
+			if (f->size>=size) return f;
+		}
+	/*try in a bigger bucket*/
+	}
+	/* not found */
+	return 0;
+}
+
+
+
 #ifdef DBG_QM_MALLOC
 void* qm_malloc(struct qm_block* qm, unsigned int size, char* file, char* func,
 					unsigned int line)
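
qm_find_free starts at the bucket GET_HASH(size) selects and escalates to bigger buckets until something fits. In an exact-size bucket every fragment has precisely the hashed size, so the first entry (if any) matches immediately; only the power-of-two buckets are really scanned, and since qm_insert_free keeps them size-ordered, the first fragment with size>=size is also the smallest adequate one within that bucket. One caveat worth flagging: the list_cntr incremented under DBG_QM_MALLOC is not declared in this helper, so the function only compiles with DBG_QM_MALLOC off (as the Makefile leaves it) unless a counter with that name is made visible here.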
@@ -159,7 +229,6 @@ void* qm_malloc(struct qm_block* qm, unsigned int size)
 	struct qm_frag_end* end;
 	struct qm_frag* n;
 	unsigned int rest;
-	unsigned int overhead;
 	
 #ifdef DBG_QM_MALLOC
 	unsigned int list_cntr;
@@ -169,69 +238,63 @@ void* qm_malloc(struct qm_block* qm, unsigned int size)
 			line);
 #endif
 	/*size must be a multiple of 8*/
-	size=(size%8)?(size+8)/8*8:size;
+	size=ROUNDUP(size);
 	if (size>(qm->size-qm->real_used)) return 0;
-	if (qm->free_lst.u.nxt_free==&(qm->free_lst)) return 0;
 	/*search for a suitable free frag*/
-	for (f=qm->free_lst.u.nxt_free; f!=&(qm->free_lst); f=f->u.nxt_free){
-#ifdef DBG_QM_MALLOC
-		list_cntr++;
-#endif
-		
-		if (f->size>=size){
-			/* we found it!*/
-			/*detach it from the free list*/
+	f=qm_find_free(qm, size);
+
+	if (f!=0){
+		/* we found it!*/
+		/*detach it from the free list*/
 #ifdef DBG_QM_MALLOC
 		qm_debug_frag(qm, f);
 #endif
-			qm_detach_free(qm, f);
-			/*mark it as "busy"*/
-			f->u.is_free=0;
-
-			/*see if we'll use full frag, or we'll split it in 2*/
-			rest=f->size-size;
-			overhead=sizeof(struct qm_frag)+sizeof(struct qm_frag_end);
-			if (rest>overhead){
-				f->size=size;
-				/*split the fragment*/
-				end=FRAG_END(f);
-				end->size=size;
-				n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
-				n->size=rest-overhead;
-				FRAG_END(n)->size=n->size;
-				qm->real_used+=overhead;
+		qm_detach_free(qm, f);
+		/*mark it as "busy"*/
+		f->u.is_free=0;
+
+		/*see if we'll use full frag, or we'll split it in 2*/
+		rest=f->size-size;
+		if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
+			f->size=size;
+			/*split the fragment*/
+			end=FRAG_END(f);
+			end->size=size;
+			n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
+			n->size=rest-FRAG_OVERHEAD;
+			FRAG_END(n)->size=n->size;
+			qm->real_used+=FRAG_OVERHEAD;
 #ifdef DBG_QM_MALLOC
-				end->check1=END_CHECK_PATTERN1;
-				end->check2=END_CHECK_PATTERN2;
-				/* frag created by malloc, mark it*/
-				n->file=file;
-				n->func="frag. from qm_malloc";
-				n->line=line;
-				n->check=ST_CHECK_PATTERN;
-/*				FRAG_END(n)->check1=END_CHECK_PATTERN1;
-				FRAG_END(n)->check2=END_CHECK_PATTERN2; */
+			end->check1=END_CHECK_PATTERN1;
+			end->check2=END_CHECK_PATTERN2;
+			/* frag created by malloc, mark it*/
+			n->file=file;
+			n->func="frag. from qm_malloc";
+			n->line=line;
+			n->check=ST_CHECK_PATTERN;
+/*			FRAG_END(n)->check1=END_CHECK_PATTERN1;
+			FRAG_END(n)->check2=END_CHECK_PATTERN2; */
 #endif
-			/* reinsert n in free list*/
-			qm_insert_free(qm, n);
-		}else{
-			/* we cannot split this fragment any more => alloc all of it*/
-		}
-		qm->real_used+=f->size;
-		qm->used+=f->size;
-		if (qm->max_real_used<qm->real_used)
-			qm->max_real_used=qm->real_used;
+			/* reinsert n in free list*/
+			qm_insert_free(qm, n);
+		}else{
+			/* we cannot split this fragment any more => alloc all of it*/
+		}
+		qm->real_used+=f->size;
+		qm->used+=f->size;
+		if (qm->max_real_used<qm->real_used)
+			qm->max_real_used=qm->real_used;
 #ifdef DBG_QM_MALLOC
-		f->file=file;
-		f->func=func;
-		f->line=line;
-		f->check=ST_CHECK_PATTERN;
+		f->file=file;
+		f->func=func;
+		f->line=line;
+		f->check=ST_CHECK_PATTERN;
 /*	FRAG_END(f)->check1=END_CHECK_PATTERN1;
 	FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
-		DBG("qm_malloc(%x, %d) returns address %x on %d -th hit\n", qm, size,
+	DBG("qm_malloc(%x, %d) returns address %x on %d -th hit\n", qm, size,
 		(char*)f+sizeof(struct qm_frag), list_cntr );
 #endif
-			return (char*)f+sizeof(struct qm_frag);
-		}
+		return (char*)f+sizeof(struct qm_frag);
 	}
 	return 0;
 }
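
The split test changes from rest>overhead to rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE), so a remainder is carved off only if it can hold its own header pair plus at least one minimal fragment. Worked numbers (illustrative; the real FRAG_OVERHEAD is sizeof-based — 16 bytes on a 32-bit non-debug build, where qm_frag and qm_frag_end are 8 bytes each):

```c
#include <stdio.h>

#define ROUNDTO        16
#define MIN_FRAG_SIZE  ROUNDTO
#define FRAG_OVERHEAD  16   /* assumed: sizeof(struct qm_frag)+sizeof(struct qm_frag_end) */
#define ROUNDUP(s) (((s)%ROUNDTO)?((s)+ROUNDTO)/ROUNDTO*ROUNDTO:(s))

int main()
{
	unsigned frag=256;            /* size of the free fragment we found */
	unsigned want=ROUNDUP(100);   /* request rounds up to 112 */
	unsigned rest=frag-want;      /* 144 bytes left over */

	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE))
		/* worth splitting: the remainder becomes a free fragment
		 * with rest-FRAG_OVERHEAD = 128 usable bytes */
		printf("split: use %u, new free frag %u\n", want, rest-FRAG_OVERHEAD);
	else
		/* remainder too small to carry its own headers: hand out all of it */
		printf("no split: use %u\n", frag);
	return 0;
}
```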
@@ -249,7 +312,6 @@ void qm_free(struct qm_block* qm, void* p)
 	struct qm_frag* prev;
 	struct qm_frag* next;
 	struct qm_frag_end *end;
-	unsigned int overhead;
 	unsigned int size;
 
 #ifdef DBG_QM_MALLOC
@@ -261,7 +323,7 @@ void qm_free(struct qm_block* qm, void* p)
 	}
 #endif
 	if (p==0) {
-		DBG("WARNING:qm_free: free(0) called\n");
+		LOG(L_WARN, "WARNING:qm_free: free(0) called\n");
 		return;
 	}
 	prev=next=0;
@@ -277,7 +339,6 @@ void qm_free(struct qm_block* qm, void* p)
 	DBG("qm_free: freeing block alloc'ed from %s: %s(%d)\n", f->file, f->func,
 			f->line);
 #endif
-	overhead=sizeof(struct qm_frag)+sizeof(struct qm_frag_end);
 	next=FRAG_NEXT(f);
 	size=f->size;
 	qm->used-=size;
@@ -285,11 +346,15 @@ void qm_free(struct qm_block* qm, void* p)
 #ifdef DBG_QM_MALLOC
 	qm_debug_frag(qm, f);
 #endif
+
+#ifdef QM_JOIN_FREE
+	/* join fragments if possible*/
+
 	if (((char*)next < (char*)qm->last_frag_end) &&( next->u.is_free)){
 		/* join */
 		qm_detach_free(qm, next);
-		size+=next->size+overhead;
-		qm->real_used-=overhead;
+		size+=next->size+FRAG_OVERHEAD;
+		qm->real_used-=FRAG_OVERHEAD;
 	}
 	
 	if (f > qm->first_frag){
@@ -302,13 +367,14 @@ void qm_free(struct qm_block* qm, void* p)
 		if (prev->u.is_free){
 			/*join*/
 			qm_detach_free(qm, prev);
-			size+=prev->size+overhead;
-			qm->real_used-=overhead;
+			size+=prev->size+FRAG_OVERHEAD;
+			qm->real_used-=FRAG_OVERHEAD;
 			f=prev;
 		}
 	}
 	f->size=size;
 	FRAG_END(f)->size=f->size;
+#endif /* QM_JOIN_FREE*/
 #ifdef DBG_QM_MALLOC
 	f->file=file;
 	f->func=func;
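
Coalescing of adjacent free fragments in qm_free is now compiled only under QM_JOIN_FREE, which this patch does not add to the Makefile DEFS — so by default freed fragments return to their bucket unmerged. Presumably the point is to keep the popular bucket sizes populated and skip the join/re-split churn on hot paths; the cost is that fragmentation becomes permanent under mixed allocation patterns, so the flag is worth re-measuring under real traffic.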
@@ -322,7 +388,8 @@ void qm_free(struct qm_block* qm, void* p)
 void qm_status(struct qm_block* qm)
 {
 	struct qm_frag* f;
-	int i;
+	int i,j;
+	int h;
 	
 	LOG(L_INFO, "qm_status (%x):\n", qm);
 	if (!qm) return;
@@ -345,15 +412,19 @@ void qm_status(struct qm_block* qm)
 				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
 #endif
 	}
-	DBG("dumping free list:\n");
-	for (f=qm->free_lst.u.nxt_free,i=0; f!=&(qm->free_lst); f=f->u.nxt_free,
-			i++){
-		DBG("    %3d. %c  address=%x  size=%d\n", i, (f->u.is_free)?'a':'N',
-				(char*)f+sizeof(struct qm_frag), f->size);
+	LOG(L_INFO, "dumping free list:\n");
+	for(h=0,i=0;h<QM_HASH_SIZE;h++){
+		
+		for (f=qm->free_hash[h].head.u.nxt_free,j=0;
+				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
+			LOG(L_INFO, "   %5d.[%3d:%3d] %c  address=%x  size=%d\n", i, h, j,
+				(f->u.is_free)?'a':'N',
+				(char*)f+sizeof(struct qm_frag), f->size);
 #ifdef DBG_QM_MALLOC
-		DBG("            %s from %s: %s(%d)\n", 
+			DBG("            %s from %s: %s(%d)\n",
 				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
 #endif
+		}
 	}
 	LOG(L_INFO, "-----------------------------\n");
 }
@@ -7,6 +7,28 @@
 #define q_malloc_h
 
 
+
+/* defs*/
+
+#define ROUNDTO		16 /* size we round to, must be = 2^n */
+#define MIN_FRAG_SIZE	ROUNDTO
+
+
+
+#define QM_MALLOC_OPTIMIZE_FACTOR 10 /*used below */
+#define QM_MALLOC_OPTIMIZE  (1<<QM_MALLOC_OPTIMIZE_FACTOR)
+								/* size to optimize for,
+									(most allocs < this size),
+									must be 2^k */
+
+#define QM_HASH_SIZE (QM_MALLOC_OPTIMIZE/ROUNDTO + \
+		(32-QM_MALLOC_OPTIMIZE_FACTOR)+1)
+
+/* hash structure:
+ * 0 .... QM_MALLOC_OPTIMIZE/ROUNDTO  - small buckets, size increases with
+ *                                      ROUNDTO from bucket to bucket
+ * +1 .... end - size = 2^k, big buckets */
+
 struct qm_frag{
 	unsigned int size;
 	union{
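
With these defaults the arithmetic works out as: QM_MALLOC_OPTIMIZE = 2^10 = 1024, so QM_HASH_SIZE = 1024/16 + (32-10) + 1 = 64 + 22 + 1 = 87 buckets. Indices 1..63 cover the 16-byte-granular sizes below 1024 (index 0 is never hit, since sizes are at least MIN_FRAG_SIZE), and the remaining buckets take one power of two each, up to index 86 for the largest 32-bit sizes.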
@@ -31,6 +53,13 @@ struct qm_frag_end{
 };
 
 
+
+struct qm_frag_full{
+	struct qm_frag head;
+	struct qm_frag_end tail;
+};
+
+
 struct qm_block{
 	unsigned int size; /* total size */
 	unsigned int used; /* alloc'ed size*/
@@ -40,8 +69,8 @@ struct qm_block{
 	struct qm_frag* first_frag;
 	struct qm_frag_end* last_frag_end;
 	
-	struct qm_frag free_lst;
-	struct qm_frag_end free_lst_end;
+	struct qm_frag_full free_hash[QM_HASH_SIZE];
+	/*struct qm_frag_end free_lst_end;*/
 };
 
 
@@ -18,6 +18,10 @@ |
18 | 18 |
|
19 | 19 |
#endif |
20 | 20 |
|
21 |
+#ifdef FAST_LOCK |
|
22 |
+#include "../fastlock.h" |
|
23 |
+#endif |
|
24 |
+ |
|
21 | 25 |
|
22 | 26 |
/* define semun */ |
23 | 27 |
#if defined(__GNU_LIBRARY__) && !defined(_SEM_SEMUN_UNDEFINED) |
@@ -38,8 +42,12 @@
 static int shm_shmid=-1; /*shared memory id*/
 #endif
 
-
+#ifdef FAST_LOCK
+lock_t* mem_lock=0;
+#else
 int shm_semid=-1; /*semaphore id*/
+#endif
+
 static void* shm_mempool=(void*)-1;
 #ifdef VQ_MALLOC
 	struct vqm_block* shm_block;
@@ -153,6 +161,8 @@ int shm_mem_init()
 		shm_mem_destroy();
 		return -1;
 	}
+	
+#ifndef FAST_LOCK
 	/* alloc a semaphore (for malloc)*/
 	shm_semid=semget(IPC_PRIVATE, 1, 0700);
 	if (shm_semid==-1){
@@ -170,6 +180,7 @@ int shm_mem_init()
 		shm_mem_destroy();
 		return -1;
 	}
+#endif
 	/* init it for malloc*/
 #	ifdef VQ_MALLOC
 		shm_block=vqm_malloc_init(shm_mempool, SHM_MEM_SIZE);
@@ -182,6 +193,11 @@ int shm_mem_init()
 		shm_mem_destroy();
 		return -1;
 	}
+#ifdef FAST_LOCK
+	mem_lock=shm_malloc_unsafe(sizeof(lock_t));
+	init_lock(*mem_lock);
+#endif
+	
 	DBG("shm_mem_init: success\n");
 	
 	return 0;
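
The spinlock can replace a SysV semaphore here only because the lock word itself lives in the shared pool: mem_lock is allocated with shm_malloc_unsafe() before any locking exists, and every process then spins on the same address. A stand-alone analog of that setup (illustrative only: an anonymous MAP_SHARED mapping instead of SER's shmget/mmap pool, with made-up sizes and a toy counter):

```c
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include "fastlock.h"

int main()
{
	/* shared anonymous mapping: the child sees the same physical pages */
	char* pool=mmap(0, 4096, PROT_READ|PROT_WRITE,
			MAP_SHARED|MAP_ANONYMOUS, -1, 0);
	lock_t* mem_lock=(lock_t*)pool;            /* carve the lock out of the pool */
	int* counter=(int*)(pool+sizeof(lock_t));  /* ...and some shared data */
	int i;

	init_lock(*mem_lock);
	if (fork()==0){
		for (i=0; i<100000; i++){
			get_lock(mem_lock); (*counter)++; release_lock(mem_lock);
		}
		_exit(0);
	}
	for (i=0; i<100000; i++){
		get_lock(mem_lock); (*counter)++; release_lock(mem_lock);
	}
	wait(0);
	printf("counter=%d (expected 200000)\n", *counter);
	return 0;
}
```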
@@ -210,10 +226,12 @@ void shm_mem_destroy()
 		shm_shmid=-1;
 	}
 #endif
+#ifndef FAST_LOCK
 	if (shm_semid!=-1) {
 		semctl(shm_semid, 0, IPC_RMID, (union semun)0);
 		shm_semid=-1;
 	}
+#endif
 }
 
 
@@ -40,16 +40,30 @@
 #	define MY_FREE qm_free
 #	define MY_STATUS qm_status
 #endif
-extern int shm_semid;
+
+#ifdef FAST_LOCK
+#include "../fastlock.h"
+
+	extern lock_t* mem_lock;
+#else
+extern int shm_semid;
+#endif
+
 
 int shm_mem_init();
 void shm_mem_destroy();
 
 
+#ifdef FAST_LOCK
 
+#define shm_lock()    get_lock(mem_lock)
+#define shm_unlock()  release_lock(mem_lock)
+
+#else
 /* inline functions (do not move them to *.c, they won't be inlined anymore) */
 static inline void shm_lock()
 {
+
 	struct sembuf sop;
 	
 	sop.sem_num=0;
@@ -98,6 +112,7 @@ again:
 }
 
 /* ret -1 on error*/
+#endif
 
 
 
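
With FAST_LOCK on, shm_lock()/shm_unlock() collapse from a semop() system call per operation to a single atomic xchg in user space — the whole point of the patch for a malloc lock taken on every shared-memory allocation. The sched_yield() inside get_lock() keeps a contended process from burning its full timeslice spinning, and the #endif added after the semaphore helpers closes the new #ifdef FAST_LOCK/#else block, so the inline SysV versions remain the fallback for non-x86 builds.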
@@ -232,7 +232,6 @@ void lock_cleanup()
 
 
 
-
 /* lock semaphore s */
 #ifdef DBG_LOCK
 inline int _lock( ser_lock_t s , char *file, char *function, unsigned int line )
@@ -255,6 +254,7 @@ inline int _unlock( ser_lock_t s )
 #ifdef DBG_LOCK
 	DBG("DEBUG: unlock : entered from %s, %s:%d\n", file, function, line );
 #endif
+	
 	return change_semaphore( s, +1 );
 }
 
@@ -5,7 +5,7 @@
 #
 
 
-debug=1          # debug level (cmd line: -dddddddddd)
+debug=3          # debug level (cmd line: -dddddddddd)
 #fork=yes          # (cmd. line: -D)
 fork=no
 log_stderror=yes # (cmd line: -E)
@@ -17,8 +17,8 @@ check_via=no     # (cmd. line: -v)
 dns=on           # (cmd. line: -r)
 rev_dns=yes      # (cmd. line: -R)
 #port=5070
-#listen=127.0.0.1
-listen=192.168.57.33
+listen=127.0.0.1
+#listen=192.168.57.33
 loop_checks=0
 # for more info: sip_router -h
 
@@ -5,7 +5,7 @@ |
5 | 5 |
# |
6 | 6 |
|
7 | 7 |
|
8 |
-debug=9 # debug level (cmd line: -dddddddddd) |
|
8 |
+debug=3 # debug level (cmd line: -dddddddddd) |
|
9 | 9 |
#fork=yes # (cmd. line: -D) |
10 | 10 |
fork=no |
11 | 11 |
log_stderror=yes # (cmd line: -E) |
... | ... |
@@ -27,7 +27,6 @@ loop_checks=0 |
27 | 27 |
loadmodule "modules/tm/tm.so" |
28 | 28 |
loadmodule "modules/rr/rr.so" |
29 | 29 |
loadmodule "modules/maxfwd/maxfwd.so" |
30 |
-loadmodule "modules/cpl/cpl.so" |
|
31 | 30 |
|
32 | 31 |
|
33 | 32 |
route{ |
... | ... |
@@ -46,24 +45,6 @@ route{ |
46 | 45 |
mf_add_maxfwd_header( "10" ); |
47 | 46 |
}; |
48 | 47 |
|
49 |
- if (method=="INVITE") |
|
50 |
- { |
|
51 |
- log("SER : runing CPL!! :)\n"); |
|
52 |
- if ( !cpl_run_script() ) |
|
53 |
- { |
|
54 |
- log("SER : Error during running CPL script!\n"); |
|
55 |
- }else{ |
|
56 |
- if ( cpl_is_response_reject() ) |
|
57 |
- { |
|
58 |
- t_add_transaction(); |
|
59 |
- t_send_reply("486","I am not available!"); |
|
60 |
- drop(); |
|
61 |
- }else if ( cpl_is_response_redirect() ) { |
|
62 |
- log("SER : redirect\n"); |
|
63 |
- }; |
|
64 |
- }; |
|
65 |
- }; |
|
66 |
- |
|
67 | 48 |
#if ( !rewriteFromRoute() ) |
68 | 49 |
#{ |
69 | 50 |
log( " SER : no route found!\n"); |