... | ... |
@@ -47,12 +47,14 @@ ARCH = $(shell uname -s) |
47 | 47 |
# -DVQ_MALLOC |
48 | 48 |
# additional option to PKG_MALLOC which utilizes a fater then |
49 | 49 |
# qm version |
50 |
+# -DDBG_MALLOC |
|
51 |
+# issues additional debugging information if lock/unlock is called |
|
50 | 52 |
# |
51 | 53 |
|
52 | 54 |
DEFS+= -DNAME='"$(NAME)"' -DVERSION='"$(RELEASE)"' -DARCH='"$(ARCH)"' \ |
53 | 55 |
-DDNS_IP_HACK -DPKG_MALLOC -DSHM_MEM -DSHM_MMAP \ |
54 | 56 |
-DEXTRA_DEBUG \ |
55 |
- -DVQ_MALLOC #-DSTATS |
|
57 |
+ -DDBG_QM_MALLOC -DVQ_MALLOC -DDBG_LOCK #-DSTATS |
|
56 | 58 |
#-DDBG_QM_MALLOC #-DVQ_MALLOC #-DNO_DEBUG |
57 | 59 |
#-DNO_DEBUG #-DDBG_QM_MALLOC |
58 | 60 |
#-DEXTRA_DEBUG |
... | ... |
@@ -395,7 +395,8 @@ int run_actions(struct action* a, struct sip_msg* msg) |
395 | 395 |
} |
396 | 396 |
|
397 | 397 |
if (a==0){ |
398 |
- LOG(L_ERR, "WARNING: run_actions: null action list\n"); |
|
398 |
+ LOG(L_ERR, "WARNING: run_actions: null action list (rec_level=%d)\n", |
|
399 |
+ rec_lev); |
|
399 | 400 |
ret=0; |
400 | 401 |
} |
401 | 402 |
|
... | ... |
@@ -47,7 +47,111 @@ static void* shm_mempool=(void*)-1; |
47 | 47 |
struct qm_block* shm_block; |
48 | 48 |
#endif |
49 | 49 |
|
50 |
+#define sh_realloc(_p, _size) ({ \ |
|
51 |
+ char *_c; \ |
|
52 |
+ shm_lock(); \ |
|
53 |
+ shm_free_unsafe( (_p) ); \ |
|
54 |
+ _c=shm_malloc_unsafe( (_size) ); \ |
|
55 |
+ shm_unlock(); \ |
|
56 |
+ _c; }) |
|
50 | 57 |
|
58 |
+/* check whether the current buffer perhaps has enough space for the new size |
|
59 |
+   (It is beneficial to do so because vq_malloc is pretty stateful |
|
60 |
+ and if we ask for a new buffer size, we can still make it happy |
|
61 |
+ with current buffer); if so, we return current buffer again; |
|
62 |
+ otherwise, we free it, allocate a new one and return it; no |
|
63 |
+ guarantee for buffer content; if allocation fails, we return |
|
64 |
+ NULL |
|
65 |
+*/ |
|
66 |
+#ifdef DBG_QM_MALLOC |
|
67 |
+void* _shm_resize( void* p, unsigned int s, char* file, char* func, unsigned int line) |
|
68 |
+#else |
|
69 |
+void* _shm_resize( void* p , unsigned int s) |
|
70 |
+#endif |
|
71 |
+{ |
|
72 |
+ char *c; |
|
73 |
+#ifdef VQ_MALLOC |
|
74 |
+ struct vqm_frag *f; |
|
75 |
+#else |
|
76 |
+# warning shm_resize performs suboptimally without VQ_MALLOC! |
|
77 |
+#endif |
|
78 |
+ |
|
79 |
+ if (p==0) { |
|
80 |
+ DBG("WARNING:vqm_resize: resize(0) called\n"); |
|
81 |
+ return shm_malloc( s ); |
|
82 |
+ } |
|
83 |
+ |
|
84 |
+# ifdef VQ_MALLOC |
|
85 |
+ f=(struct vqm_frag*) ((char*)p-sizeof(struct vqm_frag)); |
|
86 |
+# ifdef DBG_QM_MALLOC |
|
87 |
+ DBG("_shm_resize(%x, %d), called from %s: %s(%d)\n", p, s, file, func, line); |
|
88 |
+ VQM_DEBUG_FRAG(shm_block, f); |
|
89 |
+ if (p>(void *)shm_block->core_end || p<(void*)shm_block->init_core){ |
|
90 |
+ LOG(L_CRIT, "BUG: vqm_free: bad pointer %x (out of memory block!) - " |
|
91 |
+ "aborting\n", p); |
|
92 |
+ abort(); |
|
93 |
+ } |
|
94 |
+# endif |
|
95 |
+ if (s <= f->size-VQM_OVERHEAD) { |
|
96 |
+# ifdef DBG_QM_MALLOC |
|
97 |
+ DBG("DEBUG: happy guy -- you reused a memory fragment!\n"); |
|
98 |
+# endif |
|
99 |
+ return p; |
|
100 |
+ }; |
|
101 |
+#endif |
|
102 |
+ /* we can't make the request happy with current size */ |
|
103 |
+ return sh_realloc( p, s ); |
|
104 |
+} |
|
105 |
+ |
|
106 |
+ |
|
107 |
+inline void shm_lock() |
|
108 |
+{ |
|
109 |
+ struct sembuf sop; |
|
110 |
+ |
|
111 |
+ sop.sem_num=0; |
|
112 |
+ sop.sem_op=-1; /*down*/ |
|
113 |
+ sop.sem_flg=0 /*SEM_UNDO*/; |
|
114 |
+again: |
|
115 |
+ semop(shm_semid, &sop, 1); |
|
116 |
+#if 0 |
|
117 |
+ switch(ret){ |
|
118 |
+ case 0: /*ok*/ |
|
119 |
+ break; |
|
120 |
+ case EINTR: /*interrupted by signal, try again*/ |
|
121 |
+ DBG("sh_lock: interrupted by signal, trying again...\n"); |
|
122 |
+ goto again; |
|
123 |
+ default: |
|
124 |
+ LOG(L_ERR, "ERROR: sh_lock: error waiting on semaphore: %s\n", |
|
125 |
+ strerror(errno)); |
|
126 |
+ } |
|
127 |
+#endif |
|
128 |
+} |
|
129 |
+ |
|
130 |
+ |
|
131 |
+ |
|
132 |
+inline void shm_unlock() |
|
133 |
+{ |
|
134 |
+ struct sembuf sop; |
|
135 |
+ |
|
136 |
+ sop.sem_num=0; |
|
137 |
+ sop.sem_op=1; /*up*/ |
|
138 |
+ sop.sem_flg=0 /*SEM_UNDO*/; |
|
139 |
+again: |
|
140 |
+ semop(shm_semid, &sop, 1); |
|
141 |
+#if 0 |
|
142 |
+ /*should ret immediately*/ |
|
143 |
+ switch(ret){ |
|
144 |
+ case 0: /*ok*/ |
|
145 |
+ break; |
|
146 |
+ case EINTR: /*interrupted by signal, try again*/ |
|
147 |
+ DBG("sh_lock: interrupted by signal, trying again...\n"); |
|
148 |
+ goto again; |
|
149 |
+ default: |
|
150 |
+ LOG(L_ERR, "ERROR: sh_lock: error waiting on semaphore: %s\n", |
|
151 |
+ strerror(errno)); |
|
152 |
+ } |
|
153 |
+#endif |
|
154 |
+} |
|
51 | 155 |
|
52 | 156 |
/* ret -1 on erro*/ |
53 | 157 |
int shm_mem_init() |
... | ... |
@@ -44,113 +44,61 @@ extern int shm_semid; |
44 | 44 |
|
45 | 45 |
int shm_mem_init(); |
46 | 46 |
void shm_mem_destroy(); |
47 |
+inline void shm_lock(); |
|
48 |
+inline void shm_unlock(); |
|
47 | 49 |
|
48 | 50 |
|
49 | 51 |
|
50 |
-inline static void shm_lock() |
|
51 |
-{ |
|
52 |
- struct sembuf sop; |
|
53 |
- |
|
54 |
- sop.sem_num=0; |
|
55 |
- sop.sem_op=-1; /*down*/ |
|
56 |
- sop.sem_flg=0 /*SEM_UNDO*/; |
|
57 |
-again: |
|
58 |
- semop(shm_semid, &sop, 1); |
|
59 |
-#if 0 |
|
60 |
- switch(ret){ |
|
61 |
- case 0: /*ok*/ |
|
62 |
- break; |
|
63 |
- case EINTR: /*interrupted by signal, try again*/ |
|
64 |
- DBG("sh_lock: interrupted by signal, trying again...\n"); |
|
65 |
- goto again; |
|
66 |
- default: |
|
67 |
- LOG(L_ERR, "ERROR: sh_lock: error waiting on semaphore: %s\n", |
|
68 |
- strerror(errno)); |
|
69 |
- } |
|
70 |
-#endif |
|
71 |
-} |
|
72 |
- |
|
73 |
- |
|
74 |
- |
|
75 |
-inline static void shm_unlock() |
|
76 |
-{ |
|
77 |
- struct sembuf sop; |
|
78 |
- |
|
79 |
- sop.sem_num=0; |
|
80 |
- sop.sem_op=1; /*up*/ |
|
81 |
- sop.sem_flg=0 /*SEM_UNDO*/; |
|
82 |
-again: |
|
83 |
- semop(shm_semid, &sop, 1); |
|
84 |
-#if 0 |
|
85 |
- /*should ret immediately*/ |
|
86 |
- switch(ret){ |
|
87 |
- case 0: /*ok*/ |
|
88 |
- break; |
|
89 |
- case EINTR: /*interrupted by signal, try again*/ |
|
90 |
- DBG("sh_lock: interrupted by signal, trying again...\n"); |
|
91 |
- goto again; |
|
92 |
- default: |
|
93 |
- LOG(L_ERR, "ERROR: sh_lock: error waiting on semaphore: %s\n", |
|
94 |
- strerror(errno)); |
|
95 |
- } |
|
96 |
-#endif |
|
97 |
-} |
|
98 |
- |
|
99 |
- |
|
100 | 52 |
|
101 | 53 |
#ifdef DBG_QM_MALLOC |
102 |
-#define shm_malloc(size) \ |
|
54 |
+ |
|
55 |
+#define shm_malloc_unsafe(_size ) \ |
|
56 |
+ MY_MALLOC(shm_block, (_size), __FILE__, __FUNCTION__, __LINE__ ) |
|
57 |
+#define shm_malloc(_size) \ |
|
103 | 58 |
({\ |
104 | 59 |
void *p;\ |
105 | 60 |
\ |
106 |
- /*if (shm_lock()==0){*/\ |
|
107 |
- shm_lock();\ |
|
108 |
- p=MY_MALLOC(shm_block, (size), __FILE__, __FUNCTION__, __LINE__);\ |
|
109 |
- shm_unlock();\ |
|
110 |
- /* \ |
|
111 |
- }else{ \ |
|
112 |
- p=0;\ |
|
113 |
- }*/ \ |
|
114 |
- p; \ |
|
61 |
+ shm_lock();\ |
|
62 |
+ p=shm_malloc_unsafe( (_size) );\ |
|
63 |
+ shm_unlock();\ |
|
64 |
+ p; \ |
|
115 | 65 |
}) |
116 | 66 |
|
117 |
- |
|
118 |
- |
|
119 |
-#define shm_free(p) \ |
|
67 |
+#define shm_free_unsafe( _p ) \ |
|
68 |
+ MY_FREE( shm_block, (_p), __FILE__, __FUNCTION__, __LINE__ ) |
|
69 |
+#define shm_free(_p) \ |
|
120 | 70 |
do { \ |
121 | 71 |
shm_lock(); \ |
122 |
- MY_FREE(shm_block, (p), __FILE__, __FUNCTION__, __LINE__); \ |
|
72 |
+ shm_free_unsafe( (_p)); \ |
|
123 | 73 |
shm_unlock(); \ |
124 | 74 |
}while(0) |
125 | 75 |
|
76 |
+#define shm_resize(_p, _s ) \ |
|
77 |
+ _shm_resize( (_p), (_s), __FILE__, __FUNCTION__, __LINE__) |
|
126 | 78 |
|
127 | 79 |
#else |
128 | 80 |
|
129 |
- |
|
81 |
+#define shm_malloc_unsafe(_size) MY_MALLOC(shm_block, (_size)) |
|
130 | 82 |
#define shm_malloc(size) \ |
131 | 83 |
({\ |
132 | 84 |
void *p;\ |
133 | 85 |
\ |
134 |
- /*if (shm_lock()==0){*/\ |
|
135 | 86 |
shm_lock();\ |
136 |
- p=MY_MALLOC(shm_block, (size));\ |
|
87 |
+ p=shm_malloc_unsafe(size); \ |
|
137 | 88 |
shm_unlock();\ |
138 |
- /* \ |
|
139 |
- }else{ \ |
|
140 |
- p=0;\ |
|
141 |
- }*/ \ |
|
142 | 89 |
p; \ |
143 | 90 |
}) |
144 | 91 |
|
145 | 92 |
|
146 |
- |
|
147 |
-#define shm_free(p) \ |
|
93 |
+#define shm_free_unsafe( _p ) MY_FREE(shm_block, (_p)) |
|
94 |
+#define shm_free(_p) \ |
|
148 | 95 |
do { \ |
149 | 96 |
shm_lock(); \ |
150 |
- MY_FREE(shm_block, (p)); \ |
|
97 |
+ shm_free_unsafe( _p ); \ |
|
151 | 98 |
shm_unlock(); \ |
152 | 99 |
}while(0) |
153 | 100 |
|
101 |
+#define shm_resize(_p, _s) _shm_resize( (_p), (_s)) |
|
154 | 102 |
|
155 | 103 |
#endif |
156 | 104 |
|
... | ... |
@@ -163,7 +111,7 @@ do { \ |
163 | 111 |
}while(0) |
164 | 112 |
|
165 | 113 |
|
166 |
- |
|
114 |
+ |
|
167 | 115 |
|
168 | 116 |
#endif |
169 | 117 |
|
... | ... |
@@ -85,7 +85,7 @@ void my_assert( int assertation, int line, char *file, char *function ) |
85 | 85 |
} |
86 | 86 |
|
87 | 87 |
#ifdef DBG_QM_MALLOC |
88 |
-static void vqm_debug_frag(struct vqm_block* qm, struct vqm_frag* f) |
|
88 |
+void vqm_debug_frag(struct vqm_block* qm, struct vqm_frag* f) |
|
89 | 89 |
{ |
90 | 90 |
|
91 | 91 |
int r; |
... | ... |
@@ -110,15 +110,14 @@ static void vqm_debug_frag(struct vqm_block* qm, struct vqm_frag* f) |
110 | 110 |
and changed the demanded size to size really used including all |
111 | 111 |
possible overhead |
112 | 112 |
*/ |
113 |
-unsigned char size2bucket( struct vqm_block* qm, int *size ) |
|
113 |
+unsigned char size2bucket( struct vqm_block* qm, int *size ) |
|
114 | 114 |
{ |
115 | 115 |
unsigned char b; |
116 | 116 |
unsigned int real_size; |
117 | 117 |
unsigned int exceeds; |
118 | 118 |
|
119 |
+ real_size = *size+ VQM_OVERHEAD; |
|
119 | 120 |
|
120 |
- real_size = *size+ sizeof(struct vqm_frag)+ |
|
121 |
- sizeof(struct vqm_frag_end); |
|
122 | 121 |
#ifdef DBG_QM_MALLOC |
123 | 122 |
real_size+=END_CHECK_PATTERN_LEN; |
124 | 123 |
#endif |
... | ... |
@@ -307,9 +306,6 @@ void* vqm_malloc(struct vqm_block* qm, unsigned int size) |
307 | 306 |
return (char*)new_chunk+sizeof(struct vqm_frag); |
308 | 307 |
} |
309 | 308 |
|
310 |
- |
|
311 |
- |
|
312 |
- |
|
313 | 309 |
#ifdef DBG_QM_MALLOC |
314 | 310 |
void vqm_free(struct vqm_block* qm, void* p, char* file, char* func, |
315 | 311 |
unsigned int line) |
... | ... |
@@ -35,16 +35,18 @@ |
35 | 35 |
|
36 | 36 |
|
37 | 37 |
#ifdef DBG_QM_MALLOC |
38 |
-#define ST_CHECK_PATTERN 0xf0f0f0f0 |
|
39 |
-#define END_CHECK_PATTERN "sExP" |
|
40 |
-#define END_CHECK_PATTERN_LEN 4 |
|
41 |
- |
|
42 |
-#define VQM_DEBUG_FRAG(qm, f) vqm_debug_frag( (qm), (f)) |
|
38 |
+# define ST_CHECK_PATTERN 0xf0f0f0f0 |
|
39 |
+# define END_CHECK_PATTERN "sExP" |
|
40 |
+# define END_CHECK_PATTERN_LEN 4 |
|
41 |
+# define VQM_OVERHEAD (sizeof(struct vqm_frag)+ sizeof(struct vqm_frag_end)+END_CHECK_PATTERN_LEN) |
|
42 |
+# define VQM_DEBUG_FRAG(qm, f) vqm_debug_frag( (qm), (f)) |
|
43 | 43 |
#else |
44 |
-#define VQM_DEBUG_FRAG(qm, f) |
|
44 |
+# define VQM_DEBUG_FRAG(qm, f) |
|
45 |
+# define VQM_OVERHEAD (sizeof(struct vqm_frag)+ sizeof(struct vqm_frag_end)) |
|
45 | 46 |
#endif |
46 | 47 |
|
47 | 48 |
|
49 |
+ |
|
48 | 50 |
struct vqm_frag { |
49 | 51 |
/* XXX */ |
50 | 52 |
/* total chunk size including all overhead/bellowfoot/roundings/etc */ |
... | ... |
@@ -113,14 +115,10 @@ struct vqm_block* vqm_malloc_init(char* address, unsigned int size); |
113 | 115 |
#ifdef DBG_QM_MALLOC |
114 | 116 |
void* vqm_malloc(struct vqm_block*, unsigned int size, char* file, char* func, |
115 | 117 |
unsigned int line); |
116 |
-#else |
|
117 |
-void* vqm_malloc(struct vqm_block*, unsigned int size); |
|
118 |
-#endif |
|
119 |
- |
|
120 |
-#ifdef DBG_QM_MALLOC |
|
121 | 118 |
void vqm_free(struct vqm_block*, void* p, char* file, char* func, |
122 | 119 |
unsigned int line); |
123 | 120 |
#else |
121 |
+void* vqm_malloc(struct vqm_block*, unsigned int size); |
|
124 | 122 |
void vqm_free(struct vqm_block*, void* p); |
125 | 123 |
#endif |
126 | 124 |
|
... | ... |
@@ -33,4 +33,10 @@ |
33 | 33 |
#define RETR_T1 1 |
34 | 34 |
#define RETR_T2 4 |
35 | 35 |
|
36 |
+/* when first reply is sent, this additional space is allocated so that |
|
37 |
+   one does not have to reallocate shared memory when the message is |
|
38 |
+ replaced by a subsequent, longer message |
|
39 |
+*/ |
|
40 |
+#define REPLY_OVERBUFFER_LEN 160 |
|
41 |
+ |
|
36 | 42 |
#endif |
... | ... |
@@ -19,10 +19,11 @@ void free_cell( struct cell* dead_cell ) |
19 | 19 |
DBG("DEBUG: free_cell: start\n"); |
20 | 20 |
/* UA Server */ |
21 | 21 |
DBG("DEBUG: free_cell: inbound request %p\n",dead_cell->inbound_request); |
22 |
+ shm_lock(); |
|
22 | 23 |
if ( dead_cell->inbound_request ) |
23 |
- sip_msg_free( dead_cell->inbound_request ); |
|
24 |
+ sip_msg_free_unsafe( dead_cell->inbound_request ); |
|
24 | 25 |
DBG("DEBUG: free_cell: outbound response %p\n",dead_cell->outbound_response); |
25 |
- if (b=dead_cell->outbound_response.retr_buffer) sh_free( b ); |
|
26 |
+ if (b=dead_cell->outbound_response.retr_buffer) shm_free_unsafe( b ); |
|
26 | 27 |
|
27 | 28 |
/* UA Clients */ |
28 | 29 |
for ( i =0 ; i<dead_cell->nr_of_outgoings; i++ ) |
... | ... |
@@ -31,19 +32,22 @@ void free_cell( struct cell* dead_cell ) |
31 | 32 |
DBG("DEBUG: free_cell: outbound_request[%d] %p\n",i,dead_cell->outbound_request[i]); |
32 | 33 |
if ( rb=dead_cell->outbound_request[i] ) |
33 | 34 |
{ |
34 |
- if (rb->retr_buffer) sh_free( rb->retr_buffer ); |
|
35 |
+/* |
|
36 |
+ if (rb->retr_buffer) shm_free( rb->retr_buffer ); |
|
35 | 37 |
dead_cell->outbound_request[i] = NULL; |
36 |
- sh_free( rb ); |
|
38 |
+*/ |
|
39 |
+ shm_free_unsafe( rb ); |
|
37 | 40 |
} |
38 | 41 |
/* outbound requests*/ |
39 | 42 |
DBG("DEBUG: free_cell: inbound_response[%d] %p\n",i,dead_cell->inbound_response[i]); |
40 | 43 |
if ( dead_cell -> inbound_response[i] ) |
41 |
- sip_msg_free( dead_cell->inbound_response[i] ); |
|
44 |
+ sip_msg_free_unsafe( dead_cell->inbound_response[i] ); |
|
42 | 45 |
} |
43 | 46 |
/* mutex */ |
44 | 47 |
/* release_cell_lock( dead_cell ); */ |
45 | 48 |
/* the cell's body */ |
46 |
- sh_free( dead_cell ); |
|
49 |
+ shm_free_unsafe( dead_cell ); |
|
50 |
+ shm_unlock(); |
|
47 | 51 |
DBG("DEBUG: free_cell: done\n"); |
48 | 52 |
} |
49 | 53 |
|
... | ... |
@@ -172,42 +172,56 @@ void lock_cleanup() |
172 | 172 |
} |
173 | 173 |
|
174 | 174 |
|
175 |
+ |
|
176 |
+ |
|
175 | 177 |
/* lock sempahore s */ |
176 |
-int lock( ser_lock_t s ) |
|
178 |
+#ifdef DBG_LOCK |
|
179 |
+inline int _lock( ser_lock_t s , char *file, char *function, unsigned int line ) |
|
180 |
+#else |
|
181 |
+inline int _lock( ser_lock_t s ) |
|
182 |
+#endif |
|
177 | 183 |
{ |
178 |
- //DBG("DEBUG: lock: entering lock\n"); |
|
184 |
+#ifdef DBG_LOCK |
|
185 |
+ DBG("DEBUG: lock : entered from %s , %s(%d)\n", function, file, line ); |
|
186 |
+#endif |
|
179 | 187 |
return change_semaphore( s, -1 ); |
180 |
- //DBG("DEBUG: lock: leaving lock\n"); |
|
181 | 188 |
} |
182 | 189 |
|
183 |
-int unlock( ser_lock_t s ) |
|
190 |
+#ifdef DBG_LOCK |
|
191 |
+inline int _unlock( ser_lock_t s, char *file, char *function, unsigned int line ) |
|
192 |
+#else |
|
193 |
+inline int _unlock( ser_lock_t s ) |
|
194 |
+#endif |
|
184 | 195 |
{ |
185 |
- //DBG("DEBUG: unlock: entering unlock\n"); |
|
196 |
+#ifdef DBG_LOCK |
|
197 |
+ DBG("DEBUG: lock : entered from %s, %s:%d\n", file, function, line ); |
|
198 |
+#endif |
|
186 | 199 |
return change_semaphore( s, +1 ); |
187 |
- //DBG("DEBUG: unlock: leaving unlock\n"); |
|
188 | 200 |
} |
189 | 201 |
|
190 | 202 |
|
191 | 203 |
int change_semaphore( ser_lock_t s , int val ) |
192 | 204 |
{ |
193 |
- struct sembuf pbuf; |
|
194 |
- int r; |
|
205 |
+ struct sembuf pbuf; |
|
206 |
+ int r; |
|
195 | 207 |
|
196 |
- pbuf.sem_num = s.semaphore_index ; |
|
197 |
- pbuf.sem_op =val; |
|
198 |
- pbuf.sem_flg = 0; |
|
208 |
+ pbuf.sem_num = s.semaphore_index ; |
|
209 |
+ pbuf.sem_op =val; |
|
210 |
+ pbuf.sem_flg = 0; |
|
199 | 211 |
|
200 | 212 |
tryagain: |
201 |
- r=semop( s.semaphore_set, &pbuf , 1 /* just 1 op */ ); |
|
213 |
+ r=semop( s.semaphore_set, &pbuf , 1 /* just 1 op */ ); |
|
202 | 214 |
|
203 |
- if (r==-1) { |
|
204 |
- if (errno=EINTR) { |
|
205 |
- DBG("signal received in a semaphore\n"); |
|
206 |
- goto tryagain; |
|
207 |
- } else LOG(L_ERR, "ERROR: change_semaphore: %s\n", strerror(errno)); |
|
215 |
+ if (r==-1) { |
|
216 |
+ if (errno=EINTR) { |
|
217 |
+ DBG("signal received in a semaphore\n"); |
|
218 |
+ goto tryagain; |
|
219 |
+ } else LOG(L_ERR, "ERROR: change_semaphore: %s\n", strerror(errno)); |
|
208 | 220 |
} |
209 | 221 |
return r; |
210 | 222 |
} |
223 |
+ |
|
224 |
+ |
|
211 | 225 |
/* |
212 | 226 |
int init_cell_lock( struct cell *cell ) |
213 | 227 |
{ |
... | ... |
@@ -30,8 +30,19 @@ int lock_initialize(); |
30 | 30 |
int init_semaphore_set( int size ); |
31 | 31 |
void lock_cleanup(); |
32 | 32 |
|
33 |
-int lock( ser_lock_t s ); |
|
34 |
-int unlock( ser_lock_t s ); |
|
33 |
+ |
|
34 |
+#ifdef DBG_LOCK |
|
35 |
+int _lock( ser_lock_t s , char *file, char *function, unsigned int line ); |
|
36 |
+int _unlock( ser_lock_t s, char *file, char *function, unsigned int line ); |
|
37 |
+# define lock(_s) _lock( (_s), __FILE__, __FUNCTION__, __LINE__ ) |
|
38 |
+# define unlock(_s) _unlock( (_s), __FILE__, __FUNCTION__, __LINE__ ) |
|
39 |
+#else |
|
40 |
+int _lock( ser_lock_t s ); |
|
41 |
+int _unlock( ser_lock_t s ); |
|
42 |
+# define lock(_s) _lock( (_s) ) |
|
43 |
+# define unlock(_s) _unlock( (_s) ) |
|
44 |
+#endif |
|
45 |
+ |
|
35 | 46 |
int change_semaphore( ser_lock_t s , int val ); |
36 | 47 |
|
37 | 48 |
int init_cell_lock( struct cell *cell ); |
... | ... |
@@ -13,14 +13,15 @@ |
13 | 13 |
#define sip_msg_cloner(p_msg) \ |
14 | 14 |
sip_msg_cloner_2(p_msg) |
15 | 15 |
|
16 |
-#define sip_msg_free(p_msg) \ |
|
17 |
- sip_msg_free_2(p_msg) |
|
16 |
+#define sip_msg_free(_p_msg) shm_free( (_p_msg )) |
|
17 |
+#define sip_msg_free_unsafe(_p_msg) shm_free_unsafe( (_p_msg) ) |
|
18 |
+/* sip_msg_free_2(p_msg) */ |
|
18 | 19 |
|
19 | 20 |
|
20 | 21 |
struct sip_msg* sip_msg_cloner_1( struct sip_msg *org_msg ); |
21 | 22 |
struct sip_msg* sip_msg_cloner_2( struct sip_msg *org_msg ); |
22 | 23 |
void sip_msg_free_1( struct sip_msg *org_msg ); |
23 |
-void sip_msg_free_2( struct sip_msg *org_msg ); |
|
24 |
+/* void sip_msg_free_2( struct sip_msg *org_msg ); */ |
|
24 | 25 |
|
25 | 26 |
|
26 | 27 |
#endif |
... | ... |
@@ -191,10 +191,12 @@ int t_forward( struct sip_msg* p_msg , unsigned int dest_ip_param , unsigned int |
191 | 191 |
unsigned int dest_port = dest_port_param; |
192 | 192 |
int branch; |
193 | 193 |
unsigned int len; |
194 |
- char *buf; |
|
194 |
+ char *buf, *shbuf; |
|
195 | 195 |
struct retrans_buff *rb; |
196 |
+ |
|
196 | 197 |
|
197 | 198 |
buf=NULL; |
199 |
+ shbuf = NULL; |
|
198 | 200 |
branch = 0; /* we don't do any forking right now */ |
199 | 201 |
|
200 | 202 |
/* it's about the same transaction or not? */ |
... | ... |
@@ -253,36 +255,42 @@ int t_forward( struct sip_msg* p_msg , unsigned int dest_ip_param , unsigned int |
253 | 255 |
"nothing to CANCEL\n"); |
254 | 256 |
return 1; |
255 | 257 |
} |
256 |
- }/* end special case CANCEL*/ |
|
258 |
+ }/* end special case CANCEL*/ |
|
259 |
+ |
|
260 |
+ if ( add_branch_label( T, T->inbound_request , branch )==-1) |
|
261 |
+ goto error; |
|
262 |
+ if ( add_branch_label( T, p_msg , branch )==-1) |
|
263 |
+ goto error; |
|
264 |
+ if ( !(buf = build_req_buf_from_sip_req ( p_msg, &len))) |
|
265 |
+ goto error; |
|
257 | 266 |
|
258 | 267 |
/* allocates a new retrans_buff for the outbound request */ |
259 | 268 |
DBG("DEBUG: t_forward: building outbound request\n"); |
269 |
+ shm_lock(); |
|
260 | 270 |
T->outbound_request[branch] = rb = |
261 |
- (struct retrans_buff*)sh_malloc( sizeof(struct retrans_buff) ); |
|
271 |
+ (struct retrans_buff*)shm_malloc_unsafe( sizeof(struct retrans_buff) ); |
|
262 | 272 |
if (!rb) |
263 | 273 |
{ |
264 | 274 |
LOG(L_ERR, "ERROR: t_forward: out of shmem\n"); |
275 |
+ shm_unlock(); |
|
276 |
+ goto error; |
|
277 |
+ } |
|
278 |
+ shbuf = (char *) shm_malloc_unsafe( len ); |
|
279 |
+ if (!shbuf) |
|
280 |
+ { |
|
281 |
+ LOG(L_ERR, "ERROR: t_forward: out of shmem buffer\n"); |
|
282 |
+ shm_unlock(); |
|
265 | 283 |
goto error; |
266 | 284 |
} |
285 |
+ shm_unlock(); |
|
267 | 286 |
memset( rb , 0 , sizeof (struct retrans_buff) ); |
287 |
+ rb->retr_buffer = shbuf; |
|
268 | 288 |
rb->retr_timer.payload = rb; |
269 | 289 |
rb->fr_timer.payload = rb; |
270 | 290 |
rb->to.sin_family = AF_INET; |
271 | 291 |
rb->my_T = T; |
272 | 292 |
T->nr_of_outgoings = 1; |
273 |
- |
|
274 |
- if ( add_branch_label( T, T->inbound_request , branch )==-1) |
|
275 |
- goto error; |
|
276 |
- if ( add_branch_label( T, p_msg , branch )==-1) |
|
277 |
- goto error; |
|
278 |
- if ( !(buf = build_req_buf_from_sip_req ( p_msg, &len))) |
|
279 |
- goto error; |
|
280 | 293 |
rb->bufflen = len ; |
281 |
- if ( !(rb->retr_buffer = (char*)sh_malloc( len ))) |
|
282 |
- { |
|
283 |
- LOG(L_ERR, "ERROR: t_forward: shmem allocation failed\n"); |
|
284 |
- goto error; |
|
285 |
- } |
|
286 | 294 |
memcpy( rb->retr_buffer , buf , len ); |
287 | 295 |
free( buf ) ; buf=NULL; |
288 | 296 |
|
... | ... |
@@ -330,9 +338,9 @@ int t_forward( struct sip_msg* p_msg , unsigned int dest_ip_param , unsigned int |
330 | 338 |
return 1; |
331 | 339 |
|
332 | 340 |
error: |
333 |
- if ( rb && rb->retr_buffer) sh_free( rb->retr_buffer ); |
|
341 |
+ if (shbuf) shm_free(shbuf); |
|
334 | 342 |
if (rb) { |
335 |
- sh_free(rb); |
|
343 |
+ shm_free(rb); |
|
336 | 344 |
T->outbound_request[branch]=NULL; |
337 | 345 |
} |
338 | 346 |
if (buf) free( buf ); |
... | ... |
@@ -582,10 +590,9 @@ int t_unref( struct sip_msg* p_msg, char* foo, char* bar ) |
582 | 590 |
*/ |
583 | 591 |
int t_send_reply( struct sip_msg* p_msg , unsigned int code , char * text ) |
584 | 592 |
{ |
585 |
- unsigned int len; |
|
593 |
+ unsigned int len, buf_len; |
|
586 | 594 |
char * buf; |
587 | 595 |
struct retrans_buff *rb; |
588 |
- char *b; |
|
589 | 596 |
|
590 | 597 |
DBG("DEBUG: t_send_reply: entered\n"); |
591 | 598 |
if (t_check( p_msg , 0 )==-1) return -1; |
... | ... |
@@ -628,15 +635,16 @@ int t_send_reply( struct sip_msg* p_msg , unsigned int code , char * text ) |
628 | 635 |
goto error; |
629 | 636 |
} |
630 | 637 |
|
631 |
- if (! (b = (char*)sh_malloc( len ))) |
|
638 |
+ /* if this is a first reply (?100), longer replies will probably follow; |
|
639 |
+	   try avoiding shm_resize by using a higher buffer size |
|
640 |
+ */ |
|
641 |
+ buf_len = rb->retr_buffer ? len : len + REPLY_OVERBUFFER_LEN; |
|
642 |
+ |
|
643 |
+ if (! (rb->retr_buffer = (char*)shm_resize( rb->retr_buffer, buf_len ))) |
|
632 | 644 |
{ |
633 | 645 |
LOG(L_ERR, "ERROR: t_send_reply: cannot allocate shmem buffer\n"); |
634 | 646 |
goto error2; |
635 | 647 |
} |
636 |
- /* if present, remove previous message */ |
|
637 |
- if ( rb->retr_buffer) |
|
638 |
- sh_free( rb->retr_buffer ); |
|
639 |
- rb->retr_buffer = b; |
|
640 | 648 |
rb->bufflen = len ; |
641 | 649 |
memcpy( rb->retr_buffer , buf , len ); |
642 | 650 |
free( buf ) ; |
... | ... |
@@ -666,9 +674,8 @@ error: |
666 | 674 |
int push_reply_from_uac_to_uas( struct cell* trans , unsigned int branch ) |
667 | 675 |
{ |
668 | 676 |
char *buf; |
669 |
- unsigned int len; |
|
677 |
+ unsigned int len, buf_len; |
|
670 | 678 |
struct retrans_buff *rb; |
671 |
- char *b; |
|
672 | 679 |
|
673 | 680 |
DBG("DEBUG: push_reply_from_uac_to_uas: start\n"); |
674 | 681 |
rb= & trans->outbound_response; |
... | ... |
@@ -700,14 +707,16 @@ int push_reply_from_uac_to_uas( struct cell* trans , unsigned int branch ) |
700 | 707 |
"no shmem for outbound reply buffer\n"); |
701 | 708 |
goto error; |
702 | 709 |
} |
703 |
- if ( !(b = (char*)sh_malloc( len ))) { |
|
704 |
- LOG(L_ERR, "ERROR: push_reply_from_uac_to_uas: " |
|
705 |
- "no memory to allocate retr_buffer\n"); |
|
710 |
+ |
|
711 |
+ /* if this is a first reply (?100), longer replies will probably follow; |
|
712 |
+	   try avoiding shm_resize by using a higher buffer size |
|
713 |
+ */ |
|
714 |
+ buf_len = rb->retr_buffer ? len : len + REPLY_OVERBUFFER_LEN; |
|
715 |
+ if (! (rb->retr_buffer = (char*)shm_resize( rb->retr_buffer, buf_len ))) |
|
716 |
+ { |
|
717 |
+ LOG(L_ERR, "ERROR: t_send_reply: cannot allocate shmem buffer\n"); |
|
706 | 718 |
goto error1; |
707 | 719 |
} |
708 |
- if ( rb->retr_buffer ) |
|
709 |
- sh_free( rb->retr_buffer ) ; |
|
710 |
- rb->retr_buffer = b; |
|
711 | 720 |
rb->bufflen = len ; |
712 | 721 |
memcpy( rb->retr_buffer , buf , len ); |
713 | 722 |
free( buf ) ; |
... | ... |
@@ -966,8 +975,8 @@ int t_build_and_send_ACK( struct cell *Trans, unsigned int branch, struct sip_ms |
966 | 975 |
else if ( hdr->type==HDR_TO ) |
967 | 976 |
len += ((r_msg->to->body.s+r_msg->to->body.len ) - r_msg->to->name.s ) + CRLF_LEN ; |
968 | 977 |
|
969 |
- /* CSEQ method : from INVITE-> ACK, don't count CRLF twice*/ |
|
970 |
- len -= 3 + 2 ; |
|
978 |
+ /* CSEQ method : from INVITE-> ACK */ |
|
979 |
+ len -= 3 ; |
|
971 | 980 |
/* end of message */ |
972 | 981 |
len += CRLF_LEN; /*new line*/ |
973 | 982 |
|
... | ... |
@@ -1048,6 +1057,58 @@ error: |
1048 | 1057 |
} |
1049 | 1058 |
|
1050 | 1059 |
|
1060 |
+void delete_cell( struct cell *p_cell ) |
|
1061 |
+{ |
|
1062 |
+#ifdef EXTRA_DEBUG |
|
1063 |
+ int i; |
|
1064 |
+ |
|
1065 |
+ if (is_in_timer_list2(& p_cell->wait_tl )) { |
|
1066 |
+ LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and still on WAIT\n", |
|
1067 |
+ p_cell); |
|
1068 |
+ abort(); |
|
1069 |
+ } |
|
1070 |
+ if (is_in_timer_list2(& p_cell->outbound_response.retr_timer )) { |
|
1071 |
+ LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and still on RETR (rep)\n", |
|
1072 |
+ p_cell); |
|
1073 |
+ abort(); |
|
1074 |
+ } |
|
1075 |
+ if (is_in_timer_list2(& p_cell->outbound_response.fr_timer )) { |
|
1076 |
+ LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and still on FR (rep)\n", |
|
1077 |
+ p_cell); |
|
1078 |
+ abort(); |
|
1079 |
+ } |
|
1080 |
+ for (i=0; i<p_cell->nr_of_outgoings; i++) { |
|
1081 |
+ if (is_in_timer_list2(& p_cell->outbound_request[i]->retr_timer)) { |
|
1082 |
+ LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and still on RETR (req %d)\n", |
|
1083 |
+ p_cell, i); |
|
1084 |
+ abort(); |
|
1085 |
+ } |
|
1086 |
+ if (is_in_timer_list2(& p_cell->outbound_request[i]->fr_timer)) { |
|
1087 |
+ LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and still on FR (req %d)\n", |
|
1088 |
+ p_cell, i); |
|
1089 |
+ abort(); |
|
1090 |
+ } |
|
1091 |
+ } |
|
1092 |
+#endif |
|
1093 |
+ /* still in use ... don't delete */ |
|
1094 |
+ if ( p_cell->ref_counter ) { |
|
1095 |
+#ifdef EXTRA_DEBUG |
|
1096 |
+ if (p_cell->ref_counter>1) { |
|
1097 |
+ DBG("DEBUG: while debugging with a single process, ref_count > 1\n"); |
|
1098 |
+ DBG("DEBUG: transaction =%p\n", p_cell ); |
|
1099 |
+ abort(); |
|
1100 |
+ } |
|
1101 |
+#endif |
|
1102 |
+ DBG("DEBUG: delete_cell: t=%p post for delete (%d)\n", |
|
1103 |
+ p_cell,p_cell->ref_counter); |
|
1104 |
+ /* it's added to del list for future del */ |
|
1105 |
+ set_timer( hash_table, &(p_cell->dele_tl), DELETE_LIST ); |
|
1106 |
+ } else { |
|
1107 |
+ DBG("DEBUG: delete_handler : delete transaction %p\n", p_cell ); |
|
1108 |
+ free_cell( p_cell ); |
|
1109 |
+ } |
|
1110 |
+} |
|
1111 |
+ |
|
1051 | 1112 |
|
1052 | 1113 |
/*---------------------TIMEOUT HANDLERS--------------------------*/ |
1053 | 1114 |
|
... | ... |
@@ -1147,75 +1208,23 @@ void wait_handler( void *attr) |
1147 | 1208 |
#ifdef EXTRA_DEBUG |
1148 | 1209 |
p_cell->damocles = 1; |
1149 | 1210 |
#endif |
1150 |
- set_timer( hash_table, &(p_cell->dele_tl), DELETE_LIST ); |
|
1211 |
+ delete_cell( p_cell ); |
|
1151 | 1212 |
DBG("DEBUG: wait_handler : done\n"); |
1152 | 1213 |
} |
1153 | 1214 |
|
1154 | 1215 |
|
1155 |
- |
|
1156 |
- |
|
1157 | 1216 |
void delete_handler( void *attr) |
1158 | 1217 |
{ |
1159 | 1218 |
struct cell *p_cell = (struct cell*)attr; |
1160 | 1219 |
|
1220 |
+ DBG("DEBUG: delete_handler : removing %p \n", p_cell ); |
|
1161 | 1221 |
#ifdef EXTRA_DEBUG |
1162 |
- int i; |
|
1163 | 1222 |
if (p_cell->damocles==0) { |
1164 | 1223 |
LOG( L_ERR, "ERROR: transaction %p not scheduled for deletion and called from DELETE timer\n", |
1165 | 1224 |
p_cell); |
1166 | 1225 |
abort(); |
1167 | 1226 |
} |
1168 |
- if (is_in_timer_list2(& p_cell->wait_tl )) { |
|
1169 |
- LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and still on WAIT\n", |
|
1170 |
- p_cell); |
|
1171 |
- abort(); |
|
1172 |
- } |
|
1173 |
- if (is_in_timer_list2(& p_cell->outbound_response.retr_timer )) { |
|
1174 |
- LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and still on RETR (rep)\n", |
|
1175 |
- p_cell); |
|
1176 |
- abort(); |
|
1177 |
- } |
|
1178 |
- if (is_in_timer_list2(& p_cell->outbound_response.fr_timer )) { |
|
1179 |
- LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and still on FR (rep)\n", |
|
1180 |
- p_cell); |
|
1181 |
- abort(); |
|
1182 |
- } |
|
1183 |
- for (i=0; i<p_cell->nr_of_outgoings; i++) { |
|
1184 |
- if (is_in_timer_list2(& p_cell->outbound_request[i]->retr_timer)) { |
|
1185 |
- LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and still on RETR (req %d)\n", |
|
1186 |
- p_cell, i); |
|
1187 |
- abort(); |
|
1188 |
- } |
|
1189 |
- if (is_in_timer_list2(& p_cell->outbound_request[i]->fr_timer)) { |
|
1190 |
- LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and still on FR (req %d)\n", |
|
1191 |
- p_cell, i); |
|
1192 |
- abort(); |
|
1193 |
- } |
|
1194 |
- } |
|
1195 |
- |
|
1196 |
-#endif |
|
1197 |
- |
|
1198 |
- /* the transaction is already removed from DEL_LIST by the timer */ |
|
1199 |
- /* if is not refenceted -> is deleted*/ |
|
1200 |
- if ( p_cell->ref_counter==0 ) |
|
1201 |
- { |
|
1202 |
- DBG("DEBUG: delete_handler : delete transaction %p\n", p_cell ); |
|
1203 |
- free_cell( p_cell ); |
|
1204 |
- } else { |
|
1205 |
-#ifdef EXTRA_DEBUG |
|
1206 |
- if (p_cell->ref_counter>1) { |
|
1207 |
- DBG("DEBUG: while debugging with a single process, ref_count > 1\n"); |
|
1208 |
- DBG("DEBUG: transaction =%p\n", p_cell ); |
|
1209 |
- abort(); |
|
1210 |
- } |
|
1211 | 1227 |
#endif |
1212 |
- DBG("DEBUG: delete_handler: t=%p post for delete (%d)\n", |
|
1213 |
- p_cell,p_cell->ref_counter); |
|
1214 |
- /* else it's readded to del list for future del */ |
|
1215 |
- set_timer( hash_table, &(p_cell->dele_tl), DELETE_LIST ); |
|
1216 |
- } |
|
1228 |
+ delete_cell( p_cell ); |
|
1217 | 1229 |
DBG("DEBUG: delete_handler : done\n"); |
1218 | 1230 |
} |
1219 |
- |
|
1220 |
- |
|
1221 |
- |
... | ... |
@@ -100,6 +100,12 @@ struct timer_link *check_and_split_time_list( struct timer *timer_list, int tim |
100 | 100 |
struct timer_link *tl , *tmp , *end, *ret; |
101 | 101 |
|
102 | 102 |
//DBG("DEBUG : check_and_split_time_list: start\n"); |
103 |
+ |
|
104 |
+ /* quick check whether it is worth entering the lock */ |
|
105 |
+ if (timer_list->first_tl.next_tl==&timer_list->last_tl || |
|
106 |
+ timer_list->first_tl.next_tl->time_out > time ) |
|
107 |
+ return NULL; |
|
108 |
+ |
|
103 | 109 |
/* the entire timer list is locked now -- noone else can manipulate it */ |
104 | 110 |
lock( timer_list->mutex ); |
105 | 111 |
|
... | ... |
@@ -4,15 +4,17 @@ |
4 | 4 |
# $ID: $ |
5 | 5 |
# |
6 | 6 |
|
7 |
-debug=9 # debug level (cmd line: -dddddddddd) |
|
7 |
+debug=1 # debug level (cmd line: -dddddddddd) |
|
8 |
+fork=yes # (cmd. line: -D) |
|
9 |
+#fork=no |
|
10 |
+log_stderror=yes # (cmd line: -E) |
|
11 |
+#log_stderror=no # (cmd line: -E) |
|
12 |
+ |
|
13 |
+ |
|
14 |
+children=4 |
|
8 | 15 |
check_via=yes # (cmd. line: -v) |
9 | 16 |
dns=on # (cmd. line: -r) |
10 | 17 |
rev_dns=yes # (cmd. line: -R) |
11 |
-#fork=yes # (cmd. line: -D) |
|
12 |
-fork=no |
|
13 |
-children=16 |
|
14 |
-log_stderror=yes # (cmd line: -E) |
|
15 |
-#log_stderror=no # (cmd line: -E) |
|
16 | 18 |
port=5080 |
17 | 19 |
#listen=127.0.0.1 |
18 | 20 |
listen=192.168.99.100 |
... | ... |
@@ -23,34 +25,46 @@ loop_checks=1 |
23 | 25 |
loadmodule "modules/print/print.so" |
24 | 26 |
#loadmodule "modules/tm/tm.so" |
25 | 27 |
|
26 |
-route{ |
|
28 |
+route[0]{ |
|
29 |
+ forward("bat.iptel.org", 5090); |
|
30 |
+ break; |
|
31 |
+} |
|
32 |
+ |
|
33 |
+route[1]{ |
|
34 |
+ log("SER: new request reveived\n"); |
|
27 | 35 |
if ( t_lookup_request()) { |
28 |
- log("SER: transaction found\n"); |
|
29 | 36 |
if ( method=="ACK" ) { |
30 |
- log("SER: ACK received -> t_release\n"); |
|
37 |
+ log("SER: ACK for an existing transaction received\n"); |
|
31 | 38 |
if (! t_forward("bat.iptel.org", "5090" )) { |
32 | 39 |
log("SER: WARNING: bad forward\n"); |
33 |
- }; |
|
40 |
+ } else log("SER: t_forward ok\n"); |
|
34 | 41 |
if (! t_release()) { |
35 | 42 |
log("SER: WARNING: bad t_release\n"); |
36 |
- }; |
|
43 |
+ } else log("SER: t_release ok\n"); |
|
37 | 44 |
} else { |
45 |
+ if (method=="INVITE" ) { log("SER: it's an INVITE retranmission\n"); } |
|
46 |
+ else if (method=="BYE") log( "SER: it's a BYE retransmission\n") |
|
47 |
+ else log("SER: it's a retransmission (neither INVITE nor BYE\n"); |
|
38 | 48 |
if (! t_retransmit_reply()) { |
39 | 49 |
log("SER: WARNING: bad t_retransmit_reply\n"); |
40 |
- }; |
|
41 |
- log("SER: yet another annoying retranmission\n"); |
|
50 |
+ } else log("SER: t_retransmit ok\n"); |
|
42 | 51 |
}; |
43 | 52 |
t_unref(); |
44 | 53 |
} else { |
45 | 54 |
log("SER: transaction not found\n"); |
46 | 55 |
if (method=="ACK") { |
47 | 56 |
# no established transaction ... forward ACK just statelessly |
57 |
+ log("SER: ACK received\n"); |
|
48 | 58 |
forward("bat.iptel.org", 5090); |
49 | 59 |
} else { |
50 | 60 |
# establish transaction |
61 |
+ log("SER: adding new transaction\n"); |
|
62 |
+ if (method=="INVITE" ) { log("SER: it's a new INVITE \n"); } |
|
63 |
+ else if (method=="BYE") log( "SER: it's a new BYE \n") |
|
64 |
+ else log("SER: it is a new transaction (neither INVITE nor BYE)\n"); |
|
51 | 65 |
if (! t_add_transaction()){ |
52 |
- log("ERROR in ser: t_add_transaction\n"); |
|
53 |
- }; |
|
66 |
+ log("SER t_add_transaction failed\n"); |
|
67 |
+ } else log("SER: t_add_Transactio ok\n"); |
|
54 | 68 |
# reply |
55 | 69 |
if (method=="CANCEL") { |
56 | 70 |
log("SER: new CANCEL\n"); |
... | ... |
@@ -58,15 +72,15 @@ route{ |
58 | 72 |
log("SER:ERROR: t_send_reply\n"); |
59 | 73 |
}; |
60 | 74 |
} else { |
61 |
- log("SER: new transaction\n"); |
|
75 |
+ log("SER: replying\n"); |
|
62 | 76 |
if (! t_send_reply("100", "trying -- your call is important to us") |
63 | 77 |
){ |
64 | 78 |
log("SER: ERROR: t_send_reply (100)\n"); |
65 |
- }; |
|
79 |
+ } else log("SER: t_send_reply ok\n"); |
|
66 | 80 |
}; |
67 | 81 |
if (! t_forward("bat.iptel.org", "5090")){ |
68 | 82 |
log("SER:ERROR: t_forward (..., 5555)\n"); |
69 |
- }; |
|
83 |
+ } else log("SER: t_forward ok\n"); |
|
70 | 84 |
t_unref(); |
71 | 85 |
}; |
72 | 86 |
}; |
... | ... |
@@ -20,12 +20,8 @@ loop_checks=1 |
20 | 20 |
loadmodule "modules/print/print.so" |
21 | 21 |
#loadmodule "modules/tm/tm.so" |
22 | 22 |
|
23 |
-route[0] { |
|
24 |
- forward(195.37.78.146, 5060); |
|
25 |
- drop; |
|
26 |
-} |
|
27 | 23 |
|
28 |
-route[1]{ |
|
24 |
+route[0]{ |
|
29 | 25 |
if ( t_lookup_request()) { |
30 | 26 |
if ( method=="ACK" ) { |
31 | 27 |
t_release(); |
... | ... |
@@ -56,3 +52,8 @@ route[1]{ |
56 | 52 |
}; |
57 | 53 |
|
58 | 54 |
} |
55 |
+ |
|
56 |
+#route[0] { |
|
57 |
+# forward(195.37.78.146, 5060); |
|
58 |
+# drop; |
|
59 |
+#} |