Browse code

FAT bug_fix: An unfortunately big fix of a bug which has, unfortunately, not been understood :-( . Suddenly, TM started segfaulting on start-up. In gdb, there was a lot of confusion between automatic and static variables -- funny thing, p1==p2 but *p1!=*p2. I restructured TM again, made the non-automatic variables in question (hash_table) static, and split it into timers and a transaction table, each in a separate file.

Jiri Kuthan authored on 28/08/2002 21:24:28
Showing 15 changed files
... ...
@@ -15,6 +15,29 @@
15 15
 #include "t_cancel.h"
16 16
 #include "t_stats.h"
17 17
 
18
+/* pointer to the big table where all the transaction data
19
+   lives
20
+*/
21
+
22
+static struct s_table*  tm_table;
23
+
24
+void lock_hash(int i) 
25
+{
26
+	lock(&tm_table->entrys[i].mutex);
27
+}
28
+
29
+void unlock_hash(int i) 
30
+{
31
+	unlock(&tm_table->entrys[i].mutex);
32
+}
33
+
34
+
35
+struct s_table* get_tm_table()
36
+{
37
+	return tm_table;
38
+}
39
+
40
+
18 41
 unsigned int transaction_count( void )
19 42
 {
20 43
 	unsigned int i;
... ...
@@ -22,7 +45,7 @@ unsigned int transaction_count( void )
22 22
 
23 23
 	count=0;	
24 24
 	for (i=0; i<TABLE_ENTRIES; i++) 
25
-		count+=hash_table->entrys[i].entries;
25
+		count+=tm_table->entrys[i].entries;
26 26
 	return count;
27 27
 }
28 28
 
... ...
@@ -186,20 +209,20 @@ error:
186 186
 
187 187
 /* Release all the data contained by the hash table. All the aux. structures
188 188
  *  as sems, lists, etc, are also released */
189
-void free_hash_table( struct s_table *hash_table )
189
+void free_hash_table(  )
190 190
 {
191 191
 	struct cell* p_cell;
192 192
 	struct cell* tmp_cell;
193 193
 	int    i;
194 194
 
195
-	if (hash_table)
195
+	if (tm_table)
196 196
 	{
197 197
 		/* remove the data contained by each entry */
198 198
 		for( i = 0 ; i<TABLE_ENTRIES; i++)
199 199
 		{
200
-			release_entry_lock( (hash_table->entrys)+i );
200
+			release_entry_lock( (tm_table->entrys)+i );
201 201
 			/* delete all synonyms at hash-collision-slot i */
202
-			p_cell=hash_table->entrys[i].first_cell;
202
+			p_cell=tm_table->entrys[i].first_cell;
203 203
 			for( ; p_cell; p_cell = tmp_cell )
204 204
 			{
205 205
 				tmp_cell = p_cell->next_cell;
... ...
@@ -207,11 +230,6 @@ void free_hash_table( struct s_table *hash_table )
207 207
 			}
208 208
 		}
209 209
 
210
-		/* the mutexs for sync the lists are released*/
211
-		for ( i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
212
-			release_timerlist_lock( &(hash_table->timers[i]) );
213
-
214
-		shm_free( hash_table );
215 210
 	}
216 211
 }
217 212
 
... ...
@@ -222,36 +240,37 @@ void free_hash_table( struct s_table *hash_table )
222 222
  */
223 223
 struct s_table* init_hash_table()
224 224
 {
225
-	struct s_table*  hash_table;
226 225
 	int              i;
227 226
 
228 227
 	/*allocs the table*/
229
-	hash_table = (struct s_table*)shm_malloc( sizeof( struct s_table ) );
230
-	if ( !hash_table )
231
-		goto error;
228
+	tm_table= (struct s_table*)shm_malloc( sizeof( struct s_table ) );
229
+	if ( !tm_table) {
230
+		LOG(L_ERR, "ERROR: init_hash_table: no shmem for TM table\n");
231
+		goto error0;
232
+	}
232 233
 
233
-	memset( hash_table, 0, sizeof (struct s_table ) );
234
+	memset( tm_table, 0, sizeof (struct s_table ) );
234 235
 
235 236
 	/* try first allocating all the structures needed for syncing */
236 237
 	if (lock_initialize()==-1)
237
-		goto error;
238
+		goto error1;
238 239
 
239 240
 	/* inits the entrys */
240 241
 	for(  i=0 ; i<TABLE_ENTRIES; i++ )
241 242
 	{
242
-		init_entry_lock( hash_table , (hash_table->entrys)+i );
243
-		hash_table->entrys[i].next_label = rand();
243
+		init_entry_lock( tm_table, (tm_table->entrys)+i );
244
+		tm_table->entrys[i].next_label = rand();
244 245
 	}
245 246
 
246
-	/* inits the timers*/
247
-	for(  i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
248
-		init_timer_list( hash_table, i );
249
-
250
-	return  hash_table;
247
+	return  tm_table;
251 248
 
252
-error:
253
-	free_hash_table( hash_table );
249
+#ifdef _OBSO
250
+error2:
254 251
 	lock_cleanup();
252
+#endif
253
+error1:
254
+	free_hash_table( );
255
+error0:
255 256
 	return 0;
256 257
 }
257 258
 
... ...
@@ -260,13 +279,12 @@ error:
260 260
 
261 261
 /*  Takes an already created cell and links it into hash table on the
262 262
  *  appropiate entry. */
263
-void insert_into_hash_table_unsafe( struct s_table *hash_table,
264
-											struct cell * p_cell )
263
+void insert_into_hash_table_unsafe( struct cell * p_cell )
265 264
 {
266 265
 	struct entry* p_entry;
267 266
 
268 267
 	/* locates the apropiate entry */
269
-	p_entry = &hash_table->entrys[ p_cell->hash_index ];
268
+	p_entry = &tm_table->entrys[ p_cell->hash_index ];
270 269
 
271 270
 	p_cell->label = p_entry->next_label++;
272 271
 	if ( p_entry->last_cell )
... ...
@@ -290,10 +308,10 @@ void insert_into_hash_table_unsafe( struct s_table *hash_table,
290 290
 
291 291
 
292 292
 
293
-void insert_into_hash_table(struct s_table *hash_table,  struct cell * p_cell)
293
+void insert_into_hash_table( struct cell * p_cell)
294 294
 {
295 295
 	LOCK_HASH(p_cell->hash_index);
296
-	insert_into_hash_table_unsafe( hash_table,  p_cell );
296
+	insert_into_hash_table_unsafe(  p_cell );
297 297
 	UNLOCK_HASH(p_cell->hash_index);
298 298
 }
299 299
 
... ...
@@ -301,10 +319,9 @@ void insert_into_hash_table(struct s_table *hash_table,  struct cell * p_cell)
301 301
 
302 302
 
303 303
 /*  Un-link a  cell from hash_table, but the cell itself is not released */
304
-void remove_from_hash_table_unsafe(struct s_table *hash_table,  
305
- struct cell * p_cell)
304
+void remove_from_hash_table_unsafe( struct cell * p_cell)
306 305
 {
307
-	struct entry*  p_entry  = &(hash_table->entrys[p_cell->hash_index]);
306
+	struct entry*  p_entry  = &(tm_table->entrys[p_cell->hash_index]);
308 307
 
309 308
 	/* unlink the cell from entry list */
310 309
 	/* lock( &(p_entry->mutex) ); */
... ...
@@ -23,11 +23,17 @@ struct timer;
23 23
 struct retr_buf;
24 24
 
25 25
 #include "../../mem/shm_mem.h"
26
-#include "timer.h" 
27 26
 #include "lock.h"
28 27
 #include "sip_msg.h"
29 28
 #include "t_reply.h"
30 29
 #include "t_hooks.h"
30
+#include "timer.h"
31
+
32
+#define LOCK_HASH(_h) lock_hash((_h))
33
+#define UNLOCK_HASH(_h) unlock_hash((_h))
34
+
35
+void lock_hash(int i);
36
+void unlock_hash(int i);
31 37
 
32 38
 
33 39
 #define NO_CANCEL       ( (char*) 0 )
... ...
@@ -218,22 +224,21 @@ struct s_table
218 218
 {
219 219
 	/* table of hash entries; each of them is a list of synonyms  */
220 220
 	struct entry   entrys[ TABLE_ENTRIES ];
221
+#ifdef _OBSOLETED
221 222
 	/* table of timer lists */
222 223
 	struct timer   timers[ NR_OF_TIMER_LISTS ];
224
+#endif
223 225
 };
224 226
 
225 227
 
226
-
228
+struct s_table* get_tm_table();
227 229
 struct s_table* init_hash_table();
228
-void   free_hash_table( struct s_table* hash_table );
230
+void   free_hash_table( );
229 231
 void   free_cell( struct cell* dead_cell );
230 232
 struct cell*  build_cell( struct sip_msg* p_msg );
231
-void   remove_from_hash_table_unsafe(struct s_table *hash_table,
232
-	struct cell * p_cell);
233
-void   insert_into_hash_table(struct s_table *hash_table,
234
-	struct cell * p_cell);
235
-void   insert_into_hash_table_unsafe( struct s_table *hash_table,
236
-		struct cell * p_cell );
233
+void   remove_from_hash_table_unsafe( struct cell * p_cell);
234
+void   insert_into_hash_table( struct cell * p_cell);
235
+void   insert_into_hash_table_unsafe( struct cell * p_cell );
237 236
 
238 237
 unsigned int transaction_count( void );
239 238
 
... ...
@@ -364,7 +364,7 @@ int init_cell_lock( struct cell *cell )
364 364
 	return 0;
365 365
 }
366 366
 
367
-int init_entry_lock( struct s_table* hash_table, struct entry *entry )
367
+int init_entry_lock( struct s_table* ht, struct entry *entry )
368 368
 {
369 369
 #ifdef FAST_LOCK
370 370
 	init_lock(entry->mutex);
... ...
@@ -374,24 +374,12 @@ int init_entry_lock( struct s_table* hash_table, struct entry *entry )
374 374
 	   many partitions as number of available semaphors allows
375 375
         */
376 376
 	entry->mutex.semaphore_set=entry_semaphore;
377
-	entry->mutex.semaphore_index = ( ((void *)entry - (void *)(hash_table->entrys ) )
377
+	entry->mutex.semaphore_index = ( ((void *)entry - (void *)(ht->entrys ) )
378 378
                / sizeof(struct entry) ) % sem_nr;
379 379
 #endif
380 380
 	return 0;
381 381
 }
382 382
 
383
-int init_timerlist_lock( struct s_table* hash_table, enum lists timerlist_id)
384
-{
385
-	/* each timer list has its own semaphore */
386
-	/*
387
-	hash_table->timers[timerlist_id].mutex.semaphore_set=timer_semaphore;
388
-	hash_table->timers[timerlist_id].mutex.semaphore_index=timer_group[timerlist_id];
389
-	*/
390
-
391
-	hash_table->timers[timerlist_id].mutex=&(timer_group_lock[ timer_group[timerlist_id] ]);
392
-	return 0;
393
-}
394
-
395 383
 
396 384
 
397 385
 int release_cell_lock( struct cell *cell )
... ...
@@ -420,3 +408,10 @@ int release_timerlist_lock( struct timer *timerlist )
420 420
 	/* the same as above */
421 421
 	return 0;
422 422
 }
423
+
424
+int init_timerlist_lock( enum lists timerlist_id)
425
+{
426
+	get_timertable()->timers[timerlist_id].mutex=
427
+		&(timer_group_lock[ timer_group[timerlist_id] ]);
428
+	return 0;
429
+}
... ...
@@ -66,8 +66,7 @@ static int init_semaphore_set( int size );
66 66
 
67 67
 
68 68
 int init_cell_lock( struct cell *cell );
69
-int init_entry_lock( struct s_table* hash_table, struct entry *entry );
70
-int init_timerlist_lock( struct s_table* hash_table, enum lists timerlist_id);
69
+int init_entry_lock( struct s_table* ht, struct entry *entry );
71 70
 
72 71
 
73 72
 int release_cell_lock( struct cell *cell );
... ...
@@ -119,6 +118,8 @@ static inline int _unlock( ser_lock_t* s )
119 119
 #endif
120 120
 }
121 121
 
122
+int init_timerlist_lock(  enum lists timerlist_id);
123
+
122 124
 
123 125
 #endif
124 126
 
... ...
@@ -16,10 +16,6 @@
16 16
 #include "t_lookup.h"
17 17
 #include "config.h"
18 18
 
19
-/* pointer to the big table where all the transaction data
20
-   lives
21
-*/
22
-struct s_table*  hash_table;
23 19
 
24 20
 /* ----------------------------------------------------- */
25 21
 
... ...
@@ -39,65 +35,25 @@ int send_pr_buffer( struct retr_buf *rb,
39 39
 void start_retr( struct retr_buf *rb )
40 40
 {
41 41
 	rb->retr_list=RT_T1_TO_1;
42
-	set_timer( hash_table, &rb->retr_timer, RT_T1_TO_1 );
43
-	set_timer( hash_table, &rb->fr_timer, FR_TIMER_LIST );
42
+	set_timer( &rb->retr_timer, RT_T1_TO_1 );
43
+	set_timer( &rb->fr_timer, FR_TIMER_LIST );
44 44
 }
45 45
 
46
-int tm_startup()
47
-{
48
-	/* building the hash table*/
49
-	hash_table = init_hash_table();
50
-	if (!hash_table)
51
-		return -1;
52
-
53
-	/* init. timer lists */
54
-	hash_table->timers[RT_T1_TO_1].id = RT_T1_TO_1;
55
-	hash_table->timers[RT_T1_TO_2].id = RT_T1_TO_2;
56
-	hash_table->timers[RT_T1_TO_3].id = RT_T1_TO_3;
57
-	hash_table->timers[RT_T2].id      = RT_T2;
58
-	hash_table->timers[FR_TIMER_LIST].id     = FR_TIMER_LIST;
59
-	hash_table->timers[FR_INV_TIMER_LIST].id = FR_INV_TIMER_LIST;
60
-	hash_table->timers[WT_TIMER_LIST].id     = WT_TIMER_LIST;
61
-	hash_table->timers[DELETE_LIST].id       = DELETE_LIST;
62
-
63
-
64
-	/* fork table */
65
-	/* nr_forks = 0; */	
66
-
67
-	/* init static hidden values */
68
-	init_t();
69
-
70
-	return 0;
71
-}
72 46
 
73 47
 
74 48
 
75 49
 
76 50
 void tm_shutdown()
77 51
 {
78
-	struct timer_link  *tl, *end, *tmp;
79
-	int i;
80 52
 
81 53
 	DBG("DEBUG: tm_shutdown : start\n");
82
-	/* remember the DELETE LIST */
83
-	tl = hash_table->timers[DELETE_LIST].first_tl.next_tl;
84
-	end = & hash_table->timers[DELETE_LIST].last_tl;
85
-	/* unlink the timer lists */
86
-	for( i=0; i<NR_OF_TIMER_LISTS ; i++ )
87
-		reset_timer_list( hash_table, i );
88
-
89
-	DBG("DEBUG: tm_shutdown : empting DELETE list\n");
90
-	/* deletes all cells from DELETE_LIST list
91
-	(they are no more accessible from enrys) */
92
-	while (tl!=end) {
93
-		tmp=tl->next_tl;
94
-		free_cell((struct cell*)tl->payload);
95
-		tl=tmp;
96
-	}
54
+	unlink_timer_lists();
97 55
 
98 56
 	/* destroy the hash table */
99 57
 	DBG("DEBUG: tm_shutdown : empting hash table\n");
100
-	free_hash_table( hash_table );
58
+	free_hash_table( );
59
+	DBG("DEBUG: tm_shutdown: releasing timers\n");
60
+	free_timer_table();
101 61
 	DBG("DEBUG: tm_shutdown : removing semaphores\n");
102 62
 	lock_cleanup();
103 63
 	DBG("DEBUG: tm_shutdown : done\n");
... ...
@@ -110,8 +66,8 @@ int t_release_transaction( struct cell *trans )
110 110
 {
111 111
 	trans->kr|=REQ_RLSD;
112 112
 
113
-	reset_timer( hash_table, & trans->uas.response.fr_timer );
114
-	reset_timer( hash_table, & trans->uas.response.retr_timer );
113
+	reset_timer( & trans->uas.response.fr_timer );
114
+	reset_timer( & trans->uas.response.retr_timer );
115 115
 
116 116
 	cleanup_uac_timers( trans );
117 117
 	
... ...
@@ -161,7 +117,7 @@ void put_on_wait(  struct cell  *Trans  )
161 161
 		4.									WAIT timer executed,
162 162
 											transaction deleted
163 163
 	*/
164
-	set_1timer( hash_table, &(Trans->wait_tl), WT_TIMER_LIST );
164
+	set_1timer( &Trans->wait_tl, WT_TIMER_LIST );
165 165
 }
166 166
 
167 167
 
... ...
@@ -35,26 +35,9 @@ struct timer;
35 35
 struct entry;
36 36
 struct cell;
37 37
 
38
-extern struct s_table*  hash_table;
39 38
 extern int noisy_ctimer;
40 39
 
41 40
 
42
-#define LOCK_HASH(_h) lock(&(hash_table->entrys[(_h)].mutex))
43
-#define UNLOCK_HASH(_h) unlock(&(hash_table->entrys[(_h)].mutex))
44
-
45
-#ifdef _OBSOLETED
46
-#define LOCK_ACK(_t) lock(&(_t)->ack_mutex )
47
-#define UNLOCK_ACK(_t) unlock(&(_t)->ack_mutex )
48
-#endif
49
-
50
-#ifdef _XWAIT
51
-	#define LOCK_WAIT(_t) lock(&(_t)->wait_mutex )
52
-	#define UNLOCK_WAIT(_t) unlock(&(_t)->wait_mutex )
53
-#else
54
-	#define LOCK_WAIT(_t)
55
-	#define UNLOCK_WAIT(_t)
56
-#endif
57
-
58 41
 /* send a private buffer: utilize a retransmission structure
59 42
    but take a separate buffer not refered by it; healthy
60 43
    for reducing time spend in REPLIES locks
... ...
@@ -109,8 +92,6 @@ int get_ip_and_port_from_uri( str* uri , unsigned int *param_ip,
109 109
 	unsigned int *param_port);
110 110
 
111 111
 
112
-void timer_routine(unsigned int, void*);
113
-
114 112
 int t_newtran( struct sip_msg* p_msg );
115 113
 
116 114
 void put_on_wait(  struct cell  *Trans  );
... ...
@@ -317,7 +317,7 @@ int t_forward_nonack( struct cell *t, struct sip_msg* p_msg ,
317 317
 	t->kr|=REQ_FWDED;
318 318
 
319 319
 	if (p_msg->REQ_METHOD==METHOD_CANCEL) {
320
-		t_invite=t_lookupOriginalT( hash_table, p_msg );
320
+		t_invite=t_lookupOriginalT(  p_msg );
321 321
 		if (t_invite!=T_NULL) {
322 322
 			e2e_cancel( p_msg, t, t_invite );
323 323
 			UNREF(t_invite);
... ...
@@ -129,7 +129,7 @@ int t_lookup_request( struct sip_msg* p_msg , int leave_new_locked )
129 129
 	LOCK_HASH(p_msg->hash_index);
130 130
 
131 131
 	/* all the transactions from the entry are compared */
132
-	for ( p_cell = hash_table->entrys[p_msg->hash_index].first_cell;
132
+	for ( p_cell = get_tm_table()->entrys[p_msg->hash_index].first_cell;
133 133
 		  p_cell; p_cell = p_cell->next_cell ) 
134 134
 	{
135 135
 		t_msg = p_cell->uas.request;
... ...
@@ -239,8 +239,7 @@ found:
239 239
  *       0 - transaction wasn't found
240 240
  *       T - transaction found
241 241
  */
242
-struct cell* t_lookupOriginalT(  struct s_table* hash_table ,
243
-	struct sip_msg* p_msg )
242
+struct cell* t_lookupOriginalT(  struct sip_msg* p_msg )
244 243
 {
245 244
 	struct cell     *p_cell;
246 245
 	unsigned int     hash_index;
... ...
@@ -253,7 +252,7 @@ struct cell* t_lookupOriginalT(  struct s_table* hash_table ,
253 253
 	DBG("DEBUG: t_lookupOriginalT: searching on hash entry %d\n",hash_index );
254 254
 
255 255
 	/* all the transactions from the entry are compared */
256
-	for (p_cell=hash_table->entrys[hash_index].first_cell;
256
+	for (p_cell=get_tm_table()->entrys[hash_index].first_cell;
257 257
 		p_cell; p_cell = p_cell->next_cell )
258 258
 	{
259 259
 		t_msg = p_cell->uas.request;
... ...
@@ -407,7 +406,7 @@ int t_reply_matching( struct sip_msg *p_msg , int *p_branch )
407 407
 	is_cancel=cseq_method.len==CANCEL_LEN 
408 408
 		&& memcmp(cseq_method.s, CANCEL, CANCEL_LEN)==0;
409 409
 	LOCK_HASH(hash_index);
410
-	for (p_cell = hash_table->entrys[hash_index].first_cell; p_cell; 
410
+	for (p_cell = get_tm_table()->entrys[hash_index].first_cell; p_cell; 
411 411
 		p_cell=p_cell->next_cell) {
412 412
 
413 413
 		/* first look if branch matches */
... ...
@@ -631,7 +630,7 @@ int t_newtran( struct sip_msg* p_msg )
631 631
 				LOG(L_ERR, "ERROR: t_addifnew: out of mem:\n");
632 632
 				ret = E_OUT_OF_MEM;
633 633
 			} else {
634
-				insert_into_hash_table_unsafe( hash_table , new_cell );
634
+				insert_into_hash_table_unsafe( new_cell );
635 635
 				T=new_cell;
636 636
 				INIT_REF_UNSAFE(T);
637 637
 				/* init pointers to headers needed to construct local
... ...
@@ -20,8 +20,7 @@ extern unsigned int     global_msg_id;
20 20
 
21 21
 void init_t();
22 22
 int init_rb( struct retr_buf *rb, struct sip_msg *msg );
23
-struct cell* t_lookupOriginalT(  struct s_table* hash_table,
24
-	struct sip_msg* p_msg );
23
+struct cell* t_lookupOriginalT( struct sip_msg* p_msg );
25 24
 int t_reply_matching( struct sip_msg* , int* );
26 25
 int t_lookup_request( struct sip_msg* p_msg , int leave_new_locked );
27 26
 int t_newtran( struct sip_msg* p_msg );
... ...
@@ -198,7 +198,7 @@ static int _reply( struct cell *trans, struct sip_msg* p_msg,
198 198
 	if (code>=200) {
199 199
 		cleanup_uac_timers( trans );
200 200
 		if (trans->is_invite) cancel_uacs( trans, cancel_bitmap );
201
-		set_final_timer( /* hash_table, */ trans );
201
+		set_final_timer(  trans );
202 202
 	}
203 203
 
204 204
 	/* send it out */
... ...
@@ -244,8 +244,8 @@ void cleanup_uac_timers( struct cell *t )
244 244
 
245 245
 	/* reset FR/retransmission timers */
246 246
 	for (i=0; i<t->nr_of_outgoings; i++ )  {
247
-		reset_timer( hash_table, &t->uac[i].request.retr_timer );
248
-		reset_timer( hash_table, &t->uac[i].request.fr_timer );
247
+		reset_timer( &t->uac[i].request.retr_timer );
248
+		reset_timer( &t->uac[i].request.fr_timer );
249 249
 	}
250 250
 	DBG("DEBUG: cleanup_uacs: RETR/FR timers reset\n");
251 251
 }
... ...
@@ -538,9 +538,9 @@ int t_on_reply( struct sip_msg  *p_msg )
538 538
 		/* .. which is not e2e ? ... */
539 539
 		&& t->is_invite ) {
540 540
 			/* ... then just stop timers */
541
-			reset_timer( hash_table, &uac->local_cancel.retr_timer);
541
+			reset_timer( &uac->local_cancel.retr_timer);
542 542
 			if ( msg_status >= 200 )
543
-				reset_timer( hash_table, &uac->local_cancel.fr_timer);
543
+				reset_timer( &uac->local_cancel.fr_timer);
544 544
 			DBG("DEBUG: reply to local CANCEL processed\n");
545 545
 			goto done;
546 546
 	}
... ...
@@ -548,10 +548,10 @@ int t_on_reply( struct sip_msg  *p_msg )
548 548
 
549 549
 	/* *** stop timers *** */
550 550
 	/* stop retransmission */
551
-	reset_timer( hash_table, &uac->request.retr_timer);
551
+	reset_timer( &uac->request.retr_timer);
552 552
 	/* stop final response timer only if I got a final response */
553 553
 	if ( msg_status >= 200 )
554
-		reset_timer( hash_table, &uac->request.fr_timer);
554
+		reset_timer( &uac->request.fr_timer);
555 555
 
556 556
 	LOCK_REPLIES( t );
557 557
 	if (t->local) {
... ...
@@ -581,7 +581,7 @@ int t_on_reply( struct sip_msg  *p_msg )
581 581
 		cleanup_uac_timers( t );	
582 582
 		if (t->is_invite) cancel_uacs( t, cancel_bitmap );
583 583
 		/* FR for negative INVITES, WAIT anything else */
584
-		set_final_timer( /* hash_table,*/ t );
584
+		set_final_timer(  t );
585 585
 	} 
586 586
 
587 587
 	/* update FR/RETR timers on provisional replies */
... ...
@@ -590,13 +590,12 @@ int t_on_reply( struct sip_msg  *p_msg )
590 590
 			/* invite: change FR to longer FR_INV, do not
591 591
 			   attempt to restart retransmission any more
592 592
 			*/
593
-			set_timer( hash_table, & uac->request.fr_timer,
593
+			set_timer( & uac->request.fr_timer,
594 594
 				FR_INV_TIMER_LIST );
595 595
 		} else {
596 596
 			/* non-invite: restart retransmisssions (slow now) */
597 597
 			uac->request.retr_list=RT_T2;
598
-			set_timer( hash_table, 
599
-				& uac->request.retr_timer, RT_T2 );
598
+			set_timer(  & uac->request.retr_timer, RT_T2 );
600 599
 		}
601 600
 	} /* provisional replies */
602 601
 
603 602
deleted file mode 100644
... ...
@@ -1,442 +0,0 @@
1
-/*
2
- * $Id$
3
- *
4
- * Timer handlers
5
- */
6
-
7
-#include "../../hash_func.h"
8
-#include "../../dprint.h"
9
-#include "../../config.h"
10
-#include "../../parser/parser_f.h"
11
-#include "../../ut.h"
12
-#include "t_funcs.h"
13
-#include "t_reply.h"
14
-#include "t_cancel.h"
15
-
16
-int noisy_ctimer=0;
17
-
18
-static void unlink_timers( struct cell *t )
19
-{
20
-	int i;
21
-	int remove_fr, remove_retr;
22
-
23
-	remove_fr=0; remove_retr=0;
24
-
25
-	/* first look if we need to remove timers and play with
26
-	   costly locks at all
27
-
28
-	    note that is_in_timer_list2 is unsafe but it does not
29
-	    hurt -- transaction is already dead (wait state) so that
30
-	    noone else will install a FR/RETR timer and it can only
31
-	    be removed from timer process itself -> it is safe to
32
-	    use it without any protection
33
-	*/
34
-	if (is_in_timer_list2(&t->uas.response.fr_timer)) remove_fr=1; 
35
-	else for (i=0; i<t->nr_of_outgoings; i++)
36
-		if (is_in_timer_list2(&t->uac[i].request.fr_timer)
37
-			|| is_in_timer_list2(&t->uac[i].local_cancel.fr_timer)) {
38
-				remove_fr=1;
39
-				break;
40
-		}
41
-	if (is_in_timer_list2(&t->uas.response.retr_timer)) remove_retr=1; 
42
-	else for (i=0; i<t->nr_of_outgoings; i++)
43
-		if (is_in_timer_list2(&t->uac[i].request.retr_timer)
44
-			|| is_in_timer_list2(&t->uac[i].local_cancel.retr_timer)) {
45
-				remove_retr=1;
46
-				break;
47
-		}
48
-
49
-	/* do what we have to do....*/
50
-	if (remove_retr) {
51
-		/* RT_T1 lock is shared by all other RT timer
52
-		   lists -- we can safely lock just one
53
-		*/
54
-		lock(hash_table->timers[RT_T1_TO_1].mutex);
55
-		remove_timer_unsafe(&t->uas.response.retr_timer);
56
-		for (i=0; i<t->nr_of_outgoings; i++) {
57
-			remove_timer_unsafe(&t->uac[i].request.retr_timer);
58
-			remove_timer_unsafe(&t->uac[i].local_cancel.retr_timer);
59
-		}
60
-		unlock(hash_table->timers[RT_T1_TO_1].mutex);
61
-	}
62
-	if (remove_fr) {
63
-		/* FR lock is shared by all other FR timer
64
-		   lists -- we can safely lock just one
65
-		*/
66
-		lock(hash_table->timers[FR_TIMER_LIST].mutex);
67
-		remove_timer_unsafe(&t->uas.response.fr_timer);
68
-		for (i=0; i<t->nr_of_outgoings; i++) {
69
-			remove_timer_unsafe(&t->uac[i].request.fr_timer);
70
-			remove_timer_unsafe(&t->uac[i].local_cancel.fr_timer);
71
-		}
72
-		unlock(hash_table->timers[FR_TIMER_LIST].mutex);
73
-	}
74
-}
75
-
76
-/* delete_cell attempt to delete a transaction of not refered
77
-   by any process; if so, it is put on a delete timer which will
78
-   try the same later; it assumes it is safe to read ref_count --
79
-   either the hash entry is locked or the transaction has been
80
-   removed from hash table (i.e., other processes can only
81
-   decrease ref_count)
82
-
83
-   it is static as it is safe to be called only from WAIT/DELETE
84
-   timers, the only valid place from which a transaction can be
85
-   removed
86
-*/
87
-
88
-static void delete_cell( struct cell *p_cell, int unlock )
89
-{
90
-
91
-	int i;
92
-
93
-	/* there may still be FR/RETR timers, which have been reset
94
-	   (i.e., time_out==TIMER_DELETED) but are stilled linked to
95
-	   timer lists and must be removed from there before the
96
-	   structures are released
97
-	*/
98
-	unlink_timers( p_cell );
99
-
100
-#ifdef EXTRA_DEBUG
101
-
102
-	if (is_in_timer_list2(& p_cell->wait_tl )) {
103
-		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
104
-			" still on WAIT, timeout=%d\n", p_cell, p_cell->wait_tl.time_out);
105
-		abort();
106
-	}
107
-	if (is_in_timer_list2(& p_cell->uas.response.retr_timer )) {
108
-		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
109
-			" still on RETR (rep), timeout=%d\n",
110
-			p_cell, p_cell->uas.response.retr_timer.time_out);
111
-		abort();
112
-	}
113
-	if (is_in_timer_list2(& p_cell->uas.response.fr_timer )) {
114
-		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
115
-			" still on FR (rep), timeout=%d\n", p_cell,
116
-			p_cell->uas.response.fr_timer.time_out);
117
-		abort();
118
-	}
119
-	for (i=0; i<p_cell->nr_of_outgoings; i++) {
120
-		if (is_in_timer_list2(& p_cell->uac[i].request.retr_timer)) {
121
-			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
122
-				" still on RETR (req %d), timeout %d\n", p_cell, i,
123
-				p_cell->uac[i].request.retr_timer.time_out);
124
-			abort();
125
-		}
126
-		if (is_in_timer_list2(& p_cell->uac[i].request.fr_timer)) {
127
-			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
128
-				" still on FR (req %d), timeout %d\n", p_cell, i,
129
-				p_cell->uac[i].request.fr_timer.time_out);
130
-			abort();
131
-		}
132
-		if (is_in_timer_list2(& p_cell->uac[i].local_cancel.retr_timer)) {
133
-			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
134
-				" still on RETR/cancel (req %d), timeout %d\n", p_cell, i,
135
-				p_cell->uac[i].request.retr_timer.time_out);
136
-			abort();
137
-		}
138
-		if (is_in_timer_list2(& p_cell->uac[i].local_cancel.fr_timer)) {
139
-			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
140
-				" still on FR/cancel (req %d), timeout %d\n", p_cell, i,
141
-				p_cell->uac[i].request.fr_timer.time_out);
142
-			abort();
143
-		}
144
-	}
145
-	/* reset_retr_timers( hash_table, p_cell ); */
146
-#endif
147
-	/* still in use ... don't delete */
148
-	if ( IS_REFFED_UNSAFE(p_cell) ) {
149
-		if (unlock) UNLOCK_HASH(p_cell->hash_index);
150
-		DBG("DEBUG: delete_cell %p: can't delete -- still reffed\n",
151
-			p_cell);
152
-		/* it's added to del list for future del */
153
-		set_timer( hash_table, &(p_cell->dele_tl), DELETE_LIST );
154
-	} else {
155
-		if (unlock) UNLOCK_HASH(p_cell->hash_index);
156
-		DBG("DEBUG: delete transaction %p\n", p_cell );
157
-		free_cell( p_cell );
158
-	}
159
-}
160
-
161
-
162
-
163
-inline void retransmission_handler( void *attr)
164
-{
165
-	struct retr_buf* r_buf ;
166
-	enum lists id;
167
-
168
-	r_buf = (struct retr_buf*)attr;
169
-#ifdef EXTRA_DEBUG
170
-	if (r_buf->my_T->damocles) {
171
-		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
172
-			" called from RETR timer\n",r_buf->my_T);
173
-		abort();
174
-	}	
175
-#endif
176
-
177
-	/*the transaction is already removed from RETRANSMISSION_LIST by timer*/
178
-	/* retransmision */
179
-	if ( r_buf->activ_type==TYPE_LOCAL_CANCEL 
180
-		|| r_buf->activ_type==0 ) {
181
-			SEND_BUFFER( r_buf );
182
-			DBG("DEBUG: retransmission_handler : "
183
-				"request resending (t=%p, %.9s ... )\n", 
184
-				r_buf->my_T, r_buf->buffer);
185
-	} else {
186
-			DBG("DEBUG: retransmission_handler : "
187
-				"reply resending (t=%p, %.9s ... )\n", 
188
-				r_buf->my_T, r_buf->buffer);
189
-			t_retransmit_reply(r_buf->my_T);
190
-	}
191
-
192
-	id = r_buf->retr_list;
193
-	r_buf->retr_list = id < RT_T2 ? id + 1 : RT_T2;
194
-
195
-	set_timer(hash_table,&(r_buf->retr_timer),id < RT_T2 ? id + 1 : RT_T2 );
196
-
197
-	DBG("DEBUG: retransmission_handler : done\n");
198
-}
199
-
200
-
201
-
202
-
203
-inline void final_response_handler( void *attr)
204
-{
205
-	int silent;
206
-	struct retr_buf* r_buf;
207
-	enum rps reply_status;
208
-	struct cell *t;
209
-	branch_bm_t cancel_bitmap;
210
-	short do_cancel_branch;
211
-
212
-	r_buf = (struct retr_buf*)attr;
213
-	t=r_buf->my_T;
214
-
215
-#	ifdef EXTRA_DEBUG
216
-	if (t->damocles) 
217
-	{
218
-		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
219
-			" called from FR timer\n",r_buf->my_T);
220
-		abort();
221
-	}
222
-#	endif
223
-
224
-	reset_timer( hash_table , &(r_buf->retr_timer) );
225
-
226
-	/* the transaction is already removed from FR_LIST by the timer */
227
-
228
-	/* FR for local cancels.... */
229
-	if (r_buf->activ_type==TYPE_LOCAL_CANCEL)
230
-	{
231
-		DBG("DEBUG: FR_handler: stop retr for Local Cancel\n");
232
-		return;
233
-	}
234
-
235
-	/* FR for replies (negative INVITE replies) */
236
-	if (r_buf->activ_type>0) {
237
-#		ifdef EXTRA_DEBUG
238
-		if (t->uas.request->REQ_METHOD!=METHOD_INVITE
239
-			|| t->uas.status < 300 ) {
240
-			LOG(L_ERR, "ERROR: FR timer: uknown type reply buffer\n");
241
-			abort();
242
-		}
243
-#		endif
244
-		put_on_wait( t );
245
-		return;
246
-	};
247
-
248
-	/* lock reply processing to determine how to proceed reliably */
249
-	LOCK_REPLIES( t );
250
-	/* now it can be only a request retransmission buffer;
251
-	   try if you can simply discard the local transaction 
252
-	   state without compellingly removing it from the
253
-	   world */
254
-	silent=
255
-		/* not for UACs */
256
-		!t->local
257
-		/* invites only */
258
-		&& t->is_invite
259
-		/* parallel forking does not allow silent state discarding */
260
-		&& t->nr_of_outgoings==1
261
-		/* on_no_reply handler not installed -- serial forking could occur 
262
-		   otherwise */
263
-		&& t->on_negative==0
264
-		/* something received -- we will not be silent on error */
265
-		&& t->uac[r_buf->branch].last_received>0
266
-		/* don't go silent if disallowed globally ... */
267
-		&& noisy_ctimer==0
268
-		/* ... or for this particular transaction */
269
-		&& t->noisy_ctimer==0;
270
-	if (silent) {
271
-		UNLOCK_REPLIES(t);
272
-		DBG("DEBUG: FR_handler: transaction silently dropped (%p)\n",t);
273
-		put_on_wait( t );
274
-		return;
275
-	}
276
-
277
-	DBG("DEBUG: FR_handler:stop retr. and send CANCEL (%p)\n", t);
278
-	do_cancel_branch=t->is_invite && 
279
-		should_cancel_branch(t, r_buf->branch);
280
-
281
-#ifdef _OBSOLETED
282
-	/* set global environment for currently processed transaction */
283
-	T=t;
284
-	global_msg_id=T->uas.request->id;
285
-#endif 
286
-
287
-	cancel_bitmap=do_cancel_branch ? 1<<r_buf->branch : 0;
288
-	if (t->local) {
289
-		reply_status=local_reply( t, FAKED_REPLY, r_buf->branch, 
290
-			408, &cancel_bitmap );
291
-	} else {
292
-		reply_status=relay_reply( t, FAKED_REPLY, r_buf->branch, 408, 
293
-			&cancel_bitmap );
294
-	}
295
-	/* now when out-of-lock do the cancel I/O */
296
-	if (do_cancel_branch) cancel_branch(t, r_buf->branch );
297
-	/* it's cleaned up on error; if no error occured and transaction
298
-	   completed regularly, I have to clean-up myself
299
-	*/
300
-	if (reply_status==RPS_COMPLETED) {
301
-		/* don't need to cleanup uac_timers -- they were cleaned
302
-		   branch by branch and this last branch's timers are
303
-		   reset now too
304
-		*/
305
-		/* don't need to issue cancels -- local cancels have been
306
-		   issued branch by branch and this last branch was
307
-		   cancelled now too
308
-		*/
309
-		/* then the only thing to do now is to put the transaction
310
-		   on FR/wait state 
311
-		*/
312
-		set_final_timer( /* hash_table, */ t );
313
-	}
314
-	DBG("DEBUG: final_response_handler : done\n");
315
-}
316
-
317
-void cleanup_localcancel_timers( struct cell *t )
318
-{
319
-	int i;
320
-	for (i=0; i<t->nr_of_outgoings; i++ )  {
321
-		reset_timer( hash_table, &t->uac[i].local_cancel.retr_timer );
322
-		reset_timer( hash_table, &t->uac[i].local_cancel.fr_timer );
323
-	}
324
-}
325
-
326
-
327
-inline void wait_handler( void *attr)
328
-{
329
-	struct cell *p_cell = (struct cell*)attr;
330
-
331
-#ifdef EXTRA_DEBUG
332
-	if (p_cell->damocles) {
333
-		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
334
-			" called from WAIT timer\n",p_cell);
335
-		abort();
336
-	}	
337
-	DBG("DEBUG: ---------- WAIT timer hit ------- \n");
338
-#endif
339
-
340
-	/* stop cancel timers if any running */
341
-	if (p_cell->is_invite) cleanup_localcancel_timers( p_cell );
342
-
343
-	/* the transaction is already removed from WT_LIST by the timer */
344
-	/* remove the cell from the hash table */
345
-	DBG("DEBUG: wait_handler : removing %p from table \n", p_cell );
346
-	LOCK_HASH( p_cell->hash_index );
347
-	remove_from_hash_table_unsafe( hash_table, p_cell );
348
-	/* jku: no more here -- we do it when we put a transaction on wait */
349
-#ifdef EXTRA_DEBUG
350
-	p_cell->damocles = 1;
351
-#endif
352
-	/* delete (returns with UNLOCK-ed_HASH) */
353
-	delete_cell( p_cell, 1 /* unlock on return */ );
354
-	DBG("DEBUG: wait_handler : done\n");
355
-}
356
-
357
-
358
-
359
-
360
-inline void delete_handler( void *attr)
361
-{
362
-	struct cell *p_cell = (struct cell*)attr;
363
-
364
-	DBG("DEBUG: delete_handler : removing %p \n", p_cell );
365
-#ifdef EXTRA_DEBUG
366
-	if (p_cell->damocles==0) {
367
-		LOG( L_ERR, "ERROR: transaction %p not scheduled for deletion"
368
-			" and called from DELETE timer\n",p_cell);
369
-		abort();
370
-	}	
371
-#endif
372
-
373
-	/* we call delete now without any locking on hash/ref_count;
374
-	   we can do that because delete_handler is only entered after
375
-	   the delete timer was installed from wait_handler, which
376
-	   removed transaction from hash table and did not destroy it
377
-	   because some processes were using it; that means that the
378
-	   processes currently using the transaction can unref and no
379
-	   new processes can ref -- we can wait until ref_count is
380
-	   zero safely without locking
381
-	*/
382
-	delete_cell( p_cell, 0 /* don't unlock on return */ );
383
-    DBG("DEBUG: delete_handler : done\n");
384
-}
385
-
386
-
387
-
388
-
389
-#define run_handler_for_each( _tl , _handler ) \
390
-	while ((_tl))\
391
-	{\
392
-		/* reset the timer list linkage */\
393
-		tmp_tl = (_tl)->next_tl;\
394
-		(_tl)->next_tl = (_tl)->prev_tl = 0;\
395
-		DBG("DEBUG: timer routine:%d,tl=%p next=%p\n",\
396
-			id,(_tl),tmp_tl);\
397
-		if ((_tl)->time_out>TIMER_DELETED) \
398
-			(_handler)( (_tl)->payload );\
399
-		(_tl) = tmp_tl;\
400
-	}
401
-
402
-
403
-
404
-
405
-void timer_routine(unsigned int ticks , void * attr)
406
-{
407
-	struct s_table    *hash_table = (struct s_table *)attr;
408
-	struct timer_link *tl, *tmp_tl;
409
-	int                id;
410
-
411
-#ifdef BOGDAN_TRIFLE
412
-	DBG(" %d \n",ticks);
413
-#endif
414
-
415
-	for( id=0 ; id<NR_OF_TIMER_LISTS ; id++ )
416
-	{
417
-		/* to waste as little time in lock as possible, detach list
418
-		   with expired items and process them after leaving the lock */
419
-		tl=check_and_split_time_list( &(hash_table->timers[ id ]), ticks);
420
-		/* process items now */
421
-		switch (id)
422
-		{
423
-			case FR_TIMER_LIST:
424
-			case FR_INV_TIMER_LIST:
425
-				run_handler_for_each(tl,final_response_handler);
426
-				break;
427
-			case RT_T1_TO_1:
428
-			case RT_T1_TO_2:
429
-			case RT_T1_TO_3:
430
-			case RT_T2:
431
-				run_handler_for_each(tl,retransmission_handler);
432
-				break;
433
-			case WT_TIMER_LIST:
434
-				run_handler_for_each(tl,wait_handler);
435
-				break;
436
-			case DELETE_LIST:
437
-				run_handler_for_each(tl,delete_handler);
438
-				break;
439
-		}
440
-	}
441
-}
442
-
... ...
@@ -75,9 +75,23 @@
75 75
 #include "timer.h"
76 76
 #include "../../dprint.h"
77 77
 #include "lock.h"
78
-
79 78
 #include "t_stats.h"
80 79
 
80
+#include "../../hash_func.h"
81
+#include "../../dprint.h"
82
+#include "../../config.h"
83
+#include "../../parser/parser_f.h"
84
+#include "../../ut.h"
85
+#include "t_funcs.h"
86
+#include "t_reply.h"
87
+#include "t_cancel.h"
88
+
89
+
90
+static struct timer_table *timertable;
91
+
92
+int noisy_ctimer=0;
93
+
94
+
81 95
 int timer_group[NR_OF_TIMER_LISTS] = 
82 96
 {
83 97
 	TG_FR, TG_FR,
... ...
@@ -101,33 +115,410 @@ unsigned int timer_id2timeout[NR_OF_TIMER_LISTS] = {
101 101
 						/* NR_OF_TIMER_LISTS */
102 102
 };
103 103
 
104
+/******************** handlers ***************************/
105
+
106
+
107
+
108
+static void delete_cell( struct cell *p_cell, int unlock )
109
+{
110
+
111
+	int i;
112
+
113
+	/* there may still be FR/RETR timers, which have been reset
114
+	   (i.e., time_out==TIMER_DELETED) but are stilled linked to
115
+	   timer lists and must be removed from there before the
116
+	   structures are released
117
+	*/
118
+	unlink_timers( p_cell );
119
+
120
+#ifdef EXTRA_DEBUG
121
+
122
+	if (is_in_timer_list2(& p_cell->wait_tl )) {
123
+		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
124
+			" still on WAIT, timeout=%d\n", p_cell, p_cell->wait_tl.time_out);
125
+		abort();
126
+	}
127
+	if (is_in_timer_list2(& p_cell->uas.response.retr_timer )) {
128
+		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
129
+			" still on RETR (rep), timeout=%d\n",
130
+			p_cell, p_cell->uas.response.retr_timer.time_out);
131
+		abort();
132
+	}
133
+	if (is_in_timer_list2(& p_cell->uas.response.fr_timer )) {
134
+		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
135
+			" still on FR (rep), timeout=%d\n", p_cell,
136
+			p_cell->uas.response.fr_timer.time_out);
137
+		abort();
138
+	}
139
+	for (i=0; i<p_cell->nr_of_outgoings; i++) {
140
+		if (is_in_timer_list2(& p_cell->uac[i].request.retr_timer)) {
141
+			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
142
+				" still on RETR (req %d), timeout %d\n", p_cell, i,
143
+				p_cell->uac[i].request.retr_timer.time_out);
144
+			abort();
145
+		}
146
+		if (is_in_timer_list2(& p_cell->uac[i].request.fr_timer)) {
147
+			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
148
+				" still on FR (req %d), timeout %d\n", p_cell, i,
149
+				p_cell->uac[i].request.fr_timer.time_out);
150
+			abort();
151
+		}
152
+		if (is_in_timer_list2(& p_cell->uac[i].local_cancel.retr_timer)) {
153
+			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
154
+				" still on RETR/cancel (req %d), timeout %d\n", p_cell, i,
155
+				p_cell->uac[i].request.retr_timer.time_out);
156
+			abort();
157
+		}
158
+		if (is_in_timer_list2(& p_cell->uac[i].local_cancel.fr_timer)) {
159
+			LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
160
+				" still on FR/cancel (req %d), timeout %d\n", p_cell, i,
161
+				p_cell->uac[i].request.fr_timer.time_out);
162
+			abort();
163
+		}
164
+	}
165
+	/* reset_retr_timers( hash__XX_table, p_cell ); */
166
+#endif
167
+	/* still in use ... don't delete */
168
+	if ( IS_REFFED_UNSAFE(p_cell) ) {
169
+		if (unlock) UNLOCK_HASH(p_cell->hash_index);
170
+		DBG("DEBUG: delete_cell %p: can't delete -- still reffed\n",
171
+			p_cell);
172
+		/* it's added to del list for future del */
173
+		set_timer( &(p_cell->dele_tl), DELETE_LIST );
174
+	} else {
175
+		if (unlock) UNLOCK_HASH(p_cell->hash_index);
176
+		DBG("DEBUG: delete transaction %p\n", p_cell );
177
+		free_cell( p_cell );
178
+	}
179
+}
180
+
181
+
182
+
183
+inline void retransmission_handler( void *attr)
184
+{
185
+	struct retr_buf* r_buf ;
186
+	enum lists id;
187
+
188
+	r_buf = (struct retr_buf*)attr;
189
+#ifdef EXTRA_DEBUG
190
+	if (r_buf->my_T->damocles) {
191
+		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
192
+			" called from RETR timer\n",r_buf->my_T);
193
+		abort();
194
+	}	
195
+#endif
196
+
197
+	/*the transaction is already removed from RETRANSMISSION_LIST by timer*/
198
+	/* retransmision */
199
+	if ( r_buf->activ_type==TYPE_LOCAL_CANCEL 
200
+		|| r_buf->activ_type==0 ) {
201
+			SEND_BUFFER( r_buf );
202
+			DBG("DEBUG: retransmission_handler : "
203
+				"request resending (t=%p, %.9s ... )\n", 
204
+				r_buf->my_T, r_buf->buffer);
205
+	} else {
206
+			DBG("DEBUG: retransmission_handler : "
207
+				"reply resending (t=%p, %.9s ... )\n", 
208
+				r_buf->my_T, r_buf->buffer);
209
+			t_retransmit_reply(r_buf->my_T);
210
+	}
211
+
212
+	id = r_buf->retr_list;
213
+	r_buf->retr_list = id < RT_T2 ? id + 1 : RT_T2;
214
+
215
+	set_timer(&(r_buf->retr_timer),id < RT_T2 ? id + 1 : RT_T2 );
216
+
217
+	DBG("DEBUG: retransmission_handler : done\n");
218
+}
219
+
220
+
221
+
222
+
223
+inline void final_response_handler( void *attr)
224
+{
225
+	int silent;
226
+	struct retr_buf* r_buf;
227
+	enum rps reply_status;
228
+	struct cell *t;
229
+	branch_bm_t cancel_bitmap;
230
+	short do_cancel_branch;
231
+
232
+	r_buf = (struct retr_buf*)attr;
233
+	t=r_buf->my_T;
234
+
235
+#	ifdef EXTRA_DEBUG
236
+	if (t->damocles) 
237
+	{
238
+		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
239
+			" called from FR timer\n",r_buf->my_T);
240
+		abort();
241
+	}
242
+#	endif
243
+
244
+	reset_timer(  &(r_buf->retr_timer) );
245
+
246
+	/* the transaction is already removed from FR_LIST by the timer */
247
+
248
+	/* FR for local cancels.... */
249
+	if (r_buf->activ_type==TYPE_LOCAL_CANCEL)
250
+	{
251
+		DBG("DEBUG: FR_handler: stop retr for Local Cancel\n");
252
+		return;
253
+	}
254
+
255
+	/* FR for replies (negative INVITE replies) */
256
+	if (r_buf->activ_type>0) {
257
+#		ifdef EXTRA_DEBUG
258
+		if (t->uas.request->REQ_METHOD!=METHOD_INVITE
259
+			|| t->uas.status < 300 ) {
260
+			LOG(L_ERR, "ERROR: FR timer: uknown type reply buffer\n");
261
+			abort();
262
+		}
263
+#		endif
264
+		put_on_wait( t );
265
+		return;
266
+	};
267
+
268
+	/* lock reply processing to determine how to proceed reliably */
269
+	LOCK_REPLIES( t );
270
+	/* now it can be only a request retransmission buffer;
271
+	   try if you can simply discard the local transaction 
272
+	   state without compellingly removing it from the
273
+	   world */
274
+	silent=
275
+		/* not for UACs */
276
+		!t->local
277
+		/* invites only */
278
+		&& t->is_invite
279
+		/* parallel forking does not allow silent state discarding */
280
+		&& t->nr_of_outgoings==1
281
+		/* on_no_reply handler not installed -- serial forking could occur 
282
+		   otherwise */
283
+		&& t->on_negative==0
284
+		/* something received -- we will not be silent on error */
285
+		&& t->uac[r_buf->branch].last_received>0
286
+		/* don't go silent if disallowed globally ... */
287
+		&& noisy_ctimer==0
288
+		/* ... or for this particular transaction */
289
+		&& t->noisy_ctimer==0;
290
+	if (silent) {
291
+		UNLOCK_REPLIES(t);
292
+		DBG("DEBUG: FR_handler: transaction silently dropped (%p)\n",t);
293
+		put_on_wait( t );
294
+		return;
295
+	}
296
+
297
+	DBG("DEBUG: FR_handler:stop retr. and send CANCEL (%p)\n", t);
298
+	do_cancel_branch=t->is_invite && 
299
+		should_cancel_branch(t, r_buf->branch);
300
+
301
+#ifdef _OBSOLETED
302
+	/* set global environment for currently processed transaction */
303
+	T=t;
304
+	global_msg_id=T->uas.request->id;
305
+#endif 
306
+
307
+	cancel_bitmap=do_cancel_branch ? 1<<r_buf->branch : 0;
308
+	if (t->local) {
309
+		reply_status=local_reply( t, FAKED_REPLY, r_buf->branch, 
310
+			408, &cancel_bitmap );
311
+	} else {
312
+		reply_status=relay_reply( t, FAKED_REPLY, r_buf->branch, 408, 
313
+			&cancel_bitmap );
314
+	}
315
+	/* now when out-of-lock do the cancel I/O */
316
+	if (do_cancel_branch) cancel_branch(t, r_buf->branch );
317
+	/* it's cleaned up on error; if no error occured and transaction
318
+	   completed regularly, I have to clean-up myself
319
+	*/
320
+	if (reply_status==RPS_COMPLETED) {
321
+		/* don't need to cleanup uac_timers -- they were cleaned
322
+		   branch by branch and this last branch's timers are
323
+		   reset now too
324
+		*/
325
+		/* don't need to issue cancels -- local cancels have been
326
+		   issued branch by branch and this last branch was
327
+		   cancelled now too
328
+		*/
329
+		/* then the only thing to do now is to put the transaction
330
+		   on FR/wait state 
331
+		*/
332
+		set_final_timer(  t );
333
+	}
334
+	DBG("DEBUG: final_response_handler : done\n");
335
+}
336
+
337
+void cleanup_localcancel_timers( struct cell *t )
338
+{
339
+	int i;
340
+	for (i=0; i<t->nr_of_outgoings; i++ )  {
341
+		reset_timer(  &t->uac[i].local_cancel.retr_timer );
342
+		reset_timer(  &t->uac[i].local_cancel.fr_timer );
343
+	}
344
+}
345
+
346
+
347
+inline void wait_handler( void *attr)
348
+{
349
+	struct cell *p_cell = (struct cell*)attr;
350
+
351
+#ifdef EXTRA_DEBUG
352
+	if (p_cell->damocles) {
353
+		LOG( L_ERR, "ERROR: transaction %p scheduled for deletion and"
354
+			" called from WAIT timer\n",p_cell);
355
+		abort();
356
+	}	
357
+	DBG("DEBUG: ---------- WAIT timer hit ------- \n");
358
+#endif
359
+
360
+	/* stop cancel timers if any running */
361
+	if (p_cell->is_invite) cleanup_localcancel_timers( p_cell );
362
+
363
+	/* the transaction is already removed from WT_LIST by the timer */
364
+	/* remove the cell from the hash table */
365
+	DBG("DEBUG: wait_handler : removing %p from table \n", p_cell );
366
+	LOCK_HASH( p_cell->hash_index );
367
+	remove_from_hash_table_unsafe(  p_cell );
368
+	/* jku: no more here -- we do it when we put a transaction on wait */
369
+#ifdef EXTRA_DEBUG
370
+	p_cell->damocles = 1;
371
+#endif
372
+	/* delete (returns with UNLOCK-ed_HASH) */
373
+	delete_cell( p_cell, 1 /* unlock on return */ );
374
+	DBG("DEBUG: wait_handler : done\n");
375
+}
376
+
377
+
378
+
379
+
380
+inline void delete_handler( void *attr)
381
+{
382
+	struct cell *p_cell = (struct cell*)attr;
383
+
384
+	DBG("DEBUG: delete_handler : removing %p \n", p_cell );
385
+#ifdef EXTRA_DEBUG
386
+	if (p_cell->damocles==0) {
387
+		LOG( L_ERR, "ERROR: transaction %p not scheduled for deletion"
388
+			" and called from DELETE timer\n",p_cell);
389
+		abort();
390
+	}	
391
+#endif
392
+
393
+	/* we call delete now without any locking on hash/ref_count;
394
+	   we can do that because delete_handler is only entered after
395
+	   the delete timer was installed from wait_handler, which
396
+	   removed transaction from hash table and did not destroy it
397
+	   because some processes were using it; that means that the
398
+	   processes currently using the transaction can unref and no
399
+	   new processes can ref -- we can wait until ref_count is
400
+	   zero safely without locking
401
+	*/
402
+	delete_cell( p_cell, 0 /* don't unlock on return */ );
403
+    DBG("DEBUG: delete_handler : done\n");
404
+}
405
+
406
+
407
+/***********************************************************/
408
+
409
+struct timer_table *get_timertable()
410
+{
411
+	return timertable;
412
+}
413
+
414
+
415
+void unlink_timer_lists()
416
+{
417
+	struct timer_link  *tl, *end, *tmp;
418
+	enum lists i;
419
+
420
+	/* remember the DELETE LIST */
421
+	tl = timertable->timers[DELETE_LIST].first_tl.next_tl;
422
+	end = & timertable->timers[DELETE_LIST].last_tl;
423
+	/* unlink the timer lists */
424
+	for( i=0; i<NR_OF_TIMER_LISTS ; i++ )
425
+		reset_timer_list( i );
426
+	DBG("DEBUG: tm_shutdown : empting DELETE list\n");
427
+	/* deletes all cells from DELETE_LIST list 
428
+	   (they are no more accessible from enrys) */
429
+	while (tl!=end) {
430
+		tmp=tl->next_tl;
431
+		free_cell((struct cell*)tl->payload);
432
+		tl=tmp;
433
+	}
434
+	
435
+}
436
+
437
+struct timer_table *tm_init_timers()
438
+{
439
+	enum lists i;
440
+
441
+	timertable=(struct timer_table *) shm_malloc(sizeof(struct timer_table));
442
+	if (!timertable) {
443
+		LOG(L_ERR, "ERROR: tm_init_timers: no shmem for timer_Table\n");
444
+		goto error0;
445
+	}
446
+	memset(timertable, 0, sizeof (struct timer_table));
447
+		
448
+
449
+	/* inits the timers*/
450
+	for(  i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
451
+        init_timer_list( i );
452
+    
453
+    /* init. timer lists */
454
+	timertable->timers[RT_T1_TO_1].id = RT_T1_TO_1;
455
+	timertable->timers[RT_T1_TO_2].id = RT_T1_TO_2;
456
+	timertable->timers[RT_T1_TO_3].id = RT_T1_TO_3;
457
+	timertable->timers[RT_T2].id      = RT_T2;
458
+	timertable->timers[FR_TIMER_LIST].id     = FR_TIMER_LIST; 
459
+	timertable->timers[FR_INV_TIMER_LIST].id = FR_INV_TIMER_LIST;
460
+	timertable->timers[WT_TIMER_LIST].id     = WT_TIMER_LIST;
461
+	timertable->timers[DELETE_LIST].id       = DELETE_LIST;
462
+
463
+	return timertable;
464
+
465
+error0:
466
+	return 0;
467
+}
468
+
469
+void free_timer_table()
470
+{
471
+	enum lists i;
472
+
473
+	if (timertable) {
474
+		/* the mutexs for sync the lists are released*/
475
+		for ( i=0 ; i<NR_OF_TIMER_LISTS ; i++ )
476
+			release_timerlist_lock( &timertable->timers[i] );
477
+		shm_free(timertable);
478
+	}
479
+		
480
+}
104 481
 
105
-void reset_timer_list( struct s_table* hash_table, enum lists list_id)
482
+void reset_timer_list( enum lists list_id)
106 483
 {
107
-	hash_table->timers[list_id].first_tl.next_tl =
108
-		&(hash_table->timers[list_id].last_tl );
109
-	hash_table->timers[list_id].last_tl.prev_tl =
110
-		&(hash_table->timers[list_id].first_tl );
111
-	hash_table->timers[list_id].first_tl.prev_tl =
112
-		hash_table->timers[list_id].last_tl.next_tl = NULL;
113
-	hash_table->timers[list_id].last_tl.time_out = -1;
484