
tm: keep internal retr. intervals in ms

When the retransmission intervals are kept in ticks, any rounding
error introduced when converting from ms to ticks grows with each
doubling of the retransmission interval. In the standard
configuration this means that the last retransmission before t2
kicks in has an error up to 4 times bigger than the initial one
(the rounding error is at most 1 tick / 62.5 ms for the initial
interval => up to 250 ms for the last retransmission before t2).

Keeping the retransmission intervals in ms instead of ticks avoids
this problem: the timing error is always < 1 tick (62.5 ms) plus the
system timer error (usually < 10 ms).
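
For illustration, a minimal standalone sketch (not part of the patch;
the 16 ticks/s rate, the simplified truncating MS_TO_TICKS()/TICKS_TO_MS()
macros and the 530 ms start value are assumptions chosen for the example)
comparing a tick-quantized interval that is doubled directly with an
interval kept in ms and converted only when the timer is armed:

#include <stdio.h>

#define TICKS_HZ         16                        /* assumed 62.5 ms per tick */
#define MS_TO_TICKS(ms)  ((ms) * TICKS_HZ / 1000)  /* simplified, truncating */
#define TICKS_TO_MS(t)   ((t) * 1000 / TICKS_HZ)

int main(void)
{
	unsigned intended_ms = 530;  /* hypothetical t1 that does not fall on a tick */
	unsigned t2_ms = 4000;       /* t2 caps the doubling */
	unsigned ticks = MS_TO_TICKS(intended_ms);  /* old scheme: convert once, keep ticks */

	while (intended_ms <= t2_ms) {
		unsigned old_err = intended_ms - TICKS_TO_MS(ticks);
		unsigned new_err = intended_ms - TICKS_TO_MS(MS_TO_TICKS(intended_ms));
		printf("intended %4u ms: tick-based error %3u ms, ms-based error %3u ms\n",
				intended_ms, old_err, new_err);
		ticks <<= 1;        /* old scheme doubles the already-rounded tick value */
		intended_ms <<= 1;  /* exact doubling; ms-based error stays < 1 tick */
	}
	return 0;
}

With these numbers the tick-based error reaches 120 ms by the third
retransmission, while the per-arm ms-based conversion keeps the error
under one tick at every step.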

Andrei Pelinescu-Onciul authored on 11/08/2011 14:58:12
Showing 9 changed files
... ...
@@ -52,8 +52,8 @@ struct cfg_group_tm	default_tm_cfg = {
 	INV_FR_TIME_OUT_NEXT, /* fr_inv_timeout_next */
 	WT_TIME_OUT,	/* wait_timeout */
 	DEL_TIME_OUT,	/* delete_timeout */
-	RETR_T1,	/* rt_t1_timeout */
-	RETR_T2,	/* rt_t2_timeout */
+	RETR_T1,	/* rt_t1_timeout_ms */
+	RETR_T2,	/* rt_t2_timeout_ms */
 
 	/* maximum time an invite or noninv transaction will live, from
 	 * the moment of creation (overrides larger fr/fr_inv timeouts,
... ...
@@ -122,9 +122,9 @@ cfg_def_t	tm_cfg_def[] = {
 	{"delete_timer",	CFG_VAR_INT | CFG_ATOMIC,	0, 0, timer_fixup, 0,
 		"time after which a to-be-deleted transaction currently "
 		"ref-ed by a process will be tried to be deleted again."},
-	{"retr_timer1",		CFG_VAR_INT | CFG_ATOMIC,	0, 0, timer_fixup, 0,
+	{"retr_timer1",		CFG_VAR_INT | CFG_ATOMIC,	0, 0, timer_fixup_ms, 0,
 		"initial retransmission period (in milliseconds)"},
-	{"retr_timer2",		CFG_VAR_INT | CFG_ATOMIC,	0, 0, timer_fixup, 0,
+	{"retr_timer2",		CFG_VAR_INT | CFG_ATOMIC,	0, 0, timer_fixup_ms, 0,
 		"maximum retransmission period (in milliseconds)"},
 	{"max_inv_lifetime",	CFG_VAR_INT | CFG_ATOMIC,	0, 0, timer_fixup, 0,
 		"maximum time an invite transaction can live "
... ...
@@ -109,8 +109,8 @@ struct cfg_group_tm {
 	unsigned int    fr_inv_timeout_next;
 	unsigned int	wait_timeout;
 	unsigned int	delete_timeout;
-	unsigned int	rt_t1_timeout;
-	unsigned int	rt_t2_timeout;
+	unsigned int	rt_t1_timeout_ms;
+	unsigned int	rt_t2_timeout_ms;
 	unsigned int	tm_max_inv_lifetime;
 	unsigned int	tm_max_noninv_lifetime;
 	int	noisy_ctimer;
... ...
@@ -295,9 +295,8 @@ struct totag_elem {
 #	define pass_provisional(_t_)	((_t_)->flags&T_PASS_PROVISIONAL_FLAG)
 #endif
 
-/* unsigned short should be enough for a retr. timer: max. 65535 ticks =>
- * max  retr. = 1023 s for tick = 15 ms, which should be more then enough and
- * saves us 2*2 bytes */
+/* unsigned short should be enough for a retr. timer: max. 65535 ms =>
+ * max retr. = 65 s which should be enough and saves us 2*2 bytes */
 typedef unsigned short retr_timeout_t;
 
 
... ...
@@ -406,8 +405,8 @@ typedef struct cell
 	ticks_t fr_timeout;     /* final response interval for retr_bufs */
 	ticks_t fr_inv_timeout; /* final inv. response interval for retr_bufs */
 #ifdef TM_DIFF_RT_TIMEOUT
-	retr_timeout_t rt_t1_timeout; /* start retr. interval for retr_bufs */
-	retr_timeout_t rt_t2_timeout; /* maximum retr. interval for retr_bufs */
+	retr_timeout_t rt_t1_timeout_ms; /* start retr. interval for retr_bufs */
+	retr_timeout_t rt_t2_timeout_ms; /* maximum retr. interval for retr_bufs */
 #endif
 	ticks_t end_of_life; /* maximum lifetime */
 
... ...
@@ -169,19 +169,21 @@ int fr_inv_avp2timer(unsigned int* timer);
 #ifdef TIMER_DEBUG
 #define start_retr(rb) \
 	_set_fr_retr((rb), \
-				((rb)->dst.proto==PROTO_UDP)?RT_T1_TIMEOUT(rb):(ticks_t)(-1), \
+				((rb)->dst.proto==PROTO_UDP) ? RT_T1_TIMEOUT_MS(rb) : \
+												(unsigned)(-1), \
 				__FILE__, __FUNCTION__, __LINE__)
 
 #define force_retr(rb) \
-	_set_fr_retr((rb), RT_T1_TIMEOUT(rb), __FILE__, __FUNCTION__, __LINE__)
+	_set_fr_retr((rb), RT_T1_TIMEOUT_MS(rb), __FILE__, __FUNCTION__, __LINE__)
 
 #else
 #define start_retr(rb) \
 	_set_fr_retr((rb), \
-				((rb)->dst.proto==PROTO_UDP)?RT_T1_TIMEOUT(rb):(ticks_t)(-1))
+				((rb)->dst.proto==PROTO_UDP) ? RT_T1_TIMEOUT_MS(rb) : \
+												(unsigned)(-1))
 
 #define force_retr(rb) \
-	_set_fr_retr((rb), RT_T1_TIMEOUT(rb))
+	_set_fr_retr((rb), RT_T1_TIMEOUT_MS(rb))
 
 #endif
 
... ...
@@ -1294,14 +1294,16 @@ static inline void init_new_t(struct cell *new_cell, struct sip_msg *p_msg)
 		}
 	}
 #ifdef TM_DIFF_RT_TIMEOUT
-	new_cell->rt_t1_timeout=(ticks_t)get_msgid_val(user_rt_t1_timeout,
-												p_msg->id, int);
-	if (likely(new_cell->rt_t1_timeout==0))
-		new_cell->rt_t1_timeout=cfg_get(tm, tm_cfg, rt_t1_timeout);
-	new_cell->rt_t2_timeout=(ticks_t)get_msgid_val(user_rt_t2_timeout,
-												p_msg->id, int);
-	if (likely(new_cell->rt_t2_timeout==0))
-		new_cell->rt_t2_timeout=cfg_get(tm, tm_cfg, rt_t2_timeout);
+	new_cell->rt_t1_timeout_ms = (retr_timeout_t) get_msgid_val(
+														user_rt_t1_timeout_ms,
+														p_msg->id, int);
+	if (likely(new_cell->rt_t1_timeout_ms == 0))
+		new_cell->rt_t1_timeout_ms = cfg_get(tm, tm_cfg, rt_t1_timeout_ms);
+	new_cell->rt_t2_timeout_ms = (retr_timeout_t) get_msgid_val(
+														user_rt_t2_timeout_ms,
+														p_msg->id, int);
+	if (likely(new_cell->rt_t2_timeout_ms == 0))
+		new_cell->rt_t2_timeout_ms = cfg_get(tm, tm_cfg, rt_t2_timeout_ms);
 #endif
 	new_cell->on_branch=get_on_branch();
 }
... ...
@@ -1813,30 +1815,30 @@ int t_reset_fr()
 
 /* params: retr. t1 & retr. t2 value in ms, 0 means "do not touch"
  * ret: 1 on success, -1 on error (script safe)*/
-int t_set_retr(struct sip_msg* msg, unsigned int t1_to, unsigned int t2_to)
+int t_set_retr(struct sip_msg* msg, unsigned int t1_ms, unsigned int t2_ms)
 {
 	struct cell *t;
 	ticks_t retr_t1, retr_t2;
 	
 	
-	retr_t1=MS_TO_TICKS((ticks_t)t1_to);
-	if (unlikely((retr_t1==0) && (t1_to!=0))){
-		ERR("t_set_retr: retr. t1 interval too small (%u)\n", t1_to);
+	retr_t1=MS_TO_TICKS((ticks_t)t1_ms);
+	if (unlikely((retr_t1==0) && (t1_ms!=0))){
+		ERR("t_set_retr: retr. t1 interval too small (%u)\n", t1_ms);
 		return -1;
 	}
-	if (unlikely(MAX_UVAR_VALUE(t->rt_t1_timeout) < retr_t1)){
+	if (unlikely(MAX_UVAR_VALUE(t->rt_t1_timeout_ms) < t1_ms)){
 		ERR("t_set_retr: retr. t1 interval too big: %d (max %lu)\n",
-				t1_to, TICKS_TO_MS(MAX_UVAR_VALUE(t->rt_t1_timeout))); 
+				t1_ms, MAX_UVAR_VALUE(t->rt_t1_timeout_ms)); 
 		return -1;
 	} 
-	retr_t2=MS_TO_TICKS((ticks_t)t2_to);
-	if (unlikely((retr_t2==0) && (t2_to!=0))){
-		ERR("t_set_retr: retr. t2 interval too small (%d)\n", t2_to);
+	retr_t2=MS_TO_TICKS((ticks_t)t2_ms);
+	if (unlikely((retr_t2==0) && (t2_ms!=0))){
+		ERR("t_set_retr: retr. t2 interval too small (%d)\n", t2_ms);
 		return -1;
 	}
-	if (unlikely(MAX_UVAR_VALUE(t->rt_t2_timeout) < retr_t2)){
+	if (unlikely(MAX_UVAR_VALUE(t->rt_t2_timeout_ms) < t2_ms)){
 		ERR("t_set_retr: retr. t2 interval too big: %u (max %lu)\n",
-				t2_to, TICKS_TO_MS(MAX_UVAR_VALUE(t->rt_t2_timeout))); 
+				t2_ms, MAX_UVAR_VALUE(t->rt_t2_timeout_ms)); 
 		return -1;
 	} 
 	
... ...
@@ -1845,10 +1847,10 @@ int t_set_retr(struct sip_msg* msg, unsigned int t1_to, unsigned int t2_to)
 	 * in REQUEST_ROUTE T will be set only if the transaction was already
 	 * created; if not -> use the static variables */
 	if (!t || t==T_UNDEFINED ){
-		set_msgid_val(user_rt_t1_timeout, msg->id, int, (int)retr_t1);
-		set_msgid_val(user_rt_t2_timeout, msg->id, int, (int)retr_t2);
+		set_msgid_val(user_rt_t1_timeout_ms, msg->id, int, (int)t1_ms);
+		set_msgid_val(user_rt_t2_timeout_ms, msg->id, int, (int)t2_ms);
 	}else{
-		change_retr(t, 1, retr_t1, retr_t2); /* change running uac timers */
+		change_retr(t, 1, t1_ms, t2_ms); /* change running uac timers */
 	}
 	return 1;
 }
... ...
@@ -1863,13 +1865,14 @@ int t_reset_retr()
 	 * in REQUEST_ROUTE T will be set only if the transaction was already
 	 * created; if not -> use the static variables */
 	if (!t || t==T_UNDEFINED ){
-		memset(&user_rt_t1_timeout, 0, sizeof(user_rt_t1_timeout));
-		memset(&user_rt_t2_timeout, 0, sizeof(user_rt_t2_timeout));
+		memset(&user_rt_t1_timeout_ms, 0, sizeof(user_rt_t1_timeout_ms));
+		memset(&user_rt_t2_timeout_ms, 0, sizeof(user_rt_t2_timeout_ms));
 	}else{
+		 /* change running uac timers */
 		change_retr(t,
 			1,
-			cfg_get(tm, tm_cfg, rt_t1_timeout),
-			cfg_get(tm, tm_cfg, rt_t2_timeout)); /* change running uac timers */
+			cfg_get(tm, tm_cfg, rt_t1_timeout_ms),
+			cfg_get(tm, tm_cfg, rt_t2_timeout_ms));
 	}
 	return 1;
 }
... ...
@@ -150,8 +150,8 @@
 struct msgid_var user_fr_timeout;
 struct msgid_var user_fr_inv_timeout;
 #ifdef TM_DIFF_RT_TIMEOUT
-struct msgid_var user_rt_t1_timeout;
-struct msgid_var user_rt_t2_timeout;
+struct msgid_var user_rt_t1_timeout_ms;
+struct msgid_var user_rt_t2_timeout_ms;
 #endif
 struct msgid_var user_inv_max_lifetime;
 struct msgid_var user_noninv_max_lifetime;
... ...
@@ -185,8 +185,6 @@ int tm_init_timers(void)
 	default_tm_cfg.fr_inv_timeout=MS_TO_TICKS(default_tm_cfg.fr_inv_timeout);
 	default_tm_cfg.wait_timeout=MS_TO_TICKS(default_tm_cfg.wait_timeout);
 	default_tm_cfg.delete_timeout=MS_TO_TICKS(default_tm_cfg.delete_timeout);
-	default_tm_cfg.rt_t1_timeout=MS_TO_TICKS(default_tm_cfg.rt_t1_timeout);
-	default_tm_cfg.rt_t2_timeout=MS_TO_TICKS(default_tm_cfg.rt_t2_timeout);
 	default_tm_cfg.tm_max_inv_lifetime=MS_TO_TICKS(default_tm_cfg.tm_max_inv_lifetime);
 	default_tm_cfg.tm_max_noninv_lifetime=MS_TO_TICKS(default_tm_cfg.tm_max_noninv_lifetime);
 	/* fix 0 values to 1 tick (minimum possible wait time ) */
... ...
@@ -194,8 +192,8 @@ int tm_init_timers(void)
 	if (default_tm_cfg.fr_inv_timeout==0) default_tm_cfg.fr_inv_timeout=1;
 	if (default_tm_cfg.wait_timeout==0) default_tm_cfg.wait_timeout=1;
 	if (default_tm_cfg.delete_timeout==0) default_tm_cfg.delete_timeout=1;
-	if (default_tm_cfg.rt_t2_timeout==0) default_tm_cfg.rt_t2_timeout=1;
-	if (default_tm_cfg.rt_t1_timeout==0) default_tm_cfg.rt_t1_timeout=1;
+	if (default_tm_cfg.rt_t2_timeout_ms==0) default_tm_cfg.rt_t2_timeout_ms=1;
+	if (default_tm_cfg.rt_t1_timeout_ms==0) default_tm_cfg.rt_t1_timeout_ms=1;
 	if (default_tm_cfg.tm_max_inv_lifetime==0) default_tm_cfg.tm_max_inv_lifetime=1;
 	if (default_tm_cfg.tm_max_noninv_lifetime==0) default_tm_cfg.tm_max_noninv_lifetime=1;
 	
... ...
@@ -203,8 +201,10 @@ int tm_init_timers(void)
 	SIZE_FIT_CHECK(fr_timeout, default_tm_cfg.fr_timeout, "fr_timer");
 	SIZE_FIT_CHECK(fr_inv_timeout, default_tm_cfg.fr_inv_timeout, "fr_inv_timer");
 #ifdef TM_DIFF_RT_TIMEOUT
-	SIZE_FIT_CHECK(rt_t1_timeout, default_tm_cfg.rt_t1_timeout, "retr_timer1");
-	SIZE_FIT_CHECK(rt_t2_timeout, default_tm_cfg.rt_t2_timeout, "retr_timer2");
+	SIZE_FIT_CHECK(rt_t1_timeout_ms, default_tm_cfg.rt_t1_timeout_ms,
+					"retr_timer1");
+	SIZE_FIT_CHECK(rt_t2_timeout_ms, default_tm_cfg.rt_t2_timeout_ms,
+					"retr_timer2");
 #endif
 	SIZE_FIT_CHECK(end_of_life, default_tm_cfg.tm_max_inv_lifetime, "max_inv_lifetime");
 	SIZE_FIT_CHECK(end_of_life, default_tm_cfg.tm_max_noninv_lifetime, "max_noninv_lifetime");
... ...
@@ -212,8 +212,8 @@ int tm_init_timers(void)
 	memset(&user_fr_timeout, 0, sizeof(user_fr_timeout));
 	memset(&user_fr_inv_timeout, 0, sizeof(user_fr_inv_timeout));
 #ifdef TM_DIFF_RT_TIMEOUT
-	memset(&user_rt_t1_timeout, 0, sizeof(user_rt_t1_timeout));
-	memset(&user_rt_t2_timeout, 0, sizeof(user_rt_t2_timeout));
+	memset(&user_rt_t1_timeout_ms, 0, sizeof(user_rt_t1_timeout_ms));
+	memset(&user_rt_t2_timeout_ms, 0, sizeof(user_rt_t2_timeout_ms));
 #endif
 	memset(&user_inv_max_lifetime, 0, sizeof(user_inv_max_lifetime));
 	memset(&user_noninv_max_lifetime, 0, sizeof(user_noninv_max_lifetime));
... ...
@@ -222,7 +222,7 @@ int tm_init_timers(void)
 			" max_inv_lifetime=%d max_noninv_lifetime=%d\n",
 			default_tm_cfg.fr_timeout, default_tm_cfg.fr_inv_timeout,
 			default_tm_cfg.wait_timeout, default_tm_cfg.delete_timeout,
-			default_tm_cfg.rt_t1_timeout, default_tm_cfg.rt_t2_timeout,
+			default_tm_cfg.rt_t1_timeout_ms, default_tm_cfg.rt_t2_timeout_ms,
 			default_tm_cfg.tm_max_inv_lifetime, default_tm_cfg.tm_max_noninv_lifetime);
 	return 0;
 error:
... ...
@@ -263,10 +263,6 @@ int timer_fixup(void *handle, str *gname, str *name, void **val)
 	/* size fix checks */
 	IF_IS_TIMER_NAME(fr_timeout, "fr_timer")
 	else IF_IS_TIMER_NAME(fr_inv_timeout, "fr_inv_timer")
-#ifdef TM_DIFF_RT_TIMEOUT
-	else IF_IS_TIMER_NAME(rt_t1_timeout, "retr_timer1")
-	else IF_IS_TIMER_NAME(rt_t2_timeout, "retr_timer2")
-#endif
 	else IF_IS_TIMER_NAME(end_of_life, "max_inv_lifetime")
 	else IF_IS_TIMER_NAME(end_of_life, "max_noninv_lifetime")
 
... ...
@@ -277,6 +273,30 @@ error:
 	return -1;
 }
 
+
+
+/** fixup function for timer values that are kept in ms.
+ * (called by the configuration framework)
+ * It checks if the value fits in the tm structures 
+ */
+int timer_fixup_ms(void *handle, str *gname, str *name, void **val)
+{
+	long	t;
+
+	t = (long)(*val);
+
+	/* size fix checks */
+#ifdef TM_DIFF_RT_TIMEOUT
+	IF_IS_TIMER_NAME(rt_t1_timeout_ms, "retr_timer1")
+	else IF_IS_TIMER_NAME(rt_t2_timeout_ms, "retr_timer2")
+#endif
+
+	return 0;
+
+error:
+	return -1;
+}
+
 /******************** handlers ***************************/
 
 
... ...
@@ -528,7 +548,8 @@ ticks_t retr_buf_handler(ticks_t ticks, struct timer_ln* tl, void *p)
 	ticks_t fr_remainder;
 	ticks_t retr_remainder;
 	ticks_t retr_interval;
-	ticks_t new_retr_interval;
+	unsigned long new_retr_interval_ms;
+	unsigned long crt_retr_interval_ms;
 	struct cell *t;
 
 	rbuf=(struct  retr_buf*)
... ...
@@ -569,28 +590,20 @@ ticks_t retr_buf_handler(ticks_t ticks, struct timer_ln* tl, void *p)
 			if ((s_ticks_t)(rbuf->retr_expire-ticks)<=0){
 				if (rbuf->flags & F_RB_RETR_DISABLED)
 					goto disabled;
-				/* retr_interval= min (2*ri, rt_t2) , *p==2*ri*/
-				/* no branch version: 
-					#idef CC_SIGNED_RIGHT_SHIFT
-						ri=  rt_t2+((2*ri-rt_t2) & 
-						((signed)(2*ri-rt_t2)>>(sizeof(ticks_t)*8-1));
-					#else
-						ri=rt_t2+((2*ri-rt_t2)& -(2*ri<rt_t2));
-					#endif
-				*/
-				
+				crt_retr_interval_ms = (unsigned long)p;
 				/* get the  current interval from timer param. */
-				if ((rbuf->flags & F_RB_T2) || 
-						(((ticks_t)(unsigned long)p)>RT_T2_TIMEOUT(rbuf))){
-					retr_interval=RT_T2_TIMEOUT(rbuf);
-					new_retr_interval=RT_T2_TIMEOUT(rbuf);
+				if (unlikely((rbuf->flags & F_RB_T2) ||
+						(crt_retr_interval_ms > RT_T2_TIMEOUT_MS(rbuf)))){
+					retr_interval = MS_TO_TICKS(RT_T2_TIMEOUT_MS(rbuf));
+					new_retr_interval_ms = RT_T2_TIMEOUT_MS(rbuf);
 				}else{
-					retr_interval=(ticks_t)(unsigned long)p;
-					new_retr_interval=retr_interval<<1;
+					retr_interval = MS_TO_TICKS(crt_retr_interval_ms);
+					new_retr_interval_ms=crt_retr_interval_ms<<1;
 				}
 #ifdef TIMER_DEBUG
-				DBG("tm: timer: retr: new interval %d (max %d)\n", 
-						retr_interval, RT_T2_TIMEOUT(rbuf));
+				DBG("tm: timer: retr: new interval %ld ms / %d ticks"
+						" (max %d ms)\n", new_retr_interval_ms, retr_interval,
+						RT_T2_TIMEOUT_MS(rbuf));
 #endif
 				/* we could race with the reply_received code, but the 
 				 * worst thing that can happen is to delay a reset_to_t2
... ...
@@ -598,9 +611,9 @@ ticks_t retr_buf_handler(ticks_t ticks, struct timer_ln* tl, void *p)
 				rbuf->retr_expire=ticks+retr_interval;
 				/* set new interval to -1 on error, or retr_int. on success */
 				retr_remainder=retransmission_handler(rbuf) | retr_interval;
-				/* store the next retr. interval inside the timer struct,
+				/* store the next retr. interval in ms inside the timer struct,
 				 * in the data member */
-				tl->data=(void*)(unsigned long)(new_retr_interval);
+				tl->data=(void*)(new_retr_interval_ms);
 			}else{
 				retr_remainder= rbuf->retr_expire-ticks;
 				DBG("tm: timer: retr: nothing to do, expire in %d\n", 
... ...
@@ -126,11 +126,11 @@
 
 
 #ifdef  TM_DIFF_RT_TIMEOUT
-#define RT_T1_TIMEOUT(rb)	((rb)->my_T->rt_t1_timeout)
-#define RT_T2_TIMEOUT(rb)	((rb)->my_T->rt_t2_timeout)
+#define RT_T1_TIMEOUT_MS(rb)	((rb)->my_T->rt_t1_timeout_ms)
+#define RT_T2_TIMEOUT_MS(rb)	((rb)->my_T->rt_t2_timeout_ms)
 #else
-#define RT_T1_TIMEOUT(rb)	(cfg_get(tm, tm_cfg, rt_t1_timeout))
-#define RT_T2_TIMEOUT(rb)	(cfg_get(tm, tm_cfg, rt_t2_timeout))
+#define RT_T1_TIMEOUT_MS(rb)	(cfg_get(tm, tm_cfg, rt_t1_timeout_ms))
+#define RT_T2_TIMEOUT_MS(rb)	(cfg_get(tm, tm_cfg, rt_t2_timeout_ms))
 #endif
 
 #define TM_REQ_TIMEOUT(t) \
... ...
@@ -142,8 +142,8 @@
 extern struct msgid_var user_fr_timeout;
 extern struct msgid_var user_fr_inv_timeout;
 #ifdef TM_DIFF_RT_TIMEOUT
-extern struct msgid_var user_rt_t1_timeout;
-extern struct msgid_var user_rt_t2_timeout;
+extern struct msgid_var user_rt_t1_timeout_ms;
+extern struct msgid_var user_rt_t2_timeout_ms;
 #endif
 extern struct msgid_var user_inv_max_lifetime;
 extern struct msgid_var user_noninv_max_lifetime;
... ...
@@ -166,6 +166,7 @@ extern int tm_init_timers(void);
  * \return 0 on success, -1 on error
  */
 int timer_fixup(void *handle, str *gname, str *name, void **val);
+int timer_fixup_ms(void *handle, str *gname, str *name, void **val);
 
 ticks_t wait_handler(ticks_t t, struct timer_ln *tl, void* data);
 ticks_t retr_buf_handler(ticks_t t, struct timer_ln *tl, void* data);
... ...
@@ -176,7 +177,7 @@ ticks_t retr_buf_handler(ticks_t t, struct timer_ln *tl, void* data);
 
 #define init_rb_timers(rb) \
 	timer_init(&(rb)->timer, retr_buf_handler, \
-				(void*)(unsigned long)RT_T1_TIMEOUT(rb), 0)
+				(void*)(unsigned long)(RT_T1_TIMEOUT_MS(rb)), 0)
 
 /* set fr & retr timer
  * rb  -  pointer to struct retr_buf
... ...
@@ -184,23 +185,26 @@ ticks_t retr_buf_handler(ticks_t t, struct timer_ln *tl, void* data);
  * returns: -1 on error, 0 on success
  */
 #ifdef TIMER_DEBUG
-inline static int _set_fr_retr(struct retr_buf* rb, ticks_t retr,
+inline static int _set_fr_retr(struct retr_buf* rb, unsigned retr_ms,
 								const char* file, const char* func,
 								unsigned line)
 #else
-inline static int _set_fr_retr(struct retr_buf* rb, ticks_t retr)
+inline static int _set_fr_retr(struct retr_buf* rb, unsigned retr_ms)
 #endif
 {
 	ticks_t timeout;
 	ticks_t ticks;
 	ticks_t eol;
+	ticks_t retr_ticks;
 	int ret;
 	
 	ticks=get_ticks_raw();
 	timeout=rb->my_T->fr_timeout;
 	eol=rb->my_T->end_of_life;
-	rb->timer.data=(void*)(unsigned long)(2*retr); /* hack , next retr. int. */
-	rb->retr_expire=ticks+retr;
+	/* hack , next retr. int. */
+	retr_ticks = MS_TO_TICKS(retr_ms);
+	rb->timer.data=(void*)(unsigned long)(2*retr_ms);
+	rb->retr_expire=ticks + retr_ticks;
 	if (unlikely(rb->t_active)){
 		/* we could have set_fr_retr called in the same time (acceptable 
 		 * race), we rely on timer_add adding it only once */
... ...
@@ -211,11 +215,11 @@ inline static int _set_fr_retr(struct retr_buf* rb, ticks_t retr)
 		LOG(L_CRIT, "WARNING: -_set_fr_timer- already added: %p , tl=%p!!!\n",
 					rb, &rb->timer);
 	}
-	/* set active & if retr==-1 set disabled */
-	rb->flags|= (F_RB_RETR_DISABLED & -(retr==-1)); 
+	/* set active & if retr_ms==-1 set disabled */
+	rb->flags|= (F_RB_RETR_DISABLED & -(retr_ms==(unsigned)-1));
 #ifdef TM_FAST_RETR_TIMER
-	/* set timer to fast if retr enabled (retr!=-1) */
-	rb->timer.flags|=(F_TIMER_FAST & -(retr!=-1));
+	/* set timer to fast if retr enabled (retr_ms!=-1) */
+	rb->timer.flags|=(F_TIMER_FAST & -(retr_ms!=(unsigned)-1));
 #endif
 	/* adjust timeout to MIN(fr, maximum lifetime) if rb is a request
 	 *  (for neg. replies we are force to wait for the ACK so use fr) */
... ...
@@ -232,10 +236,10 @@ inline static int _set_fr_retr(struct retr_buf* rb, ticks_t retr)
 		return 0;
 	}
 #ifdef TIMER_DEBUG
-	ret=timer_add_safe(&(rb)->timer, (timeout<retr)?timeout:retr,
+	ret=timer_add_safe(&(rb)->timer, (timeout<retr_ticks)?timeout:retr_ticks,
 							file, func, line);
 #else
-	ret=timer_add(&(rb)->timer, (timeout<retr)?timeout:retr);
+	ret=timer_add(&(rb)->timer, (timeout<retr_ticks)?timeout:retr_ticks);
 #endif
 	if (ret==0) rb->t_active=1;
 	membar_write_atomic_op(); /* make sure t_active will be commited to mem.
... ...
@@ -265,7 +269,7 @@ do{ \
 #define switch_rb_retr_to_t2(rb) \
 	do{ \
 		(rb)->flags|=F_RB_T2; \
-		(rb)->retr_expire=get_ticks_raw()+RT_T2_TIMEOUT(rb); \
+		(rb)->retr_expire=get_ticks_raw()+MS_TO_TICKS(RT_T2_TIMEOUT_MS(rb)); \
 	}while(0)
 
 
... ...
@@ -324,23 +328,23 @@ inline static void change_fr(struct cell* t, ticks_t fr_inv, ticks_t fr)
  *  if timer value==0 => leave it unchanged
  */
 inline static void change_retr(struct cell* t, int now,
-								ticks_t rt_t1, ticks_t rt_t2)
+								unsigned rt_t1_ms, unsigned rt_t2_ms)
 {
 	int i;
 
-	if (rt_t1) t->rt_t1_timeout=rt_t1;
-	if (rt_t2) t->rt_t2_timeout=rt_t2;
+	if (rt_t1_ms) t->rt_t1_timeout_ms=rt_t1_ms;
+	if (rt_t2_ms) t->rt_t2_timeout_ms=rt_t2_ms;
 	if (now){
 		for (i=0; i<t->nr_of_outgoings; i++){
-			if (t->uac[i].request.t_active){ 
-					if ((t->uac[i].request.flags & F_RB_T2) && rt_t2)
+			if (t->uac[i].request.t_active){
+					if ((t->uac[i].request.flags & F_RB_T2) && rt_t2_ms)
 						/* not really needed (?) - if F_RB_T2 is set
 						 * t->rt_t2_timeout will be used anyway */
-						t->uac[i].request.timer.data=
-									(void*)(unsigned long)rt_t2;
-					else if (rt_t1)
-						t->uac[i].request.timer.data=
-									(void*)(unsigned long)rt_t1;
+						t->uac[i].request.timer.data =
+							(void*)(unsigned long)rt_t2_ms;
+					else if (rt_t1_ms)
+						t->uac[i].request.timer.data =
+							(void*)(unsigned long)rt_t1_ms;
 			}
 		}
 	}
... ...
@@ -482,8 +482,8 @@ static param_export_t params[]={
 	{"fr_inv_timer",        PARAM_INT, &default_tm_cfg.fr_inv_timeout        },
 	{"wt_timer",            PARAM_INT, &default_tm_cfg.wait_timeout          },
 	{"delete_timer",        PARAM_INT, &default_tm_cfg.delete_timeout        },
-	{"retr_timer1",         PARAM_INT, &default_tm_cfg.rt_t1_timeout         },
-	{"retr_timer2"  ,       PARAM_INT, &default_tm_cfg.rt_t2_timeout         },
+	{"retr_timer1",         PARAM_INT, &default_tm_cfg.rt_t1_timeout_ms      },
+	{"retr_timer2"  ,       PARAM_INT, &default_tm_cfg.rt_t2_timeout_ms      },
 	{"max_inv_lifetime",    PARAM_INT, &default_tm_cfg.tm_max_inv_lifetime   },
 	{"max_noninv_lifetime", PARAM_INT, &default_tm_cfg.tm_max_noninv_lifetime},
 	{"noisy_ctimer",        PARAM_INT, &default_tm_cfg.noisy_ctimer          },
... ...
@@ -304,8 +304,8 @@ static inline int t_uac_prepare(uac_req_t *uac_r,
 	new_cell->end_of_life=get_ticks_raw()+lifetime;
 #ifdef TM_DIFF_RT_TIMEOUT
 	/* same as above for retransmission intervals */
-	new_cell->rt_t1_timeout=cfg_get(tm, tm_cfg, rt_t1_timeout);
-	new_cell->rt_t2_timeout=cfg_get(tm, tm_cfg, rt_t2_timeout);
+	new_cell->rt_t1_timeout_ms = cfg_get(tm, tm_cfg, rt_t1_timeout_ms);
+	new_cell->rt_t2_timeout_ms = cfg_get(tm, tm_cfg, rt_t2_timeout_ms);
 #endif
 
 	set_kr(REQ_FWDED);