tm: more log macros migrated from old form with location info in message

- location info (file, line, function) is now added automatically by the macros
- code indentation made coherent with clang-format

Daniel-Constantin Mierla authored on 12/01/2017 16:17:04
Showing 7 changed files
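For context, the migration pattern looks like this (a minimal sketch with an illustrative call site, not a line taken from this commit; the real instances are in the hunks below):

	/* old form: core DBG()/LOG() macros, with the location spelled out
	 * by hand inside the message text */
	DBG("h_table.c: freeing transaction %p\n", dead_cell);

	/* new form: module LM_DBG()/LM_WARN()/... macros, which prepend
	 * file, line and function automatically */
	LM_DBG("freeing transaction %p\n", dead_cell);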
... ...
@@ -13,15 +13,15 @@
13 13
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 14
  * GNU General Public License for more details.
15 15
  *
16
- * You should have received a copy of the GNU General Public License 
17
- * along with this program; if not, write to the Free Software 
16
+ * You should have received a copy of the GNU General Public License
17
+ * along with this program; if not, write to the Free Software
18 18
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
19 19
  *
20 20
  */
21 21
 
22 22
 /*!
23
- * \file 
24
- * \brief TM :: 
23
+ * \file
24
+ * \brief TM ::
25 25
  * \ingroup tm
26 26
  */
27 27
 
... ...
@@ -50,36 +50,38 @@
50 50
 
51 51
 static enum kill_reason kr;
52 52
 
53
-/* pointer to the big table where all the transaction data
54
-   lives */
55
-struct s_table*  _tm_table;
53
+/* pointer to the big table where all the transaction data lives */
54
+struct s_table *_tm_table;
56 55
 
57
-struct s_table* tm_get_table(void) {
56
+struct s_table *tm_get_table(void)
57
+{
58 58
 	return _tm_table;
59 59
 }
60 60
 
61
-void reset_kr(void) {
62
-	kr=0;
61
+void reset_kr(void)
62
+{
63
+	kr = 0;
63 64
 }
64 65
 
65
-void set_kr( enum kill_reason _kr )
66
+void set_kr(enum kill_reason _kr)
66 67
 {
67
-	kr|=_kr;
68
+	kr |= _kr;
68 69
 }
69 70
 
70 71
 
71
-enum kill_reason get_kr() {
72
+enum kill_reason get_kr()
73
+{
72 74
 	return kr;
73 75
 }
74 76
 
75 77
 
76
-void lock_hash(int i) 
78
+void lock_hash(int i)
77 79
 {
78 80
 
79 81
 	int mypid;
80 82
 
81 83
 	mypid = my_pid();
82
-	if (likely(atomic_get(&_tm_table->entries[i].locker_pid) != mypid)) {
84
+	if(likely(atomic_get(&_tm_table->entries[i].locker_pid) != mypid)) {
83 85
 		lock(&_tm_table->entries[i].mutex);
84 86
 		atomic_set(&_tm_table->entries[i].locker_pid, mypid);
85 87
 	} else {
... ...
@@ -89,35 +91,34 @@ void lock_hash(int i)
89 91
 }
90 92
 
91 93
 
92
-void unlock_hash(int i) 
94
+void unlock_hash(int i)
93 95
 {
94
-	if (likely(_tm_table->entries[i].rec_lock_level == 0)) {
96
+	if(likely(_tm_table->entries[i].rec_lock_level == 0)) {
95 97
 		atomic_set(&_tm_table->entries[i].locker_pid, 0);
96 98
 		unlock(&_tm_table->entries[i].mutex);
97
-	} else  {
99
+	} else {
98 100
 		/* recursive locked => decrease rec. lock count */
99 101
 		_tm_table->entries[i].rec_lock_level--;
100 102
 	}
101 103
 }
102 104
 
103 105
 
104
-
105 106
 #ifdef TM_HASH_STATS
106
-unsigned int transaction_count( void )
107
+unsigned int transaction_count(void)
107 108
 {
108 109
 	unsigned int i;
109 110
 	unsigned int count;
110 111
 
111
-	count=0;	
112
-	for (i=0; i<TABLE_ENTRIES; i++) 
113
-		count+=_tm_table->entries[i].cur_entries;
112
+	count = 0;
113
+	for(i = 0; i < TABLE_ENTRIES; i++)
114
+		count += _tm_table->entries[i].cur_entries;
114 115
 	return count;
115 116
 }
116 117
 #endif
117 118
 
118 119
 
119
-
120
-void free_cell_helper(tm_cell_t* dead_cell, int silent, const char *fname, unsigned int fline )
120
+void free_cell_helper(
121
+		tm_cell_t *dead_cell, int silent, const char *fname, unsigned int fline)
121 122
 {
122 123
 	char *b;
123 124
 	int i;
... ...
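The lock_hash()/unlock_hash() pair reformatted above implements a per-bucket recursive lock: the bucket mutex records its owner's pid, and re-entry by the same process only bumps rec_lock_level. A usage sketch (hypothetical caller, not part of this commit):

	void example_touch_bucket(int i)
	{
		lock_hash(i);	/* acquires entries[i].mutex, records my_pid() */
		lock_hash(i);	/* same pid: only rec_lock_level is increased */
		/* ... operate on the collision slot of bucket i ... */
		unlock_hash(i);	/* rec_lock_level > 0: just decrease the count */
		unlock_hash(i);	/* count back at 0: clear locker_pid, unlock */
	}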
@@ -127,40 +128,41 @@ void free_cell_helper(tm_cell_t* dead_cell, int silent, const char *fname, unsig
127 128
 
128 129
 	LM_DBG("freeing transaction %p from %s:%u\n", dead_cell, fname, fline);
129 130
 
130
-	if(dead_cell->prev_c!=NULL && dead_cell->next_c!=NULL) {
131
-		if(likely(silent==0)) {
131
+	if(dead_cell->prev_c != NULL && dead_cell->next_c != NULL) {
132
+		if(likely(silent == 0)) {
132 133
 			LM_WARN("removed cell %p is still linked in hash table (%s:%u)\n",
133
-				dead_cell, fname, fline);
134
+					dead_cell, fname, fline);
134 135
 			if(t_on_wait(dead_cell)) {
135 136
 				INIT_REF(dead_cell, 1);
136 137
 				LM_WARN("cell %p is still linked in wait timer (%s:%u)"
137
-						" - skip freeing now\n", dead_cell, fname, fline);
138
+						" - skip freeing now\n",
139
+						dead_cell, fname, fline);
138 140
 				return;
139 141
 			}
140 142
 		}
141 143
 		unlink_timers(dead_cell);
142 144
 		remove_from_hash_table_unsafe(dead_cell);
143 145
 	}
144
-	release_cell_lock( dead_cell );
145
-	if (unlikely(has_tran_tmcbs(dead_cell, TMCB_DESTROY)))
146
+	release_cell_lock(dead_cell);
147
+	if(unlikely(has_tran_tmcbs(dead_cell, TMCB_DESTROY)))
146 148
 		run_trans_callbacks(TMCB_DESTROY, dead_cell, 0, 0, 0);
147 149
 
148 150
 	shm_lock();
149 151
 	/* UA Server */
150
-	if ( dead_cell->uas.request )
151
-		sip_msg_free_unsafe( dead_cell->uas.request );
152
-	if ( dead_cell->uas.response.buffer )
153
-		shm_free_unsafe( dead_cell->uas.response.buffer );
152
+	if(dead_cell->uas.request)
153
+		sip_msg_free_unsafe(dead_cell->uas.request);
154
+	if(dead_cell->uas.response.buffer)
155
+		shm_free_unsafe(dead_cell->uas.response.buffer);
154 156
 #ifdef CANCEL_REASON_SUPPORT
155
-	if (unlikely(dead_cell->uas.cancel_reas))
157
+	if(unlikely(dead_cell->uas.cancel_reas))
156 158
 		shm_free_unsafe(dead_cell->uas.cancel_reas);
157 159
 #endif /* CANCEL_REASON_SUPPORT */
158 160
 
159 161
 	/* callbacks */
160
-	for( cbs=(struct tm_callback*)dead_cell->tmcb_hl.first ; cbs ; ) {
162
+	for(cbs = (struct tm_callback *)dead_cell->tmcb_hl.first; cbs;) {
161 163
 		cbs_tmp = cbs;
162 164
 		cbs = cbs->next;
163
-		if (cbs_tmp->release) {
165
+		if(cbs_tmp->release) {
164 166
 			/* It is safer to release the shm memory lock
165 167
 			 * otherwise the release function must be aware of
166 168
 			 * the lock state (Miklos)
... ...
@@ -169,113 +171,114 @@ void free_cell_helper(tm_cell_t* dead_cell, int silent, const char *fname, unsig
169 171
 			cbs_tmp->release(cbs_tmp->param);
170 172
 			shm_lock();
171 173
 		}
172
-		shm_free_unsafe( cbs_tmp );
174
+		shm_free_unsafe(cbs_tmp);
173 175
 	}
174 176
 
175 177
 	/* UA Clients */
176
-	for ( i =0 ; i<dead_cell->nr_of_outgoings;  i++ )
177
-	{
178
+	for(i = 0; i < dead_cell->nr_of_outgoings; i++) {
178 179
 		/* retransmission buffer */
179
-		if ( (b=dead_cell->uac[i].request.buffer) )
180
-			shm_free_unsafe( b );
181
-		b=dead_cell->uac[i].local_cancel.buffer;
182
-		if (b!=0 && b!=BUSY_BUFFER)
183
-			shm_free_unsafe( b );
184
-		rpl=dead_cell->uac[i].reply;
185
-		if (rpl && rpl!=FAKED_REPLY && rpl->msg_flags&FL_SHM_CLONE) {
186
-			sip_msg_free_unsafe( rpl );
180
+		if((b = dead_cell->uac[i].request.buffer))
181
+			shm_free_unsafe(b);
182
+		b = dead_cell->uac[i].local_cancel.buffer;
183
+		if(b != 0 && b != BUSY_BUFFER)
184
+			shm_free_unsafe(b);
185
+		rpl = dead_cell->uac[i].reply;
186
+		if(rpl && rpl != FAKED_REPLY && rpl->msg_flags & FL_SHM_CLONE) {
187
+			sip_msg_free_unsafe(rpl);
187 188
 		}
188 189
 #ifdef USE_DNS_FAILOVER
189
-		if (dead_cell->uac[i].dns_h.a){
190
-			DBG("branch %d -> dns_h.srv (%.*s) ref=%d,"
191
-							" dns_h.a (%.*s) ref=%d\n", i,
192
-					dead_cell->uac[i].dns_h.srv?
193
-								dead_cell->uac[i].dns_h.srv->name_len:0,
194
-					dead_cell->uac[i].dns_h.srv?
195
-								dead_cell->uac[i].dns_h.srv->name:"",
196
-					dead_cell->uac[i].dns_h.srv?
197
-								dead_cell->uac[i].dns_h.srv->refcnt.val:0,
190
+		if(dead_cell->uac[i].dns_h.a) {
191
+			LM_DBG("branch %d -> dns_h.srv (%.*s) ref=%d,"
192
+				" dns_h.a (%.*s) ref=%d\n",
193
+					i, dead_cell->uac[i].dns_h.srv
194
+							? dead_cell->uac[i].dns_h.srv->name_len
195
+							: 0,
196
+					dead_cell->uac[i].dns_h.srv
197
+							? dead_cell->uac[i].dns_h.srv->name
198
+							: "",
199
+					dead_cell->uac[i].dns_h.srv
200
+							? dead_cell->uac[i].dns_h.srv->refcnt.val
201
+							: 0,
198 202
 					dead_cell->uac[i].dns_h.a->name_len,
199 203
 					dead_cell->uac[i].dns_h.a->name,
200 204
 					dead_cell->uac[i].dns_h.a->refcnt.val);
201 205
 		}
202 206
 		dns_srv_handle_put_shm_unsafe(&dead_cell->uac[i].dns_h);
203 207
 #endif
204
-		if (unlikely(dead_cell->uac[i].path.s)) {
208
+		if(unlikely(dead_cell->uac[i].path.s)) {
205 209
 			shm_free_unsafe(dead_cell->uac[i].path.s);
206 210
 		}
207
-		if (unlikely(dead_cell->uac[i].instance.s)) {
211
+		if(unlikely(dead_cell->uac[i].instance.s)) {
208 212
 			shm_free_unsafe(dead_cell->uac[i].instance.s);
209 213
 		}
210
-		if (unlikely(dead_cell->uac[i].ruid.s)) {
214
+		if(unlikely(dead_cell->uac[i].ruid.s)) {
211 215
 			shm_free_unsafe(dead_cell->uac[i].ruid.s);
212 216
 		}
213
-		if (unlikely(dead_cell->uac[i].location_ua.s)) {
217
+		if(unlikely(dead_cell->uac[i].location_ua.s)) {
214 218
 			shm_free_unsafe(dead_cell->uac[i].location_ua.s);
215 219
 		}
216 220
 	}
217 221
 
218 222
 #ifdef WITH_AS_SUPPORT
219
-	if (dead_cell->uac[0].local_ack)
223
+	if(dead_cell->uac[0].local_ack)
220 224
 		free_local_ack_unsafe(dead_cell->uac[0].local_ack);
221 225
 #endif
222 226
 
223 227
 	/* collected to tags */
224
-	tt=dead_cell->fwded_totags;
228
+	tt = dead_cell->fwded_totags;
225 229
 	while(tt) {
226
-		foo=tt->next;
230
+		foo = tt->next;
227 231
 		shm_free_unsafe(tt->tag.s);
228 232
 		shm_free_unsafe(tt);
229
-		tt=foo;
233
+		tt = foo;
230 234
 	}
231 235
 
232 236
 	/* free the avp list */
233
-	if (dead_cell->user_avps_from)
234
-		destroy_avp_list_unsafe( &dead_cell->user_avps_from );
235
-	if (dead_cell->user_avps_to)
236
-		destroy_avp_list_unsafe( &dead_cell->user_avps_to );
237
-	if (dead_cell->uri_avps_from)
238
-		destroy_avp_list_unsafe( &dead_cell->uri_avps_from );
239
-	if (dead_cell->uri_avps_to)
240
-		destroy_avp_list_unsafe( &dead_cell->uri_avps_to );
237
+	if(dead_cell->user_avps_from)
238
+		destroy_avp_list_unsafe(&dead_cell->user_avps_from);
239
+	if(dead_cell->user_avps_to)
240
+		destroy_avp_list_unsafe(&dead_cell->user_avps_to);
241
+	if(dead_cell->uri_avps_from)
242
+		destroy_avp_list_unsafe(&dead_cell->uri_avps_from);
243
+	if(dead_cell->uri_avps_to)
244
+		destroy_avp_list_unsafe(&dead_cell->uri_avps_to);
241 245
 #ifdef WITH_XAVP
242
-	if (dead_cell->xavps_list)
243
-		xavp_destroy_list_unsafe( &dead_cell->xavps_list );
246
+	if(dead_cell->xavps_list)
247
+		xavp_destroy_list_unsafe(&dead_cell->xavps_list);
244 248
 #endif
245 249
 
246 250
 	/* the cell's body */
247
-	shm_free_unsafe( dead_cell );
251
+	shm_free_unsafe(dead_cell);
248 252
 
249 253
 	shm_unlock();
250 254
 	t_stats_freed();
251 255
 }
252 256
 
253 257
 
254
-
255
-static inline void init_synonym_id( struct sip_msg *p_msg, char *hash )
258
+static inline void init_synonym_id(struct sip_msg *p_msg, char *hash)
256 259
 {
257 260
 	int size;
258 261
 	char *c;
259 262
 	unsigned int myrand;
260 263
 
261
-	if (p_msg) {
264
+	if(p_msg) {
262 265
 		/* char value of a proxied transaction is
263
-		   calculated out of header-fields forming
264
-		   transaction key
266
+		 * calculated out of header-fields forming
267
+		 * transaction key
265 268
 		*/
266
-		char_msg_val( p_msg, hash );
269
+		char_msg_val(p_msg, hash);
267 270
 	} else {
268 271
 		/* char value for a UAC transaction is created
269
-		   randomly -- UAC is an originating stateful element
270
-		   which cannot be refreshed, so the value can be
271
-		   anything
272
+		 * randomly -- UAC is an originating stateful element
273
+		 * which cannot be refreshed, so the value can be
274
+		 * anything
272 275
 		*/
273 276
 		/* HACK : not long enough */
274
-		myrand=kam_rand();
277
+		myrand = kam_rand();
275 278
 		c = hash;
276
-		size=MD5_LEN;
277
-		memset(c, '0', size );
278
-		int2reverse_hex( &c, &size, myrand );
279
+		size = MD5_LEN;
280
+		memset(c, '0', size);
281
+		int2reverse_hex(&c, &size, myrand);
279 282
 	}
280 283
 }
281 284
 
... ...
@@ -284,13 +287,12 @@ static void inline init_branches(struct cell *t)
284 287
 	unsigned int i;
285 288
 	struct ua_client *uac;
286 289
 
287
-	for(i=0;i<sr_dst_max_branches;i++)
288
-	{
289
-		uac=&t->uac[i];
290
+	for(i = 0; i < sr_dst_max_branches; i++) {
291
+		uac = &t->uac[i];
290 292
 		uac->request.my_T = t;
291 293
 		uac->request.branch = i;
292 294
 		init_rb_timers(&uac->request);
293
-		uac->local_cancel=uac->request;
295
+		uac->local_cancel = uac->request;
294 296
 #ifdef USE_DNS_FAILOVER
295 297
 		dns_srv_handle_init(&uac->dns_h);
296 298
 #endif
... ...
@@ -298,98 +300,97 @@ static void inline init_branches(struct cell *t)
298 300
 }
299 301
 
300 302
 
301
-struct cell*  build_cell( struct sip_msg* p_msg )
303
+struct cell *build_cell(struct sip_msg *p_msg)
302 304
 {
303
-	struct cell* new_cell;
304
-	int          sip_msg_len;
305
-	avp_list_t* old;
305
+	struct cell *new_cell;
306
+	int sip_msg_len;
307
+	avp_list_t *old;
306 308
 	struct tm_callback *cbs, *cbs_tmp;
307 309
 #ifdef WITH_XAVP
308
-	sr_xavp_t** xold;
310
+	sr_xavp_t **xold;
309 311
 #endif
310 312
 	unsigned int cell_size;
311 313
 
312 314
 	/* allocs a new cell, add space for:
313 315
 	 * md5 (MD5_LEN - sizeof(struct cell.md5))
314 316
 	 * uac (sr_dst_max_branches * sizeof(struct ua_client) ) */
315
-	cell_size = sizeof( struct cell ) + MD5_LEN - sizeof(((struct cell*)0)->md5)
317
+	cell_size = sizeof(struct cell) + MD5_LEN - sizeof(((struct cell *)0)->md5)
316 318
 				+ (sr_dst_max_branches * sizeof(struct ua_client));
317 319
 
318
-	new_cell = (struct cell*)shm_malloc( cell_size );
319
-	if  ( !new_cell ) {
320
-		ser_error=E_OUT_OF_MEM;
320
+	new_cell = (struct cell *)shm_malloc(cell_size);
321
+	if(!new_cell) {
322
+		ser_error = E_OUT_OF_MEM;
321 323
 		return NULL;
322 324
 	}
323 325
 
324 326
 	/* filling with 0 */
325
-	memset( new_cell, 0, cell_size );
327
+	memset(new_cell, 0, cell_size);
326 328
 
327 329
 	/* UAS */
328
-	new_cell->uas.response.my_T=new_cell;
330
+	new_cell->uas.response.my_T = new_cell;
329 331
 	init_rb_timers(&new_cell->uas.response);
330 332
 	/* UAC */
331
-	new_cell->uac = (struct ua_client*)((char*)new_cell + sizeof(struct cell)
332
-							+ MD5_LEN - sizeof(((struct cell*)0)->md5));
333
+	new_cell->uac =
334
+			(struct ua_client *)((char *)new_cell + sizeof(struct cell)
335
+								+ MD5_LEN - sizeof(((struct cell *)0)->md5));
333 336
 	/* timers */
334 337
 	init_cell_timers(new_cell);
335 338
 
336
-	old = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, 
337
-			&new_cell->uri_avps_from );
339
+	old = set_avp_list(
340
+			AVP_TRACK_FROM | AVP_CLASS_URI, &new_cell->uri_avps_from);
338 341
 	new_cell->uri_avps_from = *old;
339 342
 	*old = 0;
340 343
 
341
-	old = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, 
342
-			&new_cell->uri_avps_to );
344
+	old = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &new_cell->uri_avps_to);
343 345
 	new_cell->uri_avps_to = *old;
344 346
 	*old = 0;
345 347
 
346
-	old = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, 
347
-			&new_cell->user_avps_from );
348
+	old = set_avp_list(
349
+			AVP_TRACK_FROM | AVP_CLASS_USER, &new_cell->user_avps_from);
348 350
 	new_cell->user_avps_from = *old;
349 351
 	*old = 0;
350 352
 
351
-	old = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, 
352
-			&new_cell->user_avps_to );
353
+	old = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, &new_cell->user_avps_to);
353 354
 	new_cell->user_avps_to = *old;
354 355
 	*old = 0;
355 356
 
356 357
 #ifdef WITH_XAVP
357
-	xold = xavp_set_list(&new_cell->xavps_list );
358
+	xold = xavp_set_list(&new_cell->xavps_list);
358 359
 	new_cell->xavps_list = *xold;
359 360
 	*xold = 0;
360 361
 #endif
361 362
 
362
-	     /* We can just store pointer to domain avps in the transaction context,
363
-	      * because they are read-only
364
-	      */
365
-	new_cell->domain_avps_from = get_avp_list(AVP_TRACK_FROM | 
366
-								AVP_CLASS_DOMAIN);
363
+	/* We can just store pointer to domain avps in the transaction context,
364
+	 * because they are read-only */
365
+	new_cell->domain_avps_from =
366
+			get_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN);
367 367
 	new_cell->domain_avps_to = get_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN);
368 368
 
369 369
 	/* enter callback, which may potentially want to parse some stuff,
370 370
 	 * before the request is shmem-ized */
371
-	if (p_msg) {
371
+	if(p_msg) {
372 372
 		set_early_tmcb_list(p_msg, new_cell);
373 373
 		if(has_reqin_tmcbs())
374
-			run_reqin_callbacks( new_cell, p_msg, p_msg->REQ_METHOD);
374
+			run_reqin_callbacks(new_cell, p_msg, p_msg->REQ_METHOD);
375 375
 	}
376 376
 
377
-	if (p_msg) {
378
-		new_cell->uas.request = sip_msg_cloner(p_msg,&sip_msg_len);
379
-		if (!new_cell->uas.request)
377
+	if(p_msg) {
378
+		new_cell->uas.request = sip_msg_cloner(p_msg, &sip_msg_len);
379
+		if(!new_cell->uas.request)
380 380
 			goto error;
381
-		new_cell->uas.end_request=((char*)new_cell->uas.request)+sip_msg_len;
381
+		new_cell->uas.end_request =
382
+				((char *)new_cell->uas.request) + sip_msg_len;
382 383
 	}
383 384
 
384 385
 	/* UAC */
385 386
 	init_branches(new_cell);
386 387
 
387
-	new_cell->relayed_reply_branch   = -1;
388
+	new_cell->relayed_reply_branch = -1;
388 389
 	/* new_cell->T_canceled = T_UNDEFINED; */
389 390
 
390 391
 	init_synonym_id(p_msg, new_cell->md5);
391
-	init_cell_lock(  new_cell );
392
-	init_async_lock( new_cell );
392
+	init_cell_lock(new_cell);
393
+	init_async_lock(new_cell);
393 394
 	t_stats_created();
394 395
 	return new_cell;
395 396
 
... ...
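The allocation in build_cell() above packs three regions into a single shm block; schematically (a restatement of the code shown, not a new scheme):

	/*
	 * [ struct cell | md5 spill-over | uac[0] .. uac[sr_dst_max_branches-1] ]
	 *
	 * the md5 member is declared shorter than MD5_LEN, so the spill-over
	 * bytes complete it, and new_cell->uac points just past them
	 */
	cell_size = sizeof(struct cell) + MD5_LEN - sizeof(((struct cell *)0)->md5)
				+ (sr_dst_max_branches * sizeof(struct ua_client));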
@@ -399,19 +400,19 @@ error:
399 400
 	 * additional memory for their parameters,
400 401
 	 * hence TMCB_DESTROY needs to be called. (Miklos)
401 402
 	 */
402
-	if (unlikely(has_tran_tmcbs(new_cell, TMCB_DESTROY)))
403
+	if(unlikely(has_tran_tmcbs(new_cell, TMCB_DESTROY)))
403 404
 		run_trans_callbacks(TMCB_DESTROY, new_cell, 0, 0, 0);
404 405
 
405 406
 	/* free the callback list */
406
-	for( cbs=(struct tm_callback*)new_cell->tmcb_hl.first ; cbs ; ) {
407
+	for(cbs = (struct tm_callback *)new_cell->tmcb_hl.first; cbs;) {
407 408
 		cbs_tmp = cbs;
408 409
 		cbs = cbs->next;
409
-		if (cbs_tmp->release) {
410
+		if(cbs_tmp->release) {
410 411
 			cbs_tmp->release(cbs_tmp->param);
411 412
 		}
412
-		shm_free( cbs_tmp );
413
+		shm_free(cbs_tmp);
413 414
 	}
414
-	
415
+
415 416
 	destroy_avp_list(&new_cell->user_avps_from);
416 417
 	destroy_avp_list(&new_cell->user_avps_to);
417 418
 	destroy_avp_list(&new_cell->uri_avps_from);
... ...
@@ -429,24 +430,21 @@ error:
429 430
 }
430 431
 
431 432
 
432
-
433 433
 /* Release all the data contained by the hash table. All the aux. structures
434 434
  *  such as semaphores, lists, etc., are also released */
435
-void free_hash_table(  )
435
+void free_hash_table()
436 436
 {
437
-	struct cell* p_cell;
438
-	struct cell* tmp_cell;
439
-	int    i;
437
+	struct cell *p_cell;
438
+	struct cell *tmp_cell;
439
+	int i;
440 440
 
441
-	if (_tm_table)
442
-	{
441
+	if(_tm_table) {
443 442
 		/* remove the data contained by each entry */
444
-		for( i = 0 ; i<TABLE_ENTRIES; i++)
445
-		{
446
-			release_entry_lock( (_tm_table->entries)+i );
443
+		for(i = 0; i < TABLE_ENTRIES; i++) {
444
+			release_entry_lock((_tm_table->entries) + i);
447 445
 			/* delete all synonyms at hash-collision-slot i */
448
-			clist_foreach_safe(&_tm_table->entries[i], p_cell, tmp_cell,
449
-									next_c){
446
+			clist_foreach_safe(&_tm_table->entries[i], p_cell, tmp_cell, next_c)
447
+			{
450 448
 				free_cell_silent(p_cell);
451 449
 			}
452 450
 		}
... ...
@@ -456,40 +454,37 @@ void free_hash_table(  )
456 454
 }
457 455
 
458 456
 
459
-
460
-
461 457
 /*
462 458
  */
463
-struct s_table* init_hash_table()
459
+struct s_table *init_hash_table()
464 460
 {
465
-	int              i;
461
+	int i;
466 462
 
467 463
 	/*allocs the table*/
468
-	_tm_table= (struct s_table*)shm_malloc( sizeof( struct s_table ) );
469
-	if ( !_tm_table) {
464
+	_tm_table = (struct s_table *)shm_malloc(sizeof(struct s_table));
465
+	if(!_tm_table) {
470 466
 		LOG(L_ERR, "ERROR: init_hash_table: no shmem for TM table\n");
471 467
 		goto error0;
472 468
 	}
473 469
 
474
-	memset( _tm_table, 0, sizeof (struct s_table ) );
470
+	memset(_tm_table, 0, sizeof(struct s_table));
475 471
 
476 472
 	/* try first allocating all the structures needed for syncing */
477
-	if (lock_initialize()==-1)
473
+	if(lock_initialize() == -1)
478 474
 		goto error1;
479 475
 
480 476
 	/* inits the entries */
481
-	for(  i=0 ; i<TABLE_ENTRIES; i++ )
482
-	{
483
-		init_entry_lock( _tm_table, (_tm_table->entries)+i );
477
+	for(i = 0; i < TABLE_ENTRIES; i++) {
478
+		init_entry_lock(_tm_table, (_tm_table->entries) + i);
484 479
 		_tm_table->entries[i].next_label = kam_rand();
485 480
 		/* init cell list */
486 481
 		clist_init(&_tm_table->entries[i], next_c, prev_c);
487 482
 	}
488 483
 
489
-	return  _tm_table;
484
+	return _tm_table;
490 485
 
491 486
 error1:
492
-	free_hash_table( );
487
+	free_hash_table();
493 488
 error0:
494 489
 	return 0;
495 490
 }
... ...
@@ -505,36 +500,41 @@ void tm_xdata_swap(tm_cell_t *t, tm_xlinks_t *xd, int mode)
505 500
 	static tm_xlinks_t _txdata;
506 501
 	tm_xlinks_t *x;
507 502
 
508
-	if(xd==NULL)
503
+	if(xd == NULL)
509 504
 		x = &_txdata;
510 505
 	else
511 506
 		x = xd;
512 507
 
513
-	if(mode==0) {
514
-		if(t==NULL)
508
+	if(mode == 0) {
509
+		if(t == NULL)
515 510
 			return;
516
-		x->uri_avps_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, &t->uri_avps_from );
517
-		x->uri_avps_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &t->uri_avps_to );
518
-		x->user_avps_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, &t->user_avps_from );
519
-		x->user_avps_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, &t->user_avps_to );
520
-		x->domain_avps_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, &t->domain_avps_from );
521
-		x->domain_avps_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, &t->domain_avps_to );
511
+		x->uri_avps_from =
512
+				set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, &t->uri_avps_from);
513
+		x->uri_avps_to =
514
+				set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &t->uri_avps_to);
515
+		x->user_avps_from = set_avp_list(
516
+				AVP_TRACK_FROM | AVP_CLASS_USER, &t->user_avps_from);
517
+		x->user_avps_to =
518
+				set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, &t->user_avps_to);
519
+		x->domain_avps_from = set_avp_list(
520
+				AVP_TRACK_FROM | AVP_CLASS_DOMAIN, &t->domain_avps_from);
521
+		x->domain_avps_to = set_avp_list(
522
+				AVP_TRACK_TO | AVP_CLASS_DOMAIN, &t->domain_avps_to);
522 523
 #ifdef WITH_XAVP
523 524
 		x->xavps_list = xavp_set_list(&t->xavps_list);
524 525
 #endif
525
-	} else if(mode==1) {
526
+	} else if(mode == 1) {
526 527
 		/* restore original avp list */
527
-		set_avp_list( AVP_TRACK_FROM | AVP_CLASS_URI, x->uri_avps_from );
528
-		set_avp_list( AVP_TRACK_TO | AVP_CLASS_URI, x->uri_avps_to );
529
-		set_avp_list( AVP_TRACK_FROM | AVP_CLASS_USER, x->user_avps_from );
530
-		set_avp_list( AVP_TRACK_TO | AVP_CLASS_USER, x->user_avps_to );
531
-		set_avp_list( AVP_TRACK_FROM | AVP_CLASS_DOMAIN, x->domain_avps_from );
532
-		set_avp_list( AVP_TRACK_TO | AVP_CLASS_DOMAIN, x->domain_avps_to );
528
+		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, x->uri_avps_from);
529
+		set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, x->uri_avps_to);
530
+		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, x->user_avps_from);
531
+		set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, x->user_avps_to);
532
+		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, x->domain_avps_from);
533
+		set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, x->domain_avps_to);
533 534
 #ifdef WITH_XAVP
534 535
 		xavp_set_list(x->xavps_list);
535 536
 #endif
536 537
 	}
537
-
538 538
 }
539 539
 
540 540
 /**
... ...
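tm_xdata_swap() above saves the current core AVP/XAVP list heads and points them at the transaction (mode 0), or restores the saved heads (mode 1). A hypothetical save/run/restore sequence (assuming the caller keeps the same tm_xlinks_t across both calls):

	tm_xlinks_t backup;

	tm_xdata_swap(t, &backup, 0); /* make t's avp/xavp lists current and
								   * remember the previous list heads */
	/* ... run code that should see the transaction's context ... */
	tm_xdata_swap(t, &backup, 1); /* put the saved list heads back */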
@@ -542,12 +542,13 @@ void tm_xdata_swap(tm_cell_t *t, tm_xlinks_t *xd, int mode)
542 542
  */
543 543
 void tm_xdata_replace(tm_xdata_t *newxd, tm_xlinks_t *bakxd)
544 544
 {
545
-	if(newxd==NULL && bakxd!=NULL) {
545
+	if(newxd == NULL && bakxd != NULL) {
546 546
 		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, bakxd->uri_avps_from);
547 547
 		set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, bakxd->uri_avps_to);
548 548
 		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, bakxd->user_avps_from);
549 549
 		set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, bakxd->user_avps_to);
550
-		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, bakxd->domain_avps_from);
550
+		set_avp_list(
551
+				AVP_TRACK_FROM | AVP_CLASS_DOMAIN, bakxd->domain_avps_from);
551 552
 		set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, bakxd->domain_avps_to);
552 553
 #ifdef WITH_XAVP
553 554
 		xavp_set_list(bakxd->xavps_list);
... ...
@@ -555,19 +556,19 @@ void tm_xdata_replace(tm_xdata_t *newxd, tm_xlinks_t *bakxd)
555 556
 		return;
556 557
 	}
557 558
 
558
-	if(newxd!=NULL && bakxd!=NULL) {
559
-		bakxd->uri_avps_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI,
560
-				&newxd->uri_avps_from);
561
-		bakxd->uri_avps_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI,
562
-				&newxd->uri_avps_to);
563
-		bakxd->user_avps_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER,
564
-				&newxd->user_avps_from);
565
-		bakxd->user_avps_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER,
566
-				&newxd->user_avps_to);
567
-		bakxd->domain_avps_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN,
568
-				&newxd->domain_avps_from);
569
-		bakxd->domain_avps_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN,
570
-				&newxd->domain_avps_to);
559
+	if(newxd != NULL && bakxd != NULL) {
560
+		bakxd->uri_avps_from = set_avp_list(
561
+				AVP_TRACK_FROM | AVP_CLASS_URI, &newxd->uri_avps_from);
562
+		bakxd->uri_avps_to =
563
+				set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &newxd->uri_avps_to);
564
+		bakxd->user_avps_from = set_avp_list(
565
+				AVP_TRACK_FROM | AVP_CLASS_USER, &newxd->user_avps_from);
566
+		bakxd->user_avps_to = set_avp_list(
567
+				AVP_TRACK_TO | AVP_CLASS_USER, &newxd->user_avps_to);
568
+		bakxd->domain_avps_from = set_avp_list(
569
+				AVP_TRACK_FROM | AVP_CLASS_DOMAIN, &newxd->domain_avps_from);
570
+		bakxd->domain_avps_to = set_avp_list(
571
+				AVP_TRACK_TO | AVP_CLASS_DOMAIN, &newxd->domain_avps_to);
571 572
 #ifdef WITH_XAVP
572 573
 		bakxd->xavps_list = xavp_set_list(&newxd->xavps_list);
573 574
 #endif
... ...
@@ -13,14 +13,14 @@
13 13
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 14
  * GNU General Public License for more details.
15 15
  *
16
- * You should have received a copy of the GNU General Public License 
17
- * along with this program; if not, write to the Free Software 
16
+ * You should have received a copy of the GNU General Public License
17
+ * along with this program; if not, write to the Free Software
18 18
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
19 19
  *
20 20
  */
21 21
 
22 22
 /**  TM :: hash table, flags and other general defines.
23
- * @file 
23
+ * @file
24 24
  * @ingroup tm
25 25
  */
26 26
 
... ...
@@ -88,121 +88,126 @@ void lock_hash(int i);
88 88
 void unlock_hash(int i);
89 89
 
90 90
 
91
-#define NO_CANCEL       ( (char*) 0 )
92
-#define EXTERNAL_CANCEL ( (char*) -1)
91
+#define NO_CANCEL ((char *)0)
92
+#define EXTERNAL_CANCEL ((char *)-1)
93 93
 
94
-#define TYPE_LOCAL_ACK    -2
94
+#define TYPE_LOCAL_ACK -2
95 95
 #define TYPE_LOCAL_CANCEL -1
96
-#define TYPE_REQUEST       0
96
+#define TYPE_REQUEST 0
97 97
 
98 98
 /* to be able to assess whether a script writer forgot to
99
-   release a transaction and leave it for ever in memory,
100
-   we mark it with operations done over it; if none of these
101
-   flags is set and script is being left, it is a sign of
102
-   script error and we need to release on writer's
103
-   behalf
104
-
105
-   REQ_FWDED means there is a UAC with final response timer
106
-             ticking. If it hits, transaction will be completed.
107
-   REQ_RPLD means that a transaction has been replied -- either
108
-            it implies going to wait state, or for invite transactions
109
-            FR timer is ticking until ACK arrives
110
-   REQ_RLSD means that a transaction was put on wait explicitly
111
-            from t_release_transaction
112
-   REQ_EXIST means that this request is a retransmission which does not
113
-            affect transactional state
114
-   REQ_ERR_DELAYED mean that tm wants to send  reply(ser_error) but it
115
-            delayed it to end-of-script to allow it to be overriden.
116
-            If this is set and all of the above flag are not => send reply
117
-            on end of script. If any of the above flags is set, do not
118
-            send (especially REQ_RPLD and REQ_RLSD).
99
+ * release a transaction and leave it forever in memory,
100
+ * we mark it with operations done over it; if none of these
101
+ * flags is set and script is being left, it is a sign of
102
+ * script error and we need to release on writer's
103
+ * behalf
104
+ *
105
+ * REQ_FWDED means there is a UAC with final response timer
106
+ *     ticking. If it hits, transaction will be completed.
107
+ * REQ_RPLD means that a transaction has been replied -- either
108
+ *     it implies going to wait state, or for invite transactions
109
+ *     FR timer is ticking until ACK arrives
110
+ * REQ_RLSD means that a transaction was put on wait explicitly
111
+ *     from t_release_transaction
112
+ * REQ_EXIST means that this request is a retransmission which does not
113
+ *     affect transactional state
114
+ * REQ_ERR_DELAYED means that tm wants to send a reply (ser_error) but it
115
+ *     delayed it to end-of-script to allow it to be overridden.
116
+ *     If this is set and all of the above flags are not => send reply
117
+ *     on end of script. If any of the above flags is set, do not
118
+ *     send (especially REQ_RPLD and REQ_RLSD).
119 119
 */
120
-enum kill_reason { REQ_FWDED=1, REQ_RPLD=2, REQ_RLSD=4, REQ_EXIST=8,
121
-				   REQ_ERR_DELAYED=16 };
120
+enum kill_reason
121
+{
122
+	REQ_FWDED = 1,
123
+	REQ_RPLD = 2,
124
+	REQ_RLSD = 4,
125
+	REQ_EXIST = 8,
126
+	REQ_ERR_DELAYED = 16
127
+};
122 128
 
123 129
 
124 130
 /* #define F_RB_T_ACTIVE		0x01  (obsolete) fr or retr active */
125
-#define F_RB_T2				0x02
126
-#define F_RB_RETR_DISABLED	0x04 /* retransmission disabled */
127
-#define F_RB_FR_INV	0x08 /* timer switched to FR_INV */
128
-#define F_RB_TIMEOUT	0x10 /* timeout */
129
-#define F_RB_REPLIED	0x20 /* reply received */
130
-#define F_RB_CANCELED	0x40 /* rb/branch canceled */
131
-#define F_RB_DEL_TIMER	0x80 /* timer should be deleted if active */
132
-#define F_RB_NH_LOOSE	0x100 /* next hop is a loose router */
133
-#define F_RB_NH_STRICT	0x200 /* next hop is a strict router */
131
+#define F_RB_T2 0x02
132
+#define F_RB_RETR_DISABLED 0x04 /* retransmission disabled */
133
+#define F_RB_FR_INV 0x08		/* timer switched to FR_INV */
134
+#define F_RB_TIMEOUT 0x10		/* timeout */
135
+#define F_RB_REPLIED 0x20		/* reply received */
136
+#define F_RB_CANCELED 0x40		/* rb/branch canceled */
137
+#define F_RB_DEL_TIMER 0x80		/* timer should be deleted if active */
138
+#define F_RB_NH_LOOSE 0x100		/* next hop is a loose router */
139
+#define F_RB_NH_STRICT 0x200	/* next hop is a strict router */
134 140
 /* must detect when neither loose nor strict flag is set -> two flags.
135 141
  * alternatively, 1x flag for strict/loose and 1x for loose|strict set/not */
136
-#define F_RB_RELAYREPLY	0x400 /* branch under relay reply condition */
142
+#define F_RB_RELAYREPLY 0x400 /* branch under relay reply condition */
137 143
 
138 144
 
139 145
 /* if canceled or intended to be canceled, return true */
140
-#define uac_dont_fork(uac)	((uac)->local_cancel.buffer)
146
+#define uac_dont_fork(uac) ((uac)->local_cancel.buffer)
141 147
 
142 148
 
143 149
 typedef struct retr_buf
144 150
 {
145 151
 	short activ_type;
146 152
 	/* set to status code if the buffer is a reply,
147
-	0 if request or -1 if local CANCEL */
148
-	volatile unsigned short flags; /* DISABLED, T2 */
153
+	 * 0 if request or -1 if local CANCEL */
154
+	volatile unsigned short flags;   /* DISABLED, T2 */
149 155
 	volatile unsigned char t_active; /* timer active */
150
-	unsigned short branch; /* no more then 65k branches :-) */
156
+	unsigned short branch;			 /* no more than 64k branches */
151 157
 	int buffer_len;
152 158
 	char *buffer;
153 159
 	/*the cell that contains this retrans_buff*/
154
-	struct cell* my_T;
160
+	struct cell *my_T;
155 161
 	struct timer_ln timer;
156 162
 	struct dest_info dst;
157 163
 	ticks_t retr_expire;
158 164
 	ticks_t fr_expire; /* ticks value after which fr. will fire */
159
-}retr_buf_type;
160
-
165
+} retr_buf_type;
161 166
 
162 167
 
163 168
 /* User Agent Server content */
164 169
 
165 170
 typedef struct ua_server
166 171
 {
167
-	struct sip_msg   *request;
168
-	char             *end_request;
169
-	struct retr_buf  response;
170
-	/* keep to-tags for local 200 replies for INVITE -- 
172
+	struct sip_msg *request;
173
+	char *end_request;
174
+	struct retr_buf response;
175
+	/* keep to-tags for local 200 replies for INVITE --
171 176
 	 * we need them for dialog-wise matching of ACKs;
172 177
 	 * the pointer shows to shmem-ed reply */
173
-	str				 local_totag;
178
+	str local_totag;
174 179
 #ifdef CANCEL_REASON_SUPPORT
175
-	struct cancel_reason* cancel_reas; /* pointer to cancel reason, used
176
-										  for e2e cancels */
180
+	struct cancel_reason *cancel_reas; /* pointer to cancel reason, used
181
+										* for e2e cancels */
177 182
 #endif /* CANCEL_REASON_SUPPORT */
178
-	unsigned int     status;
179
-}ua_server_type;
180
-
183
+	unsigned int status;
184
+} ua_server_type;
181 185
 
182 186
 
183 187
 /* User Agent Client content */
184 188
 
185 189
 /* UAC internal flags */
186
-#define TM_UAC_FLAG_RR	(1)		/* Record-Route applied */
187
-#define TM_UAC_FLAG_R2	(1<<1)	/* 2nd Record-Route applied */
188
-#define TM_UAC_FLAG_FB	(1<<2)	/* Mark first entry in new branch set */
189
-#define TM_UAC_FLAG_BLIND	(1<<3)	/* A blind uac */
190
+#define TM_UAC_FLAG_RR (1)			/* Record-Route applied */
191
+#define TM_UAC_FLAG_R2 (1 << 1)		/* 2nd Record-Route applied */
192
+#define TM_UAC_FLAG_FB (1 << 2)		/* Mark first entry in new branch set */
193
+#define TM_UAC_FLAG_BLIND (1 << 3)	/* A blind uac */
190 194
 
191 195
 typedef struct ua_client
192 196
 {
193 197
 	/* if we store a reply (branch picking), this is where it is */
194
-	struct sip_msg  *reply;
195
-	char *end_reply;	/* pointer to end of sip_msg so we know the shm blocked used in clone...(used in async replies) */
196
-	struct retr_buf  request;
198
+	struct sip_msg *reply;
199
+	char *end_reply; /* pointer to end of sip_msg so we know the shm block
200
+						* used in clone...(used in async replies) */
201
+	struct retr_buf request;
197 202
 	/* we maintain a separate copy of cancel rather than
198
-	   reuse the structure for original request; the
199
-	   original request is no longer needed but its delayed
200
-	   timer may fire and interfere with whoever tries to
201
-	   rewrite it
203
+	 * reuse the structure for original request; the
204
+	 * original request is no longer needed but its delayed
205
+	 * timer may fire and interfere with whoever tries to
206
+	 * rewrite it
202 207
 	*/
203 208
 	struct retr_buf local_cancel;
204 209
 	/* pointer to retransmission buffer where uri is printed;
205
-	   good for generating ACK/CANCEL */
210
+	 * good for generating ACK/CANCEL */
206 211
 #ifdef USE_DNS_FAILOVER
207 212
 	struct dns_srv_handle dns_h;
208 213
 #endif
... ...
@@ -212,7 +217,7 @@ typedef struct ua_client
212 217
 	str ruid;
213 218
 	str location_ua;
214 219
 	/* if we don't store, we at least want to know the status */
215
-	int             last_received;
220
+	int last_received;
216 221
 
217 222
 	/* internal flags per tm uac */
218 223
 	unsigned int flags;
... ...
@@ -224,7 +229,7 @@ typedef struct ua_client
224 229
 #ifdef WITH_AS_SUPPORT
225 230
 	/**
226 231
 	 * Resent for every rcvd 2xx reply.
227
-	 * This member's as an alternative to passing the reply to the AS, 
232
+	 * This member is an alternative to passing the reply to the AS,
228 233
 	 * every time a reply for local request is rcvd.
229 234
 	 * Member can not be union'ed with local_cancel, since CANCEL can happen
230 235
 	 * concurrently with a 2xx reply (to generate an ACK).
... ...
@@ -239,17 +244,20 @@ typedef struct ua_client
239 244
 	unsigned short on_reply;
240 245
 	/* unused - keep the structure aligned to 32b */
241 246
 	unsigned short on_unused;
242
-}ua_client_type;
247
+} ua_client_type;
243 248
 
244 249
 
245
-struct totag_elem {
250
+struct totag_elem
251
+{
246 252
 	struct totag_elem *next;
247 253
 	str tag;
248 254
 	volatile int acked;
249 255
 };
250 256
 
251
-/* structure for storing transaction state prior to suspending of async transactions */
252
-typedef struct async_state {
257
+/* structure for storing transaction state prior to suspending
258
+ * of async transactions */
259
+typedef struct async_state
260
+{
253 261
 	unsigned int backup_route;
254 262
 	unsigned int backup_branch;
255 263
 	unsigned int blind_uac;
... ...
@@ -258,45 +266,48 @@ typedef struct async_state {
258 266
 
259 267
 /* transaction's flags */
260 268
 /* is the transaction's request an INVITE? */
261
-#define T_IS_INVITE_FLAG     (1<<0)
269
+#define T_IS_INVITE_FLAG (1 << 0)
262 270
 /* is this a transaction generated by local request? */
263
-#define T_IS_LOCAL_FLAG      (1<<1)
271
+#define T_IS_LOCAL_FLAG (1 << 1)
264 272
 /* set to one if you want to disallow silent transaction
265
-   dropping when C timer hits */
266
-#define T_NOISY_CTIMER_FLAG  (1<<2)
273
+ * dropping when C timer hits */
274
+#define T_NOISY_CTIMER_FLAG (1 << 2)
267 275
 /* transaction canceled
268 276
  * WARNING: this flag can be set outside reply lock from e2e_cancel().
269 277
  * If a future flag could be affected by a race w/ e2e_cancel() the code
270 278
  * should be changed.*/
271
-#define T_CANCELED           (1<<3)
279
+#define T_CANCELED (1 << 3)
272 280
 /* 6xx received => stop forking */
273
-#define T_6xx            (1<<4) 
281
+#define T_6xx (1 << 4)
274 282
 
275
-#define T_IN_AGONY (1<<5) /* set if waiting to die (delete timer)
276
-                             TODO: replace it with del on unref */
277
-#define T_AUTO_INV_100 (1<<6) /* send an 100 reply automatically  to inv. */
283
+#define T_IN_AGONY (1 << 5)		/* set if waiting to die (delete timer)
284
+								 * TODO: replace it with del on unref */
285
+#define T_AUTO_INV_100 (1 << 6) /* send a 100 reply automatically to inv. */
278 286
 #ifdef WITH_AS_SUPPORT
279
-	/* don't generate automatically an ACK for local transaction */
280
-#	define T_NO_AUTO_ACK	(1<<7)
287
+/* don't generate automatically an ACK for local transaction */
288
+#define T_NO_AUTO_ACK (1 << 7)
281 289
 #endif
282 290
 
283
-#define T_DISABLE_6xx (1<<8) /* treat 6xx as a normal reply */
284
-#define T_DISABLE_FAILOVER (1<<9) /* don't perform dns failover */
291
+#define T_DISABLE_6xx (1 << 8)		/* treat 6xx as a normal reply */
292
+#define T_DISABLE_FAILOVER (1 << 9) /* don't perform dns failover */
285 293
 #ifdef CANCEL_REASON_SUPPORT
286
-#define T_NO_E2E_CANCEL_REASON (1<<10) /* don't propagate CANCEL Reason */
287
-#endif /* CANCEL_REASON_SUPPORT */
288
-#define T_DONT_FORK   (T_CANCELED|T_6xx)
294
+#define T_NO_E2E_CANCEL_REASON (1 << 10) /* don't propagate CANCEL Reason */
295
+#endif									 /* CANCEL_REASON_SUPPORT */
296
+#define T_DONT_FORK (T_CANCELED | T_6xx)
289 297
 
290 298
 #ifdef WITH_AS_SUPPORT
291
-	/* provisional replies must trigger callbacks for local transaction */
292
-#	define T_PASS_PROVISIONAL_FLAG (1<<11)
293
-#	define pass_provisional(_t_)	((_t_)->flags&T_PASS_PROVISIONAL_FLAG)
299
+/* provisional replies must trigger callbacks for local transaction */
300
+#define T_PASS_PROVISIONAL_FLAG (1 << 11)
301
+#define pass_provisional(_t_) ((_t_)->flags & T_PASS_PROVISIONAL_FLAG)
294 302
 #endif
295
-#define T_ASYNC_CONTINUE (1<<12) /* Is this transaction in a continuation after being suspended */
303
+#define T_ASYNC_CONTINUE \
304
+	(1 << 12) /* Is this transaction in a continuation after being suspended */
296 305
 
297
-#define T_DISABLE_INTERNAL_REPLY (1<<13) /* don't send internal negative reply */
298
-#define T_ADMIN_REPLY (1<<14) /* t reply sent by admin (e.g., from cfg script) */
299
-#define T_ASYNC_SUSPENDED (1<<15)
306
+#define T_DISABLE_INTERNAL_REPLY \
307
+	(1 << 13) /* don't send internal negative reply */
308
+#define T_ADMIN_REPLY \
309
+	(1 << 14) /* t reply sent by admin (e.g., from cfg script) */
310
+#define T_ASYNC_SUSPENDED (1 << 15)
300 311
 
301 312
 /* unsigned short should be enough for a retr. timer: max. 65535 ms =>
302 313
  * max retr. = 65 s which should be enough and saves us 2*2 bytes */
... ...
@@ -345,12 +356,12 @@ typedef struct cell
345 356
 	/* linking data */
346 357
 	/* WARNING: don't move or change order of next_c or prev_c
347 358
 	 * or breakage will occur */
348
-	struct cell*     next_c;
349
-	struct cell*     prev_c;
359
+	struct cell *next_c;
360
+	struct cell *prev_c;
350 361
 	/* tells in which hash table entry the cell lives */
351
-	unsigned int  hash_index;
362
+	unsigned int hash_index;
352 363
 	/* sequence number within hash collision slot */
353
-	unsigned int  label;
364
+	unsigned int label;
354 365
 	/* different information about the transaction */
355 366
 	unsigned short flags;
356 367
 	/* number of forks */
... ...
@@ -362,34 +373,34 @@ typedef struct cell
362 373
 	 * is removed the ref_count should be decreased (via UNREF()).
363 374
 	 * This includes adding the cell to the hash table (REF() before adding)
364 375
 	 * and removing it from the hash table (UNREF_FREE() after unlinking).
365
-	 * Exception: it does not include starting/stopping timers (timers are 
376
+	 * Exception: it does not include starting/stopping timers (timers are
366 377
 	 * forced-stopped every time when ref_count reaches 0)
367 378
 	 * If the cell is no longer referenced (ref_count==0 after an UNREF),
368 379
 	 * it will be automatically deleted by the UNREF() operation.
369 380
 	 */
370 381
 	atomic_t ref_count;
371
-#else 
382
+#else
372 383
 	/* how many processes are currently processing this transaction ;
373
-	   note that only processes working on a request/reply belonging
374
-	   to a transaction increase ref_count -- timers don't, since we
375
-	   rely on transaction state machine to clean-up all but wait timer
376
-	   when entering WAIT state and the wait timer is the only place
377
-	   from which a transaction can be deleted (if ref_count==0); good
378
-	   for protecting from conditions in which wait_timer hits and
379
-	   tries to delete a transaction whereas at the same time 
380
-	   a delayed message belonging to the transaction is received */
384
+	 * note that only processes working on a request/reply belonging
385
+	 * to a transaction increase ref_count -- timers don't, since we
386
+	 * rely on transaction state machine to clean-up all but wait timer
387
+	 * when entering WAIT state and the wait timer is the only place
388
+	 * from which a transaction can be deleted (if ref_count==0); good
389
+	 * for protecting from conditions in which wait_timer hits and
390
+	 * tries to delete a transaction whereas at the same time
391
+	 * a delayed message belonging to the transaction is received */
381 392
 	volatile unsigned int ref_count;
382 393
 #endif
383 394
 
384 395
 	/* needed for generating local ACK/CANCEL for local
385
-	   transactions; all but cseq_n include the entire
386
-	   header field value, cseq_n only Cseq number; with
387
-	   local transactions, pointers point to outbound buffer,
388
-	   with proxied transactions to inbound request */
396
+	 * transactions; all but cseq_n include the entire
397
+	 * header field value, cseq_n only Cseq number; with
398
+	 * local transactions, pointers point to outbound buffer,
399
+	 * with proxied transactions to inbound request */
389 400
 	str from, callid, cseq_n, to;
390 401
 	/* method shortcut -- for local transactions, pointer to
391
-	   outbound buffer, for proxies transactions pointer to
392
-	   original message; needed for reply matching */
402
+	 * outbound buffer, for proxied transactions pointer to
403
+	 * original message; needed for reply matching */
393 404
 	str method;
394 405
 
395 406
 	/* head of callback list */
... ...
@@ -399,19 +410,19 @@ typedef struct cell
399 410
 	struct timer_ln wait_timer; /* used also for delete */
400 411
 
401 412
 	/* UA Server */
402
-	struct ua_server  uas;
413
+	struct ua_server uas;
403 414
 	/* UA Clients */
404
-	struct ua_client  *uac;
405
-	
415
+	struct ua_client *uac;
416
+
406 417
 	/* store transaction state to be used for async transactions */
407 418
 	struct async_state async_backup;
408
-	
409
-	/* to-tags of 200/INVITEs which were received from downstream and 
410
-	 * forwarded or passed to UAC; note that there can be arbitrarily 
419
+
420
+	/* to-tags of 200/INVITEs which were received from downstream and
421
+	 * forwarded or passed to UAC; note that there can be arbitrarily
411 422
 	 * many due to downstream forking; */
412 423
 	struct totag_elem *fwded_totags;
413 424
 
414
-	     /* lists with avps */
425
+	/* lists with avps */
415 426
 	struct usr_avp *uri_avps_from;
416 427
 	struct usr_avp *uri_avps_to;
417 428
 	struct usr_avp *user_avps_from;
... ...
@@ -431,10 +442,10 @@ typedef struct cell
431 442
 
432 443
 #ifdef ENABLE_ASYNC_MUTEX
433 444
 	/* protect against concurrent async continues */
434
-	ser_lock_t   async_mutex;
445
+	ser_lock_t async_mutex;
435 446
 #endif
436 447
 
437
-	ticks_t fr_timeout;     /* final response interval for retr_bufs */
448
+	ticks_t fr_timeout;		/* final response interval for retr_bufs */
438 449
 	ticks_t fr_inv_timeout; /* final inv. response interval for retr_bufs */
439 450
 #ifdef TM_DIFF_RT_TIMEOUT
440 451
 	retr_timeout_t rt_t1_timeout_ms; /* start retr. interval for retr_bufs */
... ...
@@ -452,9 +463,9 @@ typedef struct cell
452 463
 	unsigned short on_branch_failure;
453 464
 	/* the onreply_route to be processed if registered to do so */
454 465
 	unsigned short on_reply;
455
-	 /* The route to take for each downstream branch separately */
466
+	/* The route to take for each downstream branch separately */
456 467
 	unsigned short on_branch;
457
-	 /* branch route backup for late branch add (t_append_branch) */
468
+	/* branch route backup for late branch add (t_append_branch) */
458 469
 	unsigned short on_branch_delayed;
459 470
 
460 471
 	/* place holder for MD5checksum, MD5_LEN bytes are extra alloc'ed */
... ...
@@ -465,11 +476,11 @@ typedef struct cell
465 476
 
466 477
 #if 0
467 478
 /* warning: padding too much => big size increase */
468
-#define ENTRY_PAD_TO  128 /* should be a multiple of cacheline size for 
469
-                             best performance*/
470
-#define ENTRY_PAD_BYTES	 \
471
-	(ENTRY_PAD_TO-2*sizeof(struct cell*)+sizeof(ser_lock_t)+sizeof(int)+ \
472
-	 				2*sizeof(long))
479
+#define ENTRY_PAD_TO 128 /* should be a multiple of cacheline size for
480
+						 * best performance*/
481
+#define ENTRY_PAD_BYTES                                            \
482
+	(ENTRY_PAD_TO - 2 * sizeof(struct cell *) + sizeof(ser_lock_t) \
483
+			+ sizeof(int) + 2 * sizeof(long))
473 484
 #else
474 485
 #define ENTRY_PAD_BYTES 0
475 486
 #endif
... ...
@@ -479,128 +490,122 @@ typedef struct entry
479 490
 {
480 491
 	/* WARNING: don't move or change order of next_c or prev_c
481 492
 	 * or breakage will occur */
482
-	struct cell*    next_c; 
483
-	struct cell*    prev_c;
493
+	struct cell *next_c;
494
+	struct cell *prev_c;
484 495
 	/* sync mutex */
485
-	ser_lock_t      mutex;
496
+	ser_lock_t mutex;
486 497
 	atomic_t locker_pid; /* pid of the process that holds the lock */
487
-	int rec_lock_level; /* recursive lock count */
498
+	int rec_lock_level;  /* recursive lock count */
488 499
 	/* currently highest sequence number in a synonym list */
489
-	unsigned int    next_label;
500
+	unsigned int next_label;
490 501
 #ifdef TM_HASH_STATS
491 502
 	unsigned long acc_entries;
492 503
 	unsigned long cur_entries;
493 504
 #endif
494 505
 	char _pad[ENTRY_PAD_BYTES];
495
-}entry_type;
496
-
506
+} entry_type;
497 507
 
498 508
 
499 509
 /* transaction table */
500
-struct s_table
510
+typedef struct s_table
501 511
 {
502 512
 	/* table of hash entries; each of them is a list of synonyms  */
503
-	struct entry   entries[ TABLE_ENTRIES ];
504
-};
513
+	struct entry entries[TABLE_ENTRIES];
514
+} s_table_t;
505 515
 
506
-/* pointer to the big table where all the transaction data
507
-   lives */
508
-extern struct s_table*  _tm_table; /* private internal stuff, don't touch
509
-									  directly */
516
+/* pointer to the big table where all the transaction data lives */
517
+extern struct s_table *_tm_table; /* private internal stuff, don't touch
518
+								 * directly */
510 519
 
511 520
 #define list_entry(ptr, type, member) \
512
-	((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
521
+	((type *)((char *)(ptr) - (unsigned long)(&((type *)0)->member)))
513 522
 
514 523
 #define get_retr_timer_payload(_tl_) \
515
-	list_entry( _tl_, struct retr_buf, retr_timer)
516
-#define get_fr_timer_payload(_tl_) \
517
-	list_entry( _tl_, struct retr_buf, fr_timer)
518
-#define get_wait_timer_payload(_tl_) \
519
-	list_entry( _tl_, struct cell, wait_tl)
520
-#define get_dele_timer_payload(_tl_) \
521
-	list_entry( _tl_, struct cell, dele_tl)
524
+	list_entry(_tl_, struct retr_buf, retr_timer)
525
+#define get_fr_timer_payload(_tl_) list_entry(_tl_, struct retr_buf, fr_timer)
526
+#define get_wait_timer_payload(_tl_) list_entry(_tl_, struct cell, wait_tl)
527
+#define get_dele_timer_payload(_tl_) list_entry(_tl_, struct cell, dele_tl)
522 528
 
523 529
 #define get_T_from_reply_rb(_rb_) \
524
-	list_entry( list_entry( _rb_, (struct ua_server), response),\
525
-		struct cell, uas)
526
-#define get_T_from_request_rb(_rb_, _br_) \
530
+	list_entry(list_entry(_rb_, (struct ua_server), response), struct cell, uas)
531
+#define get_T_from_request_rb(_rb_, _br_)                        \
527 532
 	list_entry( list_entry( (rb_, (struct ua_client), request) - \
528 533
 		(_br_)*sizeof(struct retr_buf), struct cell, uas)
529
-#define get_T_from_cancel_rb(_rb_, _br_) \
534
+#define get_T_from_cancel_rb(_rb_, _br_)                              \
530 535
 	list_entry( list_entry( (rb_, (struct ua_client), local_cancel) - \
531 536
 		(_br_)*sizeof(struct retr_buf), struct cell, uas)
532 537
 
533
-#define is_invite(_t_)           ((_t_)->flags&T_IS_INVITE_FLAG)
534
-#define is_local(_t_)            ((_t_)->flags&T_IS_LOCAL_FLAG)
535
-#define has_noisy_ctimer(_t_)    ((_t_)->flags&T_NOISY_CTIMER_FLAG)
536
-#define was_cancelled(_t_)       ((_t_)->flags&T_CANCELED)
537
-#define no_new_branches(_t_)     ((_t_)->flags&T_6xx)
538
+#define is_invite(_t_) ((_t_)->flags & T_IS_INVITE_FLAG)
539
+#define is_local(_t_) ((_t_)->flags & T_IS_LOCAL_FLAG)
540
+#define has_noisy_ctimer(_t_) ((_t_)->flags & T_NOISY_CTIMER_FLAG)
541
+#define was_cancelled(_t_) ((_t_)->flags & T_CANCELED)
542
+#define no_new_branches(_t_) ((_t_)->flags & T_6xx)
538 543
 
539 544
 
540 545
 void reset_kr(void);
541
-void set_kr( enum kill_reason kr );
546
+void set_kr(enum kill_reason kr);
542 547
 enum kill_reason get_kr(void);
543 548
 
544 549
 #define get_tm_table() (_tm_table)
545 550
 
546
-typedef struct s_table* (*tm_get_table_f)(void);
547
-struct s_table* tm_get_table(void);
551
+typedef struct s_table *(*tm_get_table_f)(void);
552
+struct s_table *tm_get_table(void);
548 553
 
549
-struct s_table* init_hash_table(void);
550
-void   free_hash_table(void);
554
+struct s_table *init_hash_table(void);
555
+void free_hash_table(void);
551 556
 
552
-void   free_cell_helper(tm_cell_t* dead_cell, int silent, const char *fname, unsigned int fline);
557
+void free_cell_helper(tm_cell_t *dead_cell, int silent, const char *fname,
558
+		unsigned int fline);
553 559
 #define free_cell(t) free_cell_helper((t), 0, __FILE__, __LINE__)
554 560
 #define free_cell_silent(t) free_cell_helper((t), 1, __FILE__, __LINE__)
555 561
 
556
-struct cell*  build_cell( struct sip_msg* p_msg );
562
+struct cell *build_cell(struct sip_msg *p_msg);
557 563
 
558 564
 #ifdef TM_HASH_STATS
559
-unsigned int transaction_count( void );
565
+unsigned int transaction_count(void);
560 566
 #endif
561 567
 
562 568
 
563 569
 /*  Takes an already created cell and links it into hash table on the
564 570
  *  appropriate entry. */
565
-inline static void insert_into_hash_table_unsafe( struct cell * p_cell,
566
-													unsigned int hash )
571
+inline static void insert_into_hash_table_unsafe(
572
+		struct cell *p_cell, unsigned int hash)
567 573
 {
568 574
 	p_cell->label = _tm_table->entries[hash].next_label++;
569 575
 #ifdef EXTRA_DEBUG
570 576
 	DEBUG("cell label: %u\n", p_cell->label);
571 577
 #endif
572
-	p_cell->hash_index=hash;
578
+	p_cell->hash_index = hash;
573 579
 	/* insert at the beginning */
574 580
 	clist_insert(&_tm_table->entries[hash], p_cell, next_c, prev_c);
575 581
 
576
-	/* update stats */
582
+/* update stats */
577 583
 #ifdef TM_HASH_STATS
578 584
 	_tm_table->entries[hash].cur_entries++;
579 585
 	_tm_table->entries[hash].acc_entries++;
580 586
 #endif
581
-	t_stats_new( is_local(p_cell) );
587
+	t_stats_new(is_local(p_cell));
582 588
 }
583 589
 
584 590
 
585
-
586 591
 /*  Un-link a  cell from hash_table, but the cell itself is not released */
587
-inline static void remove_from_hash_table_unsafe( struct cell * p_cell)
592
+inline static void remove_from_hash_table_unsafe(struct cell *p_cell)
588 593
 {
589 594
 	clist_rm(p_cell, next_c, prev_c);
590 595
 	p_cell->next_c = 0;
591 596
 	p_cell->prev_c = 0;
592
-#	ifdef EXTRA_DEBUG
597
+#ifdef EXTRA_DEBUG
593 598
 #ifdef TM_HASH_STATS
594
-	if (_tm_table->entries[p_cell->hash_index].cur_entries==0){
599
+	if(_tm_table->entries[p_cell->hash_index].cur_entries == 0) {
595 600
 		LOG(L_CRIT, "BUG: bad things happened: cur_entries=0\n");
596 601
 		abort();
597 602
 	}
598 603
 #endif
599
-#	endif
604
+#endif
600 605
 #ifdef TM_HASH_STATS
601 606
 	_tm_table->entries[p_cell->hash_index].cur_entries--;
602 607
 #endif
603
-	t_stats_deleted( is_local(p_cell) );
608
+	t_stats_deleted(is_local(p_cell));
604 609
 }
605 610
 
606 611
 /**
... ...
@@ -611,5 +616,3 @@ void tm_xdata_swap(tm_cell_t *t, tm_xlinks_t *xd, int mode);
611 616
 void tm_xdata_replace(tm_xdata_t *newxd, tm_xlinks_t *bakxd);
612 617
 
613 618
 #endif
614
-
615
-
... ...
@@ -13,8 +13,8 @@
13 13
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 14
  * GNU General Public License for more details.
15 15
  *
16
- * You should have received a copy of the GNU General Public License 
17
- * along with this program; if not, write to the Free Software 
16
+ * You should have received a copy of the GNU General Public License
17
+ * along with this program; if not, write to the Free Software
18 18
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
19 19
  */
20 20
 
... ...
@@ -28,35 +28,33 @@
28 28
 #include "../../core/dprint.h"
29 29
 
30 30
 
31
-
32
-#ifndef GEN_LOCK_T_PREFERED 
31
+#ifndef GEN_LOCK_T_PREFERED
33 32
 /* semaphore probing limits */
34
-#define SEM_MIN		16
35
-#define SEM_MAX		4096
33
+#define SEM_MIN 16
34
+#define SEM_MAX 4096
36 35
 
37 36
 /* we implement mutex here using lock sets; as the number of
38
-   semaphores may be limited (e.g. sysv) and number of synchronized 
39
-   elements high, we partition the synced SER elements and share 
40
-   semaphores in each of the partitions; we try to use as many 
41
-   semaphores as OS gives us for finest granularity. 
42
-
43
-   we allocate the locks according to the following plans:
44
-
45
-   1) we allocate a semaphore set for hash_entries and
46
-      try to use as many semaphores in it as OS allows;
47
-      we partition the hash_entries by available
48
-      semaphores which are shared  in each partition
49
-   2) cells get always the same semaphore as its hash
50
-      entry in which they live
51
-
52
-*/
37
+ * semaphores may be limited (e.g. sysv) and number of synchronized
38
+ * elements high, we partition the synced SER elements and share
39
+ * semaphores in each of the partitions; we try to use as many
40
+ * semaphores as OS gives us for finest granularity.
41
+ *
42
+ * we allocate the locks according to the following plans:
43
+ *
44
+ * 1) we allocate a semaphore set for hash_entries and
45
+ *    try to use as many semaphores in it as OS allows;
46
+ *    we partition the hash_entries by available
47
+ *    semaphores, which are shared in each partition
48
+ * 2) cells always get the same semaphore as the hash
49
+ *    entry in which they live
50
+ */
53 51
 
54 52
 /* and the maximum number of semaphores in the entry_semaphore set */
55 53
 static int sem_nr;
56
-gen_lock_set_t* entry_semaphore=0;
57
-gen_lock_set_t* reply_semaphore=0;
54
+gen_lock_set_t *entry_semaphore = 0;
55
+gen_lock_set_t *reply_semaphore = 0;
58 56
 #ifdef ENABLE_ASYNC_MUTEX
59
-gen_lock_set_t* async_semaphore=0;
57
+gen_lock_set_t *async_semaphore = 0;
60 58
 #endif
61 59
 #endif
62 60
 
... ...
@@ -71,99 +69,96 @@ int lock_initialize()
71 69
 #endif
72 70
 
73