Browse code

modules/tm: extended async usage - enables resuming of tx in the original route block where the tx was suspended, not only failure route - dedicated lock to prevent multiple invocations of suspend on tx (reply lock used to be used) - extra flag (T_ASYNC_CONTINUE) to mark a transaction that is being executed post suspend

Jason Penton authored on 30/09/2013 09:14:54
Showing 8 changed files
... ...
@@ -91,6 +91,7 @@ struct cell;
91 91
 struct timer;
92 92
 struct retr_buf;
93 93
 struct ua_client;
94
+struct async_state;
94 95
 
95 96
 #include "../../mem/shm_mem.h"
96 97
 #include "lock.h"
... ...
@@ -271,7 +272,13 @@ struct totag_elem {
271 271
 	volatile int acked;
272 272
 };
273 273
 
274
-
274
+/* structure for storing transaction state prior to suspending of async transactions */
275
+typedef struct async_state {
276
+	unsigned int backup_route;
277
+	unsigned int backup_branch;
278
+	unsigned int blind_uac;
279
+	unsigned int ruri_new;
280
+} async_state_type;
275 281
 
276 282
 /* transaction's flags */
277 283
 /* is the transaction's request an INVITE? */
... ...
@@ -309,8 +316,9 @@ struct totag_elem {
309 309
 #	define T_PASS_PROVISIONAL_FLAG (1<<11)
310 310
 #	define pass_provisional(_t_)	((_t_)->flags&T_PASS_PROVISIONAL_FLAG)
311 311
 #endif
312
+#define T_ASYNC_CONTINUE (1<<12) /* Is this transaction in a continuation after being suspended */
312 313
 
313
-#define T_DISABLE_INTERNAL_REPLY (1<<12) /* don't send internal negative reply */
314
+#define T_DISABLE_INTERNAL_REPLY (1<<13) /* don't send internal negative reply */
314 315
 
315 316
 /* unsigned short should be enough for a retr. timer: max. 65535 ms =>
316 317
  * max retr. = 65 s which should be enough and saves us 2*2 bytes */
... ...
@@ -417,6 +425,9 @@ typedef struct cell
417 417
 	/* UA Clients */
418 418
 	struct ua_client  uac[ MAX_BRANCHES ];
419 419
 	
420
+	/* store transaction state to be used for async transactions */
421
+	struct async_state async_backup;
422
+	
420 423
 	/* to-tags of 200/INVITEs which were received from downstream and 
421 424
 	 * forwarded or passed to UAC; note that there can be arbitrarily 
422 425
 	 * many due to downstream forking; */
... ...
@@ -435,7 +446,9 @@ typedef struct cell
435 435
 
436 436
 	/* protection against concurrent reply processing */
437 437
 	ser_lock_t   reply_mutex;
438
-	
438
+	/* protect against concurrent async continues */
439
+	ser_lock_t   async_mutex;
440
+		
439 441
 	ticks_t fr_timeout;     /* final response interval for retr_bufs */
440 442
 	ticks_t fr_inv_timeout; /* final inv. response interval for retr_bufs */
441 443
 #ifdef TM_DIFF_RT_TIMEOUT
... ...
@@ -71,6 +71,7 @@
71 71
 static int sem_nr;
72 72
 gen_lock_set_t* entry_semaphore=0;
73 73
 gen_lock_set_t* reply_semaphore=0;
74
+gen_lock_set_t* async_semaphore=0;
74 75
 #endif
75 76
 
76 77
 
... ...
@@ -100,6 +101,10 @@ again:
100 100
 			lock_set_destroy(reply_semaphore);
101 101
 			lock_set_dealloc(reply_semaphore);
102 102
 		}
103
+		if (async_semaphore!=0){
104
+			lock_set_destroy(async_semaphore);
105
+			lock_set_dealloc(async_semaphore);
106
+		}
103 107
 		
104 108
 		if (i==0){
105 109
 			LOG(L_CRIT, "lock_initialize: could not allocate semaphore"
... ...
@@ -154,6 +159,20 @@ again:
154 154
 			i--;
155 155
 			goto again;
156 156
 	}
157
+	i++;
158
+	if (((async_semaphore=lock_set_alloc(i))==0)||
159
+		(lock_set_init(async_semaphore)==0)){
160
+			if (async_semaphore){
161
+				lock_set_dealloc(async_semaphore);
162
+				async_semaphore=0;
163
+			}
164
+			DBG("DEBUG:lock_initialize: async semaphore initialization"
165
+				" failure: %s\n", strerror(errno));
166
+			probe_run=1;
167
+			i--;
168
+			goto again;
169
+	}
170
+	
157 171
 
158 172
 	/* return success */
159 173
 	LOG(L_INFO, "INFO: semaphore arrays of size %d allocated\n", sem_nr );
... ...
@@ -193,7 +212,11 @@ void lock_cleanup()
193 193
 		lock_set_destroy(reply_semaphore);
194 194
 		lock_set_dealloc(reply_semaphore);
195 195
 	};
196
-	entry_semaphore =  reply_semaphore = 0;
196
+	if (async_semaphore !=0) {
197
+		lock_set_destroy(async_semaphore);
198
+		lock_set_dealloc(async_semaphore);
199
+	};
200
+	entry_semaphore =  reply_semaphore = async_semaphore = 0;
197 201
 
198 202
 }
199 203
 #endif /*GEN_LOCK_T_PREFERED*/
... ...
@@ -229,7 +252,16 @@ int init_entry_lock( struct s_table* ht, struct entry *entry )
229 229
 	return 0;
230 230
 }
231 231
 
232
-
232
+int init_async_lock( struct cell *cell )
233
+{
234
+#ifdef GEN_LOCK_T_PREFERED
235
+	lock_init(&cell->async_mutex);
236
+#else
237
+	cell->async_mutex.semaphore_set=async_semaphore;
238
+	cell->async_mutex.semaphore_index = cell->hash_index % sem_nr;
239
+#endif /* GEN_LOCK_T_PREFERED */
240
+	return 0;
241
+}
233 242
 
234 243
 int release_cell_lock( struct cell *cell )
235 244
 {
... ...
@@ -76,6 +76,7 @@ void lock_cleanup(void);
76 76
 
77 77
 int init_cell_lock( struct cell *cell );
78 78
 int init_entry_lock( struct s_table* ht, struct entry *entry );
79
+int init_async_lock( struct cell *cell );
79 80
 
80 81
 
81 82
 int release_cell_lock( struct cell *cell );
... ...
@@ -755,6 +755,11 @@ int add_blind_uac( /*struct cell *t*/ )
755 755
 	membar_write(); /* to allow lockless prepare_to_cancel() we want to be sure
756 756
 					   all the writes finished before updating branch number*/
757 757
 	t->nr_of_outgoings=(branch+1);
758
+	t->async_backup.blind_uac = branch; /* whenever we create a blind UAC, lets save the current branch
759
+					 * this is used in async tm processing specifically to be able to route replies
760
+					 * that were possibly in response to a request forwarded on this blind UAC......
761
+					 * we still want replies to be processed as if it were a normal UAC */
762
+	
758 763
 	/* start FR timer -- protocol set by default to PROTO_NONE,
759 764
        which means retransmission timer will not be started
760 765
     */
... ...
@@ -806,13 +806,13 @@ static int _reply( struct cell *trans, struct sip_msg* p_msg,
806 806
 	}
807 807
 }
808 808
 
809
-/** create or restore a "fake environment" for running a failure_route.
810
- *if msg is set -> it will fake the env. vars conforming with the msg; if NULL
809
+/** create or restore a "fake environment" for running a failure_route, 
810
+ * OR an "async environment" depending on is_async_value (0=std failure-faked, 1=async)
811
+ * if msg is set -> it will fake the env. vars conforming with the msg; if NULL
811 812
  * the env. will be restore to original.
812
- * Side-effect: mark_ruri_consumed().
813
+ * Side-effect: mark_ruri_consumed() for faked env only.
813 814
  */
814
-void faked_env( struct cell *t, struct sip_msg *msg)
815
-{
815
+void faked_env(struct cell *t, struct sip_msg *msg, int is_async_env) {
816 816
 	static int backup_route_type;
817 817
 	static struct cell *backup_t;
818 818
 	static int backup_branch;
... ...
@@ -835,64 +835,83 @@ void faked_env( struct cell *t, struct sip_msg *msg)
835 835
 		 * a shmem-ed replica of the request; advertise it in route type;
836 836
 		 * for example t_reply needs to know that
837 837
 		 */
838
-		backup_route_type=get_route_type();
839
-		set_route_type(FAILURE_ROUTE);
840
-		/* don't bother backing up ruri state, since failure route
841
-		   is called either on reply or on timer and in both cases
842
-		   the ruri should not be used again for forking */
843
-		ruri_mark_consumed(); /* in failure route we assume ruri
844
-								 should not be used again for forking */
838
+		backup_route_type = get_route_type();
839
+
840
+		if (is_async_env) {
841
+			set_route_type(t->async_backup.backup_route);
842
+			if (t->async_backup.ruri_new) {
843
+				ruri_mark_new();
844
+			}
845
+		} else {
846
+			set_route_type(FAILURE_ROUTE);
847
+			/* don't bother backing up ruri state, since failure route
848
+			   is called either on reply or on timer and in both cases
849
+			   the ruri should not be used again for forking */
850
+			ruri_mark_consumed(); /* in failure route we assume ruri
851
+								     should not be used again for forking */
852
+		}
845 853
 		/* also, tm actions look in beginning whether transaction is
846 854
 		 * set -- whether we are called from a reply-processing
847 855
 		 * or a timer process, we need to set current transaction;
848 856
 		 * otherwise the actions would attempt to look the transaction
849 857
 		 * up (unnecessary overhead, refcounting)
850 858
 		 */
851
-		/* backup */
852
-		backup_t=get_t();
853
-		backup_branch=get_t_branch();
854
-		backup_msgid=global_msg_id;
855
-		/* fake transaction and message id */
856
-		global_msg_id=msg->id;
857
-		set_t(t, T_BR_UNDEFINED);
858
-		/* make available the avp list from transaction */
859
-
860
-		backup_uri_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, &t->uri_avps_from );
861
-		backup_uri_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &t->uri_avps_to );
862
-		backup_user_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, &t->user_avps_from );
863
-		backup_user_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, &t->user_avps_to );
864
-		backup_domain_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, &t->domain_avps_from );
865
-		backup_domain_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, &t->domain_avps_to );
859
+		if (!is_async_env) {
860
+			/* backup */
861
+			backup_t = get_t();
862
+			backup_branch = get_t_branch();
863
+			backup_msgid = global_msg_id;
864
+			/* fake transaction and message id */
865
+			global_msg_id = msg->id;
866
+			set_t(t, T_BR_UNDEFINED);
867
+
868
+			/* make available the avp list from transaction */
869
+			backup_uri_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, &t->uri_avps_from);
870
+			backup_uri_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, &t->uri_avps_to);
871
+			backup_user_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, &t->user_avps_from);
872
+			backup_user_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, &t->user_avps_to);
873
+			backup_domain_from = set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, &t->domain_avps_from);
874
+			backup_domain_to = set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, &t->domain_avps_to);
866 875
 #ifdef WITH_XAVP
867
-		backup_xavps = xavp_set_list(&t->xavps_list);
876
+			backup_xavps = xavp_set_list(&t->xavps_list);
868 877
 #endif
869
-		/* set default send address to the saved value */
870
-		backup_si=bind_address;
871
-		bind_address=t->uac[0].request.dst.send_sock;
872
-		/* backup lump lists */
873
-		backup_add_rm = t->uas.request->add_rm;
874
-		backup_body_lumps = t->uas.request->body_lumps;
875
-		backup_reply_lump = t->uas.request->reply_lump;
878
+			/* set default send address to the saved value */
879
+			backup_si = bind_address;
880
+			bind_address = t->uac[0].request.dst.send_sock;
881
+			/* backup lump lists */
882
+			backup_add_rm = t->uas.request->add_rm;
883
+			backup_body_lumps = t->uas.request->body_lumps;
884
+			backup_reply_lump = t->uas.request->reply_lump;
885
+		} else {
886
+			global_msg_id = msg->id;
887
+			set_t(t, t->async_backup.backup_branch);
888
+		}
876 889
 	} else {
877
-		/* restore original environment */
878
-		set_t(backup_t, backup_branch);
879
-		global_msg_id=backup_msgid;
880
-		set_route_type(backup_route_type);
881
-		/* restore original avp list */
882
-		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, backup_user_from );
883
-		set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, backup_user_to );
884
-		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, backup_domain_from );
885
-		set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, backup_domain_to );
886
-		set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, backup_uri_from );
887
-		set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, backup_uri_to );
890
+		if (!is_async_env) {
891
+			/* restore original environment */
892
+			set_t(backup_t, backup_branch);
893
+			global_msg_id = backup_msgid;
894
+			set_route_type(backup_route_type);
895
+			/* restore original avp list */
896
+			set_avp_list(AVP_TRACK_FROM | AVP_CLASS_USER, backup_user_from);
897
+			set_avp_list(AVP_TRACK_TO | AVP_CLASS_USER, backup_user_to);
898
+			set_avp_list(AVP_TRACK_FROM | AVP_CLASS_DOMAIN, backup_domain_from);
899
+			set_avp_list(AVP_TRACK_TO | AVP_CLASS_DOMAIN, backup_domain_to);
900
+			set_avp_list(AVP_TRACK_FROM | AVP_CLASS_URI, backup_uri_from);
901
+			set_avp_list(AVP_TRACK_TO | AVP_CLASS_URI, backup_uri_to);
888 902
 #ifdef WITH_XAVP
889
-		xavp_set_list(backup_xavps);
903
+			xavp_set_list(backup_xavps);
890 904
 #endif
891
-		bind_address=backup_si;
892
-		/* restore lump lists */
893
-		t->uas.request->add_rm = backup_add_rm;
894
-		t->uas.request->body_lumps = backup_body_lumps;
895
-		t->uas.request->reply_lump = backup_reply_lump;
905
+			bind_address = backup_si;
906
+			/* restore lump lists */
907
+			t->uas.request->add_rm = backup_add_rm;
908
+			t->uas.request->body_lumps = backup_body_lumps;
909
+			t->uas.request->reply_lump = backup_reply_lump;
910
+		} else {
911
+			/*we don't need to restore anything as there was no "environment" prior 
912
+						    to continuing (we are in a different process)*/
913
+			LM_WARN("nothing to restore in async continue, useless call\n");
914
+		}
896 915
 	}
897 916
 }
898 917
 
... ...
@@ -1031,7 +1050,7 @@ int run_failure_handlers(struct cell *t, struct sip_msg *rpl,
1031 1031
 		return 0;
1032 1032
 	}
1033 1033
 	/* fake also the env. conforming to the fake msg */
1034
-	faked_env( t, &faked_req);
1034
+	faked_env( t, &faked_req, 0);
1035 1035
 	/* DONE with faking ;-) -> run the failure handlers */
1036 1036
 
1037 1037
 	if (unlikely(has_tran_tmcbs( t, TMCB_ON_FAILURE)) ) {
... ...
@@ -1053,7 +1072,7 @@ int run_failure_handlers(struct cell *t, struct sip_msg *rpl,
1053 1053
 	}
1054 1054
 
1055 1055
 	/* restore original environment and free the fake msg */
1056
-	faked_env( t, 0);
1056
+	faked_env( t, 0, 0);
1057 1057
 	free_faked_req(&faked_req,t);
1058 1058
 
1059 1059
 	/* if failure handler changed flag, update transaction context */
... ...
@@ -1092,7 +1111,7 @@ int run_branch_failure_handlers(struct cell *t, struct sip_msg *rpl,
1092 1092
 		return 0;
1093 1093
 	}
1094 1094
 	/* fake also the env. conforming to the fake msg */
1095
-	faked_env( t, &faked_req);
1095
+	faked_env( t, &faked_req, 0);
1096 1096
 	set_route_type(BRANCH_FAILURE_ROUTE);
1097 1097
 	set_t(t, picked_branch);
1098 1098
 	/* DONE with faking ;-) -> run the branch_failure handlers */
... ...
@@ -1113,7 +1132,7 @@ int run_branch_failure_handlers(struct cell *t, struct sip_msg *rpl,
1113 1113
 	}
1114 1114
 
1115 1115
 	/* restore original environment and free the fake msg */
1116
-	faked_env( t, 0);
1116
+	faked_env( t, 0, 0);
1117 1117
 	free_faked_req(&faked_req,t);
1118 1118
 
1119 1119
 	/* if branch_failure handler changed flag, update transaction context */
... ...
@@ -1203,8 +1222,8 @@ int t_pick_branch(int inc_branch, int inc_code, struct cell *t, int *res_code)
1203 1203
 		 * to be a pending, incomplete branch. */
1204 1204
 		if ((!t->uac[b].request.buffer) && (t->uac[b].last_received>=200))
1205 1205
 			continue;
1206
-		/* there is still an unfinished UAC transaction; wait now! */
1207
-		if ( t->uac[b].last_received<200 )
1206
+		/* there is still an unfinished UAC transaction (we ignore unfinished blind UACs) wait now! */
1207
+		if ( t->uac[b].last_received<200 && !((t->flags&T_ASYNC_CONTINUE) && b==t->async_backup.blind_uac))
1208 1208
 			return -2;
1209 1209
 		/* if reply is null => t_send_branch "faked" reply, skip over it */
1210 1210
 		if ( rpl && 
... ...
@@ -235,7 +235,7 @@ void t_drop_replies(int v);
235 235
 
236 236
 void rpc_reply(rpc_t* rpc, void* c);
237 237
 
238
-void faked_env( struct cell *t,struct sip_msg *msg);
238
+void faked_env( struct cell *t,struct sip_msg *msg, int is_async_env);
239 239
 int fake_req(struct sip_msg *faked_req,
240 240
 		struct sip_msg *shmem_msg, int extra_flags, struct ua_client *uac);
241 241
 
... ...
@@ -33,6 +33,7 @@
33 33
 
34 34
 #include "../../action.h"
35 35
 #include "../../script_cb.h"
36
+#include "../../dset.h"
36 37
 
37 38
 #include "config.h"
38 39
 #include "sip_msg.h"
... ...
@@ -104,6 +105,12 @@ int t_suspend(struct sip_msg *msg,
104 104
 		return -1;
105 105
 	}
106 106
 
107
+	/* backup some extra info that can be used in continuation logic */
108
+	t->async_backup.backup_route = get_route_type();
109
+	t->async_backup.backup_branch = get_t_branch();
110
+	t->async_backup.ruri_new = ruri_get_forking_state();
111
+
112
+
107 113
 	return 0;
108 114
 }
109 115
 
... ...
@@ -140,14 +147,29 @@ int t_continue(unsigned int hash_index, unsigned int label,
140 140
 
141 141
 	/* The transaction has to be locked to protect it
142 142
 	 * form calling t_continue() multiple times simultaneously */
143
-	LOCK_REPLIES(t);
144
-
145
-	/* Try to find the blind UAC, and cancel its fr timer.
146
-	 * We assume that the last blind uac called t_continue(). */
147
-	for (	branch = t->nr_of_outgoings-1;
148
-		branch >= 0 && t->uac[branch].request.buffer;
149
-		branch--);
150
-
143
+	LOCK_ASYNC_CONTINUE(t);
144
+	t->flags |= T_ASYNC_CONTINUE;   /* we can now know anywhere in kamailio 
145
+					 * that we are executing post a suspend */
146
+	
147
+	/* which route block type were we in when we were suspended */
148
+	int cb_type = REQUEST_CB_TYPE;
149
+        switch (t->async_backup.backup_route) {
150
+            case REQUEST_ROUTE:
151
+                cb_type = REQUEST_CB_TYPE;
152
+                break;
153
+            case FAILURE_ROUTE:
154
+                cb_type = FAILURE_CB_TYPE;
155
+                break;
156
+            case TM_ONREPLY_ROUTE:
157
+                 cb_type = ONREPLY_CB_TYPE;
158
+                break;
159
+            case BRANCH_ROUTE:
160
+                cb_type = BRANCH_CB_TYPE;
161
+                break;
162
+        }
163
+	
164
+	branch = t->async_backup.blind_uac;	/* get the branch of the blind UAC setup 
165
+						 * during suspend */
151 166
 	if (branch >= 0) {
152 167
 		stop_rb_timers(&t->uac[branch].request);
153 168
 
... ...
@@ -155,20 +177,15 @@ int t_continue(unsigned int hash_index, unsigned int label,
155 155
 			/* Either t_continue() has already been
156 156
 			 * called or the branch has already timed out.
157 157
 			 * Needless to continue. */
158
-			UNLOCK_REPLIES(t);
158
+			UNLOCK_ASYNC_CONTINUE(t);
159 159
 			UNREF(t); /* t_unref would kill the transaction */
160 160
 			return 1;
161 161
 		}
162 162
 
163
-		/* Set last_received to something >= 200,
164
-		 * the actual value does not matter, the branch
165
-		 * will never be picked up for response forwarding.
166
-		 * If last_received is lower than 200,
167
-		 * then the branch may tried to be cancelled later,
168
-		 * for example when t_reply() is called from
169
-		 * a failure route => deadlock, because both
170
-		 * of them need the reply lock to be held. */
171
-		t->uac[branch].last_received=500;
163
+		/*we really don't need this next line anymore otherwise we will 
164
+		  never be able to forward replies after a (t_relay) on this branch.
165
+		  We want to try and treat this branch as 'normal' (as if it were a normal req, not async)' */
166
+		//t->uac[branch].last_received=500;
172 167
 		uac = &t->uac[branch];
173 168
 	}
174 169
 	/* else
... ...
@@ -183,22 +200,19 @@ int t_continue(unsigned int hash_index, unsigned int label,
183 183
 		ret = -1;
184 184
 		goto kill_trans;
185 185
 	}
186
-	faked_env( t, &faked_req);
186
+	faked_env( t, &faked_req, 1);
187 187
 
188
-	/* The sip msg is a faked msg just like in failure route
189
-	 * therefore execute the pre- and post-script callbacks
190
-	 * of failure route (Miklos)
191
-	 */
192
-	if (exec_pre_script_cb(&faked_req, FAILURE_CB_TYPE)>0) {
188
+	/* execute the pre/post -script callbacks based on original route block */
189
+	if (exec_pre_script_cb(&faked_req, cb_type)>0) {
193 190
 		if (run_top_route(route, &faked_req, 0)<0)
194 191
 			LOG(L_ERR, "ERROR: t_continue: Error in run_top_route\n");
195
-		exec_post_script_cb(&faked_req, FAILURE_CB_TYPE);
192
+		exec_post_script_cb(&faked_req, cb_type);
196 193
 	}
197 194
 
198 195
 	/* TODO: save_msg_lumps should clone the lumps to shm mem */
199 196
 
200 197
 	/* restore original environment and free the fake msg */
201
-	faked_env( t, 0);
198
+	faked_env( t, 0, 1);
202 199
 	free_faked_req(&faked_req, t);
203 200
 
204 201
 	/* update the flags */
... ...
@@ -224,7 +238,7 @@ int t_continue(unsigned int hash_index, unsigned int label,
224 224
 		}
225 225
 	}
226 226
 
227
-	UNLOCK_REPLIES(t);
227
+	UNLOCK_ASYNC_CONTINUE(t);
228 228
 
229 229
 	/* unref the transaction */
230 230
 	t_unref(t->uas.request);
... ...
@@ -241,10 +255,10 @@ kill_trans:
241 241
 			"reply generation failed\n");
242 242
 		/* The transaction must be explicitely released,
243 243
 		 * no more timer is running */
244
-		UNLOCK_REPLIES(t);
244
+		UNLOCK_ASYNC_CONTINUE(t);
245 245
 		t_release_transaction(t);
246 246
 	} else {
247
-		UNLOCK_REPLIES(t);
247
+		UNLOCK_ASYNC_CONTINUE(t);
248 248
 	}
249 249
 
250 250
 	t_unref(t->uas.request);
... ...
@@ -29,6 +29,9 @@
29 29
 #ifndef _T_SUSPEND_H
30 30
 #define _T_SUSPEND_H
31 31
 
32
+#define LOCK_ASYNC_CONTINUE(_t) lock(&(_t)->async_mutex )
33
+#define UNLOCK_ASYNC_CONTINUE(_t) unlock(&(_t)->async_mutex )
34
+
32 35
 int t_suspend(struct sip_msg *msg,
33 36
 		unsigned int *hash_index, unsigned int *label);
34 37
 typedef int (*t_suspend_f)(struct sip_msg *msg,