
sctp: connection reuse & connection tracking

- support for sctp connection tracking and true sctp connection
reuse for replies (see the sketch after this list).
We need this to support reply connection reuse with asymmetric
sctp peers (while there is no reason not to use the same sctp
port for sending and receiving messages, and doing otherwise
should be strongly discouraged, we never know what
implementation we'll have to deal with).
What makes this particularly complex is the not-yet-complete
sctp API, the slight but important behaviour differences
between its linux, freebsd and solaris implementations and the
fact that it is more geared towards serial, single-threaded
applications rather than parallel-processing, multi-process or
multi-threaded ones.

- keep track of the number of active and tracked sctp connections

- blacklist moved into sctp_handle_assoc_change()
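
A minimal sketch of the reply-reuse idea (illustrative only, not part of the
diff below): the ser id assigned when an association is tracked ends up in
the via ;i= parameter / msg->rcv.proto_reserved1, and the reply path can
recover the original association from it. Only sctp_con_get_assoc() and the
dest_info fields come from this commit; the helper name reply_dst_from_id()
and the exact fallback handling are assumptions.

	/* hypothetical helper, shown only to illustrate the lookup */
	static int reply_dst_from_id(unsigned int id, struct dest_info* dst)
	{
		struct socket_info* si;
		union sockaddr_union remote;
		int assoc_id;

		/* id recovered from the via ;i= param of the original request */
		assoc_id = sctp_con_get_assoc(id, &si, &remote, 0 /* don't delete */);
		if (assoc_id == 0)
			return -1;        /* assoc no longer tracked: caller falls back
			                     to a normal send towards the via address */
		dst->send_sock = si;  /* same local socket the request came in on */
		dst->to = remote;     /* tracked peer address & port */
		dst->id = id;         /* lets the sctp send code pick this assoc */
		return 0;
	}

forward_reply() in the first hunk below does the equivalent inline: when the
reply goes out over SCTP (or TCP/TLS) it looks for the id in the via ;i=
parameter (the rest of that code path is outside the hunk).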

Andrei Pelinescu-Onciul authored on 04/03/2009 20:56:00
Showing 5 changed files
... ...
@@ -641,11 +641,20 @@ int forward_reply(struct sip_msg* msg)
641 641
 	dst.comp=msg->via2->comp_no;
642 642
 #endif
643 643
 
644
+#if defined USE_TCP || defined USE_SCTP
645
+	if (
644 646
 #ifdef USE_TCP
645
-	if (dst.proto==PROTO_TCP
647
+			dst.proto==PROTO_TCP
646 648
 #ifdef USE_TLS
647 649
 			|| dst.proto==PROTO_TLS
648 650
 #endif
651
+#ifdef USE_SCTP
652
+			||
653
+#endif /* USE_SCTP */
654
+#endif /* USE_TCP */
655
+#ifdef USE_SCTP
656
+			dst.proto==PROTO_SCTP
657
+#endif /* USE_SCTP */
649 658
 			){
650 659
 		/* find id in i param if it exists */
651 660
 		if (msg->via1->i && msg->via1->i->value.s){
... ...
@@ -1959,6 +1959,14 @@ try_again:
1959 1959
 		}
1960 1960
 	}
1961 1961
 #endif /* USE_TCP */
1962
+#ifdef USE_SCTP
1963
+	if (!sctp_disable){
1964
+		if (init_sctp()<0){
1965
+			LOG(L_CRIT, "Could not initialize sctp, exiting...\n");
1966
+			goto error;
1967
+		}
1968
+	}
1969
+#endif /* USE_SCTP */
1962 1970
 	/* init_daemon? */
1963 1971
 	if (!dont_fork){
1964 1972
 		if ( daemonize(argv[0]) <0 ) goto error;
... ...
@@ -2287,7 +2287,7 @@ char* create_via_hf( unsigned int *len,
2287 2287
 	char* via;
2288 2288
 	str extra_params;
2289 2289
 	struct hostport hp;
2290
-#ifdef USE_TCP
2290
+#if defined USE_TCP || defined USE_SCTP
2291 2291
 	char* id_buf;
2292 2292
 	unsigned int id_len;
2293 2293
 
... ...
@@ -2299,13 +2299,21 @@ char* create_via_hf( unsigned int *len,
2299 2299
 	extra_params.s=0;
2300 2300
 
2301 2301
 
2302
-#ifdef USE_TCP
2302
+#if defined USE_TCP || defined USE_SCTP
2303 2303
 	/* add id if tcp */
2304
-	if (msg
2305
-	&& ((msg->rcv.proto==PROTO_TCP)
2304
+	if (msg && (
2305
+#ifdef USE_TCP
2306
+		(msg->rcv.proto==PROTO_TCP)
2306 2307
 #ifdef USE_TLS
2307 2308
 			|| (msg->rcv.proto==PROTO_TLS)
2308 2309
 #endif
2310
+#ifdef USE_SCTP
2311
+			||
2312
+#endif /* USE_SCTP */
2313
+#endif /* USE_TCP */
2314
+#ifdef USE_SCTP
2315
+			(msg->rcv.proto==PROTO_SCTP)
2316
+#endif /* USE_SCTP */
2309 2317
 			)){
2310 2318
 		if  ((id_buf=id_builder(msg, &id_len))==0){
2311 2319
 			LOG(L_ERR, "ERROR: create_via_hf:"
... ...
@@ -2318,13 +2326,13 @@ char* create_via_hf( unsigned int *len,
2318 2318
 		extra_params.s=id_buf;
2319 2319
 		extra_params.len=id_len;
2320 2320
 	}
2321
-#endif
2321
+#endif /* USE_TCP || USE_SCTP */
2322 2322
 
2323 2323
 	set_hostport(&hp, msg);
2324 2324
 	via = via_builder( len, send_info, branch,
2325 2325
 							extra_params.len?&extra_params:0, &hp);
2326 2326
 
2327
-#ifdef USE_TCP
2327
+#if defined USE_TCP || defined USE_SCTP
2328 2328
 	/* we do not need id_buf any more, the id is already in the new via header */
2329 2329
 	if (id_buf) pkg_free(id_buf);
2330 2330
 #endif
... ...
@@ -53,9 +53,15 @@
53 53
 #ifdef USE_DST_BLACKLIST
54 54
 #include "dst_blacklist.h"
55 55
 #endif /* USE_DST_BLACKLIST */
56
+#include "timer_ticks.h"
57
+#include "clist.h"
58
+#include "error.h"
59
+#include "timer.h"
56 60
 
57 61
 
58 62
 
63
+static atomic_t* sctp_conn_no;
64
+
59 65
 /* check if the underlying OS supports sctp
60 66
    returns 0 if yes, -1 on error */
61 67
 int sctp_check_support()
... ...
@@ -521,6 +527,1004 @@ error:
521 521
 #endif /* USE_SCTP_OO */
522 522
 
523 523
 
524
+#define SCTP_CONN_REUSE /* FIXME */
525
+#ifdef SCTP_CONN_REUSE
526
+
527
+/* we need SCTP_ADDR_HASH to be able to make inquiries about an existing
528
+   sctp association to a particular address (optional) */
529
+/*#define SCTP_ADDR_HASH*/
530
+
531
+#define SCTP_ID_HASH_SIZE 1024 /* must be 2^k */
532
+#define SCTP_ASSOC_HASH_SIZE 1024 /* must be 2^k */
533
+#define SCTP_ADDR_HASH_SIZE 1024 /* must be 2^k */
534
+
535
+/* lock method */
536
+#ifdef GEN_LOCK_T_UNLIMITED
537
+#define SCTP_HASH_LOCK_PER_BUCKET
538
+#elif defined GEN_LOCK_SET_T_UNLIMITED
539
+#define SCTP_HASH_LOCK_SET
540
+#else
541
+#define SCTP_HASH_ONE_LOCK
542
+#endif
543
+
544
+
545
+#ifdef SCTP_HASH_LOCK_PER_BUCKET
546
+/* lock included in the hash bucket */
547
+#define LOCK_SCTP_ID_H(h)		lock_get(&sctp_con_id_hash[(h)].lock)
548
+#define UNLOCK_SCTP_ID_H(h)		lock_release(&sctp_con_id_hash[(h)].lock)
549
+#define LOCK_SCTP_ASSOC_H(h)	lock_get(&sctp_con_assoc_hash[(h)].lock)
550
+#define UNLOCK_SCTP_ASSOC_H(h)	lock_release(&sctp_con_assoc_hash[(h)].lock)
551
+#define LOCK_SCTP_ADDR_H(h)		lock_get(&sctp_con_addr_hash[(h)].lock)
552
+#define UNLOCK_SCTP_ADDR_H(h)	lock_release(&sctp_con_addr_hash[(h)].lock)
553
+#elif defined SCTP_HASH_LOCK_SET
554
+static gen_lock_set_t* sctp_con_id_h_lock_set=0;
555
+static gen_lock_set_t* sctp_con_assoc_h_lock_set=0;
556
+static gen_lock_set_t* sctp_con_addr_h_lock_set=0;
557
+#define LOCK_SCTP_ID_H(h)		lock_set_get(sctp_con_id_h_lock_set, (h))
558
+#define UNLOCK_SCTP_ID_H(h)		lock_set_release(sctp_con_id_h_lock_set, (h))
559
+#define LOCK_SCTP_ASSOC_H(h)	lock_set_get(sctp_con_assoc_h_lock_set, (h))
560
+#define UNLOCK_SCTP_ASSOC_H(h)	\
561
+	lock_set_release(sctp_con_assoc_h_lock_set, (h))
562
+#define LOCK_SCTP_ADDR_H(h)	lock_set_get(sctp_con_addr_h_lock_set, (h))
563
+#define UNLOCK_SCTP_ADDR_H(h)	lock_set_release(sctp_con_addr_h_lock_set, (h))
564
+#else /* use only one lock */
565
+static gen_lock_t* sctp_con_id_h_lock=0;
566
+static gen_lock_t* sctp_con_assoc_h_lock=0;
567
+static gen_lock_t* sctp_con_addr_h_lock=0;
568
+#define LOCK_SCTP_ID_H(h)		lock_get(sctp_con_id_h_lock)
569
+#define UNLOCK_SCTP_ID_H(h)		lock_release(sctp_con_id_h_lock)
570
+#define LOCK_SCTP_ASSOC_H(h)	lock_get(sctp_con_assoc_h_lock)
571
+#define UNLOCK_SCTP_ASSOC_H(h)	lock_release(sctp_con_assoc_h_lock)
572
+#define LOCK_SCTP_ADDR_H(h)	lock_get(sctp_con_addr_h_lock)
573
+#define UNLOCK_SCTP_ADDR_H(h)	lock_release(sctp_con_addr_h_lock)
574
+#endif /* SCTP_HASH_LOCK_PER_BUCKET */
575
+
576
+
577
+/* sctp connection flags */
578
+#define SCTP_CON_UP_SEEN   1
579
+#define SCTP_CON_RCV_SEEN  2
580
+#define SCTP_CON_DOWN_SEEN 4
581
+
582
+struct sctp_connection{
583
+	unsigned int id;       /**< ser unique global id */
584
+	unsigned int assoc_id; /**< sctp assoc id (can be reused for new assocs)*/
585
+	struct socket_info* si; /**< local socket used */
586
+	unsigned flags; /**< internal flags UP_SEEN, RCV_SEEN, DOWN_SEEN */
587
+	ticks_t start;
588
+	ticks_t expire; 
589
+	union sockaddr_union remote; /**< remote ip & port */
590
+};
591
+
592
+struct sctp_lst_connector{
593
+	/* id hash */
594
+	struct sctp_con_elem* next_id;
595
+	struct sctp_con_elem* prev_id;
596
+	/* assoc hash */
597
+	struct sctp_con_elem* next_assoc;
598
+	struct sctp_con_elem* prev_assoc;
599
+#ifdef SCTP_ADDR_HASH
600
+	/* addr hash */
601
+	struct sctp_con_elem* next_addr;
602
+	struct sctp_con_elem* prev_addr;
603
+#endif /* SCTP_ADDR_HASH */
604
+};
605
+
606
+struct sctp_con_elem{
607
+	struct sctp_lst_connector l; /* must be first */
608
+	atomic_t refcnt;
609
+	/* data */
610
+	struct sctp_connection con;
611
+};
612
+
613
+struct sctp_con_id_hash_head{
614
+	struct sctp_lst_connector l; /* must be first */
615
+#ifdef SCTP_HASH_LOCK_PER_BUCKET
616
+	gen_lock_t lock;
617
+#endif /* SCTP_HASH_LOCK_PER_BUCKET */
618
+};
619
+
620
+struct sctp_con_assoc_hash_head{
621
+	struct sctp_lst_connector l; /* must be first */
622
+#ifdef SCTP_HASH_LOCK_PER_BUCKET
623
+	gen_lock_t lock;
624
+#endif /* SCTP_HASH_LOCK_PER_BUCKET */
625
+};
626
+
627
+#ifdef SCTP_ADDR_HASH
628
+struct sctp_con_addr_hash_head{
629
+	struct sctp_lst_connector l; /* must be first */
630
+#ifdef SCTP_HASH_LOCK_PER_BUCKET
631
+	gen_lock_t lock;
632
+#endif /* SCTP_HASH_LOCK_PER_BUCKET */
633
+};
634
+#endif /* SCTP_ADDR_HASH */
635
+
636
+static struct sctp_con_id_hash_head*     sctp_con_id_hash;
637
+static struct sctp_con_assoc_hash_head*  sctp_con_assoc_hash;
638
+#ifdef SCTP_ADDR_HASH
639
+static struct sctp_con_addr_hash_head*  sctp_con_addr_hash;
640
+#endif /* SCTP_ADDR_HASH */
641
+
642
+static atomic_t* sctp_id;
643
+static atomic_t* sctp_conn_tracked;
644
+
645
+
646
+#define get_sctp_con_id_hash(id) ((id) % SCTP_ID_HASH_SIZE)
647
+#define get_sctp_con_assoc_hash(assoc_id)  ((assoc_id) % SCTP_ASSOC_HASH_SIZE)
648
+#ifdef SCTP_ADDR_HASH
649
+static inline unsigned get_sctp_con_addr_hash(union sockaddr_union* remote,
650
+											struct socket_info* si)
651
+{
652
+	struct ip_addr ip;
653
+	unsigned short port;
654
+	unsigned h;
655
+	
656
+	su2ip_addr(&ip, remote);
657
+	port=su_getport(remote);
658
+	if (likely(ip.len==4))
659
+		h=ip.u.addr32[0]^port;
660
+	else if (ip.len==16)
661
+		h=ip.u.addr32[0]^ip.u.addr32[1]^ip.u.addr32[2]^ ip.u.addr32[3]^port;
662
+	else
663
+		h=0; /* error */
664
+	/* make sure the first bits are influenced by all 32
665
+	 * (the first log2(SCTP_ADDR_HASH_SIZE) bits should be a mix of all
666
+	 *  32)*/
667
+	h ^= h>>17;
668
+	h ^= h>>7;
669
+	return h & (SCTP_ADDR_HASH_SIZE-1);
670
+}
671
+#endif /* SCTP_ADDR_HASH */
672
+
673
+
674
+
675
+/** destroy sctp conn hashes. */
676
+void destroy_sctp_con_tracking()
677
+{
678
+	int r;
679
+	
680
+#ifdef SCTP_HASH_LOCK_PER_BUCKET
681
+	if (sctp_con_id_hash)
682
+		for(r=0; r<SCTP_ID_HASH_SIZE; r++)
683
+			lock_destroy(&sctp_con_id_hash[r].lock);
684
+	if (sctp_con_assoc_hash)
685
+		for(r=0; r<SCTP_ASSOC_HASH_SIZE; r++)
686
+			lock_destroy(&sctp_con_assoc_hash[r].lock);
687
+#	ifdef SCTP_ADDR_HASH
688
+	if (sctp_con_addr_hash)
689
+		for(r=0; r<SCTP_ADDR_HASH_SIZE; r++)
690
+			lock_destroy(&sctp_con_addr_hash[r].lock);
691
+#	endif /* SCTP_ADDR_HASH */
692
+#elif defined SCTP_HASH_LOCK_SET
693
+	if (sctp_con_id_h_lock_set){
694
+		lock_set_destroy(sctp_con_id_h_lock_set);
695
+		lock_set_dealloc(sctp_con_id_h_lock_set);
696
+		sctp_con_id_h_lock_set=0;
697
+	}
698
+	if (sctp_con_assoc_h_lock_set){
699
+		lock_set_destroy(sctp_con_assoc_h_lock_set);
700
+		lock_set_dealloc(sctp_con_assoc_h_lock_set);
701
+		sctp_con_assoc_h_lock_set=0;
702
+	}
703
+#	ifdef SCTP_ADDR_HASH
704
+	if (sctp_con_addr_h_lock_set){
705
+		lock_set_destroy(sctp_con_addr_h_lock_set);
706
+		lock_set_dealloc(sctp_con_addr_h_lock_set);
707
+		sctp_con_addr_h_lock_set=0;
708
+	}
709
+#	endif /* SCTP_ADDR_HASH */
710
+#else /* SCTP_HASH_ONE_LOCK */
711
+	if (sctp_con_id_h_lock){
712
+		lock_destroy(sctp_con_id_h_lock);
713
+		lock_dealloc(sctp_con_id_h_lock);
714
+		sctp_con_id_h_lock=0;
715
+	}
716
+	if (sctp_con_assoc_h_lock){
717
+		lock_destroy(sctp_con_assoc_h_lock);
718
+		lock_dealloc(sctp_con_assoc_h_lock);
719
+		sctp_con_assoc_h_lock=0;
720
+	}
721
+#	ifdef SCTP_ADDR_HASH
722
+	if (sctp_con_addr_h_lock){
723
+		lock_destroy(sctp_con_addr_h_lock);
724
+		lock_dealloc(sctp_con_addr_h_lock);
725
+		sctp_con_addr_h_lock=0;
726
+	}
727
+#	endif /* SCTP_ADDR_HASH */
728
+#endif /* SCTP_HASH_LOCK_PER_BUCKET/SCTP_HASH_LOCK_SET/one lock */
729
+	if (sctp_con_id_hash){
730
+		shm_free(sctp_con_id_hash);
731
+		sctp_con_id_hash=0;
732
+	}
733
+	if (sctp_con_assoc_hash){
734
+		shm_free(sctp_con_assoc_hash);
735
+		sctp_con_assoc_hash=0;
736
+	}
737
+#ifdef SCTP_ADDR_HASH
738
+	if (sctp_con_addr_hash){
739
+		shm_free(sctp_con_addr_hash);
740
+		sctp_con_addr_hash=0;
741
+	}
742
+#endif /* SCTP_ADDR_HASH */
743
+	if (sctp_id){
744
+		shm_free(sctp_id);
745
+		sctp_id=0;
746
+	}
747
+	if (sctp_conn_tracked){
748
+		shm_free(sctp_conn_tracked);
749
+		sctp_conn_tracked=0;
750
+	}
751
+}
752
+
753
+
754
+
755
+/** initialize sctp_conn hashes.
756
+  * @return 0 on success, <0 on error
757
+  */
758
+int init_sctp_con_tracking()
759
+{
760
+	int r, ret;
761
+	
762
+	sctp_con_id_hash=shm_malloc(SCTP_ID_HASH_SIZE*sizeof(*sctp_con_id_hash));
763
+	sctp_con_assoc_hash=shm_malloc(SCTP_ASSOC_HASH_SIZE*
764
+									sizeof(*sctp_con_assoc_hash));
765
+#ifdef SCTP_ADDR_HASH
766
+	sctp_con_addr_hash=shm_malloc(SCTP_ADDR_HASH_SIZE*
767
+									sizeof(*sctp_con_addr_hash));
768
+#endif /* SCTP_ADDR_HASH */
769
+	sctp_id=shm_malloc(sizeof(*sctp_id));
770
+	sctp_conn_tracked=shm_malloc(sizeof(*sctp_conn_tracked));
771
+	if (sctp_con_id_hash==0 || sctp_con_assoc_hash==0 ||
772
+#ifdef SCTP_ADDR_HASH
773
+			sctp_con_addr_hash==0 ||
774
+#endif /* SCTP_ADDR_HASH */
775
+			sctp_id==0 || sctp_conn_tracked==0){
776
+		ERR("sctp init: memory allocation error\n");
777
+		ret=E_OUT_OF_MEM;
778
+		goto error;
779
+	}
780
+	atomic_set(sctp_id, 0);
781
+	atomic_set(sctp_conn_tracked, 0);
782
+	for (r=0; r<SCTP_ID_HASH_SIZE; r++)
783
+		clist_init(&sctp_con_id_hash[r], l.next_id, l.prev_id);
784
+	for (r=0; r<SCTP_ASSOC_HASH_SIZE; r++)
785
+		clist_init(&sctp_con_assoc_hash[r], l.next_assoc, l.prev_assoc);
786
+#ifdef SCTP_ADDR_HASH
787
+	for (r=0; r<SCTP_ADDR_HASH_SIZE; r++)
788
+		clist_init(&sctp_con_addr_hash[r], l.next_addr, l.prev_addr);
789
+#endif /* SCTP_ADDR_HASH */
790
+#ifdef SCTP_HASH_LOCK_PER_BUCKET
791
+	for (r=0; r<SCTP_ID_HASH_SIZE; r++){
792
+		if (lock_init(&sctp_con_id_hash[r].lock)==0){
793
+			ret=-1;
794
+			ERR("sctp init: failed to initialize locks\n");
795
+			goto error;
796
+		}
797
+	}
798
+	for (r=0; r<SCTP_ASSOC_HASH_SIZE; r++){
799
+		if (lock_init(&sctp_con_assoc_hash[r].lock)==0){
800
+			ret=-1;
801
+			ERR("sctp init: failed to initialize locks\n");
802
+			goto error;
803
+		}
804
+	}
805
+#	ifdef SCTP_ADDR_HASH
806
+	for (r=0; r<SCTP_ADDR_HASH_SIZE; r++){
807
+		if (lock_init(&sctp_con_addr_hash[r].lock)==0){
808
+			ret=-1;
809
+			ERR("sctp init: failed to initialize locks\n");
810
+			goto error;
811
+		}
812
+	}
813
+#	endif /* SCTP_ADDR_HASH */
814
+#elif defined SCTP_HASH_LOCK_SET
815
+	sctp_con_id_h_lock_set=lock_set_alloc(SCTP_ID_HASH_SIZE);
816
+	sctp_con_assoc_h_lock_set=lock_set_alloc(SCTP_ASSOC_HASH_SIZE);
817
+#	ifdef SCTP_ADDR_HASH
818
+	sctp_con_addr_h_lock_set=lock_set_alloc(SCTP_ADDR_HASH_SIZE);
819
+#	endif /* SCTP_ADDR_HASH */
820
+	if (sctp_con_id_h_lock_set==0 || sctp_con_assoc_h_lock_set==0
821
+#	ifdef SCTP_ADDR_HASH
822
+			|| sctp_con_addr_h_lock_set==0
823
+#	endif /* SCTP_ADDR_HASH */
824
+			){
825
+		ret=E_OUT_OF_MEM;
826
+		ERR("sctp_init: failed to alloc lock sets\n");
827
+		goto error;
828
+	}
829
+	if (lock_set_init(sctp_con_id_h_lock_set)==0){
830
+		lock_set_dealloc(sctp_con_id_h_lock_set);
831
+		sctp_con_id_h_lock_set=0;
832
+		ret=-1;
833
+		ERR("sctp init: failed to initialize lock set\n");
834
+		goto error;
835
+	}
836
+	if (lock_set_init(sctp_con_assoc_h_lock_set)==0){
837
+		lock_set_dealloc(sctp_con_assoc_h_lock_set);
838
+		sctp_con_assoc_h_lock_set=0;
839
+		ret=-1;
840
+		ERR("sctp init: failed to initialize lock set\n");
841
+		goto error;
842
+	}
843
+#	ifdef SCTP_ADDR_HASH
844
+	if (lock_set_init(sctp_con_addr_h_lock_set)==0){
845
+		lock_set_dealloc(sctp_con_addr_h_lock_set);
846
+		sctp_con_addr_h_lock_set=0;
847
+		ret=-1;
848
+		ERR("sctp init: failed to initialize lock set\n");
849
+		goto error;
850
+	}
851
+#	endif /* SCTP_ADDR_HASH */
852
+#else /* SCTP_HASH_ONE_LOCK */
853
+	sctp_con_id_h_lock=lock_alloc();
854
+	sctp_con_assoc_h_lock=lock_alloc();
855
+#	ifdef SCTP_ADDR_HASH
856
+	sctp_con_addr_h_lock=lock_alloc();
857
+#	endif /* SCTP_ADDR_HASH */
858
+	if (sctp_con_id_h_lock==0 || sctp_con_assoc_h_lock==0
859
+#	ifdef SCTP_ADDR_HASH
860
+			|| sctp_con_addr_h_lock==0
861
+#	endif /* SCTP_ADDR_HASH */
862
+			){
863
+		ret=E_OUT_OF_MEM;
864
+		ERR("sctp init: failed to alloc locks\n");
865
+		goto error;
866
+	}
867
+	if (lock_init(sctp_con_id_h_lock)==0){
868
+		lock_dealloc(sctp_con_id_h_lock);
869
+		sctp_con_id_h_lock=0;
870
+		ret=-1;
871
+		ERR("sctp init: failed to initialize lock\n");
872
+		goto error;
873
+	}
874
+	if (lock_init(sctp_con_assoc_h_lock)==0){
875
+		lock_dealloc(sctp_con_assoc_h_lock);
876
+		sctp_con_assoc_h_lock=0;
877
+		ret=-1;
878
+		ERR("sctp init: failed to initialize lock\n");
879
+		goto error;
880
+	}
881
+#	ifdef SCTP_ADDR_HASH
882
+	if (lock_init(sctp_con_addr_h_lock)==0){
883
+		lock_dealloc(sctp_con_addr_h_lock);
884
+		sctp_con_addr_h_lock=0;
885
+		ret=-1;
886
+		ERR("sctp init: failed to initialize lock\n");
887
+		goto error;
888
+	}
889
+#	endif /* SCTP_ADDR_HASH */
890
+#endif /* SCTP_HASH_LOCK_PER_BUCKET/SCTP_HASH_LOCK_SET/one lock */
891
+	return 0;
892
+error:
893
+	destroy_sctp_con_tracking();
894
+	return ret;
895
+}
896
+
897
+
898
+
899
+#if 0
900
+/** adds "e" to the hashes, safe locking version.*/
901
+static void sctp_con_add(struct sctp_con_elem* e)
902
+{
903
+	unsigned hash;
904
+	DBG("sctp_con_add(%p) ( ser id %d, assoc_id %d)\n",
905
+			e, e->con.id, e->con.assoc_id);
906
+	
907
+	e->l.next_id=e->l.prev_id=0;
908
+	e->l.next_assoc=e->l.prev_assoc=0;
909
+#ifdef SCTP_ADDR_HASH
910
+	e->l.next_addr=e->l.prev_addr=0;
911
+	e->refcnt.val+=3; /* account for the 3 lists */
912
+#else /* SCTP_ADDR_HASH */
913
+	e->refcnt.val+=2; /* account for the 2 lists */
914
+#endif /* SCTP_ADDR_HASH */
915
+	hash=get_sctp_con_id_hash(e->con.id);
916
+	DBG("adding to con id hash %d\n", hash);
917
+	LOCK_SCTP_ID_H(hash);
918
+		clist_insert(&sctp_con_id_hash[hash], e, l.next_id, l.prev_id);
919
+	UNLOCK_SCTP_ID_H(hash);
920
+	hash=get_sctp_con_assoc_hash(e->con.assoc_id);
921
+	DBG("adding to assoc_id hash %d\n", hash);
922
+	LOCK_SCTP_ASSOC_H(hash);
923
+		clist_insert(&sctp_con_assoc_hash[hash], e,
924
+						l.next_assoc, l.prev_assoc);
925
+	UNLOCK_SCTP_ASSOC_H(hash);
926
+#ifdef SCTP_ADDR_HASH
927
+	hash=get_sctp_con_addr_hash(&e->con.remote, e->con.si);
928
+	DBG("adding to addr hash %d\n", hash);
929
+	LOCK_SCTP_ADDR_H(hash);
930
+		clist_insert(&sctp_con_addr_hash[hash], e,
931
+						l.next_addr, l.prev_addr);
932
+	UNLOCK_SCTP_ADDR_H(hash);
933
+#endif /* SCTP_ADDR_HASH */
934
+	atomic_inc(sctp_conn_tracked);
935
+}
936
+#endif
937
+
938
+
939
+
940
+/** helper internal del elem function, the id hash must be locked.
941
+  * WARNING: the id hash(h) _must_ be locked (LOCK_SCTP_ID_H(h)).
942
+  * @param h - id hash
943
+  * @param e - sctp_con_elem to delete (from all the hashes)
944
+  * @return 0 if the id hash was unlocked, 1 if it's still locked */
945
+inline static int _sctp_con_del_id_locked(unsigned h, struct sctp_con_elem* e)
946
+{
947
+	unsigned assoc_id_h;
948
+	int deref; /* delayed de-reference counter */
949
+	int locked;
950
+#ifdef SCTP_ADDR_HASH
951
+	unsigned addr_h;
952
+#endif /* SCTP_ADDR_HASH */
953
+	
954
+	locked=1;
955
+	clist_rm(e, l.next_id, l.prev_id);
956
+	e->l.next_id=e->l.prev_id=0; /* mark it as id unhashed */
957
+	/* delay atomic dereference, so that we'll perform only one
958
+	   atomic op. even for multiple derefs. It also has the
959
+	   nice side-effect that the entry will be guaranteed to be
960
+	   referenced until we perform the delayed deref. at the end,
961
+	   so we don't need to keep some lock to prevent somebody from
962
+	   deleting the entry from under us */
963
+	deref=1; /* removed from one list =>  deref once */
964
+	/* remove it from the assoc hash if needed */
965
+	if (likely(e->l.next_assoc)){
966
+		UNLOCK_SCTP_ID_H(h);
967
+		locked=0; /* no longer id-locked */
968
+		/* we haven't dec. refcnt, so it's still safe to use e */
969
+		assoc_id_h=get_sctp_con_assoc_hash(e->con.assoc_id);
970
+		LOCK_SCTP_ASSOC_H(assoc_id_h);
971
+			/* make sure nobody removed it in the meantime */
972
+			if (likely(e->l.next_assoc)){
973
+				clist_rm(e, l.next_assoc, l.prev_assoc);
974
+				e->l.next_assoc=e->l.prev_assoc=0; /* mark it as removed */
975
+				deref++; /* rm'ed from the assoc list => inc. delayed deref. */
976
+			}
977
+		UNLOCK_SCTP_ASSOC_H(assoc_id_h);
978
+	}
979
+#ifdef SCTP_ADDR_HASH
980
+	/* remove it from the addr. hash if needed */
981
+	if (likely(e->l.next_addr)){
982
+		if (unlikely(locked)){
983
+			UNLOCK_SCTP_ID_H(h);
984
+			locked=0; /* no longer id-locked */
985
+		}
986
+		addr_h=get_sctp_con_addr_hash(&e->con.remote, e->con.si);
987
+		LOCK_SCTP_ADDR_H(addr_h);
988
+			/* make sure nobody removed it in the meantime */
989
+			if (likely(e->l.next_addr)){
990
+				clist_rm(e, l.next_addr, l.prev_addr);
991
+				e->l.next_addr=e->l.prev_addr=0; /* mark it as removed */
992
+				deref++; /* rm'ed from the addr list => inc. delayed deref. */
993
+			}
994
+		UNLOCK_SCTP_ADDR_H(addr_h);
995
+	}
996
+#endif /* SCTP_ADDR_HASH */
997
+	
998
+	/* perform the delayed de-reference */
999
+	if (atomic_add(&e->refcnt, -deref)==0){
1000
+		atomic_dec(sctp_conn_tracked);
1001
+		shm_free(e);
1002
+	}
1003
+	else
1004
+		DBG("del assoc post-deref (kept): ser id %d, assoc_id %d,"
1005
+			" post-refcnt %d, deref %d, post-tracked %d\n",
1006
+			e->con.id, e->con.assoc_id, atomic_get(&e->refcnt), deref,
1007
+			atomic_get(sctp_conn_tracked));
1008
+	return locked;
1009
+}
1010
+
1011
+
1012
+
1013
+/** helper internal del elem function, the assoc hash must be locked.
1014
+  * WARNING: the assoc hash(h) _must_ be locked (LOCK_SCTP_ASSOC_H(h)).
1015
+  * @param h - assoc hash
1016
+  * @param e - sctp_con_elem to delete (from all the hashes)
1017
+  * @return 0 if the assoc hash was unlocked, 1 if it's still locked */
1018
+inline static int _sctp_con_del_assoc_locked(unsigned h,
1019
+												struct sctp_con_elem* e)
1020
+{
1021
+	unsigned id_hash;
1022
+	int deref; /* delayed de-reference counter */
1023
+	int locked;
1024
+#ifdef SCTP_ADDR_HASH
1025
+	unsigned addr_h;
1026
+#endif /* SCTP_ADDR_HASH */
1027
+	
1028
+	locked=1;
1029
+	clist_rm(e, l.next_assoc, l.prev_assoc);
1030
+	e->l.next_assoc=e->l.prev_assoc=0; /* mark it as assoc unhashed */
1031
+	/* delay atomic dereference, so that we'll perform only one
1032
+	   atomic op. even for multiple derefs. It also has the
1033
+	   nice side-effect that the entry will be guaranteed to be
1034
+	   referenced until we perform the delayed deref. at the end,
1035
+	   so we don't need to keep some lock to prevent somebody from
1036
+	   deleting the entry from under us */
1037
+	deref=1; /* removed from one list =>  deref once */
1038
+	/* remove it from the id hash if needed */
1039
+	if (likely(e->l.next_id)){
1040
+		UNLOCK_SCTP_ASSOC_H(h);
1041
+		locked=0; /* no longer assoc-hash-locked */
1042
+		/* we have a ref. to it so it's still safe to use e */
1043
+		id_hash=get_sctp_con_id_hash(e->con.id);
1044
+		LOCK_SCTP_ID_H(id_hash);
1045
+			/* make sure nobody removed it in the meantime */
1046
+			if (likely(e->l.next_id)){
1047
+				clist_rm(e, l.next_id, l.prev_id);
1048
+				e->l.next_id=e->l.prev_id=0; /* mark it as removed */
1049
+				deref++; /* rm'ed from the id list => inc. delayed deref. */
1050
+			}
1051
+		UNLOCK_SCTP_ID_H(id_hash);
1052
+	}
1053
+#ifdef SCTP_ADDR_HASH
1054
+	/* remove it from the addr. hash if needed */
1055
+	if (likely(e->l.next_addr)){
1056
+		if (unlikely(locked)){
1057
+			UNLOCK_SCTP_ASSOC_H(h);
1058
+			locked=0; /* no longer id-locked */
1059
+		}
1060
+		addr_h=get_sctp_con_addr_hash(&e->con.remote, e->con.si);
1061
+		LOCK_SCTP_ADDR_H(addr_h);
1062
+			/* make sure nobody removed it in the meantime */
1063
+			if (likely(e->l.next_addr)){
1064
+				clist_rm(e, l.next_addr, l.prev_addr);
1065
+				e->l.next_addr=e->l.prev_addr=0; /* mark it as removed */
1066
+				deref++; /* rm'ed from the addr list => inc. delayed deref. */
1067
+			}
1068
+		UNLOCK_SCTP_ADDR_H(addr_h);
1069
+	}
1070
+#endif /* SCTP_ADDR_HASH */
1071
+	if (atomic_add(&e->refcnt, -deref)==0){
1072
+		atomic_dec(sctp_conn_tracked);
1073
+		shm_free(e);
1074
+	}
1075
+	else
1076
+		DBG("del assoc post-deref (kept): ser id %d, assoc_id %d,"
1077
+				" post-refcnt %d, deref %d, post-tracked %d\n",
1078
+				e->con.id, e->con.assoc_id, atomic_get(&e->refcnt), deref,
1079
+				atomic_get(sctp_conn_tracked));
1080
+	return locked;
1081
+}
1082
+
1083
+
1084
+
1085
+#ifdef SCTP_ADDR_HASH
1086
+/** helper internal del elem function, the addr hash must be locked.
1087
+  * WARNING: the addr hash(h) _must_ be locked (LOCK_SCTP_ADDR_H(h)).
1088
+  * @param h - addr hash
1089
+  * @param e - sctp_con_elem to delete (from all the hashes)
1090
+  * @return 0 if the addr hash was unlocked, 1 if it's still locked */
1091
+inline static int _sctp_con_del_addr_locked(unsigned h,
1092
+												struct sctp_con_elem* e)
1093
+{
1094
+	unsigned id_hash;
1095
+	unsigned assoc_id_h;
1096
+	int deref; /* delayed de-reference counter */
1097
+	int locked;
1098
+	
1099
+	locked=1;
1100
+	clist_rm(e, l.next_addr, l.prev_addr);
1101
+	e->l.next_addr=e->l.prev_addr=0; /* mark it as addr unhashed */
1102
+	/* delay atomic dereference, so that we'll perform only one
1103
+	   atomic op. even for multiple derefs. It also has the
1104
+	   nice side-effect that the entry will be guaranteed to be
1105
+	   referenced until we perform the delayed deref. at the end,
1106
+	   so we don't need to keep some lock to prevent somebody from
1107
+	   deleting the entry from under us */
1108
+	deref=1; /* removed from one list =>  deref once */
1109
+	/* remove it from the id hash if needed */
1110
+	if (likely(e->l.next_id)){
1111
+		UNLOCK_SCTP_ADDR_H(h);
1112
+		locked=0; /* no longer addr-hash-locked */
1113
+		/* we have a ref. to it so it's still safe to use e */
1114
+		id_hash=get_sctp_con_id_hash(e->con.id);
1115
+		LOCK_SCTP_ID_H(id_hash);
1116
+			/* make sure nobody removed it in the meantime */
1117
+			if (likely(e->l.next_id)){
1118
+				clist_rm(e, l.next_id, l.prev_id);
1119
+				e->l.next_id=e->l.prev_id=0; /* mark it as removed */
1120
+				deref++; /* rm'ed from the id list => inc. delayed deref. */
1121
+			}
1122
+		UNLOCK_SCTP_ID_H(id_hash);
1123
+	}
1124
+	/* remove it from the assoc hash if needed */
1125
+	if (likely(e->l.next_assoc)){
1126
+		if (locked){
1127
+			UNLOCK_SCTP_ADDR_H(h);
1128
+			locked=0; /* no longer addr-hash-locked */
1129
+		}
1130
+		/* we haven't dec. refcnt, so it's still safe to use e */
1131
+		assoc_id_h=get_sctp_con_assoc_hash(e->con.assoc_id);
1132
+		LOCK_SCTP_ASSOC_H(assoc_id_h);
1133
+			/* make sure nobody removed it in the meantime */
1134
+			if (likely(e->l.next_assoc)){
1135
+				clist_rm(e, l.next_assoc, l.prev_assoc);
1136
+				e->l.next_assoc=e->l.prev_assoc=0; /* mark it as removed */
1137
+				deref++; /* rm'ed from the assoc list => inc. delayed deref. */
1138
+			}
1139
+		UNLOCK_SCTP_ASSOC_H(assoc_id_h);
1140
+	}
1141
+	if (atomic_add(&e->refcnt, -deref)==0){
1142
+		atomic_dec(sctp_conn_tracked);
1143
+		shm_free(e);
1144
+	}
1145
+	else
1146
+		DBG("del assoc post-deref (kept): ser id %d, assoc_id %d,"
1147
+				" post-refcnt %d, deref %d, post-tracked %d\n",
1148
+				e->con.id, e->con.assoc_id, atomic_get(&e->refcnt), deref,
1149
+				atomic_get(sctp_conn_tracked));
1150
+	return locked;
1151
+}
1152
+#endif /* SCTP_ADDR_HASH */
1153
+
1154
+
1155
+
1156
+/** using id, get the corresponding sctp assoc & socket. 
1157
+ *  @param id - ser unique global id
1158
+ *  @param si  - result parameter, filled with the socket info on success
1159
+ *  @param remote - result parameter, filled with the address and port
1160
+ *  @param del - if 1 delete the entry,
1161
+ *  @return assoc_id (!=0) on success & sets si, 0 on not found
1162
+ * si and remote will not be touched on failure.
1163
+ *
1164
+ */
1165
+int sctp_con_get_assoc(unsigned int id, struct socket_info** si, 
1166
+								union sockaddr_union *remote, int del)
1167
+{
1168
+	unsigned h;
1169
+	ticks_t now; 
1170
+	struct sctp_con_elem* e;
1171
+	struct sctp_con_elem* tmp;
1172
+	int ret;
1173
+	
1174
+	ret=0;
1175
+	now=get_ticks_raw();
1176
+	h=get_sctp_con_id_hash(id);
1177
+#if 0
1178
+again:
1179
+#endif
1180
+	LOCK_SCTP_ID_H(h);
1181
+		clist_foreach_safe(&sctp_con_id_hash[h], e, tmp, l.next_id){
1182
+			if(e->con.id==id){
1183
+				ret=e->con.assoc_id;
1184
+				*si=e->con.si;
1185
+				*remote=e->con.remote;
1186
+				if (del){
1187
+					if (_sctp_con_del_id_locked(h, e)==0)
1188
+						goto skip_unlock;
1189
+				}else
1190
+					e->con.expire=now+S_TO_TICKS(sctp_options.sctp_autoclose);
1191
+				break;
1192
+			}
1193
+#if 0
1194
+			else if (TICKS_LT(e->con.expire, now)){
1195
+				WARN("sctp con: found expired assoc %d, id %d (%d s ago)\n",
1196
+						e->con.assoc_id, e->con.id,
1197
+						TICKS_TO_S(now-e->con.expire));
1198
+				if (_sctp_con_del_id_locked(h, e)==0)
1199
+					goto again; /* if unlocked need to restart the list */
1200
+			}
1201
+#endif
1202
+		}
1203
+	UNLOCK_SCTP_ID_H(h);
1204
+skip_unlock:
1205
+	return ret;
1206
+}
1207
+
1208
+
1209
+
1210
+/** using the assoc_id, remote addr. & socket, get the corresp. internal id.
1211
+ *  @param assoc_id - sctp assoc id
1212
+ *  @param si  - socket on which the packet was received
1213
+ *  @param del - if 1 delete the entry,
1214
+ *  @return ser id (!=0) on success, 0 on not found
1215
+ */
1216
+int sctp_con_get_id(unsigned int assoc_id, union sockaddr_union* remote,
1217
+					struct socket_info* si, int del)
1218
+{
1219
+	unsigned h;
1220
+	ticks_t now; 
1221
+	struct sctp_con_elem* e;
1222
+	struct sctp_con_elem* tmp;
1223
+	int ret;
1224
+	
1225
+	ret=0;
1226
+	now=get_ticks_raw();
1227
+	h=get_sctp_con_assoc_hash(assoc_id);
1228
+#if 0
1229
+again:
1230
+#endif
1231
+	LOCK_SCTP_ASSOC_H(h);
1232
+		clist_foreach_safe(&sctp_con_assoc_hash[h], e, tmp, l.next_assoc){
1233
+			if(e->con.assoc_id==assoc_id && e->con.si==si &&
1234
+					su_cmp(remote, &e->con.remote)){
1235
+				ret=e->con.id;
1236
+				if (del){
1237
+					if (_sctp_con_del_assoc_locked(h, e)==0)
1238
+						goto skip_unlock;
1239
+				}else
1240
+					e->con.expire=now+S_TO_TICKS(sctp_options.sctp_autoclose);
1241
+				break;
1242
+			}
1243
+#if 0
1244
+			else if (TICKS_LT(e->con.expire, now)){
1245
+				WARN("sctp con: found expired assoc %d, id %d (%d s ago)\n",
1246
+						e->con.assoc_id, e->con.id,
1247
+						TICKS_TO_S(now-e->con.expire));
1248
+				if (_sctp_con_del_assoc_locked(h, e)==0)
1249
+					goto again; /* if unlocked need to restart the list */
1250
+			}
1251
+#endif
1252
+		}
1253
+	UNLOCK_SCTP_ASSOC_H(h);
1254
+skip_unlock:
1255
+	return ret;
1256
+}
1257
+
1258
+
1259
+
1260
+#ifdef SCTP_ADDR_HASH
1261
+/** using the dest. & source socket, get the corresponding id and assoc_id 
1262
+ *  @param remote   - peer address & port
1263
+ *  @param si       - local source socket
1264
+ *  @param assoc_id - result, filled with the sctp assoc_id
1265
+ *  @param del - if 1 delete the entry,
1266
+ *  @return ser id (!=0) on success, 0 on not found
1267
+ */
1268
+int sctp_con_addr_get_id_assoc(union sockaddr_union* remote,
1269
+								struct socket_info* si,
1270
+								int* assoc_id, int del)
1271
+{
1272
+	unsigned h;
1273
+	ticks_t now; 
1274
+	struct sctp_con_elem* e;
1275
+	struct sctp_con_elem* tmp;
1276
+	int ret;
1277
+	
1278
+	ret=0;
1279
+	*assoc_id=0;
1280
+	now=get_ticks_raw();
1281
+	h=get_sctp_con_addr_hash(remote, si);
1282
+again:
1283
+	LOCK_SCTP_ADDR_H(h);
1284
+		clist_foreach_safe(&sctp_con_addr_hash[h], e, tmp, l.next_addr){
1285
+			if(su_cmp(remote, &e->con.remote) && e->con.si==si){
1286
+				ret=e->con.id;
1287
+				*assoc_id=e->con.assoc_id;
1288
+				if (del){
1289
+					if (_sctp_con_del_addr_locked(h, e)==0)
1290
+						goto skip_unlock;
1291
+				}else
1292
+					e->con.expire=now+S_TO_TICKS(sctp_options.sctp_autoclose);
1293
+				break;
1294
+			}
1295
+#if 0
1296
+			else if (TICKS_LT(e->con.expire, now)){
1297
+				WARN("sctp con: found expired assoc %d, id %d (%d s ago)\n",
1298
+						e->con.assoc_id, e->con.id,
1299
+						TICKS_TO_S(now-e->con.expire));
1300
+				if (_sctp_con_del_addr_locked(h, e)==0)
1301
+					goto again; /* if unlocked need to restart the list */
1302
+			}
1303
+#endif
1304
+		}
1305
+	UNLOCK_SCTP_ADDR_H(h);
1306
+skip_unlock:
1307
+	return ret;
1308
+}
1309
+#endif /* SCTP_ADDR_HASH */
1310
+
1311
+
1312
+
1313
+/** del con tracking for (assoc_id, si).
1314
+ * @return 0 on success, -1 on error (not found)
1315
+ */
1316
+#define sctp_con_del_assoc(assoc_id, si) \
1317
+	(-(sctp_con_get_id((assoc_id), (si), 1)==0))
1318
+
1319
+
1320
+
1321
+/** create a new sctp con elem.
1322
+  * @param id - ser connection id
1323
+  * @param assoc_id - sctp assoc id
1324
+  * @param si - corresp. socket
1325
+  * @param remote - remote side
1326
+  * @return pointer to shm allocated sctp_con_elem on success, 0 on error
1327
+  */
1328
+struct sctp_con_elem* sctp_con_new(unsigned id, unsigned assoc_id, 
1329
+									struct socket_info* si,
1330
+									union sockaddr_union* remote)
1331
+{
1332
+	struct sctp_con_elem* e;
1333
+	
1334
+	e=shm_malloc(sizeof(*e));
1335
+	if (unlikely(e==0))
1336
+		goto error;
1337
+	e->l.next_id=e->l.prev_id=0;
1338
+	e->l.next_assoc=e->l.prev_assoc=0;
1339
+	atomic_set(&e->refcnt, 0);
1340
+	e->con.id=id;
1341
+	e->con.assoc_id=assoc_id;
1342
+	e->con.si=si;
1343
+	e->con.flags=0;
1344
+	if (likely(remote))
1345
+		e->con.remote=*remote;
1346
+	else
1347
+		memset(&e->con.remote, 0, sizeof(e->con.remote));
1348
+	e->con.start=get_ticks_raw();
1349
+	e->con.expire=e->con.start+S_TO_TICKS(sctp_options.sctp_autoclose);
1350
+	return e;
1351
+error:
1352
+	return 0;
1353
+}
1354
+
1355
+
1356
+
1357
+/** handles an event (ev) on an sctp assoc_id.
1358
+  * @return ser id on success (!=0) or 0 on not found/error
1359
+  */
1360
+static int sctp_con_track(int assoc_id, struct socket_info* si,
1361
+							union sockaddr_union* remote, int ev)
1362
+{
1363
+	int id;
1364
+	unsigned hash;
1365
+	unsigned assoc_hash;
1366
+	struct sctp_con_elem* e;
1367
+	struct sctp_con_elem* tmp;
1368
+	
1369
+	id=0;
1370
+	DBG("sctp_con_track(%d, %p, %d) \n", assoc_id, si, ev);
1371
+	
1372
+	/* search for (assoc_id, si) */
1373
+	assoc_hash=get_sctp_con_assoc_hash(assoc_id);
1374
+	LOCK_SCTP_ASSOC_H(assoc_hash);
1375
+		clist_foreach_safe(&sctp_con_assoc_hash[assoc_hash], e, tmp,
1376
+								l.next_assoc){
1377
+			/* we need to use the remote side address, because at least
1378
+			   on linux assoc_ids are immediately reused (even if sctp
1379
+			   autoclose is off) and so it's possible that the association
1380
+			   id we saved is already closed and assigned to another
1381
+			   association by the time we search for it */
1382
+			if(e->con.assoc_id==assoc_id && e->con.si==si &&
1383
+					su_cmp(remote, &e->con.remote)){
1384
+				if (ev==SCTP_CON_DOWN_SEEN){
1385
+					if (e->con.flags & SCTP_CON_UP_SEEN){
1386
+						/* DOWN after UP => delete */
1387
+						id=e->con.id;
1388
+						/* do delete */
1389
+						if (_sctp_con_del_assoc_locked(assoc_hash, e)==0)
1390
+							goto found; /* skip unlock */
1391
+					}else{
1392
+						/* DOWN after DOWN => error
1393
+						   DOWN after RCV w/ no UP -> not possible
1394
+						    since we never create a tracking entry on RCV
1395
+							only */
1396
+						BUG("unexpected flags: %x for assoc_id %d, id %d"
1397
+								", sctp con %p\n", e->con.flags, assoc_id,
1398
+								e->con.id, e);
1399
+						/* do delete */
1400
+						if (_sctp_con_del_assoc_locked(assoc_hash, e)==0)
1401
+							goto found; /* skip unlock */
1402
+					}
1403
+				}else if (ev==SCTP_CON_RCV_SEEN){
1404
+					/* RCV after UP or DOWN => just mark RCV as seen */
1405
+					id=e->con.id;
1406
+					e->con.flags |= SCTP_CON_RCV_SEEN;
1407
+				}else{
1408
+					/* SCTP_CON_UP */
1409
+					if (e->con.flags & SCTP_CON_DOWN_SEEN){
1410
+						/* UP after DOWN => delete */
1411
+						id=e->con.id;
1412
+						/* do delete */
1413
+						if (_sctp_con_del_assoc_locked(assoc_hash, e)==0)
1414
+							goto found; /* skip unlock */
1415
+					}else{
1416
+						/* UP after UP or after RCVD => BUG */
1417
+						BUG("connection with same assoc_id (%d) already"
1418
+								" present, flags %x\n",
1419
+								assoc_id, e->con.flags);
1420
+					}
1421
+				}
1422
+				UNLOCK_SCTP_ASSOC_H(assoc_hash);
1423
+				goto found;
1424
+			}
1425
+		}
1426
+		/* not found */
1427
+		if (unlikely(ev!=SCTP_CON_RCV_SEEN)){
1428
+			/* UP or DOWN and no tracking entry => create new tracking entry
1429
+			   for both of them (because we can have a re-ordered DOWN before
1430
+			   the UP) */
1431
+again:
1432
+				id=atomic_add(sctp_id, 1);
1433
+				if (unlikely(id==0)){
1434
+					/* overflow  and 0 is not a valid id */
1435
+					goto again;
1436
+				}
1437
+				e=sctp_con_new(id, assoc_id, si, remote);
1438
+				if (likely(e)){
1439
+					e->con.flags=ev;
1440
+					e->l.next_id=e->l.prev_id=0;
1441
+					e->l.next_assoc=e->l.prev_assoc=0;
1442
+#ifdef SCTP_ADDR_HASH
1443
+					e->l.next_addr=e->l.prev_addr=0;
1444
+					e->refcnt.val+=3; /* account for the 3 lists */
1445
+#else /* SCTP_ADDR_HASH */
1446
+					e->refcnt.val+=2; /* account for the 2 lists */
1447
+#endif /* SCTP_ADDR_HASH */
1448
+					/* already locked */
1449
+					clist_insert(&sctp_con_assoc_hash[assoc_hash], e,
1450
+									l.next_assoc, l.prev_assoc);
1451
+					hash=get_sctp_con_id_hash(e->con.id);
1452
+					LOCK_SCTP_ID_H(hash);
1453
+						clist_insert(&sctp_con_id_hash[hash], e,
1454
+									l.next_id, l.prev_id);
1455
+					UNLOCK_SCTP_ID_H(hash);
1456
+#ifdef SCTP_ADDR_HASH
1457
+					hash=get_sctp_con_addr_hash(&e->con.remote, e->con.si);
1458
+					LOCK_SCTP_ADDR_H(hash);
1459
+						clist_insert(&sctp_con_addr_hash[hash], e,
1460
+									l.next_addr, l.prev_addr);
1461
+					UNLOCK_SCTP_ADDR_H(hash);
1462
+#endif /* SCTP_ADDR_HASH */
1463
+					atomic_inc(sctp_conn_tracked);
1464
+				}
1465
+		} /* else not found and RCV -> ignore
1466
+			 We cannot create a new entry because we don't know when to
1467
+			 delete it (we can have UP DOWN RCV which would result in a
1468
+			 tracking entry living forever). This means that if we receive
1469
+			 a msg. on an assoc. before it's UP notification we won't know
1470
+			 the id for connection reuse, but since happens very rarely it's
1471
+			 an acceptable tradeoff */
1472
+	UNLOCK_SCTP_ASSOC_H(assoc_hash);
1473
+	if (unlikely(e==0)){
1474
+		ERR("memory allocation failure\n");
1475
+		goto error;
1476
+	}
1477
+found:
1478
+	return id;
1479
+error:
1480
+	return 0;
1481
+}
1482
+
1483
+
1484
+
1485
+#endif /* SCTP_CONN_REUSE */
1486
+
1487
+
1488
+int init_sctp()
1489
+{
1490
+	int ret;
1491
+	
1492
+	ret=0;
1493
+	/* sctp options must be initialized before  calling this function */
1494
+	sctp_conn_no=shm_malloc(sizeof(*sctp_conn_no));
1495
+	if ( sctp_conn_no==0){
1496
+		ERR("sctp init: memory allocation error\n");
1497
+		ret=E_OUT_OF_MEM;
1498
+		goto error;
1499
+	}
1500
+	atomic_set(sctp_conn_no, 0);
1501
+#ifdef SCTP_CONN_REUSE
1502
+	return init_sctp_con_tracking();
1503
+#endif
1504
+error:
1505
+	return ret;
1506
+}
1507
+
1508
+
1509
+
1510
+void destroy_sctp()
1511
+{
1512
+	if (sctp_conn_no){
1513
+		shm_free(sctp_conn_no);
1514
+		sctp_conn_no=0;
1515
+	}
1516
+#ifdef SCTP_CONN_REUSE
1517
+	destroy_sctp_con_tracking();
1518
+#endif
1519
+}
1520
+
1521
+
524 1522
 
525 1523
 static int sctp_msg_send_raw(struct dest_info* dst, char* buf, unsigned len,
526 1524
 						struct sctp_sndrcvinfo* sndrcv_info);
... ...
@@ -593,7 +1597,7 @@ static char* sctp_paddr_change_state2s(unsigned int state)
593 593
 
594 594
 
595 595
 /* handle SCTP_SEND_FAILED notifications: if packet marked for retries
596
- * retry the send (with 0 associd)
596
+ * retry the send (with 0 assoc_id)
597 597
  * returns 0 on success, -1 on failure
598 598
  */
599 599
 static int sctp_handle_send_failed(struct socket_info* si,
... ...
@@ -676,7 +1680,73 @@ static int sctp_handle_assoc_change(struct socket_info* si,
676 676
 									int state,
677 677
 									int assoc_id)
678 678
 {
679
-	return -1; /* failure, not implemented */
679
+	int ret;
680
+	
681
+	ret=-1;
682
+	switch(state){
683
+		case SCTP_COMM_UP:
684
+			atomic_inc(sctp_conn_no);
685
+#ifdef SCTP_CONN_REUSE
686
+			/* new connection, track it */
687
+			sctp_con_track(assoc_id, si, su, SCTP_CON_UP_SEEN);
688
+#if 0
689
+again:
690
+			id=atomic_add(sctp_id, 1);
691
+			if (unlikely(id==0)){
692
+				/* overflow  and 0 is not a valid id */
693
+				goto again;
694
+			}
695
+			e=sctp_con_new(id, assoc_id, si, su);
696
+			if (unlikely(e==0)){
697
+				ERR("memory allocation failure\n");
698
+			}else{
699
+				sctp_con_add(e);
700
+				ret=0;
701
+			}
702
+#endif
703
+#endif /* SCTP_CONN_REUSE */
704
+			break;
705
+		case SCTP_COMM_LOST:
706
+#ifdef USE_DST_BLACKLIST
707
+			/* blacklist only if send_retries is turned off (if on we don't
708
+			   know here if we did retry or we are at the first error) */
709
+			if (cfg_get(core, core_cfg, use_dst_blacklist) &&
710
+					(sctp_options.sctp_send_retries==0))
711
+						dst_blacklist_su(BLST_ERR_SEND, PROTO_SCTP, su, 0);
712
+#endif /* USE_DST_BLACKLIST */
713
+			/* no break */
714
+		case SCTP_SHUTDOWN_COMP:
715
+			atomic_dec(sctp_conn_no);
716
+#ifdef SCTP_CONN_REUSE
717
+			/* connection down*/
718
+			sctp_con_track(assoc_id, si, su, SCTP_CON_DOWN_SEEN);
719
+#if 0
720
+			if (unlikely(sctp_con_del_assoc(assoc_id, si)!=0))
721
+				WARN("sctp con: tried to remove inexistent connection\n");
722
+			else
723
+				ret=0;
724
+#endif
725
+#endif /* SCTP_CONN_REUSE */
726
+			break;
727
+		case SCTP_RESTART:
728
+			/* do nothing on restart */
729
+			break;
730
+		case SCTP_CANT_STR_ASSOC:
731
+			/* do nothing when failing to start an assoc
732
+			  (in this case we never see SCTP_COMM_UP so we never 
733
+			  track the assoc) */
734
+#ifdef USE_DST_BLACKLIST
735
+			/* blacklist only if send_retries is turned off (if on we don't 
736
+			   know here if we did retry or we are at the first error) */
737
+			if (cfg_get(core, core_cfg, use_dst_blacklist) &&
738
+					(sctp_options.sctp_send_retries==0))
739
+						dst_blacklist_su(BLST_ERR_CONNECT, PROTO_SCTP, su, 0);
740
+#endif /* USE_DST_BLACKLIST */
741
+			break;
742
+		default:
743
+			break;
744
+	}
745
+	return ret;
680 746
 }
681 747
 
682 748
 
... ...
@@ -688,8 +1758,6 @@ static int sctp_handle_notification(struct socket_info* si,
688 688
 	union sctp_notification* snp;
689 689
 	char su_buf[SU2A_MAX_STR_SIZE];
690 690
 	
691
-	DBG("sctp_rcv_loop: MSG_NOTIFICATION\n");
692
-	
693 691
 	#define SNOT DBG
694 692
 	#define ERR_LEN_TOO_SMALL(length, val, bind_addr, from_su, text) \
695 693
 		if (unlikely((length)<(val))){\
... ...
@@ -714,7 +1782,7 @@ static int sctp_handle_notification(struct socket_info* si,
714 714
 			ERR_LEN_TOO_SMALL(len, sizeof(struct sctp_remote_error), si, su,
715 715
 								"SCTP_REMOTE_ERROR");
716 716
 			SNOT("sctp notification from %s on %.*s:%d: SCTP_REMOTE_ERROR:"
717
-					" %d, len %d\n, assoc. %d",
717
+					" %d, len %d\n, assoc_id %d",
718 718
 					su2a(su, sizeof(*su)), si->name.len, si->name.s,
719 719
 					si->port_no,
720 720
 					ntohs(snp->sn_remote_error.sre_error),
... ...
@@ -726,7 +1794,7 @@ static int sctp_handle_notification(struct socket_info* si,
726 726
 			ERR_LEN_TOO_SMALL(len, sizeof(struct sctp_send_failed), si, su,
727 727
 								"SCTP_SEND_FAILED");
728 728
 			SNOT("sctp notification from %s on %.*s:%d: SCTP_SEND_FAILED:"
729
-					" error %d, assoc. %d, flags %x\n",
729
+					" error %d, assoc_id %d, flags %x\n",
730 730
 					su2a(su, sizeof(*su)), si->name.len, si->name.s,
731 731
 					si->port_no, snp->sn_send_failed.ssf_error,
732 732
 					snp->sn_send_failed.ssf_assoc_id,
... ...
@@ -740,7 +1808,7 @@ static int sctp_handle_notification(struct socket_info* si,
740 740
 									&snp->sn_paddr_change.spc_aaddr, 
741 741
 									sizeof(snp->sn_paddr_change.spc_aaddr)));
742 742
 			SNOT("sctp notification from %s on %.*s:%d: SCTP_PEER_ADDR_CHANGE"
743
-					": %s: %s: assoc. %d \n",
743
+					": %s: %s: assoc_id %d \n",
744 744
 					su2a(su, sizeof(*su)), si->name.len, si->name.s,
745 745
 					si->port_no, su_buf,
746 746
 					sctp_paddr_change_state2s(snp->sn_paddr_change.spc_state),
... ...
@@ -751,7 +1819,7 @@ static int sctp_handle_notification(struct socket_info* si,
751 751
 			ERR_LEN_TOO_SMALL(len, sizeof(struct sctp_shutdown_event), si, su,
752 752
 								"SCTP_SHUTDOWN_EVENT");
753 753
 			SNOT("sctp notification from %s on %.*s:%d: SCTP_SHUTDOWN_EVENT:"
754
-					" assoc. %d\n",
754
+					" assoc_id %d\n",
755 755
 					su2a(su, sizeof(*su)), si->name.len, si->name.s,
756 756
 					si->port_no, snp->sn_shutdown_event.sse_assoc_id);
757 757
 			break;
... ...
@@ -759,7 +1827,7 @@ static int sctp_handle_notification(struct socket_info* si,
759 759
 			ERR_LEN_TOO_SMALL(len, sizeof(struct sctp_assoc_change), si, su,
760 760
 								"SCTP_ASSOC_CHANGE");
761 761
 			SNOT("sctp notification from %s on %.*s:%d: SCTP_ASSOC_CHANGE"
762
-					": %s: assoc. %d, ostreams %d, istreams %d\n",
762
+					": %s: assoc_id %d, ostreams %d, istreams %d\n",
763 763
 					su2a(su, sizeof(*su)), si->name.len, si->name.s,
764 764
 					si->port_no,
765 765
 					sctp_assoc_change_state2s(snp->sn_assoc_change.sac_state),
... ...
@@ -767,21 +1835,6 @@ static int sctp_handle_notification(struct socket_info* si,
767 767
 					snp->sn_assoc_change.sac_outbound_streams,
768 768
 					snp->sn_assoc_change.sac_inbound_streams
769 769
 					);
770
-#ifdef USE_DST_BLACKLIST
771
-			/* blacklist only if send_retries is turned off (if on we don't 
772
-			   know here if we did retry or we are at the first error) */
773
-			if (cfg_get(core, core_cfg, use_dst_blacklist) &&
774
-					(sctp_options.sctp_send_retries==0)){
775
-				switch(snp->sn_assoc_change.sac_state) {
776
-					case SCTP_CANT_STR_ASSOC:
777
-						dst_blacklist_su(BLST_ERR_CONNECT, PROTO_SCTP, su, 0);
778
-						break;
779
-					case SCTP_COMM_LOST:
780
-						dst_blacklist_su(BLST_ERR_SEND, PROTO_SCTP, su, 0);
781
-						break;
782
-				}
783
-			}
784
-#endif /* USE_DST_BLACKLIST */
785 770
 			sctp_handle_assoc_change(si, su, snp->sn_assoc_change.sac_state,
786 771
 										snp->sn_assoc_change.sac_assoc_id);
787 772
 			break;
... ...
@@ -798,8 +1851,9 @@ static int sctp_handle_notification(struct socket_info* si,
798 798
 		case SCTP_PARTIAL_DELIVERY_EVENT:
799 799
 			ERR_LEN_TOO_SMALL(len, sizeof(struct sctp_pdapi_event), si, su,
800 800
 								"SCTP_PARTIAL_DELIVERY_EVENT");
801
-			SNOT("sctp notification from %s on %.*s:%d: "
802
-					"SCTP_PARTIAL_DELIVERY_EVENT: %d%s, assoc. %d\n",
801
+			ERR("sctp notification from %s on %.*s:%d: "
802
+					"SCTP_PARTIAL_DELIVERY_EVENT not supported: %d %s,"
803
+					"assoc_id %d\n",
803 804
 					su2a(su, sizeof(*su)), si->name.len, si->name.s,
804 805
 					si->port_no, snp->sn_pdapi_event.pdapi_indication,
805 806
 					(snp->sn_pdapi_event.pdapi_indication==
... ...
@@ -848,7 +1902,7 @@ int sctp_rcv_loop()
848 848
 	ri.dst_port=bind_address->port_no;
849 849
 	ri.dst_ip=bind_address->address;
850 850
 	ri.proto=PROTO_SCTP;
851
-	ri.proto_reserved1=ri.proto_reserved2=0;
851
+	ri.proto_reserved2=0;
852 852
 	
853 853
 	iov[0].iov_base=buf;
854 854
 	iov[0].iov_len=BUF_SIZE;
... ...
@@ -865,10 +1919,9 @@ int sctp_rcv_loop()
865 865
 		msg.msg_namelen=sockaddru_len(bind_address->su);
866 866
 		msg.msg_control=cbuf;
867 867
 		msg.msg_controllen=sizeof(cbuf);
868
+		sinfo=0;
868 869
 
869 870
 		len=recvmsg(bind_address->socket, &msg, 0);
870
-		/* len=sctp_recvmsg(bind_address->socket, buf, BUF_SIZE, &ri.src_su.s,
871
-							&msg.msg_namelen, &sinfo, &msg.msg_flags); */
872 871
 		if (len==-1){
873 872
 			if (errno==EAGAIN){
874 873
 				DBG("sctp_rcv_loop: EAGAIN on sctp socket\n");
... ...
@@ -882,7 +1935,7 @@ int sctp_rcv_loop()
882 882
 			else goto error;
883 883
 		}
884 884
 		if (unlikely(msg.msg_flags & MSG_NOTIFICATION)){
885
-			/* intercept usefull notifications */
885
+			/* intercept useful notifications */
886 886
 			sctp_handle_notification(bind_address, &ri.src_su, buf, len);
887 887
 			continue;
888 888
 		}else if (unlikely(!(msg.msg_flags & MSG_EOR))){
... ...
@@ -904,8 +1957,8 @@ int sctp_rcv_loop()
904 904
 						) && (cmsg->cmsg_len>=CMSG_LEN(sizeof(*sinfo)))) ){
905 905
 				sinfo=(struct sctp_sndrcvinfo*)CMSG_DATA(cmsg);
906 906
 				DBG("sctp recv: message from %s:%d stream %d  ppid %x"
907
-						" flags %x%s tsn %u" " cumtsn %u associd %d\n",
908
-						ip_addr2a(&ri.src_ip), htons(ri.src_port),
907
+						" flags %x%s tsn %u" " cumtsn %u assoc_id %d\n",
908
+						ip_addr2a(&ri.src_ip), ri.src_port,
909 909
 						sinfo->sinfo_stream, sinfo->sinfo_ppid,
910 910
 						sinfo->sinfo_flags,
911 911
 						(sinfo->sinfo_flags&SCTP_UNORDERED)?
... ...
@@ -921,13 +1974,13 @@ int sctp_rcv_loop()
921 921
 		/* sanity checks */
922 922
 		if (len<MIN_SCTP_PACKET) {
923 923
 			tmp=ip_addr2a(&ri.src_ip);
924
-			DBG("sctp_rcv_loop: probing packet received from %s %d\n",
925
-					tmp, htons(ri.src_port));
924
+			DBG("sctp_rcv_loop: probing packet received from %s:%d\n",
925
+					tmp, ri.src_port);
926 926
 			continue;
927 927
 		}
928 928
 		if (ri.src_port==0){
929 929
 			tmp=ip_addr2a(&ri.src_ip);
930
-			LOG(L_INFO, "sctp_rcv_loop: dropping 0 port packet from %s\n",
930
+			LOG(L_INFO, "sctp_rcv_loop: dropping 0 port packet from %s:0\n",
931 931
 						tmp);
932 932
 			continue;
933 933
 		}
... ...
@@ -936,6 +1989,26 @@ int sctp_rcv_loop()
936 936
 #endif
937 937
 		/* update the local config */
938 938
 		cfg_update();
939
+#ifdef SCTP_CONN_REUSE
940
+		if (likely(sinfo)){
941
+			ri.proto_reserved1 = sctp_con_track(sinfo->sinfo_assoc_id,
942
+												ri.bind_address, 
943
+												&ri.src_su,
944
+												SCTP_CON_RCV_SEEN);
945
+			/* debugging */
946
+			if (unlikely(ri.proto_reserved1==0))
947
+				DBG("no tracked assoc. found for assoc_id %d, from %s\n",
948
+						sinfo->sinfo_assoc_id, 
949
+						su2a(&ri.src_su, sizeof(ri.src_su)));
950
+#if 0
951
+			ri.proto_reserved1=
952
+				sctp_con_get_id(sinfo->sinfo_assoc_id, ri.bind_address, 0);
953
+#endif
954
+		}else
955
+			ri.proto_reserved1=0;
956
+#else /* SCTP_CONN_REUSE */
957
+		ri.proto_reserved1=0;
958
+#endif /* SCTP_CONN_REUSE */
939 959
 		receive_msg(buf, len, &ri);
940 960
 	}
941 961
 error:
... ...
@@ -943,8 +2016,9 @@ error:
943 943
 }
944 944
 
945 945
 
946
-/* send buf:len over udp to dst using sndrcv_info (uses only the to and 
947
- * send_sock members from dst)
946
+
947
+/* send buf:len over sctp to dst using sndrcv_info (uses send_sock,
948
+ * to and id from dest_info)
948 949
  * returns the numbers of bytes sent on success (>=0) and -1 on error
949 950
  */
950 951
 static int sctp_msg_send_raw(struct dest_info* dst, char* buf, unsigned len,
... ...
@@ -955,6 +2029,7 @@ static int sctp_msg_send_raw(struct dest_info* dst, char* buf, unsigned len,
955 955
 	struct ip_addr ip; /* used only on error, for debugging */
956 956
 	struct msghdr msg;
957 957
 	struct iovec iov[1];
958
+	struct socket_info* si;
958 959
 	struct sctp_sndrcvinfo* sinfo;
959 960
 	struct cmsghdr* cmsg;
960 961
 	/* make sure msg_control will point to properly aligned data */
... ...
@@ -962,14 +2037,18 @@ static int sctp_msg_send_raw(struct dest_info* dst, char* buf, unsigned len,
962 962
 		struct cmsghdr cm;
963 963
 		char cbuf[CMSG_SPACE(sizeof(*sinfo))];
964 964
 	}ctrl_un;
965
+#ifdef SCTP_CONN_REUSE
966
+	int assoc_id;
967
+	union sockaddr_union to;
968
+#ifdef SCTP_ADDR_HASH
969
+	int tmp_id, tmp_assoc_id;
970
+#endif /* SCTP_ADDR_HASH */
971
+#endif /* SCTP_CONN_REUSE */
965 972
 	
966
-	tolen=sockaddru_len(dst->to);
967 973
 	iov[0].iov_base=buf;
968 974
 	iov[0].iov_len=len;
969 975
 	msg.msg_iov=iov;
970 976
 	msg.msg_iovlen=1;
971
-	msg.msg_name=&dst->to.s;
972
-	msg.msg_namelen=tolen;
973 977
 	msg.msg_flags=0; /* not used on send (use instead sinfo_flags) */
974 978
 	msg.msg_control=ctrl_un.cbuf;
975 979
 	msg.msg_controllen=sizeof(ctrl_un.cbuf);
... ...
@@ -982,19 +2061,101 @@ static int sctp_msg_send_raw(struct dest_info* dst, char* buf, unsigned len,
982 982
 	/* some systems need msg_controllen set to the actual size and not
983 983
 	 * something bigger (e.g. openbsd) */
984 984
 	msg.msg_controllen=cmsg->cmsg_len;
985
+	si=dst->send_sock;
986
+#ifdef SCTP_CONN_REUSE
987
+	/* if dst->id is set it means we want to send on association with