
- io_wait.h: when calling handle_io() in a loop (e.g. from io_watch_add() with SIGIO_RT or EPOLL_ET, or from io_wait_loop_* in repeat mode), always check and stop if the fd was removed inside the handle_io() call (see the first sketch below)

- tcp_main.c: always add the connection & clear the corresponding flags before io_watch_add-ing its fd, to avoid the possibility of handle_*() being called with incompletely initialized parameters (see the second sketch below)
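
A minimal sketch of the loop guard (fd_map, get_fd_map(), handle_io() and io_watch_del() are the real io_wait.h names; the struct layout and the stub comments are simplified, illustrative assumptions):

	struct fd_map{   /* simplified stand-in for the io_wait.h entry */
		int fd;
		int type;    /* 0 => slot free, i.e. fd no longer watched */
		void* data;
	};

	/* inside an io_wait_loop_* dispatch loop, for one ready fd: */
	fm=get_fd_map(h, fd);
	/* handle_io() may io_watch_del() its own fd, which zeroes fm->type
	 * and recycles the slot; without re-checking fm->type on every
	 * iteration the loop would call handle_io() again on a stale entry */
	while(fm->type && (handle_io(fm, idx)>0) && repeat);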

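A minimal sketch of the tcp_main.c ordering (the function and flag names are the real ones from the diff below; which steps apply varies per call site - handle_new_connect() needs all three, the other two sites only swap steps 2 and 3):

	/* old, racy order: io_watch_add() first - but io_watch_add() can
	 * invoke handle_io() before it returns, on a connection that still
	 * has F_CONN_REMOVED set and/or is not yet added */

	/* new, safe order: */
	tcpconn_add(tcpconn);               /* 1. fully publish the conn  */
	tcpconn->flags&=~F_CONN_REMOVED;    /* 2. clear the removed flag  */
	io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn);
	                                    /* 3. register the fd only once
	                                     * everything handle_io() reads
	                                     * is final */
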
Andrei Pelinescu-Onciul authored on 22/11/2007 19:32:35
Showing 2 changed files (io_wait.h, tcp_main.c)

io_wait.h
@@ -49,6 +49,8 @@
  *  2005-06-26  added kqueue (andrei)
  *  2005-07-01  added /dev/poll (andrei)
  *  2006-05-30  sigio 64 bit workarround enabled for kernels < 2.6.5 (andrei)
+ *  2007-11-22  when handle_io() is called in a loop check & stop if the fd was
+ *               removed inside handle_io() (andrei)
  */
 
 
@@ -253,6 +255,9 @@ again:
 
 /* generic io_watch_add function
  * returns 0 on success, -1 on error
+ * WARNING: handle_io() can be called immediately (from io_watch_add()) so
+ *  make sure that any dependent init. (e.g. data stuff) is made before
+ *  calling io_watch_add
  *
  * this version should be faster than pointers to poll_method specific
  * functions (it avoids functions calls, the overhead being only an extra
@@ -442,8 +447,8 @@ again_devpoll:
 		pf.fd=fd;
 		pf.events=POLLIN;
 check_io_again:
-		while( ((n=poll(&pf, 1, 0))>0) && (handle_io(e, idx)>0));
-		if (n==-1){
+		while(e->type && ((n=poll(&pf, 1, 0))>0) && (handle_io(e, idx)>0));
+		if (e->type && (n==-1)){
 			if (errno==EINTR) goto check_io_again;
 			LOG(L_ERR, "ERROR: io_watch_add: check_io poll: %s [%d]\n",
 						strerror(errno), errno);
@@ -633,6 +638,7 @@ inline static int io_wait_loop_poll(io_wait_h* h, int t, int repeat)
 {
 	int n, r;
 	int ret;
+	struct fd_map* fm;
again:
 		ret=n=poll(h->fd_array, h->fd_no, t*1000);
 		if (n==-1){
@@ -656,8 +662,8 @@ again:
 					h->fd_array[r].events=0; /* clear the events */
 					continue;
 				}
-				while((handle_io(get_fd_map(h, h->fd_array[r].fd), r) > 0)
-						 && repeat);
+				fm=get_fd_map(h, h->fd_array[r].fd);
+				while(fm->type && (handle_io(fm, r) > 0) && repeat);
 			}
 		}
 error:
@@ -674,6 +680,7 @@ inline static int io_wait_loop_select(io_wait_h* h, int t, int repeat)
 	int n, ret;
 	struct timeval timeout;
 	int r;
+	struct fd_map* fm;
 	
again:
 		sel_set=h->master_set;
@@ -690,8 +697,8 @@ again:
 		/* use poll fd array */
 		for(r=0; (r<h->max_fd_no) && n; r++){
 			if (FD_ISSET(h->fd_array[r].fd, &sel_set)){
-				while((handle_io(get_fd_map(h, h->fd_array[r].fd), r)>0)
-						&& repeat);
+				fm=get_fd_map(h, h->fd_array[r].fd);
+				while(fm->type && (handle_io(fm, r)>0) && repeat);
 				n--;
 			}
 		};
@@ -705,6 +712,7 @@ again:
 inline static int io_wait_loop_epoll(io_wait_h* h, int t, int repeat)
 {
 	int n, r;
+	struct fd_map* fm;
 	
again:
 		n=epoll_wait(h->epfd, h->ep_array, h->fd_no, t*1000);
@@ -728,8 +736,8 @@ again:
 #endif
 		for (r=0; r<n; r++){
 			if (h->ep_array[r].events & (EPOLLIN|EPOLLERR|EPOLLHUP)){
-				while((handle_io((struct fd_map*)h->ep_array[r].data.ptr,-1)>0)
-					&& repeat);
+				fm=(struct fd_map*)h->ep_array[r].data.ptr;
+				while(fm->type && (handle_io(fm,-1)>0) && repeat);
 			}else{
 				LOG(L_ERR, "ERROR:io_wait_loop_epoll: unexpected event %x"
 							" on %d/%d, data=%p\n", h->ep_array[r].events,
@@ -748,6 +756,7 @@ inline static int io_wait_loop_kqueue(io_wait_h* h, int t, int repeat)
 {
 	int n, r;
 	struct timespec tspec;
+	struct fd_map* fm;
 	
 	tspec.tv_sec=t;
 	tspec.tv_nsec=0;
@@ -778,9 +787,10 @@ again:
 							"fd %d: %s [%ld]\n", h->kq_array[r].ident,
 							strerror(h->kq_array[r].data),
 							(long)h->kq_array[r].data);
-			}else /* READ/EOF */
-				while((handle_io((struct fd_map*)h->kq_array[r].udata, -1)>0)
-						&& repeat);
+			}else{ /* READ/EOF */
+				fm=(struct fd_map*)h->kq_array[r].udata;
+				while(fm->type && (handle_io(fm, -1)>0) && repeat);
+			}
 		}
 error:
 	return n;
@@ -913,6 +923,7 @@ inline static int io_wait_loop_devpoll(io_wait_h* h, int t, int repeat)
 	int n, r;
 	int ret;
 	struct dvpoll dpoll;
+	struct fd_map* fm;
 
 		dpoll.dp_timeout=t*1000;
 		dpoll.dp_nfds=h->fd_no;
@@ -934,8 +945,8 @@ again:
 							h->fd_array[r].fd, h->fd_array[r].revents);
 			}
 			/* POLLIN|POLLHUP just go through */
-			while((handle_io(get_fd_map(h, h->fd_array[r].fd), r) > 0) &&
-						repeat);
+			fm=get_fd_map(h, h->fd_array[r].fd);
+			while(fm->type && (handle_io(fm, r) > 0) && repeat);
 		}
 error:
 	return ret;

tcp_main.c
@@ -81,6 +81,8 @@
  *  2007-08-27   split init_sock_opt into a lightweight init_sock_opt_accept() 
  *               used when accepting connections and init_sock_opt used for 
  *               connect/ new sockets (andrei)
+ *  2007-11-22  always add the connection & clear the coresponding flags before
+ *               io_watch_add-ing its fd - it's safer this way (andrei)
  */
 
 
@@ -1449,8 +1451,8 @@ inline static int handle_tcp_child(struct tcp_child* tcp_c, int fd_i)
 			tcpconn->timeout=get_ticks_raw()+tcp_con_lifetime;
 			tcpconn_put(tcpconn);
 			/* must be after the de-ref*/
-			io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn);
 			tcpconn->flags&=~F_CONN_REMOVED;
+			io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn);
 			DBG("handle_tcp_child: CONN_RELEASE  %p refcnt= %d\n", 
 							tcpconn, atomic_get(&tcpconn->refcnt));
 			break;
@@ -1587,8 +1589,8 @@ inline static int handle_ser_child(struct process_table* p, int fd_i)
 			tcpconn_add(tcpconn);
 			/* update the timeout*/
 			tcpconn->timeout=get_ticks_raw()+tcp_con_lifetime;
-			io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn);
 			tcpconn->flags&=~F_CONN_REMOVED;
+			io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn);
 			break;
 		default:
 			LOG(L_CRIT, "BUG: handle_ser_child: unknown cmd %d\n", cmd);
@@ -1735,9 +1737,9 @@ static inline int handle_new_connect(struct socket_info* si)
 	tcpconn=tcpconn_new(new_sock, &su, dst_su, si, si->proto, S_CONN_ACCEPT);
 	if (tcpconn){
 #ifdef TCP_PASS_NEW_CONNECTION_ON_DATA
-		io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn);
-		tcpconn->flags&=~F_CONN_REMOVED;
 		tcpconn_add(tcpconn);
+		tcpconn->flags&=~F_CONN_REMOVED;
+		io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn);
 #else
 		atomic_set(&tcpconn->refcnt, 1); /* safe, not yet available to the
 											outside world */