Browse code

- io_wait support for write - io_wait: added io_watch_chg(..) - updated tcp code to the io_wait api changes

Andrei Pelinescu-Onciul authored on 29/11/2007 21:01:45
Showing 5 changed files
... ...
@@ -3,26 +3,17 @@
3 3
  * 
4 4
  * Copyright (C) 2005 iptelorg GmbH
5 5
  *
6
- * This file is part of ser, a free SIP server.
6
+ * Permission to use, copy, modify, and distribute this software for any
7
+ * purpose with or without fee is hereby granted, provided that the above
8
+ * copyright notice and this permission notice appear in all copies.
7 9
  *
8
- * ser is free software; you can redistribute it and/or modify
9
- * it under the terms of the GNU General Public License as published by
10
- * the Free Software Foundation; either version 2 of the License, or
11
- * (at your option) any later version
12
- *
13
- * For a license to use the ser software under conditions
14
- * other than those described here, or to purchase support for this
15
- * software, please contact iptel.org by e-mail at the following addresses:
16
- *    info@iptel.org
17
- *
18
- * ser is distributed in the hope that it will be useful,
19
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
20
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21
- * GNU General Public License for more details.
22
- *
23
- * You should have received a copy of the GNU General Public License
24
- * along with this program; if not, write to the Free Software
25
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
10
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
26 17
  */
27 18
 /* 
28 19
  * tcp io wait common stuff used by tcp_main.c & tcp_read.c
... ...
@@ -289,7 +280,8 @@ static void destroy_devpoll(io_wait_h* h)
289 280
 #ifdef HAVE_SELECT
290 281
 static int init_select(io_wait_h* h)
291 282
 {
292
-	FD_ZERO(&h->master_set);
283
+	FD_ZERO(&h->master_rset);
284
+	FD_ZERO(&h->master_wset);
293 285
 	return 0;
294 286
 }
295 287
 #endif
... ...
@@ -3,33 +3,24 @@
3 3
  * 
4 4
  * Copyright (C) 2005 iptelorg GmbH
5 5
  *
6
- * This file is part of ser, a free SIP server.
6
+ * Permission to use, copy, modify, and distribute this software for any
7
+ * purpose with or without fee is hereby granted, provided that the above
8
+ * copyright notice and this permission notice appear in all copies.
7 9
  *
8
- * ser is free software; you can redistribute it and/or modify
9
- * it under the terms of the GNU General Public License as published by
10
- * the Free Software Foundation; either version 2 of the License, or
11
- * (at your option) any later version
12
- *
13
- * For a license to use the ser software under conditions
14
- * other than those described here, or to purchase support for this
15
- * software, please contact iptel.org by e-mail at the following addresses:
16
- *    info@iptel.org
17
- *
18
- * ser is distributed in the hope that it will be useful,
19
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
20
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21
- * GNU General Public License for more details.
22
- *
23
- * You should have received a copy of the GNU General Public License
24
- * along with this program; if not, write to the Free Software
25
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
10
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
26 17
  */
27 18
 /*
28 19
  * tcp io wait common stuff used by tcp_main.c & tcp_read.c
29 20
  * All the functions are inline because of speed reasons and because they are
30 21
  * used only from 2 places.
31 22
  * You also have to define:
32
- *     int handle_io(struct fd_map* fm, int idx) (see below)
23
+ *     int handle_io(struct fd_map* fm, short events, int idx) (see below)
33 24
  *     (this could be trivially replaced by a callback pointer entry attached
34 25
  *      to the io_wait handler if more flexibility rather then performance
35 26
  *      is needed)
... ...
@@ -51,6 +42,7 @@
51 42
  *  2006-05-30  sigio 64 bit workarround enabled for kernels < 2.6.5 (andrei)
52 43
  *  2007-11-22  when handle_io() is called in a loop check & stop if the fd was
53 44
  *               removed inside handle_io() (andrei)
45
+ *  2007-11-29  support for write (POLLOUT); added io_watch_chg() (andrei)
54 46
  */
55 47
 
56 48
 
... ...
@@ -116,12 +108,13 @@ struct fd_map{
116 108
 	int fd;               /* fd no */
117 109
 	fd_type type;         /* "data" type */
118 110
 	void* data;           /* pointer to the corresponding structure */
111
+	short events;         /* events we are interested in */
119 112
 };
120 113
 
121 114
 
122 115
 #ifdef HAVE_KQUEUE
123 116
 #ifndef KQ_CHANGES_ARRAY_SIZE
124
-#define KQ_CHANGES_ARRAY_SIZE 128
117
+#define KQ_CHANGES_ARRAY_SIZE 256
125 118
 
126 119
 #ifdef __OS_netbsd
127 120
 #define KEV_UDATA_CAST (intptr_t)
... ...
@@ -154,7 +147,8 @@ struct io_wait_handler{
154 147
 	int dpoll_fd;
155 148
 #endif
156 149
 #ifdef HAVE_SELECT
157
-	fd_set master_set;
150
+	fd_set master_rset; /* read set */
151
+	fd_set master_wset; /* write set */
158 152
 	int max_fd_select; /* maximum select used fd */
159 153
 #endif
160 154
 	/* common stuff for POLL, SIGIO_RT and SELECT
... ...
@@ -184,10 +178,12 @@ typedef struct io_wait_handler io_wait_h;
184 178
 /* add a fd_map structure to the fd hash */
185 179
 static inline struct fd_map* hash_fd_map(	io_wait_h* h,
186 180
 											int fd,
181
+											short events,
187 182
 											fd_type type,
188 183
 											void* data)
189 184
 {
190 185
 	h->fd_hash[fd].fd=fd;
186
+	h->fd_hash[fd].events=events;
191 187
 	h->fd_hash[fd].type=type;
192 188
 	h->fd_hash[fd].data=data;
193 189
 	return &h->fd_hash[fd];
... ...
@@ -199,8 +195,9 @@ static inline struct fd_map* hash_fd_map(	io_wait_h* h,
199 195
 /* generic handle io routine, this must be defined in the including file
200 196
  * (faster then registering a callback pointer)
201 197
  *
202
- * params:  fm  - pointer to a fd hash entry
203
- *          idx - index in the fd_array (or -1 if not known)
198
+ * params:  fm     - pointer to a fd hash entry
199
+ *          events - combinations of POLLIN, POLLOUT, POLLERR & POLLHUP
200
+ *          idx    - index in the fd_array (or -1 if not known)
204 201
  * return: -1 on error
205 202
  *          0 on EAGAIN or when by some other way it is known that no more 
206 203
  *            io events are queued on the fd (the receive buffer is empty).
... ...
@@ -209,9 +206,9 @@ static inline struct fd_map* hash_fd_map(	io_wait_h* h,
209 206
  *         >0 on successfull read from the fd (when there might be more io
210 207
  *            queued -- the receive buffer might still be non-empty)
211 208
  */
212
-inline static int handle_io(struct fd_map* fm, int idx);
209
+inline static int handle_io(struct fd_map* fm, short events, int idx);
213 210
 #else
214
-int handle_io(struct fd_map* fm, int idx);
211
+int handle_io(struct fd_map* fm, short events, int idx);
215 212
 #endif
216 213
 
217 214
 
... ...
@@ -220,6 +217,10 @@ int handle_io(struct fd_map* fm, int idx);
220 217
 /*
221 218
  * kqueue specific function: register a change
222 219
  * (adds a change to the kevent change array, and if full flushes it first)
220
+ *
221
+ * TODO: check if the event already exists in the change list or if it's
222
+ *       complementary to an event in the list (e.g. EVFILT_WRITE, EV_DELETE
223
+ *       and EVFILT_WRITE, EV_ADD for the same fd).
223 224
  * returns: -1 on error, 0 on success
224 225
  */
225 226
 static inline int kq_ev_change(io_wait_h* h, int fd, int filter, int flag, 
... ...
@@ -254,7 +255,15 @@ again:
254 255
 
255 256
 
256 257
 /* generic io_watch_add function
258
+ * Params:
259
+ *     h      - pointer to initialized io_wait handle
260
+ *     fd     - fd to watch
261
+ *     events - bitmap with the fd events for which the fd should be watched
262
+ *              (combination of POLLIN and POLLOUT)
263
+ *     type   - fd type (non 0 value, returned in the call to handle_io)
264
+ *     data   - pointer/private data returned in the handle_io call
257 265
  * returns 0 on success, -1 on error
266
+ *
258 267
  * WARNING: handle_io() can be called immediately (from io_watch_add()) so
259 268
  *  make sure that any dependent init. (e.g. data stuff) is made before
260 269
  *  calling io_watch_add
... ...
@@ -264,15 +273,16 @@ again:
264 273
  *  switch())*/
265 274
 inline static int io_watch_add(	io_wait_h* h,
266 275
 								int fd,
276
+								short events,
267 277
 								fd_type type,
268 278
 								void* data)
269 279
 {
270 280
 
271 281
 	/* helper macros */
272
-#define fd_array_setup \
282
+#define fd_array_setup(ev) \
273 283
 	do{ \
274 284
 		h->fd_array[h->fd_no].fd=fd; \
275
-		h->fd_array[h->fd_no].events=POLLIN; /* useless for select */ \
285
+		h->fd_array[h->fd_no].events=(ev); /* useless for select */ \
276 286
 		h->fd_array[h->fd_no].revents=0;     /* useless for select */ \
277 287
 	}while(0)
278 288
 	
... ...
@@ -311,12 +321,17 @@ inline static int io_watch_add(	io_wait_h* h,
311 321
 	idx=-1;
312 322
 #endif
313 323
 	e=0;
314
-	if (fd==-1){
324
+	/* sanity checks */
325
+	if (unlikely(fd==-1)){
315 326
 		LOG(L_CRIT, "BUG: io_watch_add: fd is -1!\n");
316 327
 		goto error;
317 328
 	}
329
+	if (unlikely((events&(POLLIN|POLLOUT))==0)){
330
+		LOG(L_CRIT, "BUG: io_watch_add: invalid events: 0x%0x\n", events);
331
+		goto error;
332
+	}
318 333
 	/* check if not too big */
319
-	if (h->fd_no>=h->max_fd_no){
334
+	if (unlikely(h->fd_no>=h->max_fd_no)){
320 335
 		LOG(L_CRIT, "ERROR: io_watch_add: maximum fd number exceeded:"
321 336
 				" %d/%d\n", h->fd_no, h->max_fd_no);
322 337
 		goto error;
... ...
@@ -325,35 +340,38 @@ inline static int io_watch_add(	io_wait_h* h,
325 340
 			h, fd, type, data, h->fd_no);
326 341
 	/*  hash sanity check */
327 342
 	e=get_fd_map(h, fd);
328
-	if (e && (e->type!=0 /*F_NONE*/)){
343
+	if (unlikely(e && (e->type!=0 /*F_NONE*/))){
329 344
 		LOG(L_ERR, "ERROR: io_watch_add: trying to overwrite entry %d"
330 345
 				" in the hash(%d, %d, %p) with (%d, %d, %p)\n",
331 346
 				fd, e->fd, e->type, e->data, fd, type, data);
332 347
 		goto error;
333 348
 	}
334 349
 	
335
-	if ((e=hash_fd_map(h, fd, type, data))==0){
350
+	if (unlikely((e=hash_fd_map(h, fd, events, type, data))==0)){
336 351
 		LOG(L_ERR, "ERROR: io_watch_add: failed to hash the fd %d\n", fd);
337 352
 		goto error;
338 353
 	}
339 354
 	switch(h->poll_method){ /* faster then pointer to functions */
340 355
 		case POLL_POLL:
341
-			fd_array_setup;
356
+			fd_array_setup(events);
342 357
 			set_fd_flags(O_NONBLOCK);
343 358
 			break;
344 359
 #ifdef HAVE_SELECT
345 360
 		case POLL_SELECT:
346
-			fd_array_setup;
347
-			FD_SET(fd, &h->master_set);
361
+			fd_array_setup(events);
362
+			if (likely(events & POLLIN))
363
+				FD_SET(fd, &h->master_rset);
364
+			if (unlikely(events & POLLOUT))
365
+				FD_SET(fd, &h->master_wset);
348 366
 			if (h->max_fd_select<fd) h->max_fd_select=fd;
349 367
 			break;
350 368
 #endif
351 369
 #ifdef HAVE_SIGIO_RT
352 370
 		case POLL_SIGIO_RT:
353
-			fd_array_setup;
371
+			fd_array_setup(events);
354 372
 			/* re-set O_ASYNC might be needed, if not done from 
355 373
 			 * io_watch_del (or if somebody wants to add a fd which has
356
-			 * already O_ASYNC/F_SETSIG set on a dupplicate)
374
+			 * already O_ASYNC/F_SETSIG set on a duplicate)
357 375
 			 */
358 376
 			/* set async & signal */
359 377
 			if (fcntl(fd, F_SETOWN, my_pid())==-1){
... ...
@@ -384,11 +402,12 @@ inline static int io_watch_add(	io_wait_h* h,
384 402
 #endif
385 403
 #ifdef HAVE_EPOLL
386 404
 		case POLL_EPOLL_LT:
387
-			ep_event.events=EPOLLIN;
405
+			ep_event.events=(EPOLLIN & ((int)!(events & POLLIN)-1) ) |
406
+							 (EPOLLOUT & ((int)!(events & POLLOUT)-1) );
388 407
 			ep_event.data.ptr=e;
389 408
 again1:
390 409
 			n=epoll_ctl(h->epfd, EPOLL_CTL_ADD, fd, &ep_event);
391
-			if (n==-1){
410
+			if (unlikely(n==-1)){
392 411
 				if (errno==EAGAIN) goto again1;
393 412
 				LOG(L_ERR, "ERROR: io_watch_add: epoll_ctl failed: %s [%d]\n",
394 413
 					strerror(errno), errno);
... ...
@@ -397,11 +416,13 @@ again1:
397 416
 			break;
398 417
 		case POLL_EPOLL_ET:
399 418
 			set_fd_flags(O_NONBLOCK);
400
-			ep_event.events=EPOLLIN|EPOLLET;
419
+			ep_event.events=(EPOLLIN & ((int)!(events & POLLIN)-1) )  |
420
+							 (EPOLLOUT & ((int)!(events & POLLOUT)-1) ) |
421
+							  EPOLLET;
401 422
 			ep_event.data.ptr=e;
402 423
 again2:
403 424
 			n=epoll_ctl(h->epfd, EPOLL_CTL_ADD, fd, &ep_event);
404
-			if (n==-1){
425
+			if (unlikely(n==-1)){
405 426
 				if (errno==EAGAIN) goto again2;
406 427
 				LOG(L_ERR, "ERROR: io_watch_add: epoll_ctl failed: %s [%d]\n",
407 428
 					strerror(errno), errno);
... ...
@@ -413,14 +434,20 @@ again2:
413 434
 #endif
414 435
 #ifdef HAVE_KQUEUE
415 436
 		case POLL_KQUEUE:
416
-			if (kq_ev_change(h, fd, EVFILT_READ, EV_ADD, e)==-1)
437
+			if (likely( events & POLLIN)){
438
+				if (unlikely(kq_ev_change(h, fd, EVFILT_READ, EV_ADD, e)==-1))
439
+				goto error;
440
+			}
441
+			if (unlikely( events & POLLOUT)){
442
+				if (unlikely(kq_ev_change(h, fd, EVFILT_WRITE, EV_ADD, e)==-1))
417 443
 				goto error;
444
+			}
418 445
 			break;
419 446
 #endif
420 447
 #ifdef HAVE_DEVPOLL
421 448
 		case POLL_DEVPOLL:
422 449
 			pfd.fd=fd;
423
-			pfd.events=POLLIN;
450
+			pfd.events=events;
424 451
 			pfd.revents=0;
425 452
 again_devpoll:
426 453
 			if (write(h->dpoll_fd, &pfd, sizeof(pfd))==-1){
... ...
@@ -445,10 +472,12 @@ again_devpoll:
445 472
 	if (check_io){
446 473
 		/* handle possible pre-existing events */
447 474
 		pf.fd=fd;
448
-		pf.events=POLLIN;
475
+		pf.events=events;
449 476
 check_io_again:
450
-		while(e->type && ((n=poll(&pf, 1, 0))>0) && (handle_io(e, idx)>0));
451
-		if (e->type && (n==-1)){
477
+		while(e->type && ((n=poll(&pf, 1, 0))>0) && 
478
+				(handle_io(e, pf.revents, idx)>0) &&
479
+				(pf.revents & e->events));
480
+		if (unlikely(e->type && (n==-1))){
452 481
 			if (errno==EINTR) goto check_io_again;
453 482
 			LOG(L_ERR, "ERROR: io_watch_add: check_io poll: %s [%d]\n",
454 483
 						strerror(errno), errno);
... ...
@@ -482,18 +511,19 @@ inline static int io_watch_del(io_wait_h* h, int fd, int idx, int flags)
482 511
 	
483 512
 #define fix_fd_array \
484 513
 	do{\
485
-			if (idx==-1){ \
514
+			if (unlikely(idx==-1)){ \
486 515
 				/* fix idx if -1 and needed */ \
487 516
 				for (idx=0; (idx<h->fd_no) && \
488 517
 							(h->fd_array[idx].fd!=fd); idx++); \
489 518
 			} \
490
-			if (idx<h->fd_no){ \
519
+			if (likely(idx<h->fd_no)){ \
491 520
 				memmove(&h->fd_array[idx], &h->fd_array[idx+1], \
492 521
 					(h->fd_no-(idx+1))*sizeof(*(h->fd_array))); \
493 522
 			} \
494 523
 	}while(0)
495 524
 	
496 525
 	struct fd_map* e;
526
+	int events;
497 527
 #ifdef HAVE_EPOLL
498 528
 	int n;
499 529
 	struct epoll_event ep_event;
... ...
@@ -505,7 +535,7 @@ inline static int io_watch_del(io_wait_h* h, int fd, int idx, int flags)
505 535
 	int fd_flags;
506 536
 #endif
507 537
 	
508
-	if ((fd<0) || (fd>=h->max_fd_no)){
538
+	if (unlikely((fd<0) || (fd>=h->max_fd_no))){
509 539
 		LOG(L_CRIT, "BUG: io_watch_del: invalid fd %d, not in [0, %d) \n",
510 540
 						fd, h->fd_no);
511 541
 		goto error;
... ...
@@ -514,18 +544,18 @@ inline static int io_watch_del(io_wait_h* h, int fd, int idx, int flags)
514 544
 			h, fd, idx, flags, h->fd_no);
515 545
 	e=get_fd_map(h, fd);
516 546
 	/* more sanity checks */
517
-	if (e==0){
547
+	if (unlikely(e==0)){
518 548
 		LOG(L_CRIT, "BUG: io_watch_del: no corresponding hash entry for %d\n",
519 549
 					fd);
520 550
 		goto error;
521 551
 	}
522
-	if (e->type==0 /*F_NONE*/){
552
+	if (unlikely(e->type==0 /*F_NONE*/)){
523 553
 		LOG(L_ERR, "ERROR: io_watch_del: trying to delete already erased"
524 554
 				" entry %d in the hash(%d, %d, %p) )\n",
525 555
 				fd, e->fd, e->type, e->data);
526 556
 		goto error;
527 557
 	}
528
-	
558
+	events=e->events;
529 559
 	unhash_fd_map(e);
530 560
 	
531 561
 	switch(h->poll_method){
... ...
@@ -534,11 +564,14 @@ inline static int io_watch_del(io_wait_h* h, int fd, int idx, int flags)
534 564
 			break;
535 565
 #ifdef HAVE_SELECT
536 566
 		case POLL_SELECT:
537
-			fix_fd_array;
538
-			FD_CLR(fd, &h->master_set);
539
-			if (h->max_fd_select && (h->max_fd_select==fd))
567
+			if (likely(events & POLLIN))
568
+				FD_CLR(fd, &h->master_rset);
569
+			if (unlikely(events & POLLOUT))
570
+				FD_CLR(fd, &h->master_wset);
571
+			if (unlikely(h->max_fd_select && (h->max_fd_select==fd)))
540 572
 				/* we don't know the prev. max, so we just decrement it */
541 573
 				h->max_fd_select--; 
574
+			fix_fd_array;
542 575
 			break;
543 576
 #endif
544 577
 #ifdef HAVE_SIGIO_RT
... ...
@@ -553,12 +586,12 @@ inline static int io_watch_del(io_wait_h* h, int fd, int idx, int flags)
553 586
 			/*if (!(flags & IO_FD_CLOSING)){*/
554 587
 				/* reset ASYNC */
555 588
 				fd_flags=fcntl(fd, F_GETFL); 
556
-				if (fd_flags==-1){ 
589
+				if (unlikely(fd_flags==-1)){ 
557 590
 					LOG(L_ERR, "ERROR: io_watch_del: fnctl: GETFL failed:" 
558 591
 							" %s [%d]\n", strerror(errno), errno); 
559 592
 					goto error; 
560 593
 				} 
561
-				if (fcntl(fd, F_SETFL, fd_flags&(~O_ASYNC))==-1){ 
594
+				if (unlikely(fcntl(fd, F_SETFL, fd_flags&(~O_ASYNC))==-1)){ 
562 595
 					LOG(L_ERR, "ERROR: io_watch_del: fnctl: SETFL" 
563 596
 								" failed: %s [%d]\n", strerror(errno), errno); 
564 597
 					goto error; 
... ...
@@ -569,15 +602,17 @@ inline static int io_watch_del(io_wait_h* h, int fd, int idx, int flags)
569 602
 		case POLL_EPOLL_LT:
570 603
 		case POLL_EPOLL_ET:
571 604
 			/* epoll doesn't seem to automatically remove sockets,
572
-			 * if the socket is a dupplicate/moved and the original
605
+			 * if the socket is a duplicate/moved and the original
573 606
 			 * is still open. The fd is removed from the epoll set
574 607
 			 * only when the original (and all the  copies?) is/are 
575 608
 			 * closed. This is probably a bug in epoll. --andrei */
576 609
 #ifdef EPOLL_NO_CLOSE_BUG
577 610
 			if (!(flags & IO_FD_CLOSING)){
578 611
 #endif
612
+again_epoll:
579 613
 				n=epoll_ctl(h->epfd, EPOLL_CTL_DEL, fd, &ep_event);
580
-				if (n==-1){
614
+				if (unlikely(n==-1)){
615
+					if (errno==EAGAIN) goto again_epoll;
581 616
 					LOG(L_ERR, "ERROR: io_watch_del: removing fd from epoll "
582 617
 							"list failed: %s [%d]\n", strerror(errno), errno);
583 618
 					goto error;
... ...
@@ -590,8 +625,21 @@ inline static int io_watch_del(io_wait_h* h, int fd, int idx, int flags)
590 625
 #ifdef HAVE_KQUEUE
591 626
 		case POLL_KQUEUE:
592 627
 			if (!(flags & IO_FD_CLOSING)){
593
-				if (kq_ev_change(h, fd, EVFILT_READ, EV_DELETE, 0)==-1)
594
-					goto error;
628
+				if (likely(events & POLLIN)){
629
+					if (unlikely(kq_ev_change(h, fd, EVFILT_READ,
630
+													EV_DELETE, 0) ==-1)){
631
+						/* try to delete the write filter anyway */
632
+						if (events & POLLOUT){
633
+							kq_ev_change(h, fd, EVFILT_WRITE, EV_DELETE, 0);
634
+						}
635
+						goto error;
636
+					}
637
+				}
638
+				if (unlikely(events & POLLOUT)){
639
+					if (unlikely(kq_ev_change(h, fd, EVFILT_WRITE,
640
+													EV_DELETE, 0) ==-1))
641
+						goto error;
642
+				}
595 643
 			}
596 644
 			break;
597 645
 #endif
... ...
@@ -627,6 +675,180 @@ error:
627 675
 
628 676
 
629 677
 
678
+/* parameters:    h - handler 
679
+ *               fd - file descriptor
680
+ *           events - new events to watch for
681
+ *              idx - index in the fd_array if known, -1 if not
682
+ *                    (if index==-1 fd_array will be searched for the
683
+ *                     corresponding fd* entry -- slower but unavoidable in 
684
+ *                     some cases). index is not used (no fd_array) for epoll,
685
+ *                     /dev/poll and kqueue
686
+ * returns 0 if ok, -1 on error */
687
+inline static int io_watch_chg(io_wait_h* h, int fd, short events, int idx )
688
+{
689
+	
690
+#define fd_array_chg(ev) \
691
+	do{\
692
+			if (unlikely(idx==-1)){ \
693
+				/* fix idx if -1 and needed */ \
694
+				for (idx=0; (idx<h->fd_no) && \
695
+							(h->fd_array[idx].fd!=fd); idx++); \
696
+			} \
697
+			if (likely(idx<h->fd_no)){ \
698
+				h->fd_array[idx].events=(ev); \
699
+			} \
700
+	}while(0)
701
+	
702
+	struct fd_map* e;
703
+	int add_events;
704
+	int del_events;
705
+#ifdef HAVE_EPOLL
706
+	int n;
707
+	struct epoll_event ep_event;
708
+#endif
709
+#ifdef HAVE_DEVPOLL
710
+	struct pollfd pfd;
711
+#endif
712
+	
713
+	if (unlikely((fd<0) || (fd>=h->max_fd_no))){
714
+		LOG(L_CRIT, "BUG: io_watch_chg: invalid fd %d, not in [0, %d) \n",
715
+						fd, h->fd_no);
716
+		goto error;
717
+	}
718
+	if (unlikely((events&(POLLIN|POLLOUT))==0)){
719
+		LOG(L_CRIT, "BUG: io_watch_chg: invalid events: 0x%0x\n", events);
720
+		goto error;
721
+	}
722
+	DBG("DBG: io_watch_chg (%p, %d, 0x%x, 0x%x) fd_no=%d called\n",
723
+			h, fd, events, idx, h->fd_no);
724
+	e=get_fd_map(h, fd);
725
+	/* more sanity checks */
726
+	if (unlikely(e==0)){
727
+		LOG(L_CRIT, "BUG: io_watch_chg: no corresponding hash entry for %d\n",
728
+					fd);
729
+		goto error;
730
+	}
731
+	if (unlikely(e->type==0 /*F_NONE*/)){
732
+		LOG(L_ERR, "ERROR: io_watch_chg: trying to change an already erased"
733
+				" entry %d in the hash(%d, %d, %p) )\n",
734
+				fd, e->fd, e->type, e->data);
735
+		goto error;
736
+	}
737
+	
738
+	add_events=events & ~e->events;
739
+	del_events=e->events & ~events;
740
+	e->events=events;
741
+	switch(h->poll_method){
742
+		case POLL_POLL:
743
+			fd_array_chg(events);
744
+			break;
745
+#ifdef HAVE_SELECT
746
+		case POLL_SELECT:
747
+			fd_array_chg(events);
748
+			if (unlikely(del_events & POLLIN))
749
+				FD_CLR(fd, &h->master_rset);
750
+			else if (unlikely(add_events & POLLIN))
751
+				FD_SET(fd, &h->master_rset);
752
+			if (likely(del_events & POLLOUT))
753
+				FD_CLR(fd, &h->master_wset);
754
+			else if (likely(add_events & POLLOUT))
755
+				FD_SET(fd, &h->master_wset);
756
+			break;
757
+#endif
758
+#ifdef HAVE_SIGIO_RT
759
+		case POLL_SIGIO_RT:
760
+			fd_array_chg(events);
761
+			break;
762
+#endif
763
+#ifdef HAVE_EPOLL
764
+		case POLL_EPOLL_LT:
765
+				ep_event.events=(EPOLLIN & ((int)!(events & POLLIN)-1) ) |
766
+								 (EPOLLOUT & ((int)!(events & POLLOUT)-1) );
767
+				ep_event.data.ptr=e;
768
+again_epoll_lt:
769
+				n=epoll_ctl(h->epfd, EPOLL_CTL_MOD, fd, &ep_event);
770
+				if (unlikely(n==-1)){
771
+					if (errno==EAGAIN) goto again_epoll_lt;
772
+					LOG(L_ERR, "ERROR: io_watch_chg: modifying epoll events"
773
+							" failed: %s [%d]\n", strerror(errno), errno);
774
+					goto error;
775
+				}
776
+			break;
777
+		case POLL_EPOLL_ET:
778
+				ep_event.events=(EPOLLIN & ((int)!(events & POLLIN)-1) ) |
779
+								 (EPOLLOUT & ((int)!(events & POLLOUT)-1) ) |
780
+								 EPOLLET;
781
+				ep_event.data.ptr=e;
782
+again_epoll_et:
783
+				n=epoll_ctl(h->epfd, EPOLL_CTL_MOD, fd, &ep_event);
784
+				if (unlikely(n==-1)){
785
+					if (errno==EAGAIN) goto again_epoll_et;
786
+					LOG(L_ERR, "ERROR: io_watch_chg: modifying epoll events"
787
+							" failed: %s [%d]\n", strerror(errno), errno);
788
+					goto error;
789
+				}
790
+			break;
791
+#endif
792
+#ifdef HAVE_KQUEUE
793
+		case POLL_KQUEUE:
794
+			if (unlikely(del_events & POLLIN)){
795
+				if (unlikely(kq_ev_change(h, fd, EVFILT_READ,
796
+														EV_DELETE, 0) ==-1))
797
+						goto error;
798
+			}else if (unlikely(add_events & POLLIN)){
799
+				if (unlikely(kq_ev_change(h, fd, EVFILT_READ, EV_ADD, e) ==-1))
800
+					goto error;
801
+			}
802
+			if (likely(del_events & POLLOUT)){
803
+				if (unlikely(kq_ev_change(h, fd, EVFILT_WRITE,
804
+														EV_DELETE, 0) ==-1))
805
+						goto error;
806
+			}else if (likely(add_events & POLLOUT)){
807
+				if (unlikely(kq_ev_change(h, fd, EVFILT_WRITE, EV_ADD, e)==-1))
808
+					goto error;
809
+			}
810
+			break;
811
+#endif
812
+#ifdef HAVE_DEVPOLL
813
+		case POLL_DEVPOLL:
814
+				/* for /dev/poll the closed fds _must_ be removed
815
+				   (they are not removed automatically on close()) */
816
+				pfd.fd=fd;
817
+				pfd.events=POLLREMOVE;
818
+				pfd.revents=0;
819
+again_devpoll1:
820
+				if (unlikely(write(h->dpoll_fd, &pfd, sizeof(pfd))==-1)){
821
+					if (errno==EINTR) goto again_devpoll1;
822
+					LOG(L_ERR, "ERROR: io_watch_chg: removing fd from "
823
+								"/dev/poll failed: %s [%d]\n", 
824
+								strerror(errno), errno);
825
+					goto error;
826
+				}
827
+again_devpoll2:
828
+				pfd.events=events;
829
+				pfd.revents=0;
830
+				if (unlikely(write(h->dpoll_fd, &pfd, sizeof(pfd))==-1)){
831
+					if (errno==EINTR) goto again_devpoll2;
832
+					LOG(L_ERR, "ERROR: io_watch_chg: re-adding fd to "
833
+								"/dev/poll failed: %s [%d]\n", 
834
+								strerror(errno), errno);
835
+					goto error;
836
+				}
837
+				break;
838
+#endif
839
+		default:
840
+			LOG(L_CRIT, "BUG: io_watch_chg: no support for poll method "
841
+					" %s (%d)\n", poll_method_str[h->poll_method], 
842
+					h->poll_method);
843
+			goto error;
844
+	}
845
846
+	return 0;
847
+error:
848
+	return -1;
849
+#undef fd_array_chg
850
+}
851
+
630 852
 /* io_wait_loop_x style function 
631 853
  * wait for io using poll()
632 854
  * params: h      - io_wait handle
... ...
@@ -650,11 +872,12 @@ again:
650 872
 			}
651 873
 		}
652 874
 		for (r=0; (r<h->fd_no) && n; r++){
653
-			if (h->fd_array[r].revents & (POLLIN|POLLERR|POLLHUP)){
875
+			fm=get_fd_map(h, h->fd_array[r].fd);
876
+			if (h->fd_array[r].revents & (fm->events|POLLERR|POLLHUP)){
654 877
 				n--;
655 878
 				/* sanity checks */
656
-				if ((h->fd_array[r].fd >= h->max_fd_no)||
657
-						(h->fd_array[r].fd < 0)){
879
+				if (unlikely((h->fd_array[r].fd >= h->max_fd_no)||
880
+								(h->fd_array[r].fd < 0))){
658 881
 					LOG(L_CRIT, "BUG: io_wait_loop_poll: bad fd %d "
659 882
 							"(no in the 0 - %d range)\n",
660 883
 							h->fd_array[r].fd, h->max_fd_no);
... ...
@@ -662,8 +885,13 @@ again:
662 885
 					h->fd_array[r].events=0; /* clear the events */
663 886
 					continue;
664 887
 				}
665
-				fm=get_fd_map(h, h->fd_array[r].fd);
666
-				while(fm->type && (handle_io(fm, r) > 0) && repeat);
888
+				/* repeat handle_io if repeat, fd still watched (not deleted
889
+				 *  inside handle_io), handle_io returns that there's still
890
+				 *  IO and the fd is still watched for the triggering event */
891
+				while(fm->type && 
892
+						(handle_io(fm, h->fd_array[r].revents, r) > 0) &&
893
+						repeat &&
894
+						(fm->events & h->fd_array[r].revents) );
667 895
 			}
668 896
 		}
669 897
 error:
... ...
@@ -676,17 +904,20 @@ error:
676 904
 /* wait for io using select */
677 905
 inline static int io_wait_loop_select(io_wait_h* h, int t, int repeat)
678 906
 {
679
-	fd_set sel_set;
907
+	fd_set sel_rset;
908
+	fd_set sel_wset;
680 909
 	int n, ret;
681 910
 	struct timeval timeout;
682 911
 	int r;
683 912
 	struct fd_map* fm;
913
+	int revents;
684 914
 	
685 915
 again:
686
-		sel_set=h->master_set;
916
+		sel_rset=h->master_rset;
917
+		sel_wset=h->master_wset;
687 918
 		timeout.tv_sec=t;
688 919
 		timeout.tv_usec=0;
689
-		ret=n=select(h->max_fd_select+1, &sel_set, 0, 0, &timeout);
920
+		ret=n=select(h->max_fd_select+1, &sel_rset, &sel_wset, 0, &timeout);
690 921
 		if (n<0){
691 922
 			if (errno==EINTR) goto again; /* just a signal */
692 923
 			LOG(L_ERR, "ERROR: io_wait_loop_select: select: %s [%d]\n",
... ...
@@ -696,9 +927,15 @@ again:
696 927
 		}
697 928
 		/* use poll fd array */
698 929
 		for(r=0; (r<h->max_fd_no) && n; r++){
699
-			if (FD_ISSET(h->fd_array[r].fd, &sel_set)){
930
+			revents=0;
931
+			if (likely(FD_ISSET(h->fd_array[r].fd, &sel_rset)))
932
+				revents|=POLLIN;
933
+			if (unlikely(FD_ISSET(h->fd_array[r].fd, &sel_wset)))
934
+				revents|=POLLOUT;
935
+			if (likely(revents)){
700 936
 				fm=get_fd_map(h, h->fd_array[r].fd);
701
-				while(fm->type && (handle_io(fm, r)>0) && repeat);
937
+				while(fm->type && (fm->events & revents) && 
938
+						(handle_io(fm, revents, r)>0) && repeat);
702 939
 				n--;
703 940
 			}
704 941
 		};
... ...
@@ -713,10 +950,11 @@ inline static int io_wait_loop_epoll(io_wait_h* h, int t, int repeat)
713 950
 {
714 951
 	int n, r;
715 952
 	struct fd_map* fm;
953
+	int revents;
716 954
 	
717 955
 again:
718 956
 		n=epoll_wait(h->epfd, h->ep_array, h->fd_no, t*1000);
719
-		if (n==-1){
957
+		if (unlikely(n==-1)){
720 958
 			if (errno==EINTR) goto again; /* signal, ignore it */
721 959
 			else{
722 960
 				LOG(L_ERR, "ERROR:io_wait_loop_epoll: "
... ...
@@ -735,9 +973,14 @@ again:
735 973
 		}
736 974
 #endif
737 975
 		for (r=0; r<n; r++){
738
-			if (h->ep_array[r].events & (EPOLLIN|EPOLLERR|EPOLLHUP)){
976
+			revents= (POLLIN & (!(h->ep_array[r].events & EPOLLIN)-1)) |
977
+					 (POLLOUT & (!(h->ep_array[r].events & EPOLLOUT)-1)) |
978
+					 (POLLERR & (!(h->ep_array[r].events & EPOLLERR)-1)) |
979
+					 (POLLHUP & (!(h->ep_array[r].events & EPOLLHUP)-1));
980
+			if (likely(revents)){
739 981
 				fm=(struct fd_map*)h->ep_array[r].data.ptr;
740
-				while(fm->type && (handle_io(fm,-1)>0) && repeat);
982
+				while(fm->type && (fm->events & revents) && 
983
+						(handle_io(fm, revents, -1)>0) && repeat);
741 984
 			}else{
742 985
 				LOG(L_ERR, "ERROR:io_wait_loop_epoll: unexpected event %x"
743 986
 							" on %d/%d, data=%p\n", h->ep_array[r].events,
... ...
@@ -763,7 +1006,7 @@ inline static int io_wait_loop_kqueue(io_wait_h* h, int t, int repeat)
763 1006
 again:
764 1007
 		n=kevent(h->kq_fd, h->kq_changes, h->kq_nchanges,  h->kq_array,
765 1008
 					h->fd_no, &tspec);
766
-		if (n==-1){
1009
+		if (unlikely(n==-1)){
767 1010
 			if (errno==EINTR) goto again; /* signal, ignore it */
768 1011
 			else{
769 1012
 				LOG(L_ERR, "ERROR: io_wait_loop_kqueue: kevent:"
... ...
@@ -778,18 +1021,30 @@ again:
778 1021
 					r, n, h->kq_array[r].ident, (long)h->kq_array[r].udata,
779 1022
 					h->kq_array[r].flags);
780 1023
 #endif
781
-			if (h->kq_array[r].flags & EV_ERROR){
1024
+#if 0
1025
+			if (unlikely(h->kq_array[r].flags & EV_ERROR)){
782 1026
 				/* error in changes: we ignore it, it can be caused by
783 1027
 				   trying to remove an already closed fd: race between
784
-				   adding smething to the changes array, close() and
1028
+				   adding something to the changes array, close() and
785 1029
 				   applying the changes */
786 1030
 				LOG(L_INFO, "INFO: io_wait_loop_kqueue: kevent error on "
787 1031
 							"fd %d: %s [%ld]\n", h->kq_array[r].ident,
788 1032
 							strerror(h->kq_array[r].data),
789 1033
 							(long)h->kq_array[r].data);
790
-			}else{ /* READ/EOF */
1034
+			}else{ 
1035
+#endif
791 1036
 				fm=(struct fd_map*)h->kq_array[r].udata;
792
-				while(fm->type && (handle_io(fm, -1)>0) && repeat);
1037
+				if (likely(h->kq_array[r].filter==EVFILT_READ)){
1038
+					revents=POLLIN | 
1039
+						(((int)!(h->kq_array[r].flags & EV_EOF)-1)&POLLHUP);
1040
+					while(fm->type && (fm->events & revents) && 
1041
+							(handle_io(fm, revents, -1)>0) && repeat);
1042
+				}else if (h->kq_array[r].filter==EVFILT_WRITE){
1043
+					revents=POLLOUT | 
1044
+						(((int)!(h->kq_array[r].flags & EV_EOF)-1)&POLLHUP);
1045
+					while(fm->type && (fm->events & revents) && 
1046
+							(handle_io(fm, revents, -1)>0) && repeat);
1047
+				}
793 1048
 			}
794 1049
 		}
795 1050
 error:
... ...
@@ -810,12 +1065,14 @@ inline static int io_wait_loop_sigio_rt(io_wait_h* h, int t)
810 1065
 	int sigio_band;
811 1066
 	int sigio_fd;
812 1067
 	struct fd_map* fm;
1068
+	int revents;
813 1069
 	
814 1070
 	
815 1071
 	ret=1; /* 1 event per call normally */
816 1072
 	ts.tv_sec=t;
817 1073
 	ts.tv_nsec=0;
818
-	if (!sigismember(&h->sset, h->signo) || !sigismember(&h->sset, SIGIO)){
1074
+	if (unlikely(!sigismember(&h->sset, h->signo) ||
1075
+					!sigismember(&h->sset, SIGIO))) {
819 1076
 		LOG(L_CRIT, "BUG: io_wait_loop_sigio_rt: the signal mask"
820 1077
 				" is not properly set!\n");
821 1078
 		goto error;
... ...
@@ -823,7 +1080,7 @@ inline static int io_wait_loop_sigio_rt(io_wait_h* h, int t)
823 1080
 
824 1081
 again:
825 1082
 	n=sigtimedwait(&h->sset, &siginfo, &ts);
826
-	if (n==-1){
1083
+	if (unlikely(n==-1)){
827 1084
 		if (errno==EINTR) goto again; /* some other signal, ignore it */
828 1085
 		else if (errno==EAGAIN){ /* timeout */
829 1086
 			ret=0;
... ...
@@ -834,7 +1091,7 @@ again:
834 1091
 			goto error;
835 1092
 		}
836 1093
 	}
837
-	if (n!=SIGIO){
1094
+	if (likely(n!=SIGIO)){
838 1095
 #ifdef SIGINFO64_WORKARROUND
839 1096
 		/* on linux siginfo.si_band is defined as long in userspace
840 1097
 		 * and as int in kernel (< 2.6.5) => on 64 bits things will break!
... ...
@@ -853,7 +1110,7 @@ again:
853 1110
 			sigio_band=siginfo.si_band;
854 1111
 			sigio_fd=siginfo.si_fd;
855 1112
 		}
856
-		if (siginfo.si_code==SI_SIGIO){
1113
+		if (unlikely(siginfo.si_code==SI_SIGIO)){
857 1114
 			/* old style, we don't know the event (linux 2.2.?) */
858 1115
 			LOG(L_WARN, "WARNING: io_wait_loop_sigio_rt: old style sigio"
859 1116
 					" interface\n");
... ...
@@ -861,7 +1118,7 @@ again:
861 1118
 			/* we can have queued signals generated by fds not watched
862 1119
 			 * any more, or by fds in transition, to a child => ignore them*/
863 1120
 			if (fm->type)
864
-				handle_io(fm, -1);
1121
+				handle_io(fm, POLLIN|POLLOUT, -1);
865 1122
 		}else{
866 1123
 #ifdef EXTRA_DEBUG
867 1124
 			DBG("io_wait_loop_sigio_rt: siginfo: signal=%d (%d),"
... ...
@@ -874,13 +1131,17 @@ again:
874 1131
 			/* on some errors (e.g. when receving TCP RST), sigio_band will
875 1132
 			 * be set to 0x08 (undocumented, no corresp. POLL_xx), so better
876 1133
 			 * catch all events --andrei */
877
-			if (sigio_band/*&(POLL_IN|POLL_ERR|POLL_HUP)*/){
1134
+			if (likely(sigio_band)/*&(POLL_IN|POLL_ERR|POLL_HUP)*/){
878 1135
 				fm=get_fd_map(h, sigio_fd);
1136
+				revents=(POLLIN & (!(sigio_band & POLL_IN)-1)) |
1137
+						(POLLOUT & (!(sigio_band & POLL_OUT)-1)) |
1138
+						(POLLERR & (!(sigio_band & POLL_ERR)-1)) |
1139
+						(POLLHUP & (!(sigio_band & POLL_HUP)-1));
879 1140
 				/* we can have queued signals generated by fds not watched
880 1141
 			 	 * any more, or by fds in transition, to a child 
881 1142
 				 * => ignore them */
882
-				if (fm->type)
883
-					handle_io(fm, -1);
1143
+				if (fm->type && (fm->events & revents))
1144
+					handle_io(fm, revents, -1);
884 1145
 				else
885 1146
 					LOG(L_ERR, "WARNING: io_wait_loop_sigio_rt: ignoring event"
886 1147
 							" %x on fd %d (fm->fd=%d, fm->data=%p)\n",
... ...
@@ -930,7 +1191,7 @@ inline static int io_wait_loop_devpoll(io_wait_h* h, int t, int repeat)
930 1191
 		dpoll.dp_fds=h->fd_array;
931 1192
 again:
932 1193
 		ret=n=ioctl(h->dpoll_fd, DP_POLL, &dpoll);
933
-		if (n==-1){
1194
+		if (unlikely(n==-1)){
934 1195
 			if (errno==EINTR) goto again; /* signal, ignore it */
935 1196
 			else{
936 1197
 				LOG(L_ERR, "ERROR:io_wait_loop_devpoll: ioctl: %s [%d]\n",
... ...
@@ -946,7 +1207,8 @@ again:
946 1207
 			}
947 1208
 			/* POLLIN|POLLHUP just go through */
948 1209
 			fm=get_fd_map(h, h->fd_array[r].fd);
949
-			while(fm->type && (handle_io(fm, r) > 0) && repeat);
1210
+			while(fm->type && (fm->events & h->fd_array[r].revents) &&
1211
+					(handle_io(fm, h->fd_array[r].revents, r) > 0) && repeat);
950 1212
 		}
951 1213
 error:
952 1214
 	return ret;
... ...
@@ -1730,7 +1730,8 @@ inline static int handle_tcp_child(struct tcp_child* tcp_c, int fd_i)
1730 1730
 			/* must be after the de-ref*/
1731 1731
 			tcpconn->flags&=~(F_CONN_REMOVED|F_CONN_READER);
1732 1732
 			if (unlikely(
1733
-					io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn)<0)){
1733
+					io_watch_add(&io_h, tcpconn->s, POLLIN,
1734
+												F_TCPCONN, tcpconn)<0)){
1734 1735
 				LOG(L_CRIT, "ERROR: tcp_main: handle_tcp_child: failed to add"
1735 1736
 						" new socket to the fd list\n");
1736 1737
 				tcpconn->flags|=F_CONN_REMOVED;
... ...
@@ -1879,7 +1880,8 @@ inline static int handle_ser_child(struct process_table* p, int fd_i)
1879 1880
 								tcp_con_lifetime, t);
1880 1881
 			tcpconn->flags&=~F_CONN_REMOVED;
1881 1882
 			if (unlikely(
1882
-					io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn)<0)){
1883
+					io_watch_add(&io_h, tcpconn->s, POLLIN,
1884
+												F_TCPCONN, tcpconn)<0)){
1883 1885
 				LOG(L_CRIT, "ERROR: tcp_main: handle_ser_child: failed to add"
1884 1886
 						" new socket to the fd list\n");
1885 1887
 				tcpconn->flags|=F_CONN_REMOVED;
... ...
@@ -2036,7 +2038,8 @@ static inline int handle_new_connect(struct socket_info* si)
2036 2038
 		local_timer_add(&tcp_main_ltimer, &tcpconn->timer, 
2037 2039
 								tcp_con_lifetime, get_ticks_raw());
2038 2040
 		tcpconn->flags&=~F_CONN_REMOVED;
2039
-		if (unlikely(io_watch_add(&io_h, tcpconn->s, F_TCPCONN, tcpconn)<0)){
2041
+		if (unlikely(io_watch_add(&io_h, tcpconn->s, POLLIN, 
2042
+													F_TCPCONN, tcpconn)<0)){
2040 2043
 			LOG(L_CRIT, "ERROR: tcp_main: handle_new_connect: failed to add"
2041 2044
 						" new socket to the fd list\n");
2042 2045
 			tcpconn->flags|=F_CONN_REMOVED;
... ...
@@ -2128,7 +2131,7 @@ error:
2128 2131
  *         >0 on successfull read from the fd (when there might be more io
2129 2132
  *            queued -- the receive buffer might still be non-empty)
2130 2133
  */
2131
-inline static int handle_io(struct fd_map* fm, int idx)
2134
+inline static int handle_io(struct fd_map* fm, short events, int idx)
2132 2135
 {	
2133 2136
 	int ret;
2134 2137
 	
... ...
@@ -2310,7 +2313,7 @@ void tcp_main_loop()
2310 2313
 	/* add all the sockets we listen on for connections */
2311 2314
 	for (si=tcp_listen; si; si=si->next){
2312 2315
 		if ((si->proto==PROTO_TCP) &&(si->socket!=-1)){
2313
-			if (io_watch_add(&io_h, si->socket, F_SOCKINFO, si)<0){
2316
+			if (io_watch_add(&io_h, si->socket, POLLIN, F_SOCKINFO, si)<0){
2314 2317
 				LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
2315 2318
 							"listen socket to the fd list\n");
2316 2319
 				goto error;
... ...
@@ -2323,7 +2326,7 @@ void tcp_main_loop()
2323 2326
 	if (!tls_disable && tls_loaded()){
2324 2327
 		for (si=tls_listen; si; si=si->next){
2325 2328
 			if ((si->proto==PROTO_TLS) && (si->socket!=-1)){
2326
-				if (io_watch_add(&io_h, si->socket, F_SOCKINFO, si)<0){
2329
+				if (io_watch_add(&io_h, si->socket, POLLIN, F_SOCKINFO, si)<0){
2327 2330
 					LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
2328 2331
 							"tls listen socket to the fd list\n");
2329 2332
 					goto error;
... ...
@@ -2339,7 +2342,7 @@ void tcp_main_loop()
2339 2342
 	 *  (get fd, new connection a.s.o) */
2340 2343
 	for (r=1; r<process_no; r++){
2341 2344
 		if (pt[r].unix_sock>0) /* we can't have 0, we never close it!*/
2342
-			if (io_watch_add(&io_h, pt[r].unix_sock, F_PROC, &pt[r])<0){
2345
+			if (io_watch_add(&io_h, pt[r].unix_sock, POLLIN,F_PROC, &pt[r])<0){
2343 2346
 					LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
2344 2347
 							"process %d unix socket to the fd list\n", r);
2345 2348
 					goto error;
... ...
@@ -2348,8 +2351,8 @@ void tcp_main_loop()
2348 2351
 	/* add all the unix sokets used for communication with the tcp childs */
2349 2352
 	for (r=0; r<tcp_children_no; r++){
2350 2353
 		if (tcp_children[r].unix_sock>0)/*we can't have 0, we never close it!*/
2351
-			if (io_watch_add(&io_h, tcp_children[r].unix_sock, F_TCPCHILD,
2352
-							&tcp_children[r]) <0){
2354
+			if (io_watch_add(&io_h, tcp_children[r].unix_sock, POLLIN,
2355
+									F_TCPCHILD, &tcp_children[r]) <0){
2353 2356
 				LOG(L_CRIT, "ERROR: tcp_main_loop: init: failed to add "
2354 2357
 						"tcp child %d unix socket to the fd list\n", r);
2355 2358
 				goto error;
... ...
@@ -56,6 +56,7 @@ void init_tcp_options()
56 56
 	if (tcp_options.option){\
57 57
 		WARN("tcp_options: tcp_" ##option \
58 58
 				"cannot be enabled (recompile needed)\n"); \
59
+		tcp_options.option=0; \
59 60
 	}
60 61
 
61 62
 
... ...
@@ -64,6 +65,7 @@ void init_tcp_options()
64 65
 	if (tcp_options.option){\
65 66
 		WARN("tcp_options: tcp_" ##option \
66 67
 				"cannot be enabled (no OS support)\n"); \
68
+		tcp_options.option=0; \
67 69
 	}
68 70
 
69 71
 
... ...
@@ -718,7 +718,7 @@ static ticks_t tcpconn_read_timeout(ticks_t t, struct timer_ln* tl, void* data)
718 718
  *         >0 on successfull read from the fd (when there might be more io
719 719
  *            queued -- the receive buffer might still be non-empty)
720 720
  */
721
-inline static int handle_io(struct fd_map* fm, int idx)
721
+inline static int handle_io(struct fd_map* fm, short events, int idx)
722 722
 {	
723 723
 	int ret;
724 724
 	int n;
... ...
@@ -778,7 +778,7 @@ again:
778 778
 			timer_reinit(&con->timer);
779 779
 			local_timer_add(&tcp_reader_ltimer, &con->timer,
780 780
 								S_TO_TICKS(TCP_CHILD_TIMEOUT), t);
781
-			if (unlikely(io_watch_add(&io_w, s, F_TCPCONN, con))<0){
781
+			if (unlikely(io_watch_add(&io_w, s, POLLIN, F_TCPCONN, con))<0){
782 782
 				LOG(L_CRIT, "ERROR: tcp_receive: handle_io: failed to add"
783 783
 						" new socket to the fd list\n");
784 784
 				tcpconn_listrm(tcp_conn_lst, con, c_next, c_prev);
... ...
@@ -845,7 +845,7 @@ void tcp_receive_loop(int unix_sock)
845 845
 	if (init_local_timer(&tcp_reader_ltimer, get_ticks_raw())!=0)
846 846
 		goto error;
847 847
 	/* add the unix socket */
848
-	if (io_watch_add(&io_w, tcpmain_sock, F_TCPMAIN, 0)<0){
848
+	if (io_watch_add(&io_w, tcpmain_sock, POLLIN,  F_TCPMAIN, 0)<0){
849 849
 		LOG(L_CRIT, "ERROR: tcp_receive_loop: init: failed to add socket "
850 850
 							" to the fd list\n");
851 851
 		goto error;