commit - abb1abeb772c0876655c066d396f92ee46292c62
commit + 7f44a2ad1c8e6c28b6d7f384a3af9da12e22fcab
blob - d40b4850ce7d2794b8daaf37df261b49e8fc55bc
blob + 34066b2b3958928910e588cf6ba879b99caeb722
--- src/ngircd/io.c
+++ src/ngircd/io.c
#include "portab.h"
-static char UNUSED id[] = "$Id: io.c,v 1.28 2008/01/02 10:29:51 fw Exp $";
+static char UNUSED id[] = "$Id: io.c,v 1.29 2008/03/27 15:47:21 fw Exp $";
#include <assert.h>
#include <stdlib.h>
static void io_docallback PARAMS((int fd, short what));
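+/* io_debug(): log a message together with an fd and event mask; compiles to a no-op unless DEBUG_IO is defined. */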
+#ifdef DEBUG_IO
+static void io_debug(const char *s, int fd, int what)
+{
+ Log(LOG_DEBUG, "%s: %d, %d\n", s, fd, what);
+}
+#else
+static inline void io_debug(const char UNUSED *s, int UNUSED a, int UNUSED b) {/*NOTHING*/}
+#endif
+
static io_event *
io_event_get(int fd)
{
#ifdef IO_USE_DEVPOLL
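+/* Wait for events on the /dev/poll master fd and run the callbacks of all ready fds; returns the number of events dispatched (<= 0 on timeout or error). */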
+static int
+io_dispatch_devpoll(struct timeval *tv)
+{
+ struct dvpoll dvp;
+ time_t sec = tv->tv_sec * 1000;
+ int i, total, ret, timeout = tv->tv_usec + sec;
+ short what;
+ struct pollfd p[100];
+
+ if (timeout < 0)
+ timeout = 1000;
+
+ total = 0;
+ do {
+ dvp.dp_timeout = timeout;
+ dvp.dp_nfds = 100;
+ dvp.dp_fds = p;
+ ret = ioctl(io_masterfd, DP_POLL, &dvp);
+ total += ret;
+ if (ret <= 0)
+ return total;
+ for (i=0; i < ret ; i++) {
+ what = 0;
+ if (p[i].revents & (POLLIN|POLLPRI))
+ what = IO_WANTREAD;
+
+ if (p[i].revents & POLLOUT)
+ what |= IO_WANTWRITE;
+
+ if (p[i].revents && !what) {
+ /* other flag is set, probably POLLERR */
+ what = IO_ERROR;
+ }
+ io_docallback(p[i].fd, what);
+ }
+ } while (ret == 100);
+
+ return total;
+}
+
+
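+/* Update the interest set for fd by writing a pollfd structure to the /dev/poll master fd. */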
+static bool
+io_event_change_devpoll(int fd, short what)
+{
+ struct pollfd p;
+
+ p.events = 0;
+
+ if (what & IO_WANTREAD)
+ p.events = POLLIN | POLLPRI;
+ if (what & IO_WANTWRITE)
+ p.events |= POLLOUT;
+
+ p.fd = fd;
+ return write(io_masterfd, &p, sizeof p) == (ssize_t)sizeof p;
+}
+
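+/* Remove fd from the /dev/poll interest set (POLLREMOVE). */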
static void
+io_close_devpoll(int fd)
+{
+ struct pollfd p;
+ p.events = POLLREMOVE;
+ p.fd = fd;
+ write(io_masterfd, &p, sizeof p);
+}
+
+static void
io_library_init_devpoll(unsigned int eventsize)
{
io_masterfd = open("/dev/poll", O_RDWR);
Log(LOG_INFO, "IO subsystem: /dev/poll (initial maxfd %u, masterfd %d).",
eventsize, io_masterfd);
}
+#else
+static inline void io_close_devpoll(int UNUSED x) {/* NOTHING */}
+static inline void io_library_init_devpoll(unsigned int UNUSED ev) {/*NOTHING*/}
#endif
#ifdef IO_USE_POLL
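+/* poll() all registered fds and run the callbacks of the ready ones; stops scanning once every ready fd has been handled. */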
+static int
+io_dispatch_poll(struct timeval *tv)
+{
+ time_t sec = tv->tv_sec * 1000;
+ int i, ret, timeout = tv->tv_usec + sec;
+ int fds_ready;
+ short what;
+ struct pollfd *p = array_start(&pollfds);
+
+ if (timeout < 0)
+ timeout = 1000;
+
+ ret = poll(p, poll_maxfd + 1, timeout);
+ if (ret <= 0)
+ return ret;
+
+ fds_ready = ret;
+ for (i=0; i <= poll_maxfd; i++) {
+ what = 0;
+ if (p[i].revents & (POLLIN|POLLPRI))
+ what = IO_WANTREAD;
+
+ if (p[i].revents & POLLOUT)
+ what |= IO_WANTWRITE;
+
+ if (p[i].revents && !what) {
+ /* other flag is set, probably POLLERR */
+ what = IO_ERROR;
+ }
+ if (what) {
+ fds_ready--;
+ io_docallback(i, what);
+ }
+ if (fds_ready <= 0)
+ break;
+ }
+
+ return ret;
+}
+
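+/* Store the requested events for fd in the pollfds array, growing the array and poll_maxfd as needed. */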
+static bool
+io_event_change_poll(int fd, short what)
+{
+ struct pollfd *p;
+ short events = 0;
+
+ if (what & IO_WANTREAD)
+ events = POLLIN | POLLPRI;
+ if (what & IO_WANTWRITE)
+ events |= POLLOUT;
+
+ p = array_alloc(&pollfds, sizeof *p, fd);
+ if (p) {
+ p->events = events;
+ p->fd = fd;
+ if (fd > poll_maxfd)
+ poll_maxfd = fd;
+ }
+ return p != NULL;
+}
+
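+/* Mark the pollfd slot for fd as unused and, if it was the largest fd, search downwards for the new poll_maxfd. */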
static void
+io_close_poll(int fd)
+{
+ struct pollfd *p;
+ p = array_get(&pollfds, sizeof *p, fd);
+ if (!p) return;
+
+ p->fd = -1;
+ if (fd == poll_maxfd) {
+ while (poll_maxfd > 0) {
+ --poll_maxfd;
+ p = array_get(&pollfds, sizeof *p, poll_maxfd);
+ if (p && p->fd >= 0)
+ break;
+ }
+ }
+}
+
+static void
io_library_init_poll(unsigned int eventsize)
{
struct pollfd *p;
library_initialized = true;
}
}
+#else
+static inline void io_close_poll(int UNUSED x) {/* NOTHING */}
+static inline void io_library_init_poll(unsigned int UNUSED ev) {/*NOTHING*/}
#endif
#ifdef IO_USE_SELECT
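+/* select() on copies of the reader/writer fd sets and run the callbacks of all ready fds. */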
+static int
+io_dispatch_select(struct timeval *tv)
+{
+ fd_set readers_tmp = readers;
+ fd_set writers_tmp = writers;
+ short what;
+ int ret, i;
+ int fds_ready;
+ ret = select(select_maxfd + 1, &readers_tmp, &writers_tmp, NULL, tv);
+ if (ret <= 0)
+ return ret;
+
+ fds_ready = ret;
+
+ for (i = 0; i <= select_maxfd; i++) {
+ what = 0;
+ if (FD_ISSET(i, &readers_tmp)) {
+ what = IO_WANTREAD;
+ fds_ready--;
+ }
+
+ if (FD_ISSET(i, &writers_tmp)) {
+ what |= IO_WANTWRITE;
+ fds_ready--;
+ }
+ if (what)
+ io_docallback(i, what);
+ if (fds_ready <= 0)
+ break;
+ }
+
+ return ret;
+}
+
static void
io_library_init_select(unsigned int eventsize)
{
+ if (library_initialized)
+ return;
Log(LOG_INFO, "IO subsystem: select (initial maxfd %u).",
eventsize);
FD_ZERO(&readers);
#endif /* FD_SETSIZE */
library_initialized = true;
}
+
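+/* Clear fd from the select fd sets and update select_maxfd; a no-op if epoll() is in use (io_masterfd >= 0). */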
+static void
+io_close_select(int fd)
+{
+ io_event *i;
+
+ if (io_masterfd >= 0) /* Are we using epoll()? */
+ return;
+
+ FD_CLR(fd, &writers);
+ FD_CLR(fd, &readers);
+
+ i = io_event_get(fd);
+ if (!i) return;
+
+ if (fd == select_maxfd) {
+ while (select_maxfd>0) {
+ --select_maxfd; /* find largest fd */
+ i = io_event_get(select_maxfd);
+ if (i && i->callback) break;
+ }
+ }
+}
+#else
+static inline void io_library_init_select(unsigned int UNUSED ev) {/* NOTHING */}
+static inline void io_close_select(int UNUSED x) {/* NOTHING */}
#endif /* SELECT */
#ifdef IO_USE_EPOLL
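+/* Add, modify or delete the epoll registration for fd according to "action" (EPOLL_CTL_ADD/MOD/DEL). */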
+static bool
+io_event_change_epoll(int fd, short what, const int action)
+{
+ struct epoll_event ev = { 0, {0} };
+ ev.data.fd = fd;
+
+ if (what & IO_WANTREAD)
+ ev.events = EPOLLIN | EPOLLPRI;
+ if (what & IO_WANTWRITE)
+ ev.events |= EPOLLOUT;
+
+ return epoll_ctl(io_masterfd, action, fd, &ev) == 0;
+}
+
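+/* epoll_wait() for up to 100 events at a time and run the matching callbacks; loops while the event buffer was filled completely. */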
+static int
+io_dispatch_epoll(struct timeval *tv)
+{
+ time_t sec = tv->tv_sec * 1000;
+ int i, total = 0, ret, timeout = tv->tv_usec + sec;
+ struct epoll_event epoll_ev[100];
+ short type;
+
+ if (timeout < 0)
+ timeout = 1000;
+
+ do {
+ ret = epoll_wait(io_masterfd, epoll_ev, 100, timeout);
+ total += ret;
+ if (ret <= 0)
+ return total;
+
+ for (i = 0; i < ret; i++) {
+ type = 0;
+ if (epoll_ev[i].events & (EPOLLERR | EPOLLHUP))
+ type = IO_ERROR;
+
+ if (epoll_ev[i].events & (EPOLLIN | EPOLLPRI))
+ type |= IO_WANTREAD;
+
+ if (epoll_ev[i].events & EPOLLOUT)
+ type |= IO_WANTWRITE;
+
+ io_docallback(epoll_ev[i].data.fd, type);
+ }
+
+ timeout = 0;
+ } while (ret == 100);
+
+ return total;
+}
+
static void
io_library_init_epoll(unsigned int eventsize)
{
Log(LOG_INFO,
"IO subsystem: epoll (hint size %d, initial maxfd %u, masterfd %d).",
ecreate_hint, eventsize, io_masterfd);
+ return;
}
-}
+#ifdef IO_USE_SELECT
+ Log(LOG_INFO, "Can't initialize epoll() IO interface, falling back to select() ...");
#endif
+}
+#else
+static inline void io_library_init_epoll(unsigned int UNUSED ev) {/* NOTHING */}
+#endif /* IO_USE_EPOLL */
#ifdef IO_USE_KQUEUE
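+/* Flush all kevent changes queued in io_evcache to the kernel in a single kevent() call. */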
+static bool
+io_event_kqueue_commit_cache(void)
+{
+ struct kevent *events;
+ bool ret;
+ int len = (int) array_length(&io_evcache, sizeof (struct kevent));
+
+ if (!len) /* nothing to do */
+ return true;
+
+ assert(len>0);
+
+ if (len < 0) {
+ array_free(&io_evcache);
+ return false;
+ }
+
+ events = array_start(&io_evcache);
+
+ assert(events != NULL);
+
+ ret = kevent(io_masterfd, events, len, NULL, 0, NULL) == 0;
+ if (ret)
+ array_trunc(&io_evcache);
+ return ret;
+}
+
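+/* Queue EVFILT_READ/EVFILT_WRITE changes for fd in io_evcache (falling back to a direct kevent() call if caching fails) and commit the cache once it holds 100 entries. */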
+static bool
+io_event_change_kqueue(int fd, short what, const int action)
+{
+ struct kevent kev;
+ bool ret = true;
+
+ if (what & IO_WANTREAD) {
+ EV_SET(&kev, fd, EVFILT_READ, action, 0, 0, 0);
+ ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev));
+ if (!ret)
+ ret = kevent(io_masterfd, &kev,1, NULL, 0, NULL) == 0;
+ }
+
+ if (ret && (what & IO_WANTWRITE)) {
+ EV_SET(&kev, fd, EVFILT_WRITE, action, 0, 0, 0);
+ ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev));
+ if (!ret)
+ ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
+ }
+
+ if (array_length(&io_evcache, sizeof kev) >= 100)
+ io_event_kqueue_commit_cache();
+ return ret;
+}
+
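+/* Submit pending changes from io_evcache and wait for up to 100 events; dispatches read, write and error conditions to the registered callbacks. */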
+static int
+io_dispatch_kqueue(struct timeval *tv)
+{
+ int i, total = 0, ret;
+ struct kevent kev[100];
+ struct kevent *newevents;
+ struct timespec ts;
+ int newevents_len;
+ ts.tv_sec = tv->tv_sec;
+ ts.tv_nsec = tv->tv_usec * 1000;
+
+ do {
+ newevents_len = (int) array_length(&io_evcache, sizeof (struct kevent));
+ newevents = (newevents_len > 0) ? array_start(&io_evcache) : NULL;
+ assert(newevents_len >= 0);
+
+ ret = kevent(io_masterfd, newevents, newevents_len, kev, 100, &ts);
+ if (newevents && ret != -1)
+ array_trunc(&io_evcache);
+
+ total += ret;
+ if (ret <= 0)
+ return total;
+
+ for (i = 0; i < ret; i++) {
+ io_debug("dispatch_kqueue: fd, kev.flags", (int)kev[i].ident, kev[i].flags);
+ if (kev[i].flags & (EV_EOF|EV_ERROR)) {
+ if (kev[i].flags & EV_ERROR)
+ Log(LOG_ERR, "kevent fd %d: EV_ERROR (%s)",
+ (int)kev[i].ident, strerror((int)kev[i].data));
+ io_docallback((int)kev[i].ident, IO_ERROR);
+ continue;
+ }
+
+ switch (kev[i].filter) {
+ case EVFILT_READ:
+ io_docallback((int)kev[i].ident, IO_WANTREAD);
+ break;
+ case EVFILT_WRITE:
+ io_docallback((int)kev[i].ident, IO_WANTWRITE);
+ break;
+ default:
+ LogDebug("Unknown kev.filter number %d for fd %d",
+				kev[i].filter, (int)kev[i].ident);
+ /* Fall through */
+ case EV_ERROR:
+ io_docallback((int)kev[i].ident, IO_ERROR);
+ break;
+ }
+ }
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ } while (ret == 100);
+
+ return total;
+}
+
static void
io_library_init_kqueue(unsigned int eventsize)
{
if (io_masterfd >= 0)
library_initialized = true;
}
+#else
+static inline void io_library_init_kqueue(unsigned int UNUSED ev) {/* NOTHING */}
#endif
#endif /* IO_USE_SELECT */
if ((eventsize > 0) && !array_alloc(&io_events, sizeof(io_event), (size_t)eventsize))
eventsize = 0;
-#ifdef IO_USE_EPOLL
+
io_library_init_epoll(eventsize);
-#ifdef IO_USE_SELECT
- if (io_masterfd < 0)
- Log(LOG_INFO, "Can't initialize epoll() IO interface, falling back to select() ...");
-#endif
-#endif
-#ifdef IO_USE_KQUEUE
io_library_init_kqueue(eventsize);
-#endif
-#ifdef IO_USE_DEVPOLL
io_library_init_devpoll(eventsize);
-#endif
-#ifdef IO_USE_POLL
io_library_init_poll(eventsize);
-#endif
-#ifdef IO_USE_SELECT
- if (! library_initialized)
- io_library_init_select(eventsize);
-#endif
+ io_library_init_select(eventsize);
+
return library_initialized;
}
FD_ZERO(&readers);
FD_ZERO(&writers);
#endif
-#ifdef IO_USE_EPOLL
+#if defined(IO_USE_EPOLL) || defined(IO_USE_KQUEUE) || defined(IO_USE_DEVPOLL)
if (io_masterfd >= 0)
close(io_masterfd);
io_masterfd = -1;
#endif
#ifdef IO_USE_KQUEUE
- close(io_masterfd);
- io_masterfd = -1;
array_free(&io_evcache);
#endif
library_initialized = false;
}
-bool
-io_event_create(int fd, short what, void (*cbfunc) (int, short))
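+/* Register fd and the requested event mask with whichever I/O backend is compiled in and currently active. */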
+static bool
+backend_create_ev(int fd, short what)
{
bool ret;
- io_event *i;
-
- assert(fd >= 0);
-#if defined(IO_USE_SELECT) && defined(FD_SETSIZE)
- if (fd >= FD_SETSIZE) {
- Log(LOG_ERR,
- "fd %d exceeds FD_SETSIZE (%u) (select can't handle more file descriptors)",
- fd, FD_SETSIZE);
- return false;
- }
-#endif
- i = (io_event *) array_alloc(&io_events, sizeof(io_event), (size_t) fd);
- if (!i) {
- Log(LOG_WARNING,
- "array_alloc failed: could not allocate space for %d io_event structures",
- fd);
- return false;
- }
-
- i->callback = cbfunc;
- i->what = 0;
#ifdef IO_USE_DEVPOLL
ret = io_event_change_devpoll(fd, what);
#endif
if (io_masterfd < 0)
ret = io_event_add(fd, what);
#endif
- if (ret) i->what = what;
return ret;
}
-#ifdef IO_USE_DEVPOLL
-static bool
-io_event_change_devpoll(int fd, short what)
+bool
+io_event_create(int fd, short what, void (*cbfunc) (int, short))
{
- struct pollfd p;
+ bool ret;
+ io_event *i;
- p.events = 0;
-
- if (what & IO_WANTREAD)
- p.events = POLLIN | POLLPRI;
- if (what & IO_WANTWRITE)
- p.events |= POLLOUT;
-
- p.fd = fd;
- return write(io_masterfd, &p, sizeof p) == (ssize_t)sizeof p;
-}
-#endif
-
-
-
-#ifdef IO_USE_POLL
-static bool
-io_event_change_poll(int fd, short what)
-{
- struct pollfd *p;
- short events = 0;
-
- if (what & IO_WANTREAD)
- events = POLLIN | POLLPRI;
- if (what & IO_WANTWRITE)
- events |= POLLOUT;
-
- p = array_alloc(&pollfds, sizeof *p, fd);
- if (p) {
- p->events = events;
- p->fd = fd;
- if (fd > poll_maxfd)
- poll_maxfd = fd;
+ assert(fd >= 0);
+#if defined(IO_USE_SELECT) && defined(FD_SETSIZE)
+ if (fd >= FD_SETSIZE) {
+ Log(LOG_ERR,
+ "fd %d exceeds FD_SETSIZE (%u) (select can't handle more file descriptors)",
+ fd, FD_SETSIZE);
+ return false;
}
- return p != NULL;
-}
#endif
-
-#ifdef IO_USE_EPOLL
-static bool
-io_event_change_epoll(int fd, short what, const int action)
-{
- struct epoll_event ev = { 0, {0} };
- ev.data.fd = fd;
-
- if (what & IO_WANTREAD)
- ev.events = EPOLLIN | EPOLLPRI;
- if (what & IO_WANTWRITE)
- ev.events |= EPOLLOUT;
-
- return epoll_ctl(io_masterfd, action, fd, &ev) == 0;
-}
-#endif
-
-#ifdef IO_USE_KQUEUE
-static bool
-io_event_kqueue_commit_cache(void)
-{
- struct kevent *events;
- bool ret;
- int len = (int) array_length(&io_evcache, sizeof (struct kevent));
-
- if (!len) /* nothing to do */
- return true;
-
- assert(len>0);
-
- if (len < 0) {
- array_free(&io_evcache);
+ i = (io_event *) array_alloc(&io_events, sizeof(io_event), (size_t) fd);
+ if (!i) {
+ Log(LOG_WARNING,
+ "array_alloc failed: could not allocate space for %d io_event structures",
+ fd);
return false;
}
- events = array_start(&io_evcache);
-
- assert(events != NULL);
-
- ret = kevent(io_masterfd, events, len, NULL, 0, NULL) == 0;
+ i->callback = cbfunc;
+ i->what = 0;
+ ret = backend_create_ev(fd, what);
if (ret)
- array_trunc(&io_evcache);
+ i->what = what;
return ret;
}
-static bool
-io_event_change_kqueue(int fd, short what, const int action)
-{
- struct kevent kev;
- bool ret = true;
-
- if (what & IO_WANTREAD) {
- EV_SET(&kev, fd, EVFILT_READ, action, 0, 0, 0);
- ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev));
- if (!ret)
- ret = kevent(io_masterfd, &kev,1, NULL, 0, NULL) == 0;
- }
-
- if (ret && (what & IO_WANTWRITE)) {
- EV_SET(&kev, fd, EVFILT_WRITE, action, 0, 0, 0);
- ret = array_catb(&io_evcache, (char*) &kev, sizeof (kev));
- if (!ret)
- ret = kevent(io_masterfd, &kev, 1, NULL, 0, NULL) == 0;
- }
-
- if (array_length(&io_evcache, sizeof kev) >= 100)
- io_event_kqueue_commit_cache();
- return ret;
-}
-#endif
-
-
bool
io_event_add(int fd, short what)
{
if ((i->what & what) == what) /* event type is already registered */
return true;
-#ifdef DEBUG_IO
- Log(LOG_DEBUG, "io_event_add(): fd %d, what %d.", fd, what);
-#endif
+
+ io_debug("io_event_add: fd, what", fd, what);
+
i->what |= what;
#ifdef IO_USE_EPOLL
if (io_masterfd >= 0)
return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
#endif
-
#ifdef IO_USE_KQUEUE
return io_event_change_kqueue(fd, what, EV_ADD | EV_ENABLE);
#endif
return true;
#endif
+ return false;
}
int flags = fcntl(fd, F_GETFL);
if (flags == -1)
return false;
-
#ifndef O_NONBLOCK
#define O_NONBLOCK O_NDELAY
#endif
}
-#ifdef IO_USE_DEVPOLL
-static void
-io_close_devpoll(int fd)
-{
- struct pollfd p;
- p.events = POLLREMOVE;
- p.fd = fd;
- write(io_masterfd, &p, sizeof p);
-}
-#else
-static inline void
-io_close_devpoll(int UNUSED x)
-{
- /* NOTHING */
-}
-#endif
-
-
-
-#ifdef IO_USE_POLL
-static void
-io_close_poll(int fd)
-{
- struct pollfd *p;
- p = array_get(&pollfds, sizeof *p, fd);
- if (!p) return;
-
- p->fd = -1;
- if (fd == poll_maxfd) {
- while (poll_maxfd > 0) {
- --poll_maxfd;
- p = array_get(&pollfds, sizeof *p, poll_maxfd);
- if (p && p->fd >= 0)
- break;
- }
- }
-}
-#else
-static inline void io_close_poll(int UNUSED x) { /* NOTHING */ }
-#endif
-
-
-#ifdef IO_USE_SELECT
-static void
-io_close_select(int fd)
-{
- io_event *i;
-
- if (io_masterfd >= 0) /* Are we using epoll()? */
- return;
-
- FD_CLR(fd, &writers);
- FD_CLR(fd, &readers);
-
- i = io_event_get(fd);
- if (!i) return;
-
- if (fd == select_maxfd) {
- while (select_maxfd>0) {
- --select_maxfd; /* find largest fd */
- i = io_event_get(select_maxfd);
- if (i && i->callback) break;
- }
- }
-}
-#else
-static inline void
-io_close_select(int UNUSED x)
-{
- /* NOTHING */
-}
-#endif
-
-
bool
io_close(int fd)
{
io_event_kqueue_commit_cache();
}
#endif
-
io_close_devpoll(fd);
io_close_poll(fd);
io_close_select(fd);
-
#ifdef IO_USE_EPOLL
io_event_change_epoll(fd, 0, EPOLL_CTL_DEL);
#endif
io_event_del(int fd, short what)
{
io_event *i = io_event_get(fd);
-#ifdef DEBUG_IO
- Log(LOG_DEBUG, "io_event_del(): trying to delete eventtype %d on fd %d", what, fd);
-#endif
+
+ io_debug("io_event_del: trying to delete eventtype; fd, what", fd, what);
if (!i) return false;
if (!(i->what & what)) /* event is already disabled */
return true;
i->what &= ~what;
-
#ifdef IO_USE_DEVPOLL
return io_event_change_devpoll(fd, i->what);
#endif
if (io_masterfd >= 0)
return io_event_change_epoll(fd, i->what, EPOLL_CTL_MOD);
#endif
-
#ifdef IO_USE_KQUEUE
return io_event_change_kqueue(fd, what, EV_DISABLE);
#endif
if (what & IO_WANTREAD)
FD_CLR(fd, &readers);
-
return true;
#endif
-}
-
-
-#ifdef IO_USE_SELECT
-static int
-io_dispatch_select(struct timeval *tv)
-{
- fd_set readers_tmp = readers;
- fd_set writers_tmp = writers;
- short what;
- int ret, i;
- int fds_ready;
- ret = select(select_maxfd + 1, &readers_tmp, &writers_tmp, NULL, tv);
- if (ret <= 0)
- return ret;
-
- fds_ready = ret;
-
- for (i = 0; i <= select_maxfd; i++) {
- what = 0;
- if (FD_ISSET(i, &readers_tmp)) {
- what = IO_WANTREAD;
- fds_ready--;
- }
-
- if (FD_ISSET(i, &writers_tmp)) {
- what |= IO_WANTWRITE;
- fds_ready--;
- }
- if (what)
- io_docallback(i, what);
- if (fds_ready <= 0)
- break;
- }
-
- return ret;
+ return false;
}
-#endif
-#ifdef IO_USE_DEVPOLL
-static int
-io_dispatch_devpoll(struct timeval *tv)
-{
- struct dvpoll dvp;
- time_t sec = tv->tv_sec * 1000;
- int i, total, ret, timeout = tv->tv_usec + sec;
- short what;
- struct pollfd p[100];
-
- if (timeout < 0)
- timeout = 1000;
-
- total = 0;
- do {
- dvp.dp_timeout = timeout;
- dvp.dp_nfds = 100;
- dvp.dp_fds = p;
- ret = ioctl(io_masterfd, DP_POLL, &dvp);
- total += ret;
- if (ret <= 0)
- return total;
- for (i=0; i < ret ; i++) {
- what = 0;
- if (p[i].revents & (POLLIN|POLLPRI))
- what = IO_WANTREAD;
-
- if (p[i].revents & POLLOUT)
- what |= IO_WANTWRITE;
-
- if (p[i].revents && !what) {
- /* other flag is set, probably POLLERR */
- what = IO_ERROR;
- }
- io_docallback(p[i].fd, what);
- }
- } while (ret == 100);
-
- return total;
-}
-#endif
-
-
-#ifdef IO_USE_POLL
-static int
-io_dispatch_poll(struct timeval *tv)
-{
- time_t sec = tv->tv_sec * 1000;
- int i, ret, timeout = tv->tv_usec + sec;
- int fds_ready;
- short what;
- struct pollfd *p = array_start(&pollfds);
-
- if (timeout < 0)
- timeout = 1000;
-
- ret = poll(p, poll_maxfd + 1, timeout);
- if (ret <= 0)
- return ret;
-
- fds_ready = ret;
- for (i=0; i <= poll_maxfd; i++) {
- what = 0;
- if (p[i].revents & (POLLIN|POLLPRI))
- what = IO_WANTREAD;
-
- if (p[i].revents & POLLOUT)
- what |= IO_WANTWRITE;
-
- if (p[i].revents && !what) {
- /* other flag is set, probably POLLERR */
- what = IO_ERROR;
- }
- if (what) {
- fds_ready--;
- io_docallback(i, what);
- }
- if (fds_ready <= 0)
- break;
- }
-
- return ret;
-}
-#endif
-
-
-#ifdef IO_USE_EPOLL
-static int
-io_dispatch_epoll(struct timeval *tv)
-{
- time_t sec = tv->tv_sec * 1000;
- int i, total = 0, ret, timeout = tv->tv_usec + sec;
- struct epoll_event epoll_ev[100];
- short type;
-
- if (timeout < 0)
- timeout = 1000;
-
- do {
- ret = epoll_wait(io_masterfd, epoll_ev, 100, timeout);
- total += ret;
- if (ret <= 0)
- return total;
-
- for (i = 0; i < ret; i++) {
- type = 0;
- if (epoll_ev[i].events & (EPOLLERR | EPOLLHUP))
- type = IO_ERROR;
-
- if (epoll_ev[i].events & (EPOLLIN | EPOLLPRI))
- type |= IO_WANTREAD;
-
- if (epoll_ev[i].events & EPOLLOUT)
- type |= IO_WANTWRITE;
-
- io_docallback(epoll_ev[i].data.fd, type);
- }
-
- timeout = 0;
- } while (ret == 100);
-
- return total;
-}
-#endif
-
-
-#ifdef IO_USE_KQUEUE
-static int
-io_dispatch_kqueue(struct timeval *tv)
-{
- int i, total = 0, ret;
- struct kevent kev[100];
- struct kevent *newevents;
- struct timespec ts;
- int newevents_len;
- ts.tv_sec = tv->tv_sec;
- ts.tv_nsec = tv->tv_usec * 1000;
-
- do {
- newevents_len = (int) array_length(&io_evcache, sizeof (struct kevent));
- newevents = (newevents_len > 0) ? array_start(&io_evcache) : NULL;
- assert(newevents_len >= 0);
-
- ret = kevent(io_masterfd, newevents, newevents_len, kev, 100, &ts);
- if (newevents && ret != -1)
- array_trunc(&io_evcache);
-
- total += ret;
- if (ret <= 0)
- return total;
-
- for (i = 0; i < ret; i++) {
-#ifdef DEBUG_IO
- LogDebug("fd %d, kev.flags: %x", (int)kev[i].ident, kev[i].flags);
-#endif
- if (kev[i].flags & (EV_EOF|EV_ERROR)) {
- if (kev[i].flags & EV_ERROR)
- Log(LOG_ERR, "kevent fd %d: EV_ERROR (%s)",
- (int)kev[i].ident, strerror((int)kev[i].data));
- io_docallback((int)kev[i].ident, IO_ERROR);
- continue;
- }
-
- switch (kev[i].filter) {
- case EVFILT_READ:
- io_docallback((int)kev[i].ident, IO_WANTREAD);
- break;
- case EVFILT_WRITE:
- io_docallback((int)kev[i].ident, IO_WANTWRITE);
- break;
- default:
- LogDebug("Unknown kev.filter number %d for fd %d",
- kev[i].filter, kev[i].ident);
- /* Fall through */
- case EV_ERROR:
- io_docallback((int)kev[i].ident, IO_ERROR);
- break;
- }
- }
- ts.tv_sec = 0;
- ts.tv_nsec = 0;
- } while (ret == 100);
-
- return total;
-}
-#endif
-
-
int
io_dispatch(struct timeval *tv)
{
#ifdef IO_USE_POLL
return io_dispatch_poll(tv);
#endif
+ return -1;
}
static void
io_docallback(int fd, short what)
{
- io_event *i;
-#ifdef DEBUG_IO
- Log(LOG_DEBUG, "doing callback for fd %d, what %d", fd, what);
-#endif
- i = io_event_get(fd);
+ io_event *i = io_event_get(fd);
+ io_debug("io_docallback; fd, what", fd, what);
+
if (i->callback) { /* callback might be NULL if a previous callback function
called io_close on this fd */
i->callback(fd, (what & IO_ERROR) ? i->what : what);