-
-/*
- * Run the main I/O event loop until event_exit() clears `running`.
- *
- * Each iteration first runs expired timeouts via timeout_execute(), then
- * polls the registered io_t descriptors using whichever backend was chosen
- * at build time (epoll, kqueue or select on POSIX; WSA event objects on
- * Windows) and fires the matching IO_READ/IO_WRITE callbacks.
- *
- * Returns true on a clean shutdown, false on a fatal polling error.
- */
-bool event_loop(void) {
- event_init();
- running = true;
-
-#ifndef HAVE_WINDOWS
-
-#ifdef HAVE_SELECT
- /* select() mutates the fd_sets handed to it, so keep scratch copies here
-    and refill them from the master sets (readfds/writefds) every pass. */
- fd_set readable;
- fd_set writable;
-#endif
-
- while(running) {
- struct timeval diff;
- /* Fire any expired timeouts; tv is the delay until the next one.
-    NOTE(review): the epoll/kqueue branches below dereference tv without a
-    NULL check, while the Windows branch guards with `tv ? ... : WSA_INFINITE`
-    — confirm timeout_execute() can never return NULL on this path. */
- struct timeval *tv = timeout_execute(&diff);
-
-#ifdef HAVE_SELECT
- memcpy(&readable, &readfds, sizeof(readable));
- memcpy(&writable, &writefds, sizeof(writable));
-#endif
-
-#ifdef HAVE_EPOLL
- struct epoll_event events[MAX_EVENTS_PER_LOOP];
- /* epoll_wait() takes an int timeout in milliseconds; clamp to INT_MAX. */
- long timeout = (tv->tv_sec * 1000) + (tv->tv_usec / 1000);
-
- if(timeout > INT_MAX) {
- timeout = INT_MAX;
- }
-
- int n = epoll_wait(event_fd, events, MAX_EVENTS_PER_LOOP, (int)timeout);
-#endif
-
-#ifdef HAVE_KQUEUE
- struct kevent events[MAX_EVENTS_PER_LOOP];
-
- /* kevent() wants a timespec rather than a timeval. */
- const struct timespec ts = {
- .tv_sec = tv->tv_sec,
- .tv_nsec = tv->tv_usec * 1000,
- };
-
- int n = kevent(event_fd, NULL, 0, events, MAX_EVENTS_PER_LOOP, &ts);
-#endif
-
-#ifdef HAVE_SELECT
- /* select() needs (highest fd + 1); the io_tree appears to be ordered by
-    fd, so the tail entry holds the largest one — TODO confirm ordering. */
- int maxfds = 0;
-
- if(io_tree.tail) {
- io_t *last = io_tree.tail->data;
- maxfds = last->fd + 1;
- }
-
- int n = select(maxfds, &readable, &writable, NULL, tv);
-#endif
-
- if(n < 0) {
- /* Transient wakeups (e.g. an interrupted wait) are retried;
-    anything else aborts the loop with an error. */
- if(sockwouldblock(sockerrno)) {
- continue;
- } else {
- return false;
- }
- }
-
- if(!n) {
- /* Timed out with nothing ready; loop so timeouts run again. */
- continue;
- }
-
- /* Snapshot the tree generation: callbacks may add or remove io_t
-    entries, and the dispatch loops below must stop when that happens. */
- unsigned int curgen = io_tree.generation;
-
-
-#ifdef HAVE_EPOLL
-
- for(int i = 0; i < n; i++) {
- io_t *io = events[i].data.ptr;
-
- if(events[i].events & EPOLLOUT && io->flags & IO_WRITE) {
- io->cb(io->data, IO_WRITE);
- }
-
- /* The callback may have modified the io_tree; bail out and let the
-    next poll pick up any remaining events. */
- if(curgen != io_tree.generation) {
- break;
- }
-
- if(events[i].events & EPOLLIN && io->flags & IO_READ) {
- io->cb(io->data, IO_READ);
- }
-
- if(curgen != io_tree.generation) {
- break;
- }
- }
-
-#endif
-
-#ifdef HAVE_KQUEUE
-
- for(int i = 0; i < n; i++) {
- const struct kevent *evt = &events[i];
- const io_t *io = evt->udata;
-
- if(evt->filter == EVFILT_WRITE) {
- io->cb(io->data, IO_WRITE);
- } else if(evt->filter == EVFILT_READ) {
- io->cb(io->data, IO_READ);
- } else {
- continue;
- }
-
- /* As above: stop dispatching if a callback changed the tree. */
- if(curgen != io_tree.generation) {
- break;
- }
- }
-
-#endif
-
-#ifdef HAVE_SELECT
-
- for splay_each(io_t, io, &io_tree) {
- if(FD_ISSET(io->fd, &writable)) {
- io->cb(io->data, IO_WRITE);
- } else if(FD_ISSET(io->fd, &readable)) {
- io->cb(io->data, IO_READ);
- } else {
- continue;
- }
-
- /*
- There are scenarios in which the callback will remove another io_t from the tree
- (e.g. closing a double connection). Since splay_each does not support that, we
- need to exit the loop if that happens. That's okay, since any remaining events will
- get picked up by the next select() call.
- */
- if(curgen != io_tree.generation) {
- break;
- }
- }
-
-#endif
- }
-
-#else
- /* The offset arithmetic below assumes wait results are zero-based. */
- assert(WSA_WAIT_EVENT_0 == 0);
-
- while(running) {
- struct timeval diff;
- struct timeval *tv = timeout_execute(&diff);
- /* Round up to whole milliseconds (+1) so a sub-ms remainder does not
-    busy-wait; a NULL tv means no pending timeout, so wait forever. */
- DWORD timeout_ms = tv ? (DWORD)(tv->tv_sec * 1000 + tv->tv_usec / 1000 + 1) : WSA_INFINITE;
-
- if(!event_count) {
- /* Nothing registered to wait on; just sleep until the next timeout. */
- Sleep(timeout_ms);
- continue;
- }
-
- /*
- For some reason, Microsoft decided to make the FD_WRITE event edge-triggered instead of level-triggered,
- which is the opposite of what select() does. In practice, that means that if a FD_WRITE event triggers,
- it will never trigger again until a send() returns EWOULDBLOCK. Since the semantics of this event loop
- is that write events are level-triggered (i.e. they continue firing until the socket is full), we need
- to emulate these semantics by making sure we fire each IO_WRITE that is still writeable.
-
- Note that technically FD_CLOSE has the same problem, but it's okay because user code does not rely on
- this event being fired again if ignored.
- */
- unsigned int curgen = io_tree.generation;
-
- for splay_each(io_t, io, &io_tree) {
- /* A zero-byte send() succeeding means the socket is still writable. */
- if(io->flags & IO_WRITE && send(io->fd, NULL, 0, 0) == 0) {
- io->cb(io->data, IO_WRITE);
-
- if(curgen != io_tree.generation) {
- break;
- }
- }
- }
-
- /* WSAWaitForMultipleEvents() can wait on at most
-    WSA_MAXIMUM_WAIT_EVENTS (64) handles at once. */
- if(event_count > WSA_MAXIMUM_WAIT_EVENTS) {
- WSASetLastError(WSA_INVALID_PARAMETER);
- return(false);
- }
-
- /* Flatten the tree into parallel arrays of event handles and their
-    owning io_t's so a wait result index maps back to an io_t. */
- WSAEVENT events[WSA_MAXIMUM_WAIT_EVENTS];
- io_t *io_map[WSA_MAXIMUM_WAIT_EVENTS];
- DWORD event_index = 0;
-
- for splay_each(io_t, io, &io_tree) {
- events[event_index] = io->event;
- io_map[event_index] = io;
- event_index++;
- }
-
- /*
- * If the generation number changes due to event addition
- * or removal by a callback we restart the loop.
- */
- curgen = io_tree.generation;
-
- /* The wait returns only the FIRST signaled event; after servicing it,
-    resume scanning just past that index so every event signaled in this
-    pass gets handled before recomputing timeouts. */
- for(DWORD event_offset = 0; event_offset < event_count;) {
- DWORD result = WSAWaitForMultipleEvents(event_count - event_offset, &events[event_offset], FALSE, timeout_ms, FALSE);
-
- if(result == WSA_WAIT_TIMEOUT) {
- break;
- }
-
- /* Anything outside the valid index range (e.g. WSA_WAIT_FAILED)
-    is a fatal error. */
- if(result >= event_count - event_offset) {
- return false;
- }
-
- /* Look up io in the map by index. */
- event_index = result + event_offset;
- io_t *io = io_map[event_index];
-
- if(io->fd == -1) {
- /* Not backed by a socket; fire the callback with no I/O flags. */
- io->cb(io->data, 0);
-
- if(curgen != io_tree.generation) {
- break;
- }
- } else {
- WSANETWORKEVENTS network_events;
-
- if(WSAEnumNetworkEvents(io->fd, io->event, &network_events) != 0) {
- return(false);
- }
-
- if(network_events.lNetworkEvents & READ_EVENTS) {
- io->cb(io->data, IO_READ);
-
- if(curgen != io_tree.generation) {
- break;
- }
- }
-
- /*
- The fd might be available for write too. However, if we already fired the read callback, that
- callback might have deleted the io (e.g. through terminate_connection()), so we can't fire the
- write callback here. Instead, we loop back and let the writable io loop above handle it.
- */
- }
-
- /* Continue checking the rest of the events. */
- event_offset = event_index + 1;
-
- /* Just poll the next time through. */
- timeout_ms = 0;
- }
- }
-
-#endif
-
- /* Clean shutdown requested via event_exit(). */
- event_deinit();
- return true;
-}
-
-/*
- * Ask event_loop() to stop: clears the `running` flag so the loop exits
- * after finishing its current iteration.
- *
- * NOTE(review): `running` is an ordinary flag — if this can be called from
- * another thread or a signal handler, it may need to be atomic/volatile;
- * confirm against the callers.
- */
-void event_exit(void) {
- running = false;
-}