2 event.c -- I/O, timeout and signal event handling
3 Copyright (C) 2012-2013 Guus Sliepen <guus@tinc-vpn.org>
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License along
16 with this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
/* File-scope state for the event loop.
   NOTE(review): these lines appear to come from two different #ifdef
   branches (POSIX select() vs. Winsock events); the intervening
   preprocessor lines are not visible in this chunk — confirm against
   the full file. */
/* Master fd sets mirroring every registered io_t's IO_READ/IO_WRITE
   flags; copied before each select() call (see event_loop). */
31 static fd_set readfds;
32 static fd_set writefds;
/* Winsock network-event masks mapped onto this loop's level-triggered
   IO_READ / IO_WRITE semantics (used with WSAEventSelect). */
34 static const long READ_EVENTS = FD_READ | FD_ACCEPT | FD_CLOSE;
35 static const long WRITE_EVENTS = FD_WRITE | FD_CONNECT;
/* Count of registered WSA events; bounded by WSA_MAXIMUM_WAIT_EVENTS
   (checked in event_loop). */
36 static DWORD event_count = 0;
/* Ordering function for io_tree.
   Visible here: io_t structures are ordered by their event handle
   (Windows path). NOTE(review): interior lines (orig 41-48) are
   missing from this chunk — the POSIX branch presumably compares
   fds instead; confirm against the full file. */
40 static int io_compare(const io_t *a, const io_t *b) {
45 if(a->event < b->event) {
49 if(a->event > b->event) {
/* Ordering function for timeout_tree: earlier absolute expiry times
   sort first, so the tree head is always the next timeout to fire.
   Computes tv difference with timersub() and inspects its sign.
   NOTE(review): the tv_sec comparison lines (orig 60-68) are missing
   from this chunk; only the tv_usec tie-break is visible. */
57 static int timeout_compare(const timeout_t *a, const timeout_t *b) {
59 timersub(&a->tv, &b->tv, &diff);
69 if(diff.tv_usec < 0) {
73 if(diff.tv_usec > 0) {
/* Splay trees holding all registered I/O sources and pending timeouts,
   ordered by the comparators above. Designated initializers leave the
   rest of the tree state zeroed. */
88 static splay_tree_t io_tree = {.compare = (splay_compare_t)io_compare};
89 static splay_tree_t timeout_tree = {.compare = (splay_compare_t)timeout_compare};
/* Register an I/O source: store cb/data/fd (on lines not visible in
   this chunk), create its Windows event object, and insert it into
   io_tree. A duplicate insert (splay_insert_node returning false) is
   treated as a fatal programming error — presumably aborts; the
   handling lines (orig 116+) are not visible here. */
91 void io_add(io_t *io, io_cb_t cb, void *data, int fd, int flags) {
100 io->event = WSACreateEvent();
102 if(io->event == WSA_INVALID_EVENT) {
115 if(!splay_insert_node(&io_tree, &io->node)) {
/* Register a pre-existing WSAEVENT (no socket): delegates to io_add()
   with fd = -1 and no IO_READ/IO_WRITE flags, then (on a line not
   visible here, orig 124) presumably stores the caller's event handle
   in io->event — confirm against the full file. */
121 void io_add_event(io_t *io, io_cb_t cb, void *data, WSAEVENT event) {
123 io_add(io, cb, data, -1, 0);
/* Change the interest set (IO_READ/IO_WRITE) of a registered io_t.
   No-op when flags are unchanged. NOTE(review): the two halves below
   belong to different #ifdef branches (POSIX fd_set bookkeeping vs.
   WSAEventSelect re-registration); the preprocessor lines are missing
   from this chunk. */
127 void io_set(io_t *io, int flags) {
128 if(flags == io->flags) {
/* POSIX path: keep the master fd sets in sync with the new flags. */
140 if(flags & IO_READ) {
141 FD_SET(io->fd, &readfds);
143 FD_CLR(io->fd, &readfds);
146 if(flags & IO_WRITE) {
147 FD_SET(io->fd, &writefds);
149 FD_CLR(io->fd, &writefds);
/* Windows path: rebuild the lNetworkEvents mask and re-register the
   socket's event object. */
155 if(flags & IO_WRITE) {
156 events |= WRITE_EVENTS;
159 if(flags & IO_READ) {
160 events |= READ_EVENTS;
/* Non-zero return means WSAEventSelect failed — error handling lines
   (orig 164+) are not visible here. */
163 if(WSAEventSelect(io->fd, io->event, events) != 0) {
/* Unregister an I/O source: close its Windows event object (only when
   it owns a real fd; io_add_event() entries use fd == -1 and a
   caller-owned event) and unlink it from io_tree. Clearing of flags/
   fd-set state happens on lines not visible in this chunk. */
170 void io_del(io_t *io) {
178 if(io->fd != -1 && WSACloseEvent(io->event) == FALSE) {
185 splay_unlink_node(&io_tree, &io->node);
/* Register a new timeout: store the callback payload, point the splay
   node back at its owner, then delegate scheduling (absolute expiry
   computation + tree insertion) to timeout_set(). tv is a relative
   interval. */
189 void timeout_add(timeout_t *timeout, timeout_cb_t cb, void *data, struct timeval *tv) {
191 timeout->data = data;
192 timeout->node.data = timeout;
194 timeout_set(timeout, tv);
/* (Re)schedule a timeout tv (relative) from now. A timeout already in
   the tree — detected by a non-zero absolute expiry (timerisset) — is
   unlinked first, since changing the key of a linked splay node would
   corrupt the tree ordering. Duplicate insertion is treated as fatal;
   the handling lines (orig 209+) are not visible in this chunk. */
197 void timeout_set(timeout_t *timeout, struct timeval *tv) {
198 if(timerisset(&timeout->tv)) {
199 splay_unlink_node(&timeout_tree, &timeout->node);
/* Convert the relative interval into an absolute expiry time. */
203 gettimeofday(&now, NULL);
206 timeradd(&now, tv, &timeout->tv);
208 if(!splay_insert_node(&timeout_tree, &timeout->node)) {
/* Cancel a timeout: unlink it and zero its expiry time, so that
   timerisset() in timeout_set() sees it as "not scheduled" and it can
   safely be re-added later. A guard for already-cancelled timeouts
   (orig 214-217) is not visible in this chunk. */
213 void timeout_del(timeout_t *timeout) {
218 splay_unlink_node(&timeout_tree, &timeout->node);
220 timeout->tv = (struct timeval) {
226 static int signal_compare(const signal_t *a, const signal_t *b) {
227 return a->signum - b->signum;
/* Self-pipe machinery for signal handling: the handler writes the
   signal number into pipefd[1]; signalio registers pipefd[0] with the
   event loop so signals are dispatched synchronously. pipefd[0] == -1
   means the pipe has not been created yet (see signal_add). */
230 static io_t signalio;
231 static int pipefd[2] = {-1, -1};
232 static splay_tree_t signal_tree = {.compare = (splay_compare_t)signal_compare};
234 static void signal_handler(int signum) {
235 unsigned char num = signum;
236 write(pipefd[1], &num, 1);
/* IO_READ callback on the signal pipe's read end: drain one signal
   number per invocation, look up the matching signal_t in signal_tree
   (via a compound-literal search key), and — on lines not visible in
   this chunk (orig 249+) — presumably invoke its callback. A short
   read simply returns without dispatching. */
239 static void signalio_handler(void *data, int flags) {
240 unsigned char signum;
242 if(read(pipefd[0], &signum, 1) != 1) {
246 signal_t *sig = splay_search(&signal_tree, &((signal_t) {
/* Lazily create the self-pipe (the pipe() call itself is on a line
   not visible in this chunk, orig 256) and register its read end with
   the event loop so pending signals wake up select()/the wait call. */
255 static void pipe_init(void) {
257 io_add(&signalio, signalio_handler, NULL, pipefd[0], IO_READ);
/* Register a callback for signum: store the payload, create the
   self-pipe on first use (pipefd[0] == -1), install the low-level
   handler with signal(), and insert into signal_tree. Duplicate
   registration for the same signum is treated as fatal; the handling
   lines (orig 278+) are not visible in this chunk. */
261 void signal_add(signal_t *sig, signal_cb_t cb, void *data, int signum) {
268 sig->signum = signum;
269 sig->node.data = sig;
271 if(pipefd[0] == -1) {
/* pipe_init() presumably called here (orig 272-273) — not visible. */
275 signal(sig->signum, signal_handler);
277 if(!splay_insert_node(&signal_tree, &sig->node)) {
/* Unregister a signal callback: restore the default disposition with
   signal(signum, SIG_DFL) and unlink the entry from signal_tree.
   Note: the self-pipe is left in place for any remaining signals. */
282 void signal_del(signal_t *sig) {
287 signal(sig->signum, SIG_DFL);
289 splay_unlink_node(&signal_tree, &sig->node);
/* Compute the interval until the earliest pending timeout, firing any
   timeouts that have already expired along the way. Returns a pointer
   into *diff, or NULL when no timeout is pending (tv stays NULL if
   the tree is empty — the return statement itself is on lines not
   visible in this chunk). Callers pass the result straight to
   select(), where NULL means "block indefinitely". */
294 static struct timeval *get_time_remaining(struct timeval *diff) {
295 gettimeofday(&now, NULL);
296 struct timeval *tv = NULL;
298 while(timeout_tree.head) {
/* Head of the splay tree is always the soonest expiry (see
   timeout_compare). */
299 timeout_t *timeout = timeout_tree.head->data;
300 timersub(&timeout->tv, &now, diff);
302 if(diff->tv_sec < 0) {
/* Already expired: fire the callback now. The callback may itself
   reschedule via timeout_set(). */
303 timeout->cb(timeout->data);
/* If the callback did not push the expiry into the future, remove the
   timeout so this loop cannot spin on it forever. */
305 if(timercmp(&timeout->tv, &now, <)) {
306 timeout_del(timeout);
/* Main event loop. NOTE(review): this chunk interleaves two #ifdef
   branches whose preprocessor lines are not visible — a POSIX
   select() implementation (orig ~326-370) and a Windows
   WSAWaitForMultipleEvents implementation (orig ~378 onward). Confirm
   the exact branch boundaries against the full file. */
317 bool event_loop(void) {
/* --- POSIX path: select() over the registered fd sets --- */
/* Fire expired timeouts and get the select() timeout in one call;
   NULL tv means block until an fd is ready. */
326 struct timeval *tv = get_time_remaining(&diff);
/* select() mutates its fd_set arguments, so work on copies. */
327 memcpy(&readable, &readfds, sizeof(readable));
328 memcpy(&writable, &writefds, sizeof(writable));
/* Highest fd is at the tree's tail (io_compare ordering); used to
   derive select()'s nfds argument. */
333 io_t *last = io_tree.tail->data;
337 int n = select(fds, &readable, &writable, NULL, tv);
/* Transient errors (e.g. EINTR-like / would-block) just restart the
   loop; the surrounding error-handling lines are not visible here. */
340 if(sockwouldblock(sockerrno)) {
/* Snapshot the tree generation so we can detect structural changes
   made by callbacks while iterating. */
351 unsigned int curgen = io_tree.generation;
353 for splay_each(io_t, io, &io_tree) {
/* Write takes priority over read for a given io; at most one callback
   fires per io per pass. */
354 if(FD_ISSET(io->fd, &writable)) {
355 io->cb(io->data, IO_WRITE);
356 } else if(FD_ISSET(io->fd, &readable)) {
357 io->cb(io->data, IO_READ);
363 There are scenarios in which the callback will remove another io_t from the tree
364 (e.g. closing a double connection). Since splay_each does not support that, we
365 need to exit the loop if that happens. That's okay, since any remaining events will
366 get picked up by the next select() call.
368 if(curgen != io_tree.generation) {
/* --- Windows path: WSA event objects --- */
378 struct timeval *tv = get_time_remaining(&diff);
/* Round up (+1 ms) so we never busy-wait by sleeping slightly short
   of the next timeout. */
379 DWORD timeout_ms = tv ? (tv->tv_sec * 1000 + tv->tv_usec / 1000 + 1) : WSA_INFINITE;
387 For some reason, Microsoft decided to make the FD_WRITE event edge-triggered instead of level-triggered,
388 which is the opposite of what select() does. In practice, that means that if a FD_WRITE event triggers,
389 it will never trigger again until a send() returns EWOULDBLOCK. Since the semantics of this event loop
390 is that write events are level-triggered (i.e. they continue firing until the socket is full), we need
391 to emulate these semantics by making sure we fire each IO_WRITE that is still writeable.
393 Note that technically FD_CLOSE has the same problem, but it's okay because user code does not rely on
394 this event being fired again if ignored.
396 unsigned int curgen = io_tree.generation;
398 for splay_each(io_t, io, &io_tree) {
/* Zero-byte send() succeeds iff the socket is currently writable —
   the level-triggered write emulation described above. */
399 if(io->flags & IO_WRITE && send(io->fd, NULL, 0, 0) == 0) {
400 io->cb(io->data, IO_WRITE);
402 if(curgen != io_tree.generation) {
/* WSAWaitForMultipleEvents cannot take more handles than this. */
408 if(event_count > WSA_MAXIMUM_WAIT_EVENTS) {
409 WSASetLastError(WSA_INVALID_PARAMETER);
/* Flatten the tree into parallel arrays: event handles for the wait
   call, and the owning io_t for each index. */
413 WSAEVENT events[WSA_MAXIMUM_WAIT_EVENTS];
414 io_t *io_map[WSA_MAXIMUM_WAIT_EVENTS];
415 DWORD event_index = 0;
417 for splay_each(io_t, io, &io_tree) {
418 events[event_index] = io->event;
419 io_map[event_index] = io;
424 * If the generation number changes due to event addition
425 * or removal by a callback we restart the loop.
427 curgen = io_tree.generation;
/* Service every signalled event in this batch: each wait call returns
   the lowest signalled index, so advance event_offset past it and
   wait again on the remainder. */
429 for(DWORD event_offset = 0; event_offset < event_count;) {
430 DWORD result = WSAWaitForMultipleEvents(event_count - event_offset, &events[event_offset], FALSE, timeout_ms, FALSE);
432 if(result == WSA_WAIT_TIMEOUT) {
/* Any other out-of-range result is a wait failure. */
436 if(result < WSA_WAIT_EVENT_0 || result >= WSA_WAIT_EVENT_0 + event_count - event_offset) {
440 /* Look up io in the map by index. */
441 event_index = result - WSA_WAIT_EVENT_0 + event_offset;
442 io_t *io = io_map[event_index];
/* A callback (e.g. for a plain WSAEVENT io) may have mutated the
   tree; io_map is then stale, so restart the outer loop. */
447 if(curgen != io_tree.generation) {
451 WSANETWORKEVENTS network_events;
/* Also resets the event object so it can signal again. */
453 if(WSAEnumNetworkEvents(io->fd, io->event, &network_events) != 0) {
457 if(network_events.lNetworkEvents & READ_EVENTS) {
458 io->cb(io->data, IO_READ);
460 if(curgen != io_tree.generation) {
466 The fd might be available for write too. However, if we already fired the read callback, that
467 callback might have deleted the io (e.g. through terminate_connection()), so we can't fire the
468 write callback here. Instead, we loop back and let the writable io loop above handle it.
472 /* Continue checking the rest of the events. */
473 event_offset = event_index + 1;
475 /* Just poll the next time through. */
485 void event_exit(void) {