/*
    event.c -- I/O, timeout and signal event handling
    Copyright (C) 2012-2021 Guus Sliepen <guus@tinc-vpn.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
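/*
   This file implements tinc's event loop. It watches three kinds of events:

   - I/O handles (io_t), kept in io_tree. They are watched either with
     select() on the readfds/writefds sets below, or with WSA event objects
     on Windows; both code paths appear in this file.
   - Timeouts (timeout_t), kept in timeout_tree ordered by expiry time.
   - Signals (signal_t), forwarded from the asynchronous signal handler to
     the loop through a pipe so their callbacks run in normal context.

   A typical caller looks roughly like this (a sketch; on_readable, on_ping,
   conn and sockfd are illustrative names, not part of this file):

       static void on_readable(void *data, int flags) { ... }
       static void on_ping(void *data) { ... }

       io_t io;
       timeout_t ping;

       io_add(&io, on_readable, conn, sockfd, IO_READ);
       timeout_add(&ping, on_ping, conn, &(struct timeval){10, 0});
       event_loop();   // runs until event_exit() is called
*/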
static fd_set readfds;
static fd_set writefds;
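/* The coarse IO_READ/IO_WRITE flags map onto these finer-grained WSA network
   events when running on Windows; see io_set() below. */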
static const long READ_EVENTS = FD_READ | FD_ACCEPT | FD_CLOSE;
static const long WRITE_EVENTS = FD_WRITE | FD_CONNECT;
static DWORD event_count = 0;

/* Current time as seen by the timeout code, and the flag that keeps
   event_loop() running until event_exit() is called. */
static struct timeval now;
static bool running;
static int io_compare(const io_t *a, const io_t *b) {
    if(a->event < b->event) {
        return -1;
    }

    if(a->event > b->event) {
        return 1;
    }

    return 0;
}
static int timeout_compare(const timeout_t *a, const timeout_t *b) {
    struct timeval diff;
    timersub(&a->tv, &b->tv, &diff);

    if(diff.tv_sec < 0) {
        return -1;
    }

    if(diff.tv_sec > 0) {
        return 1;
    }

    if(diff.tv_usec < 0) {
        return -1;
    }

    if(diff.tv_usec > 0) {
        return 1;
    }

    /* Tie-break on the pointers so distinct timeouts with equal expiry
       times still compare unequal in the tree. */
    if(a < b) {
        return -1;
    }

    if(a > b) {
        return 1;
    }

    return 0;
}
static splay_tree_t io_tree = {.compare = (splay_compare_t)io_compare};
static splay_tree_t timeout_tree = {.compare = (splay_compare_t)timeout_compare};
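/* Register an I/O handle with the event loop. Every fd gets a WSA event object
   on Windows; io_add_event() registers a caller-supplied event with no fd
   (fd == -1) instead. The handle is inserted into io_tree so that event_loop()
   can iterate over all watched handles. */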
void io_add(io_t *io, io_cb_t cb, void *data, int fd, int flags) {
    io->fd = fd;
    io->cb = cb;
    io->data = data;
    io->node.data = io;

    if(io->fd != -1) {
        io->event = WSACreateEvent();

        if(io->event == WSA_INVALID_EVENT) {
            abort();
        }
    }

    event_count++;

    io_set(io, flags);

    if(!splay_insert_node(&io_tree, &io->node)) {
        abort();
    }
}
void io_add_event(io_t *io, io_cb_t cb, void *data, WSAEVENT event) {
    io->event = event;
    io_add(io, cb, data, -1, 0);
}
void io_set(io_t *io, int flags) {
    if(flags == io->flags) {
        return;
    }

    io->flags = flags;

    if(io->fd == -1) {
        return;
    }

    if(flags & IO_READ) {
        FD_SET(io->fd, &readfds);
    } else {
        FD_CLR(io->fd, &readfds);
    }

    if(flags & IO_WRITE) {
        FD_SET(io->fd, &writefds);
    } else {
        FD_CLR(io->fd, &writefds);
    }
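    /* On Windows, translate the flags into WSA network events on the
       handle's event object instead of toggling fd_set membership. */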
    long events = 0;

    if(flags & IO_WRITE) {
        events |= WRITE_EVENTS;
    }

    if(flags & IO_READ) {
        events |= READ_EVENTS;
    }

    if(WSAEventSelect(io->fd, io->event, events) != 0) {
        abort();
    }
}
void io_del(io_t *io) {
    io_set(io, 0);

    if(io->fd != -1 && WSACloseEvent(io->event) == FALSE) {
        abort();
    }

    event_count--;

    splay_unlink_node(&io_tree, &io->node);
}
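/* Schedule cb to be called tv after the current time. A timeout fires once;
   its callback may re-arm it with timeout_set(), in which case
   get_time_remaining() leaves it in the tree. */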
void timeout_add(timeout_t *timeout, timeout_cb_t cb, void *data, struct timeval *tv) {
    timeout->cb = cb;
    timeout->data = data;
    timeout->node.data = timeout;

    timeout_set(timeout, tv);
}
void timeout_set(timeout_t *timeout, struct timeval *tv) {
    if(timerisset(&timeout->tv)) {
        splay_unlink_node(&timeout_tree, &timeout->node);
    }

    gettimeofday(&now, NULL);
    timeradd(&now, tv, &timeout->tv);

    if(!splay_insert_node(&timeout_tree, &timeout->node)) {
        abort();
    }
}
void timeout_del(timeout_t *timeout) {
    splay_unlink_node(&timeout_tree, &timeout->node);
    timeout->tv = (struct timeval) {
        0, 0
    };
}
static int signal_compare(const signal_t *a, const signal_t *b) {
    return a->signum - b->signum;
}
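/* Signal handling uses the self-pipe trick: the asynchronous signal handler
   only writes the signal number into pipefd[1], and signalio_handler() reads
   it back from pipefd[0] inside the event loop, where it is safe to run the
   registered callback. */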
static io_t signalio;
static int pipefd[2] = {-1, -1};
static splay_tree_t signal_tree = {.compare = (splay_compare_t)signal_compare};
static void signal_handler(int signum) {
    unsigned char num = signum;
    write(pipefd[1], &num, 1);
}
static void signalio_handler(void *data, int flags) {
    (void)data;
    (void)flags;
    unsigned char signum;

    if(read(pipefd[0], &signum, 1) != 1) {
        return;
    }

    signal_t *sig = splay_search(&signal_tree, &((signal_t) {
        .signum = signum
    }));

    if(sig) {
        sig->cb(sig->data);
    }
}
static void pipe_init(void) {
    if(pipe(pipefd) == 0) {
        io_add(&signalio, signalio_handler, NULL, pipefd[0], IO_READ);
    }
}
void signal_add(signal_t *sig, signal_cb_t cb, void *data, int signum) {
    sig->cb = cb;
    sig->data = data;
    sig->signum = signum;
    sig->node.data = sig;

    if(pipefd[0] == -1) {
        pipe_init();
    }

    signal(sig->signum, signal_handler);

    if(!splay_insert_node(&signal_tree, &sig->node)) {
        abort();
    }
}
void signal_del(signal_t *sig) {
    signal(sig->signum, SIG_DFL);
    splay_unlink_node(&signal_tree, &sig->node);
}
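/* Fire the callbacks of all expired timeouts, then return the time remaining
   until the earliest pending timeout, or NULL if no timeout is pending (in
   which case the caller may block indefinitely). */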
static struct timeval *get_time_remaining(struct timeval *diff) {
    gettimeofday(&now, NULL);
    struct timeval *tv = NULL;

    while(timeout_tree.head) {
        timeout_t *timeout = timeout_tree.head->data;
        timersub(&timeout->tv, &now, diff);

        if(diff->tv_sec < 0) {
            timeout->cb(timeout->data);

            /* Only drop the timeout if its callback did not re-arm it. */
            if(timercmp(&timeout->tv, &now, <)) {
                timeout_del(timeout);
            }
        } else {
            tv = diff;
            break;
        }
    }

    return tv;
}
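/* The main loop: dispatch I/O and timeout callbacks until event_exit() is
   called. Returns false if waiting for events fails. The select()-based path
   comes first; the Windows path based on WSA events follows it. */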
bool event_loop(void) {
    running = true;

    fd_set readable;
    fd_set writable;

    while(running) {
        struct timeval diff;
        struct timeval *tv = get_time_remaining(&diff);
        memcpy(&readable, &readfds, sizeof(readable));
        memcpy(&writable, &writefds, sizeof(writable));

        int fds = 0;

        if(io_tree.tail) {
            io_t *last = io_tree.tail->data;
            fds = last->fd + 1;
        }

        int n = select(fds, &readable, &writable, NULL, tv);

        if(n < 0) {
            if(sockwouldblock(sockerrno)) {
                continue;
            } else {
                return false;
            }
        }

        unsigned int curgen = io_tree.generation;

        for splay_each(io_t, io, &io_tree) {
            if(FD_ISSET(io->fd, &writable)) {
                io->cb(io->data, IO_WRITE);
            } else if(FD_ISSET(io->fd, &readable)) {
                io->cb(io->data, IO_READ);
            } else {
                continue;
            }

            /*
               There are scenarios in which the callback will remove another io_t from the tree
               (e.g. closing a double connection). Since splay_each does not support that, we
               need to exit the loop if that happens. That's okay, since any remaining events will
               get picked up by the next select() call.
             */
            if(curgen != io_tree.generation) {
                break;
            }
        }
    }
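    /*
       Windows code path: instead of select(), wait on the WSA event objects
       created in io_add(). This loop replaces the select() loop above when
       building for Windows.
     */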
    while(running) {
        struct timeval diff;
        struct timeval *tv = get_time_remaining(&diff);
        DWORD timeout_ms = tv ? (DWORD)(tv->tv_sec * 1000 + tv->tv_usec / 1000 + 1) : WSA_INFINITE;

        /*
           For some reason, Microsoft decided to make the FD_WRITE event edge-triggered instead of level-triggered,
           which is the opposite of what select() does. In practice, that means that if a FD_WRITE event triggers,
           it will never trigger again until a send() returns EWOULDBLOCK. Since the semantics of this event loop
           are that write events are level-triggered (i.e. they continue firing until the socket is full), we need
           to emulate these semantics by making sure we fire each IO_WRITE that is still writeable.

           Note that technically FD_CLOSE has the same problem, but it's okay because user code does not rely on
           this event being fired again if ignored.
         */
        unsigned int curgen = io_tree.generation;

        for splay_each(io_t, io, &io_tree) {
            if(io->flags & IO_WRITE && send(io->fd, NULL, 0, 0) == 0) {
                io->cb(io->data, IO_WRITE);

                if(curgen != io_tree.generation) {
                    break;
                }
            }
        }

        if(event_count > WSA_MAXIMUM_WAIT_EVENTS) {
            WSASetLastError(WSA_INVALID_PARAMETER);
            return false;
        }

        WSAEVENT events[WSA_MAXIMUM_WAIT_EVENTS];
        io_t *io_map[WSA_MAXIMUM_WAIT_EVENTS];
        DWORD event_index = 0;

        for splay_each(io_t, io, &io_tree) {
            events[event_index] = io->event;
            io_map[event_index] = io;
            event_index++;
        }

        /*
         * If the generation number changes due to event addition
         * or removal by a callback we restart the loop.
         */
        curgen = io_tree.generation;

        for(DWORD event_offset = 0; event_offset < event_count;) {
            DWORD result = WSAWaitForMultipleEvents(event_count - event_offset, &events[event_offset], FALSE, timeout_ms, FALSE);

            if(result == WSA_WAIT_TIMEOUT) {
                break;
            }

            if(result < WSA_WAIT_EVENT_0 || result >= WSA_WAIT_EVENT_0 + event_count - event_offset) {
                return false;
            }

            /* Look up io in the map by index. */
            event_index = result - WSA_WAIT_EVENT_0 + event_offset;
            io_t *io = io_map[event_index];

            if(io->fd == -1) {
                io->cb(io->data, 0);

                if(curgen != io_tree.generation) {
                    break;
                }
            } else {
                WSANETWORKEVENTS network_events;

                if(WSAEnumNetworkEvents(io->fd, io->event, &network_events) != 0) {
                    return false;
                }

                if(network_events.lNetworkEvents & READ_EVENTS) {
                    io->cb(io->data, IO_READ);

                    if(curgen != io_tree.generation) {
                        break;
                    }
                }

                /*
                   The fd might be available for write too. However, if we already fired the read callback, that
                   callback might have deleted the io (e.g. through terminate_connection()), so we can't fire the
                   write callback here. Instead, we loop back and let the writable io loop above handle it.
                 */
            }

            /* Continue checking the rest of the events. */
            event_offset = event_index + 1;

            /* Just poll the next time through. */
            timeout_ms = 0;
        }
    }

    return true;
}
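/* Make event_loop() return after it finishes its current iteration. */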
void event_exit(void) {
    running = false;
}