/*
    event.c -- I/O, timeout and signal event handling
    Copyright (C) 2012-2021 Guus Sliepen <guus@tinc-vpn.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Interest sets for the POSIX select() backend: which fds are watched for
   readability and writability. */
31 static fd_set readfds;
32 static fd_set writefds;
/* Windows (WSA) backend state: the FD_* network-event masks corresponding
   to IO_READ and IO_WRITE, plus the number of registered event objects.
   NOTE(review): the two backends are presumably separated by platform
   #ifdefs in the full file — those lines are elided from this excerpt. */
34 static const long READ_EVENTS = FD_READ | FD_ACCEPT | FD_CLOSE;
35 static const long WRITE_EVENTS = FD_WRITE | FD_CONNECT;
36 static DWORD event_count = 0;
/* Total order on io_t entries for the io splay tree, keyed on the WSA
   event handle (Windows backend).  NOTE(review): the return statements for
   the less-than, greater-than and equal cases are on lines elided from
   this excerpt — presumably -1 / 1 / 0; confirm against the full source. */
40 static int io_compare(const io_t *a, const io_t *b) {
45 if(a->event < b->event) {
49 if(a->event > b->event) {
/* Total order on timeout_t entries, comparing absolute expiry timestamps
   via their difference (timersub).  Visible code checks the tv_usec part;
   the tv_sec comparisons and all return statements are on lines elided
   from this excerpt. */
57 static int timeout_compare(const timeout_t *a, const timeout_t *b) {
59 timersub(&a->tv, &b->tv, &diff);
69 if(diff.tv_usec < 0) {
73 if(diff.tv_usec > 0) {
/* Global registries: a splay tree of I/O watchers and a splay tree of
   pending timeouts, ordered by the comparators defined above. */
88 static splay_tree_t io_tree = {.compare = (splay_compare_t)io_compare};
89 static splay_tree_t timeout_tree = {.compare = (splay_compare_t)timeout_compare};
/* Register an I/O watcher for fd with callback cb/data and initial flags.
   On the Windows backend a WSA event object is created for the socket;
   the WSA_INVALID_EVENT error path is elided from this excerpt.  Finally
   the watcher is inserted into io_tree (the failure branch of the insert
   is likewise elided — presumably a fatal error on duplicate insert). */
91 void io_add(io_t *io, io_cb_t cb, void *data, int fd, int flags) {
100 io->event = WSACreateEvent();
102 if(io->event == WSA_INVALID_EVENT) {
115 if(!splay_insert_node(&io_tree, &io->node)) {
/* Register a watcher for an externally created WSA event object rather
   than a socket: delegates to io_add() with fd == -1 and no flags.
   NOTE(review): the assignment storing the supplied event handle into the
   io_t is on a line elided from this excerpt — confirm in the full file. */
121 void io_add_event(io_t *io, io_cb_t cb, void *data, WSAEVENT event) {
123 io_add(io, cb, data, -1, 0);
/* Change the conditions (IO_READ / IO_WRITE) a watcher waits for.
   Returns early when the flags did not change.  The POSIX and Windows
   paths below are presumably separated by #ifdefs in the full file. */
127 void io_set(io_t *io, int flags) {
128 if(flags == io->flags) {
/* POSIX/select() backend: keep the fd's membership in the interest
   sets in sync with the requested flags. */
140 if(flags & IO_READ) {
141 FD_SET(io->fd, &readfds);
143 FD_CLR(io->fd, &readfds);
146 if(flags & IO_WRITE) {
147 FD_SET(io->fd, &writefds);
149 FD_CLR(io->fd, &writefds);
/* Windows/WSA backend: translate the flags into an FD_* event mask and
   re-arm the socket's event object (error handling elided). */
155 if(flags & IO_WRITE) {
156 events |= WRITE_EVENTS;
159 if(flags & IO_READ) {
160 events |= READ_EVENTS;
163 if(WSAEventSelect(io->fd, io->event, events) != 0) {
/* Unregister an I/O watcher: close its WSA event object — but only when
   one was created for a real fd (io_add_event() watchers use fd == -1 and
   an externally owned handle) — then unlink it from io_tree. */
170 void io_del(io_t *io) {
178 if(io->fd != -1 && WSACloseEvent(io->event) == FALSE) {
185 splay_unlink_node(&io_tree, &io->node);
/* Register a new timeout firing cb(data) after interval tv: record the
   callback, point the tree node back at its owner, and arm it via
   timeout_set(). */
189 void timeout_add(timeout_t *timeout, timeout_cb_t cb, void *data, struct timeval *tv) {
191 timeout->data = data;
192 timeout->node.data = timeout;
194 timeout_set(timeout, tv);
/* (Re)arm a timeout to fire tv from now.  If it is already armed (its
   expiry time is set), unlink it first so it can be re-inserted at the
   position matching its new expiry.  The absolute expiry stored in
   timeout->tv is now + tv.  The failure branch of the insert is elided
   from this excerpt. */
197 void timeout_set(timeout_t *timeout, struct timeval *tv) {
198 if(timerisset(&timeout->tv)) {
199 splay_unlink_node(&timeout_tree, &timeout->node);
203 gettimeofday(&now, NULL);
206 timeradd(&now, tv, &timeout->tv);
208 if(!splay_insert_node(&timeout_tree, &timeout->node)) {
/* Cancel a pending timeout: unlink it from the tree and zero its expiry
   time — a cleared tv marks it as disarmed (cf. the timerisset() check in
   timeout_set()). */
213 void timeout_del(timeout_t *timeout) {
218 splay_unlink_node(&timeout_tree, &timeout->node);
220 timeout->tv = (struct timeval) {
/* Order signal watchers by signal number.  Subtraction is safe here:
   signal numbers are small non-negative integers, so no overflow. */
226 static int signal_compare(const signal_t *a, const signal_t *b) {
227 return a->signum - b->signum;
/* Self-pipe trick state: the async signal handler writes the signal
   number into pipefd[1], and the signalio watcher on pipefd[0] lets the
   event loop handle signals synchronously.  pipefd == {-1, -1} means the
   pipe has not been created yet (see the lazy init in signal_add()). */
230 static io_t signalio;
231 static int pipefd[2] = {-1, -1};
232 static splay_tree_t signal_tree = {.compare = (splay_compare_t)signal_compare};
/* Async-signal-safe handler: forward the signal number (truncated to one
   byte) through the self-pipe.  The write() result is ignored —
   presumably deliberate, since nothing safe can be done about a full or
   broken pipe from inside a signal handler. */
234 static void signal_handler(int signum) {
235 unsigned char num = signum;
236 write(pipefd[1], &num, 1);
/* Event-loop side of the self-pipe: read one signal number from the read
   end (bail out if the 1-byte read fails) and look up the registered
   signal_t by number using a compound-literal search key.  The dispatch
   to the found watcher's callback is on lines elided from this excerpt. */
239 static void signalio_handler(void *data, int flags) {
242 unsigned char signum;
244 if(read(pipefd[0], &signum, 1) != 1) {
248 signal_t *sig = splay_search(&signal_tree, &((signal_t) {
/* Create the self-pipe (the pipe/socketpair creation call is elided from
   this excerpt) and register signalio to watch its read end for IO_READ. */
257 static void pipe_init(void) {
259 io_add(&signalio, signalio_handler, NULL, pipefd[0], IO_READ);
/* Register a handler for signum: lazily create the self-pipe on first use
   (pipefd[0] == -1), install the async forwarder via signal(), and record
   the watcher in signal_tree.  The failure branch of the insert is elided
   from this excerpt. */
263 void signal_add(signal_t *sig, signal_cb_t cb, void *data, int signum) {
270 sig->signum = signum;
271 sig->node.data = sig;
273 if(pipefd[0] == -1) {
277 signal(sig->signum, signal_handler);
279 if(!splay_insert_node(&signal_tree, &sig->node)) {
/* Unregister a signal watcher: restore the default disposition for its
   signal number and unlink it from signal_tree. */
284 void signal_del(signal_t *sig) {
289 signal(sig->signum, SIG_DFL);
291 splay_unlink_node(&signal_tree, &sig->node);
/* Fire expired timeouts and compute how long the event loop may block.
   Walks timeout_tree from its earliest entry: an already-expired timeout
   gets its callback invoked, then is removed unless the callback re-armed
   it to a future time.  Returns NULL ("block forever") when no timeout is
   pending; otherwise presumably returns diff holding the time until the
   earliest pending timeout — the not-yet-expired branch that sets tv and
   breaks out of the loop is elided from this excerpt. */
296 static struct timeval *get_time_remaining(struct timeval *diff) {
297 gettimeofday(&now, NULL);
298 struct timeval *tv = NULL;
300 while(timeout_tree.head) {
301 timeout_t *timeout = timeout_tree.head->data;
302 timersub(&timeout->tv, &now, diff);
/* Negative remaining seconds: the timeout has expired — run it now. */
304 if(diff->tv_sec < 0) {
305 timeout->cb(timeout->data);
/* Only delete it if the callback did not reschedule it into the future. */
307 if(timercmp(&timeout->tv, &now, <)) {
308 timeout_del(timeout);
/* Main event loop: repeatedly waits for I/O readiness or the next timeout
   and dispatches callbacks.  Two backends are interleaved in this excerpt
   — a POSIX select() loop and a Windows WSAWaitForMultipleEvents() loop —
   presumably separated by #ifdefs in the full file.  The loop condition,
   return paths and several error branches are elided here. */
319 bool event_loop(void) {
/* --- select() backend: fire due timeouts, get the block interval. --- */
328 struct timeval *tv = get_time_remaining(&diff);
/* select() mutates its fd_sets, so operate on copies of the interest
   sets rather than the registries themselves. */
329 memcpy(&readable, &readfds, sizeof(readable));
330 memcpy(&writable, &writefds, sizeof(writable));
/* The tree's last element has the highest fd — used for select()'s
   nfds argument (the fds computation itself is elided here). */
335 io_t *last = io_tree.tail->data;
339 int n = select(fds, &readable, &writable, NULL, tv);
/* Transient failures (interrupted/would-block) just restart the loop. */
342 if(sockwouldblock(sockerrno)) {
/* Snapshot the tree generation so callback-driven add/remove of
   watchers during iteration can be detected below. */
353 unsigned int curgen = io_tree.generation;
355 for splay_each(io_t, io, &io_tree) {
/* Write readiness takes priority; otherwise fire the read callback. */
356 if(FD_ISSET(io->fd, &writable)) {
357 io->cb(io->data, IO_WRITE);
358 } else if(FD_ISSET(io->fd, &readable)) {
359 io->cb(io->data, IO_READ);
365 There are scenarios in which the callback will remove another io_t from the tree
366 (e.g. closing a double connection). Since splay_each does not support that, we
367 need to exit the loop if that happens. That's okay, since any remaining events will
368 get picked up by the next select() call.
370 if(curgen != io_tree.generation) {
/* --- Windows/WSA backend: same timeout computation, then convert the
   remaining time to milliseconds.  Rounds up by 1 ms so we never wake
   before the deadline; NULL maps to WSA_INFINITE (wait forever). --- */
380 struct timeval *tv = get_time_remaining(&diff);
381 DWORD timeout_ms = tv ? (DWORD)(tv->tv_sec * 1000 + tv->tv_usec / 1000 + 1) : WSA_INFINITE;
389 For some reason, Microsoft decided to make the FD_WRITE event edge-triggered instead of level-triggered,
390 which is the opposite of what select() does. In practice, that means that if a FD_WRITE event triggers,
391 it will never trigger again until a send() returns EWOULDBLOCK. Since the semantics of this event loop
392 is that write events are level-triggered (i.e. they continue firing until the socket is full), we need
393 to emulate these semantics by making sure we fire each IO_WRITE that is still writeable.
395 Note that technically FD_CLOSE has the same problem, but it's okay because user code does not rely on
396 this event being fired again if ignored.
398 unsigned int curgen = io_tree.generation;
400 for splay_each(io_t, io, &io_tree) {
/* A zero-byte send() succeeding means the socket is still writable,
   so emulate a level-triggered write event. */
401 if(io->flags & IO_WRITE && send(io->fd, NULL, 0, 0) == 0) {
402 io->cb(io->data, IO_WRITE);
404 if(curgen != io_tree.generation) {
/* WSAWaitForMultipleEvents() is limited to WSA_MAXIMUM_WAIT_EVENTS
   handles; fail explicitly rather than silently ignore some sockets. */
410 if(event_count > WSA_MAXIMUM_WAIT_EVENTS) {
411 WSASetLastError(WSA_INVALID_PARAMETER);
/* Flatten the io tree into parallel arrays: event handles to wait on,
   and the owning io_t for each index. */
415 WSAEVENT events[WSA_MAXIMUM_WAIT_EVENTS];
416 io_t *io_map[WSA_MAXIMUM_WAIT_EVENTS];
417 DWORD event_index = 0;
419 for splay_each(io_t, io, &io_tree) {
420 events[event_index] = io->event;
421 io_map[event_index] = io;
426 * If the generation number changes due to event addition
427 * or removal by a callback we restart the loop.
429 curgen = io_tree.generation;
/* Each wait call reports only the first signalled handle, so sweep the
   array in slices, resuming just past the last reported index. */
431 for(DWORD event_offset = 0; event_offset < event_count;) {
432 DWORD result = WSAWaitForMultipleEvents(event_count - event_offset, &events[event_offset], FALSE, timeout_ms, FALSE);
434 if(result == WSA_WAIT_TIMEOUT) {
/* Any result outside the signalled-handle range is an error (the
   error-handling branch is elided from this excerpt). */
438 if(result < WSA_WAIT_EVENT_0 || result >= WSA_WAIT_EVENT_0 + event_count - event_offset) {
442 /* Look up io in the map by index. */
443 event_index = result - WSA_WAIT_EVENT_0 + event_offset;
444 io_t *io = io_map[event_index];
/* A callback from an earlier iteration may have mutated the tree,
   leaving events[]/io_map[] stale — restart the whole loop. */
449 if(curgen != io_tree.generation) {
453 WSANETWORKEVENTS network_events;
/* Fetch (and reset) the FD_* events that fired on this socket. */
455 if(WSAEnumNetworkEvents(io->fd, io->event, &network_events) != 0) {
459 if(network_events.lNetworkEvents & READ_EVENTS) {
460 io->cb(io->data, IO_READ);
462 if(curgen != io_tree.generation) {
468 The fd might be available for write too. However, if we already fired the read callback, that
469 callback might have deleted the io (e.g. through terminate_connection()), so we can't fire the
470 write callback here. Instead, we loop back and let the writable io loop above handle it.
474 /* Continue checking the rest of the events. */
475 event_offset = event_index + 1;
477 /* Just poll the next time through. */
487 void event_exit(void) {