2 Unix SMB/CIFS implementation.
4 main select loop and event handling - epoll implementation
6 Copyright (C) Andrew Tridgell 2003-2005
7 Copyright (C) Stefan Metzmacher 2005-2009
9 ** NOTE! The following LGPL license applies to the tevent
10 ** library. This does NOT imply that all of Samba is released
13 This library is free software; you can redistribute it and/or
14 modify it under the terms of the GNU Lesser General Public
15 License as published by the Free Software Foundation; either
16 version 3 of the License, or (at your option) any later version.
18 This library is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 Lesser General Public License for more details.
23 You should have received a copy of the GNU Lesser General Public
24 License along with this library; if not, see <http://www.gnu.org/licenses/>.
28 #include "system/filesys.h"
29 #include "system/select.h"
31 #include "tevent_internal.h"
32 #include "tevent_util.h"
/*
 * Per-backend private state, hung off tevent_context->additional_data.
 * NOTE(review): this extract is truncated — members referenced elsewhere
 * in this file (epoll_fd, pid, panic_state) are not visible here.
 */
34 struct epoll_event_context {
35 /* a pointer back to the generic event_context */
36 struct tevent_context *ev;
38 /* when using epoll this is the handle from epoll_create */
/* when true, epoll_panic() must replay the failed call via the fallback */
43 bool panic_force_replay;
/* optional fallback invoked by epoll_panic(); NULL means abort() */
45 bool (*panic_fallback)(struct tevent_context *ev, bool replay);
/*
 * Test-only instrumentation: when built with TEST_PANIC_FALLBACK, the
 * epoll_create/epoll_ctl/epoll_wait calls below are redirected (via the
 * #defines at the end of this section) to wrappers that randomly fail
 * 50% of the time whenever a panic_fallback is registered, so the panic
 * and fallback paths can be exercised.
 */
48 #ifdef TEST_PANIC_FALLBACK
/* epoll_create(2) wrapper: passes through when no fallback is set */
50 static int epoll_create_panic_fallback(struct epoll_event_context *epoll_ev,
53 if (epoll_ev->panic_fallback == NULL) {
54 return epoll_create(size);
57 /* 50% of the time, fail... */
58 if ((random() % 2) == 0) {
63 return epoll_create(size);
/* epoll_ctl(2) wrapper: same pass-through / random-failure scheme */
66 static int epoll_ctl_panic_fallback(struct epoll_event_context *epoll_ev,
67 int epfd, int op, int fd,
68 struct epoll_event *event)
70 if (epoll_ev->panic_fallback == NULL) {
71 return epoll_ctl(epfd, op, fd, event);
74 /* 50% of the time, fail... */
75 if ((random() % 2) == 0) {
80 return epoll_ctl(epfd, op, fd, event);
/* epoll_wait(2) wrapper: same pass-through / random-failure scheme */
83 static int epoll_wait_panic_fallback(struct epoll_event_context *epoll_ev,
85 struct epoll_event *events,
89 if (epoll_ev->panic_fallback == NULL) {
90 return epoll_wait(epfd, events, maxevents, timeout);
93 /* 50% of the time, fail... */
94 if ((random() % 2) == 0) {
99 return epoll_wait(epfd, events, maxevents, timeout);
/*
 * Redirect the real syscall names to the wrappers above for the rest of
 * this translation unit; every caller must have `epoll_ev` in scope.
 */
102 #define epoll_create(_size) \
103 epoll_create_panic_fallback(epoll_ev, _size)
104 #define epoll_ctl(_epfd, _op, _fd, _event) \
105 epoll_ctl_panic_fallback(epoll_ev,_epfd, _op, _fd, _event)
106 #define epoll_wait(_epfd, _events, _maxevents, _timeout) \
107 epoll_wait_panic_fallback(epoll_ev, _epfd, _events, _maxevents, _timeout)
/*
111 called to set the panic fallback function.
 *
 * Returns via the epoll_event_context stored in ev->additional_data;
 * fails (early return, truncated here) when no epoll backend state is
 * attached or it is not of the expected talloc type.
 */
113 _PRIVATE_ bool tevent_epoll_set_panic_fallback(struct tevent_context *ev,
114 bool (*panic_fallback)(struct tevent_context *ev,
117 struct epoll_event_context *epoll_ev;
/* no backend state attached: nothing to configure */
119 if (ev->additional_data == NULL) {
123 epoll_ev = talloc_get_type(ev->additional_data,
124 struct epoll_event_context);
125 if (epoll_ev == NULL) {
128 epoll_ev->panic_fallback = panic_fallback;
/*
133 called when a epoll call fails
 *
 * Records the panic in *panic_state (if the caller registered one),
 * frees the backend state, and then either aborts (no fallback
 * registered, or the fallback itself fails) or hands control to the
 * registered panic_fallback, optionally forcing a replay of the
 * failed operation.
 */
135 static void epoll_panic(struct epoll_event_context *epoll_ev,
136 const char *reason, bool replay)
138 struct tevent_context *ev = epoll_ev->ev;
139 bool (*panic_fallback)(struct tevent_context *ev, bool replay);
/* copy before TALLOC_FREE below destroys epoll_ev */
141 panic_fallback = epoll_ev->panic_fallback;
/* tell the caller (e.g. a destructor mid-operation) that we panicked */
143 if (epoll_ev->panic_state != NULL) {
144 *epoll_ev->panic_state = true;
147 if (epoll_ev->panic_force_replay) {
/* this frees epoll_ev itself; do not touch it afterwards */
151 TALLOC_FREE(ev->additional_data);
153 if (panic_fallback == NULL) {
154 tevent_debug(ev, TEVENT_DEBUG_FATAL,
155 "%s (%s) replay[%u] - calling abort()\n",
156 reason, strerror(errno), (unsigned)replay);
160 tevent_debug(ev, TEVENT_DEBUG_WARNING,
161 "%s (%s) replay[%u] - calling panic_fallback\n",
162 reason, strerror(errno), (unsigned)replay);
164 if (!panic_fallback(ev, replay)) {
165 /* Fallback failed. */
166 tevent_debug(ev, TEVENT_DEBUG_FATAL,
167 "%s (%s) replay[%u] - calling abort()\n",
168 reason, strerror(errno), (unsigned)replay);
/*
174 map from TEVENT_FD_* to EPOLLIN/EPOLLOUT
 *
 * EPOLLERR/EPOLLHUP are added for both directions; epoll reports them
 * regardless, and the loop below filters them to match select() semantics.
 */
176 static uint32_t epoll_map_flags(uint16_t flags)
179 if (flags & TEVENT_FD_READ) ret |= (EPOLLIN | EPOLLERR | EPOLLHUP);
180 if (flags & TEVENT_FD_WRITE) ret |= (EPOLLOUT | EPOLLERR | EPOLLHUP);
/*
 * talloc destructor for epoll_event_context: close the epoll handle and
 * mark it invalid so later calls see epoll_fd == -1 and bail out.
 */
187 static int epoll_ctx_destructor(struct epoll_event_context *epoll_ev)
189 close(epoll_ev->epoll_fd);
190 epoll_ev->epoll_fd = -1;
/*
 * Create the epoll handle for this context.
 *
 * The size hint (64) is historical; modern kernels ignore it.  Records
 * the creating pid so epoll_check_reopen() can detect fork(), and
 * installs the destructor that closes the fd.  Failure to set
 * close-on-exec is only a warning (fd may leak to children).
 */
197 static int epoll_init_ctx(struct epoll_event_context *epoll_ev)
199 epoll_ev->epoll_fd = epoll_create(64);
200 if (epoll_ev->epoll_fd == -1) {
201 tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
202 "Failed to create epoll handle.\n");
206 if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) {
207 tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
208 "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
211 epoll_ev->pid = getpid();
212 talloc_set_destructor(epoll_ev, epoll_ctx_destructor);
/* forward declaration: needed by epoll_check_reopen() below */
217 static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde);
/*
220 reopen the epoll handle when our pid changes
221 see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for an
222 demonstration of why this is needed
 *
 * An epoll fd is shared with the parent after fork(), so a forked child
 * must close it and rebuild its own: recreate the handle, then re-add
 * every fde on the context's fd_events list.  Any panic during re-add is
 * propagated to the caller's panic_state flag.
 */
224 static void epoll_check_reopen(struct epoll_event_context *epoll_ev)
226 struct tevent_fd *fde;
/* preserve the caller's flag; we install our own during the re-add loop */
227 bool *caller_panic_state = epoll_ev->panic_state;
228 bool panic_triggered = false;
/* same process as the one that created the handle: nothing to do */
230 if (epoll_ev->pid == getpid()) {
234 close(epoll_ev->epoll_fd);
235 epoll_ev->epoll_fd = epoll_create(64);
236 if (epoll_ev->epoll_fd == -1) {
237 epoll_panic(epoll_ev, "epoll_create() failed", false);
241 if (!ev_set_close_on_exec(epoll_ev->epoll_fd)) {
242 tevent_debug(epoll_ev->ev, TEVENT_DEBUG_WARNING,
243 "Failed to set close-on-exec, file descriptor may be leaked to children.\n");
246 epoll_ev->pid = getpid();
247 epoll_ev->panic_state = &panic_triggered;
248 for (fde=epoll_ev->ev->fd_events;fde;fde=fde->next) {
249 epoll_add_event(epoll_ev, fde);
250 if (panic_triggered) {
251 if (caller_panic_state != NULL) {
252 *caller_panic_state = true;
257 epoll_ev->panic_state = NULL;
/*
 * Per-fde bookkeeping bits stored in fde->additional_flags:
 * HAS_EVENT    - an epoll_event is currently registered for this fde
 * REPORT_ERROR - EPOLLERR/EPOLLHUP should be passed to the handler
 * GOT_ERROR    - an error condition was already observed on this fde
 */
260 #define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT (1<<0)
261 #define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR (1<<1)
262 #define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR (1<<2)
/*
265 add the epoll event to the given fd_event
 *
 * Registers fde->fd with EPOLL_CTL_ADD (event.data.ptr carries the fde
 * back from epoll_wait).  A failing epoll_ctl() triggers epoll_panic().
 */
267 static void epoll_add_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
269 struct epoll_event event;
/* backend already torn down (e.g. after a panic) */
271 if (epoll_ev->epoll_fd == -1) return;
273 fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
275 /* if we don't want events yet, don't add an epoll_event */
276 if (fde->flags == 0) return;
279 event.events = epoll_map_flags(fde->flags);
280 event.data.ptr = fde;
281 if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event) != 0) {
282 epoll_panic(epoll_ev, "EPOLL_CTL_ADD failed", false);
285 fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
287 /* only if we want to read we want to tell the event handler about errors */
288 if (fde->flags & TEVENT_FD_READ) {
289 fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
/*
294 delete the epoll event for given fd_event
 *
 * Unregisters the fd via EPOLL_CTL_DEL.  A failure here is logged but
 * does NOT panic: the usual cause is the application closing the fd
 * before freeing the tevent_fd (the "early close bug" below).
 */
296 static void epoll_del_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
298 struct epoll_event event;
300 if (epoll_ev->epoll_fd == -1) return;
302 fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
304 /* if there's no epoll_event, we don't need to delete it */
305 if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT)) return;
/* event contents are ignored by EPOLL_CTL_DEL on modern kernels,
 * but are filled in for portability to pre-2.6.9 kernels */
308 event.events = epoll_map_flags(fde->flags);
309 event.data.ptr = fde;
310 if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event) != 0) {
311 tevent_debug(epoll_ev->ev, TEVENT_DEBUG_FATAL,
312 "epoll_del_event failed! probable early close bug (%s)\n",
315 fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
/*
319 change the epoll event to the given fd_event
 *
 * Updates the registered interest mask via EPOLL_CTL_MOD; a failure
 * triggers epoll_panic().  Mirrors epoll_add_event()'s REPORT_ERROR
 * handling: errors are only reported to handlers that want to read.
 */
321 static void epoll_mod_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
323 struct epoll_event event;
324 if (epoll_ev->epoll_fd == -1) return;
326 fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
329 event.events = epoll_map_flags(fde->flags);
330 event.data.ptr = fde;
331 if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event) != 0) {
332 epoll_panic(epoll_ev, "EPOLL_CTL_MOD failed", false);
336 /* only if we want to read we want to tell the event handler about errors */
337 if (fde->flags & TEVENT_FD_READ) {
338 fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
/*
 * Reconcile the kernel-side registration with fde->flags: decide between
 * MOD, DEL and ADD depending on whether an epoll_event already exists
 * and whether the caller still wants events.  A write-only fde that has
 * already seen an error is deliberately not re-armed (got_error check).
 */
342 static void epoll_change_event(struct epoll_event_context *epoll_ev, struct tevent_fd *fde)
344 bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR);
345 bool want_read = (fde->flags & TEVENT_FD_READ);
346 bool want_write= (fde->flags & TEVENT_FD_WRITE);
348 if (epoll_ev->epoll_fd == -1) return;
350 fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
352 /* there's already an event */
353 if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) {
354 if (want_read || (want_write && !got_error)) {
355 epoll_mod_event(epoll_ev, fde);
/*
359 * if we want to match the select behavior, we need to remove the epoll_event
360 * when the caller isn't interested in events.
362 * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
 */
364 epoll_del_event(epoll_ev, fde);
368 /* there's no epoll_event attached to the fde */
369 if (want_read || (want_write && !got_error)) {
370 epoll_add_event(epoll_ev, fde);
/*
376 event loop handling using epoll
 *
 * One wait-and-dispatch pass: convert the timeval to a millisecond
 * timeout, epoll_wait() (bracketed by trace points), then handle
 * signals/EINTR, timer expiry on timeout, and finally dispatch each
 * ready fd to its handler.  EPOLLHUP/EPOLLERR are translated to
 * TEVENT_FD_READ only for fds that asked to read, matching select().
 */
378 static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp)
382 struct epoll_event events[MAXEVENTS];
386 if (epoll_ev->epoll_fd == -1) return -1;
389 /* it's better to trigger timed events a bit later than too early */
390 timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000);
/* deliver any pending signal events before blocking */
393 if (epoll_ev->ev->signal_events &&
394 tevent_common_check_signal(epoll_ev->ev)) {
398 tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
399 ret = epoll_wait(epoll_ev->epoll_fd, events, MAXEVENTS, timeout);
401 tevent_trace_point_callback(epoll_ev->ev, TEVENT_TRACE_AFTER_WAIT);
/* EINTR with pending signal events: handle the signal, not an error */
403 if (ret == -1 && wait_errno == EINTR && epoll_ev->ev->signal_events) {
404 if (tevent_common_check_signal(epoll_ev->ev)) {
409 if (ret == -1 && wait_errno != EINTR) {
/* replay=true: the fallback may retry the wait */
410 epoll_panic(epoll_ev, "epoll_wait() failed", true);
414 if (ret == 0 && tvalp) {
415 /* we don't care about a possible delay here */
416 tevent_common_loop_timer_delay(epoll_ev->ev);
420 for (i=0;i<ret;i++) {
/* data.ptr was set to the fde in epoll_add_event() */
421 struct tevent_fd *fde = talloc_get_type(events[i].data.ptr,
426 epoll_panic(epoll_ev, "epoll_wait() gave bad data", true);
429 if (events[i].events & (EPOLLHUP|EPOLLERR)) {
430 fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
/*
432 * if we only wait for TEVENT_FD_WRITE, we should not tell the
433 * event handler about it, and remove the epoll_event,
434 * as we only report errors when waiting for read events,
435 * to match the select() behavior
 */
437 if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
438 epoll_del_event(epoll_ev, fde);
/* error surfaces to the handler as a readable condition */
441 flags |= TEVENT_FD_READ;
443 if (events[i].events & EPOLLIN) flags |= TEVENT_FD_READ;
444 if (events[i].events & EPOLLOUT) flags |= TEVENT_FD_WRITE;
446 fde->handler(epoll_ev->ev, fde, flags, fde->private_data);
/*
455 create a epoll_event_context structure.
 *
 * Backend context_init hook: allocates the zeroed backend state as a
 * talloc child of ev, opens the epoll handle, and attaches the result
 * to ev->additional_data.  Frees the new state on init failure.
 */
457 static int epoll_event_context_init(struct tevent_context *ev)
460 struct epoll_event_context *epoll_ev;
/*
463 * We might be called during tevent_re_initialise()
464 * which means we need to free our old additional_data.
 */
466 TALLOC_FREE(ev->additional_data);
468 epoll_ev = talloc_zero(ev, struct epoll_event_context);
469 if (!epoll_ev) return -1;
/* mark invalid until epoll_init_ctx() succeeds */
471 epoll_ev->epoll_fd = -1;
473 ret = epoll_init_ctx(epoll_ev);
475 talloc_free(epoll_ev);
479 ev->additional_data = epoll_ev;
/*
 * talloc destructor for a tevent_fd under the epoll backend: unlink the
 * fde from the context, drop its kernel registration, then fall through
 * to the generic destructor.  If a panic fires mid-way (checked via
 * panic_state), epoll_ev may already be freed, so we bail out to the
 * common destructor immediately without touching it again.
 */
486 static int epoll_event_fd_destructor(struct tevent_fd *fde)
488 struct tevent_context *ev = fde->event_ctx;
489 struct epoll_event_context *epoll_ev = NULL;
490 bool panic_triggered = false;
/* fde already detached from any context: generic cleanup only */
493 return tevent_common_fd_destructor(fde);
496 epoll_ev = talloc_get_type_abort(ev->additional_data,
497 struct epoll_event_context);
/*
500 * we must remove the event from the list
501 * otherwise a panic fallback handler may
502 * reuse invalid memory
 */
504 DLIST_REMOVE(ev->fd_events, fde);
506 epoll_ev->panic_state = &panic_triggered;
/* handle a fork() that happened since the last loop iteration */
507 epoll_check_reopen(epoll_ev);
508 if (panic_triggered) {
509 return tevent_common_fd_destructor(fde);
512 epoll_del_event(epoll_ev, fde);
513 if (panic_triggered) {
514 return tevent_common_fd_destructor(fde);
516 epoll_ev->panic_state = NULL;
518 return tevent_common_fd_destructor(fde);
/*
 * Backend add_fd hook: create the generic fde, install the epoll-aware
 * destructor, re-check for fork(), then register the fd with epoll.
523 return NULL on failure (memory allocation error)
 */
525 static struct tevent_fd *epoll_event_add_fd(struct tevent_context *ev, TALLOC_CTX *mem_ctx,
526 int fd, uint16_t flags,
527 tevent_fd_handler_t handler,
529 const char *handler_name,
530 const char *location)
532 struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
533 struct epoll_event_context);
534 struct tevent_fd *fde;
535 bool panic_triggered = false;
537 fde = tevent_common_add_fd(ev, mem_ctx, fd, flags,
538 handler, private_data,
539 handler_name, location);
540 if (!fde) return NULL;
542 talloc_set_destructor(fde, epoll_event_fd_destructor);
/* a panic inside epoll_check_reopen() frees epoll_ev; detect it here */
544 epoll_ev->panic_state = &panic_triggered;
545 epoll_check_reopen(epoll_ev);
546 if (panic_triggered) {
549 epoll_ev->panic_state = NULL;
551 epoll_add_event(epoll_ev, fde);
/*
557 set the fd event flags
 *
 * Backend set_fd_flags hook: no-op when the flags are unchanged;
 * otherwise re-check for fork() and reconcile the kernel registration
 * via epoll_change_event().
 */
559 static void epoll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
561 struct tevent_context *ev;
562 struct epoll_event_context *epoll_ev;
563 bool panic_triggered = false;
/* nothing changed: avoid a needless epoll_ctl() round trip */
565 if (fde->flags == flags) return;
568 epoll_ev = talloc_get_type(ev->additional_data, struct epoll_event_context);
572 epoll_ev->panic_state = &panic_triggered;
573 epoll_check_reopen(epoll_ev);
574 if (panic_triggered) {
577 epoll_ev->panic_state = NULL;
579 epoll_change_event(epoll_ev, fde);
/*
583 do a single event loop using the events defined in ev
 *
 * Backend loop_once hook, dispatched in priority order: pending signals
 * first, then immediate events, then compute the timer delay (a zero
 * delay means a timer already fired) and finally run one epoll pass.
 * panic_force_replay is set around epoll_check_reopen() so a panic
 * fallback will replay the interrupted wait.
 */
585 static int epoll_event_loop_once(struct tevent_context *ev, const char *location)
587 struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
588 struct epoll_event_context);
590 bool panic_triggered = false;
592 if (ev->signal_events &&
593 tevent_common_check_signal(ev)) {
597 if (ev->immediate_events &&
598 tevent_common_loop_immediate(ev)) {
602 tval = tevent_common_loop_timer_delay(ev);
/* a timer fired during delay computation: count that as the event */
603 if (tevent_timeval_is_zero(&tval)) {
607 epoll_ev->panic_state = &panic_triggered;
608 epoll_ev->panic_force_replay = true;
609 epoll_check_reopen(epoll_ev);
610 if (panic_triggered) {
614 epoll_ev->panic_force_replay = false;
615 epoll_ev->panic_state = NULL;
617 return epoll_event_loop(epoll_ev, &tval);
/*
 * Backend ops table registered with tevent: fd handling is epoll
 * specific, everything else delegates to the tevent_common_* defaults.
 */
620 static const struct tevent_ops epoll_event_ops = {
621 .context_init = epoll_event_context_init,
622 .add_fd = epoll_event_add_fd,
623 .set_fd_close_fn = tevent_common_fd_set_close_fn,
624 .get_fd_flags = tevent_common_fd_get_flags,
625 .set_fd_flags = epoll_event_set_fd_flags,
626 .add_timer = tevent_common_add_timer,
627 .schedule_immediate = tevent_common_schedule_immediate,
628 .add_signal = tevent_common_add_signal,
629 .loop_once = epoll_event_loop_once,
630 .loop_wait = tevent_common_loop_wait,
/* register this backend under the name "epoll" with the tevent core */
633 _PRIVATE_ bool tevent_epoll_init(void)
635 return tevent_register_backend("epoll", &epoll_event_ops);