--- /dev/null
+# check for EPOLL and native Linux AIO interface
+# Both backends default to disabled; they are only enabled when the
+# required headers and functions are detected below.
+SMB_ENABLE(EVENTS_EPOLL, NO)
+SMB_ENABLE(EVENTS_AIO, NO)
+AC_CHECK_HEADERS(sys/epoll.h)
+AC_CHECK_FUNCS(epoll_create)
+if test x"$ac_cv_header_sys_epoll_h" = x"yes" -a x"$ac_cv_func_epoll_create" = x"yes";then
+	SMB_ENABLE(EVENTS_EPOLL,YES)
+	AC_DEFINE(HAVE_EVENTS_EPOLL, 1, [Whether epoll is available])
+
+	# check for native Linux AIO interface (only makes sense on top of epoll)
+	AC_CHECK_HEADERS(libaio.h)
+	AC_CHECK_LIB_EXT(aio, AIO_LIBS, io_getevents)
+	if test x"$ac_cv_header_libaio_h" = x"yes" -a x"$ac_cv_lib_ext_aio_io_getevents" = x"yes";then
+		SMB_ENABLE(EVENTS_AIO,YES)
+		AC_DEFINE(HAVE_LINUX_AIO, 1, [Whether Linux AIO is available])
+	fi
+fi
+# AIO_LIBS is empty unless the libaio check above succeeded
+SMB_EXT_LIB(LIBAIO_LINUX, $AIO_LIBS)
--- /dev/null
+##############################
+# Linux AIO backend (built only when HAVE_LINUX_AIO was detected;
+# see libevents.m4)
+[MODULE::EVENTS_AIO]
+OBJ_FILES = events_aio.o
+PRIVATE_DEPENDENCIES = LIBAIO_LINUX
+SUBSYSTEM = LIBEVENTS
+INIT_FUNCTION = s4_events_aio_init
+##############################
+
+##############################
+# Linux epoll backend
+[MODULE::EVENTS_EPOLL]
+OBJ_FILES = events_epoll.o
+SUBSYSTEM = LIBEVENTS
+INIT_FUNCTION = s4_events_epoll_init
+##############################
+
+##############################
+# portable select(2) backend
+[MODULE::EVENTS_SELECT]
+OBJ_FILES = events_select.o
+SUBSYSTEM = LIBEVENTS
+INIT_FUNCTION = s4_events_select_init
+##############################
+
+##############################
+# default backend (select with epoll when available)
+[MODULE::EVENTS_STANDARD]
+OBJ_FILES = events_standard.o
+SUBSYSTEM = LIBEVENTS
+INIT_FUNCTION = s4_events_standard_init
+##############################
+
+
+##############################
+# Start SUBSYSTEM LIBEVENTS
+[SUBSYSTEM::LIBEVENTS]
+OBJ_FILES = events.o events_timed.o events_signal.o
+PUBLIC_HEADERS = events.h events_internal.h
+PUBLIC_DEPENDENCIES = LIBTALLOC
+# End SUBSYSTEM LIBEVENTS
+##############################
--- /dev/null
+/*
+ Unix SMB/CIFS implementation.
+ main select loop and event handling
+ Copyright (C) Andrew Tridgell 2003
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+ PLEASE READ THIS BEFORE MODIFYING!
+
+ This module is a general abstraction for the main select loop and
+ event handling. Do not ever put any localised hacks in here, instead
+ register one of the possible event types and implement that event
+ somewhere else.
+
+ There are 2 types of event handling that are handled in this module:
+
+ 1) a file descriptor becoming readable or writeable. This is mostly
+ used for network sockets, but can be used for any type of file
+ descriptor. You may only register one handler for each file
+ descriptor/io combination or you will get unpredictable results
+ (this means that you can have a handler for read events, and a
+ separate handler for write events, but not two handlers that are
+ both handling read events)
+
+ 2) a timed event. You can register an event that happens at a
+ specific time. You can register as many of these as you
+ like. They are single shot - add a new timed event in the event
+ handler to get another event.
+
+ To setup a set of events you first need to create a event_context
+ structure using the function event_context_init(); This returns a
+ 'struct event_context' that you use in all subsequent calls.
+
+ After that you can add/remove events that you are interested in
+ using event_add_*() and talloc_free()
+
+  Finally, you call event_loop_once() to block waiting for one of the
+  events to occur, or event_loop_wait() which will loop
+ forever.
+
+*/
+
+#include "includes.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+#include "lib/util/dlinklist.h"
+#if _SAMBA_BUILD_
+#include "build.h"
+#endif
+
+/* one entry in the doubly-linked list of registered event backends */
+struct event_ops_list {
+	struct event_ops_list *next, *prev;
+	const char *name;	/* backend name, e.g. "standard", "epoll", "aio" */
+	const struct event_ops *ops;	/* the backend's dispatch table */
+};
+
+/* list of registered event backends */
+static struct event_ops_list *event_backends;
+
+/* name of the default backend; heap-allocated via strdup() */
+static char *event_default_backend = NULL;
+
+/*
+  register an events backend
+
+  name and ops are stored by reference, not copied, so they must stay
+  valid for the lifetime of the process (backends pass string literals
+  and static tables).
+
+  returns False on allocation failure
+*/
+bool event_register_backend(const char *name, const struct event_ops *ops)
+{
+	struct event_ops_list *e;
+	e = talloc(talloc_autofree_context(), struct event_ops_list);
+	if (e == NULL) return False;
+	e->name = name;
+	e->ops = ops;
+	DLIST_ADD(event_backends, e);
+	return True;
+}
+
+/*
+  set the default event backend
+
+  the name is copied; any previously set default is released
+ */
+void event_set_default_backend(const char *backend)
+{
+	if (event_default_backend) free(event_default_backend);
+	/* NOTE(review): strdup() failure is not checked here; a failed copy
+	   silently leaves the default backend as NULL ("standard") */
+	event_default_backend = strdup(backend);
+}
+
+/*
+  initialise backends if not already done
+
+  inside the samba build the backends are loaded as LIBEVENTS modules;
+  in the standalone build the statically linked backends are called
+  directly.
+*/
+static void event_backend_init(void)
+{
+#if _SAMBA_BUILD_
+	init_module_fn static_init[] = STATIC_LIBEVENTS_MODULES;
+	init_module_fn *shared_init;
+	if (event_backends) return;
+	shared_init = load_samba_modules(NULL, "LIBEVENTS");
+	run_init_functions(static_init);
+	run_init_functions(shared_init);
+#else
+	/* NOTE(review): this branch has no "already initialised" guard like
+	   the one above; repeated calls re-register the backends — confirm
+	   whether duplicate list entries are harmless here */
+	bool events_standard_init(void);
+	bool events_select_init(void);
+	events_select_init();
+	events_standard_init();
+#if HAVE_EVENTS_EPOLL
+	{
+		bool events_epoll_init(void);
+		events_epoll_init();
+	}
+#endif
+#endif
+}
+
+/*
+  list available backends
+
+  returns a talloc'd NULL-terminated array of backend names owned by
+  mem_ctx
+*/
+const char **event_backend_list(TALLOC_CTX *mem_ctx)
+{
+	const char **list = NULL;
+	struct event_ops_list *e;
+
+	event_backend_init();
+
+	for (e=event_backends;e;e=e->next) {
+		list = str_list_add(list, e->name);
+	}
+
+	/* give ownership of the accumulated list to the caller's context */
+	talloc_steal(mem_ctx, list);
+
+	return list;
+}
+
+/*
+  create an event_context structure for a specific implementation.
+  This must be the first events call, and all subsequent calls pass
+  this event_context as the first element. Event handlers also
+  receive this as their first argument.
+
+  This function is for allowing third-party-applications to hook in gluecode
+  to their own event loop code, so that they can make async usage of our client libs
+
+  NOTE: use event_context_init() inside of samba!
+*/
+static struct event_context *event_context_init_ops(TALLOC_CTX *mem_ctx,
+						    const struct event_ops *ops)
+{
+	struct event_context *ev;
+	int ret;
+
+	ev = talloc_zero(mem_ctx, struct event_context);
+	if (!ev) return NULL;
+
+	ev->ops = ops;
+
+	/* let the backend set up its private state (ev->additional_data) */
+	ret = ev->ops->context_init(ev);
+	if (ret != 0) {
+		talloc_free(ev);
+		return NULL;
+	}
+
+	return ev;
+}
+
+/*
+  create a event_context structure. This must be the first events
+  call, and all subsequent calls pass this event_context as the first
+  element. Event handlers also receive this as their first argument.
+
+  the backend is chosen in this order of precedence:
+    1) the caller-supplied name
+    2) the "event:backend" smb.conf parameter (samba build only)
+    3) the default set via event_set_default_backend()
+    4) "standard"
+
+  returns NULL if no backend of that name is registered
+*/
+struct event_context *event_context_init_byname(TALLOC_CTX *mem_ctx, const char *name)
+{
+	struct event_ops_list *e;
+
+	event_backend_init();
+
+#if _SAMBA_BUILD_
+	if (name == NULL) {
+		name = lp_parm_string(-1, "event", "backend");
+	}
+#endif
+	if (name == NULL) {
+		name = event_default_backend;
+	}
+	if (name == NULL) {
+		name = "standard";
+	}
+
+	for (e=event_backends;e;e=e->next) {
+		if (strcmp(name, e->name) == 0) {
+			return event_context_init_ops(mem_ctx, e->ops);
+		}
+	}
+	return NULL;
+}
+
+
+/*
+  create a event_context structure. This must be the first events
+  call, and all subsequent calls pass this event_context as the first
+  element. Event handlers also receive this as their first argument.
+*/
+struct event_context *event_context_init(TALLOC_CTX *mem_ctx)
+{
+	return event_context_init_byname(mem_ctx, NULL);
+}
+
+/*
+  add a fd based event
+  return NULL on failure (memory allocation error)
+
+  if flags contains EVENT_FD_AUTOCLOSE then the fd will be closed when
+  the returned fd_event context is freed
+*/
+struct fd_event *event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
+			      int fd, uint16_t flags, event_fd_handler_t handler,
+			      void *private_data)
+{
+	return ev->ops->add_fd(ev, mem_ctx, fd, flags, handler, private_data);
+}
+
+/*
+  add a disk aio event
+
+  returns NULL if the selected backend has no aio support (only the
+  "aio" backend implements add_aio)
+*/
+struct aio_event *event_add_aio(struct event_context *ev,
+				TALLOC_CTX *mem_ctx,
+				struct iocb *iocb,
+				event_aio_handler_t handler,
+				void *private_data)
+{
+	if (ev->ops->add_aio == NULL) return NULL;
+	return ev->ops->add_aio(ev, mem_ctx, iocb, handler, private_data);
+}
+
+/*
+  return the fd event flags (EVENT_FD_READ/EVENT_FD_WRITE)
+
+  a NULL fde is tolerated and reported as "no flags"
+*/
+uint16_t event_get_fd_flags(struct fd_event *fde)
+{
+	if (!fde) return 0;
+	return fde->event_ctx->ops->get_fd_flags(fde);
+}
+
+/*
+  set the fd event flags
+
+  a NULL fde is silently ignored
+*/
+void event_set_fd_flags(struct fd_event *fde, uint16_t flags)
+{
+	if (!fde) return;
+	fde->event_ctx->ops->set_fd_flags(fde, flags);
+}
+
+/*
+  add a timed event
+  return NULL on failure
+*/
+struct timed_event *event_add_timed(struct event_context *ev, TALLOC_CTX *mem_ctx,
+				    struct timeval next_event,
+				    event_timed_handler_t handler,
+				    void *private_data)
+{
+	return ev->ops->add_timed(ev, mem_ctx, next_event, handler, private_data);
+}
+
+/*
+  add a signal event
+
+  sa_flags are flags to sigaction(2)
+
+  return NULL on failure
+*/
+struct signal_event *event_add_signal(struct event_context *ev, TALLOC_CTX *mem_ctx,
+				      int signum,
+				      int sa_flags,
+				      event_signal_handler_t handler,
+				      void *private_data)
+{
+	return ev->ops->add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data);
+}
+
+/*
+  do a single event loop using the events defined in ev
+
+  returns the backend's result; 0 on success
+*/
+_PUBLIC_ int event_loop_once(struct event_context *ev)
+{
+	return ev->ops->loop_once(ev);
+}
+
+/*
+  run the event loop until all fd events are removed (returns 0)
+  or an error occurs
+*/
+int event_loop_wait(struct event_context *ev)
+{
+	return ev->ops->loop_wait(ev);
+}
+
+/*
+  find an event context that is a parent of the given memory context,
+  or create a new event context as a child of the given context if
+  none is found
+
+  This should be used in preference to event_context_init() in places
+  where you would prefer to use the existing event context if possible
+  (which is most situations)
+*/
+struct event_context *event_context_find(TALLOC_CTX *mem_ctx)
+{
+	/* walk up the talloc ancestry looking for an existing context */
+	struct event_context *ev = talloc_find_parent_bytype(mem_ctx, struct event_context);
+	if (ev == NULL) {
+		ev = event_context_init(mem_ctx);
+	}
+	return ev;
+}
--- /dev/null
+/*
+ Unix SMB/CIFS implementation.
+
+ generalised event loop handling
+
+ Copyright (C) Andrew Tridgell 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __EVENTS_H__
+#define __EVENTS_H__
+
+#include "talloc/talloc.h"
+#include <stdlib.h>
+
+/* opaque types; their layout is private to lib/events */
+struct event_context;
+struct event_ops;
+struct fd_event;
+struct timed_event;
+struct aio_event;
+struct signal_event;
+
+/* event handler types */
+/* fd handler: receives the EVENT_FD_* flags that fired */
+typedef void (*event_fd_handler_t)(struct event_context *, struct fd_event *,
+				   uint16_t , void *);
+/* timed handler: receives the current time */
+typedef void (*event_timed_handler_t)(struct event_context *, struct timed_event *,
+				      struct timeval , void *);
+/* signal handler: receives signum, count and siginfo */
+typedef void (*event_signal_handler_t)(struct event_context *, struct signal_event *,
+				       int , int, void *, void *);
+/* aio handler: receives the io completion result */
+typedef void (*event_aio_handler_t)(struct event_context *, struct aio_event *,
+				    int, void *);
+
+struct event_context *event_context_init(TALLOC_CTX *mem_ctx);
+struct event_context *event_context_init_byname(TALLOC_CTX *mem_ctx, const char *name);
+const char **event_backend_list(TALLOC_CTX *mem_ctx);
+void event_set_default_backend(const char *backend);
+
+struct fd_event *event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
+			      int fd, uint16_t flags, event_fd_handler_t handler,
+			      void *private);
+
+struct timed_event *event_add_timed(struct event_context *ev, TALLOC_CTX *mem_ctx,
+				    struct timeval next_event,
+				    event_timed_handler_t handler,
+				    void *private);
+
+struct signal_event *event_add_signal(struct event_context *ev, TALLOC_CTX *mem_ctx,
+				      int signum, int sa_flags,
+				      event_signal_handler_t handler,
+				      void *private);
+
+/* from libaio; only meaningful with the "aio" backend */
+struct iocb;
+struct aio_event *event_add_aio(struct event_context *ev,
+				TALLOC_CTX *mem_ctx,
+				struct iocb *iocb,
+				event_aio_handler_t handler,
+				void *private);
+
+int event_loop_once(struct event_context *ev);
+int event_loop_wait(struct event_context *ev);
+
+uint16_t event_get_fd_flags(struct fd_event *fde);
+void event_set_fd_flags(struct fd_event *fde, uint16_t flags);
+
+struct event_context *event_context_find(TALLOC_CTX *mem_ctx);
+
+/* bits for file descriptor event flags */
+#define EVENT_FD_READ 1
+#define EVENT_FD_WRITE 2
+#define EVENT_FD_AUTOCLOSE 4
+
+/* convenience macros for toggling individual flag bits on an fde */
+#define EVENT_FD_WRITEABLE(fde) \
+	event_set_fd_flags(fde, event_get_fd_flags(fde) | EVENT_FD_WRITE)
+#define EVENT_FD_READABLE(fde) \
+	event_set_fd_flags(fde, event_get_fd_flags(fde) | EVENT_FD_READ)
+
+#define EVENT_FD_NOT_WRITEABLE(fde) \
+	event_set_fd_flags(fde, event_get_fd_flags(fde) & ~EVENT_FD_WRITE)
+#define EVENT_FD_NOT_READABLE(fde) \
+	event_set_fd_flags(fde, event_get_fd_flags(fde) & ~EVENT_FD_READ)
+
+#endif /* __EVENTS_H__ */
--- /dev/null
+/*
+ Unix SMB/CIFS implementation.
+
+ main select loop and event handling - aio/epoll hybrid implementation
+
+ Copyright (C) Andrew Tridgell 2006
+
+ based on events_standard.c
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+/*
+ this is a very strange beast. The Linux AIO implementation doesn't
+ yet integrate properly with epoll, but there is a kernel patch that
+ allows the aio wait primitives to be used to wait for epoll events,
+ and this can be used to give us a unified event system incorporating
+ both aio events and epoll events
+
+ this is _very_ experimental code
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "system/network.h"
+#include "lib/util/dlinklist.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+#include <sys/epoll.h>
+#include <libaio.h>
+
+#define MAX_AIO_QUEUE_DEPTH 100
+#ifndef IOCB_CMD_EPOLL_WAIT
+#define IOCB_CMD_EPOLL_WAIT 9
+#endif
+
+/* backend-private state, stored in event_context->additional_data */
+struct aio_event_context {
+	/* a pointer back to the generic event_context */
+	struct event_context *ev;
+
+	/* list of filedescriptor events */
+	struct fd_event *fd_events;
+
+	/* number of registered fd event handlers */
+	int num_fd_events;
+
+	/* bumped by the fde destructor so the event loop can tell that
+	   a handler destroyed events behind its back */
+	uint32_t destruction_count;
+
+	/* the kernel AIO context used for both disk IO and epoll waits */
+	io_context_t ioctx;
+
+	/* result buffer filled by the IOCB_CMD_EPOLL_WAIT iocb */
+	struct epoll_event epevent[MAX_AIO_QUEUE_DEPTH];
+
+	/* the long-lived iocb used to wait on the epoll fd via io_getevents */
+	struct iocb *epoll_iocb;
+
+	int epoll_fd;
+	/* non-zero while the epoll-wait iocb is submitted to the kernel */
+	int is_epoll_set;
+	/* pid that created epoll_fd; used to detect forks */
+	pid_t pid;
+};
+
+/* one outstanding disk aio request */
+struct aio_event {
+	struct event_context *event_ctx;
+	struct iocb iocb;
+	void *private_data;
+	event_aio_handler_t handler;
+};
+
+/*
+  map from EVENT_FD_* to EPOLLIN/EPOLLOUT
+
+  EPOLLERR/EPOLLHUP are always requested alongside, since epoll
+  reports them regardless
+*/
+static uint32_t epoll_map_flags(uint16_t flags)
+{
+	uint32_t ret = 0;
+	if (flags & EVENT_FD_READ) ret |= (EPOLLIN | EPOLLERR | EPOLLHUP);
+	if (flags & EVENT_FD_WRITE) ret |= (EPOLLOUT | EPOLLERR | EPOLLHUP);
+	return ret;
+}
+
+/*
+  talloc destructor: release the kernel AIO context and the epoll fd
+*/
+static int aio_ctx_destructor(struct aio_event_context *aio_ev)
+{
+	io_queue_release(aio_ev->ioctx);
+	close(aio_ev->epoll_fd);
+	aio_ev->epoll_fd = -1;
+	return 0;
+}
+
+static void epoll_add_event(struct aio_event_context *aio_ev, struct fd_event *fde);
+
+/*
+  reopen the epoll handle when our pid changes
+  see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for a
+  demonstration of why this is needed (epoll fds are shared with the
+  parent after fork, so events would be stolen between processes)
+ */
+static void epoll_check_reopen(struct aio_event_context *aio_ev)
+{
+	struct fd_event *fde;
+
+	if (aio_ev->pid == getpid()) {
+		return;
+	}
+
+	close(aio_ev->epoll_fd);
+	aio_ev->epoll_fd = epoll_create(MAX_AIO_QUEUE_DEPTH);
+	if (aio_ev->epoll_fd == -1) {
+		DEBUG(0,("Failed to recreate epoll handle after fork\n"));
+		return;
+	}
+	aio_ev->pid = getpid();
+	/* re-register every known fd with the fresh epoll handle */
+	for (fde=aio_ev->fd_events;fde;fde=fde->next) {
+		epoll_add_event(aio_ev, fde);
+	}
+}
+
+/* per-fde state bits kept in fde->additional_flags */
+#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT	(1<<0)
+#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR	(1<<1)
+#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR	(1<<2)
+
+/*
+  add the epoll event to the given fd_event
+*/
+static void epoll_add_event(struct aio_event_context *aio_ev, struct fd_event *fde)
+{
+	struct epoll_event event;
+	if (aio_ev->epoll_fd == -1) return;
+
+	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+	/* if we don't want events yet, don't add an aio_event */
+	if (fde->flags == 0) return;
+
+	ZERO_STRUCT(event);
+	event.events = epoll_map_flags(fde->flags);
+	event.data.ptr = fde;
+	/* NOTE(review): unlike events_epoll.c, the epoll_ctl() result is not
+	   checked here — an ADD failure goes unnoticed; confirm intended */
+	epoll_ctl(aio_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event);
+	fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
+
+	/* only if we want to read we want to tell the event handler about errors */
+	if (fde->flags & EVENT_FD_READ) {
+		fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+	}
+}
+
+/*
+  delete the epoll event for given fd_event
+*/
+static void epoll_del_event(struct aio_event_context *aio_ev, struct fd_event *fde)
+{
+	struct epoll_event event;
+
+	DLIST_REMOVE(aio_ev->fd_events, fde);
+
+	if (aio_ev->epoll_fd == -1) return;
+
+	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+	/* if there's no aio_event, we don't need to delete it */
+	if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT)) return;
+
+	ZERO_STRUCT(event);
+	event.events = epoll_map_flags(fde->flags);
+	event.data.ptr = fde;
+	epoll_ctl(aio_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event);
+
+	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
+}
+
+/*
+  change the epoll event to the given fd_event
+*/
+static void epoll_mod_event(struct aio_event_context *aio_ev, struct fd_event *fde)
+{
+	struct epoll_event event;
+	if (aio_ev->epoll_fd == -1) return;
+
+	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+	ZERO_STRUCT(event);
+	event.events = epoll_map_flags(fde->flags);
+	event.data.ptr = fde;
+	epoll_ctl(aio_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event);
+
+	/* only if we want to read we want to tell the event handler about errors */
+	if (fde->flags & EVENT_FD_READ) {
+		fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+	}
+}
+
+/*
+  called when fde->flags changed: decide whether the fde needs an
+  epoll registration added, modified or removed.  Once a fd got an
+  error we never wait for writability again (got_error), only
+  readability so the error can be reported.
+*/
+static void epoll_change_event(struct aio_event_context *aio_ev, struct fd_event *fde)
+{
+	bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR);
+	bool want_read = (fde->flags & EVENT_FD_READ);
+	bool want_write= (fde->flags & EVENT_FD_WRITE);
+
+	if (aio_ev->epoll_fd == -1) return;
+
+	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+	/* there's already an event */
+	if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) {
+		if (want_read || (want_write && !got_error)) {
+			epoll_mod_event(aio_ev, fde);
+			return;
+		}
+		epoll_del_event(aio_ev, fde);
+		return;
+	}
+
+	/* there's no aio_event attached to the fde */
+	if (want_read || (want_write && !got_error)) {
+		DLIST_ADD(aio_ev->fd_events, fde);
+		epoll_add_event(aio_ev, fde);
+		return;
+	}
+}
+
+/*
+  (re)submit the special IOCB_CMD_EPOLL_WAIT iocb that makes
+  io_getevents() also wake up on epoll activity.  The iocb is single
+  shot: it must be resubmitted after every completion (is_epoll_set
+  tracks this).  Returns -1 if io_submit fails.
+*/
+static int setup_epoll_wait(struct aio_event_context *aio_ev)
+{
+	if (aio_ev->is_epoll_set) {
+		return 0;
+	}
+	memset(aio_ev->epoll_iocb, 0, sizeof(*aio_ev->epoll_iocb));
+	aio_ev->epoll_iocb->aio_fildes = aio_ev->epoll_fd;
+	aio_ev->epoll_iocb->aio_lio_opcode = IOCB_CMD_EPOLL_WAIT;
+	aio_ev->epoll_iocb->aio_reqprio = 0;
+
+	/* results are delivered into the context's epevent array */
+	aio_ev->epoll_iocb->u.c.nbytes = MAX_AIO_QUEUE_DEPTH;
+	aio_ev->epoll_iocb->u.c.offset = -1;
+	aio_ev->epoll_iocb->u.c.buf = aio_ev->epevent;
+
+	if (io_submit(aio_ev->ioctx, 1, &aio_ev->epoll_iocb) != 1) {
+		return -1;
+	}
+	aio_ev->is_epoll_set = 1;
+
+	return 0;
+}
+
+
+/*
+ event loop handling using aio/epoll hybrid
+*/
+static int aio_event_loop(struct aio_event_context *aio_ev, struct timeval *tvalp)
+{
+ int ret, i;
+ uint32_t destruction_count = ++aio_ev->destruction_count;
+ struct timespec timeout;
+ struct io_event events[8];
+
+ if (aio_ev->epoll_fd == -1) return -1;
+
+ if (aio_ev->ev->num_signal_handlers &&
+ common_event_check_signal(aio_ev->ev)) {
+ return 0;
+ }
+
+ if (tvalp) {
+ timeout.tv_sec = tvalp->tv_sec;
+ timeout.tv_nsec = tvalp->tv_usec;
+ timeout.tv_nsec *= 1000;
+ }
+
+ if (setup_epoll_wait(aio_ev) < 0)
+ return -1;
+
+ ret = io_getevents(aio_ev->ioctx, 1, 8,
+ events, tvalp?&timeout:NULL);
+
+ if (ret == -EINTR) {
+ if (aio_ev->ev->num_signal_handlers) {
+ common_event_check_signal(aio_ev->ev);
+ }
+ return 0;
+ }
+
+ if (ret == 0 && tvalp) {
+ /* we don't care about a possible delay here */
+ common_event_loop_timer_delay(aio_ev->ev);
+ return 0;
+ }
+
+ for (i=0;i<ret;i++) {
+ struct io_event *event = &events[i];
+ struct iocb *finished = event->obj;
+
+ switch (finished->aio_lio_opcode) {
+ case IO_CMD_PWRITE:
+ case IO_CMD_PREAD: {
+ struct aio_event *ae = talloc_get_type(finished->data,
+ struct aio_event);
+ if (ae) {
+ talloc_set_destructor(ae, NULL);
+ ae->handler(ae->event_ctx, ae,
+ event->res, ae->private_data);
+ talloc_free(ae);
+ }
+ break;
+ }
+ case IOCB_CMD_EPOLL_WAIT: {
+ struct epoll_event *ep = (struct epoll_event *)finished->u.c.buf;
+ struct fd_event *fde;
+ uint16_t flags = 0;
+ int j;
+
+ aio_ev->is_epoll_set = 0;
+
+ for (j=0; j<event->res; j++, ep++) {
+ fde = talloc_get_type(ep->data.ptr,
+ struct fd_event);
+ if (fde == NULL) {
+ return -1;
+ }
+ if (ep->events & (EPOLLHUP|EPOLLERR)) {
+ fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
+ if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
+ epoll_del_event(aio_ev, fde);
+ continue;
+ }
+ flags |= EVENT_FD_READ;
+ }
+ if (ep->events & EPOLLIN) flags |= EVENT_FD_READ;
+ if (ep->events & EPOLLOUT) flags |= EVENT_FD_WRITE;
+ if (flags) {
+ fde->handler(aio_ev->ev, fde, flags, fde->private_data);
+ }
+ }
+ break;
+ }
+ }
+ if (destruction_count != aio_ev->destruction_count) {
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+  create a aio_event_context structure.
+
+  allocates the backend-private state, the kernel AIO queue and the
+  epoll fd, and submits the initial epoll-wait iocb.  Returns 0 on
+  success, -1 on failure (all partial state is released via talloc).
+*/
+static int aio_event_context_init(struct event_context *ev)
+{
+	struct aio_event_context *aio_ev;
+
+	aio_ev = talloc_zero(ev, struct aio_event_context);
+	if (!aio_ev) return -1;
+
+	aio_ev->ev = ev;
+	aio_ev->epoll_iocb = talloc(aio_ev, struct iocb);
+	/* without this check a failed allocation would be dereferenced by
+	   the memset() in setup_epoll_wait() below */
+	if (!aio_ev->epoll_iocb) {
+		talloc_free(aio_ev);
+		return -1;
+	}
+
+	if (io_queue_init(MAX_AIO_QUEUE_DEPTH, &aio_ev->ioctx) != 0) {
+		talloc_free(aio_ev);
+		return -1;
+	}
+
+	aio_ev->epoll_fd = epoll_create(MAX_AIO_QUEUE_DEPTH);
+	if (aio_ev->epoll_fd == -1) {
+		talloc_free(aio_ev);
+		return -1;
+	}
+	aio_ev->pid = getpid();
+
+	/* destructor releases ioctx and epoll_fd */
+	talloc_set_destructor(aio_ev, aio_ctx_destructor);
+
+	ev->additional_data = aio_ev;
+
+	if (setup_epoll_wait(aio_ev) < 0) {
+		talloc_free(aio_ev);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+  destroy an fd_event
+
+  talloc destructor: unregisters the fd from epoll, bumps the
+  destruction count so a running event loop notices, and honours
+  EVENT_FD_AUTOCLOSE
+*/
+static int aio_event_fd_destructor(struct fd_event *fde)
+{
+	struct event_context *ev = fde->event_ctx;
+	struct aio_event_context *aio_ev = talloc_get_type(ev->additional_data,
+							   struct aio_event_context);
+
+	epoll_check_reopen(aio_ev);
+
+	aio_ev->num_fd_events--;
+	aio_ev->destruction_count++;
+
+	epoll_del_event(aio_ev, fde);
+
+	if (fde->flags & EVENT_FD_AUTOCLOSE) {
+		close(fde->fd);
+		fde->fd = -1;
+	}
+
+	return 0;
+}
+
+/*
+  add a fd based event
+  return NULL on failure (memory allocation error)
+
+  the fde is owned by mem_ctx (or by ev if mem_ctx is NULL); freeing
+  it unregisters the fd via aio_event_fd_destructor
+*/
+static struct fd_event *aio_event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
+					 int fd, uint16_t flags,
+					 event_fd_handler_t handler,
+					 void *private_data)
+{
+	struct aio_event_context *aio_ev = talloc_get_type(ev->additional_data,
+							   struct aio_event_context);
+	struct fd_event *fde;
+
+	epoll_check_reopen(aio_ev);
+
+	fde = talloc(mem_ctx?mem_ctx:ev, struct fd_event);
+	if (!fde) return NULL;
+
+	fde->event_ctx		= ev;
+	fde->fd			= fd;
+	fde->flags		= flags;
+	fde->handler		= handler;
+	fde->private_data	= private_data;
+	fde->additional_flags	= 0;
+	fde->additional_data	= NULL;
+
+	aio_ev->num_fd_events++;
+	talloc_set_destructor(fde, aio_event_fd_destructor);
+
+	DLIST_ADD(aio_ev->fd_events, fde);
+	epoll_add_event(aio_ev, fde);
+
+	return fde;
+}
+
+
+/*
+  return the fd event flags
+*/
+static uint16_t aio_event_get_fd_flags(struct fd_event *fde)
+{
+	return fde->flags;
+}
+
+/*
+  set the fd event flags
+
+  a no-op when the flags are unchanged; otherwise the epoll
+  registration is updated to match
+*/
+static void aio_event_set_fd_flags(struct fd_event *fde, uint16_t flags)
+{
+	struct event_context *ev;
+	struct aio_event_context *aio_ev;
+
+	if (fde->flags == flags) return;
+
+	ev = fde->event_ctx;
+	aio_ev = talloc_get_type(ev->additional_data, struct aio_event_context);
+
+	fde->flags = flags;
+
+	epoll_check_reopen(aio_ev);
+
+	epoll_change_event(aio_ev, fde);
+}
+
+/*
+  do a single event loop using the events defined in ev
+
+  fires any already-expired timed events first; only blocks in
+  aio_event_loop() when there is a remaining delay
+*/
+static int aio_event_loop_once(struct event_context *ev)
+{
+	struct aio_event_context *aio_ev = talloc_get_type(ev->additional_data,
+		 					   struct aio_event_context);
+	struct timeval tval;
+
+	tval = common_event_loop_timer_delay(ev);
+	if (timeval_is_zero(&tval)) {
+		/* a timed event was ready and has been dispatched */
+		return 0;
+	}
+
+	epoll_check_reopen(aio_ev);
+
+	return aio_event_loop(aio_ev, &tval);
+}
+
+/*
+  run the loop until all fd events are removed; always returns 0
+  (an iteration failure just ends the loop)
+*/
+static int aio_event_loop_wait(struct event_context *ev)
+{
+	struct aio_event_context *aio_ev = talloc_get_type(ev->additional_data,
+							   struct aio_event_context);
+	while (aio_ev->num_fd_events) {
+		if (aio_event_loop_once(ev) != 0) {
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+  called when a disk IO event needs to be cancelled
+
+  talloc destructor for struct aio_event: asks the kernel to cancel
+  the outstanding request so it cannot complete into freed memory
+*/
+static int aio_destructor(struct aio_event *ae)
+{
+	struct event_context *ev = ae->event_ctx;
+	struct aio_event_context *aio_ev = talloc_get_type(ev->additional_data,
+							   struct aio_event_context);
+	struct io_event result;
+	io_cancel(aio_ev->ioctx, &ae->iocb, &result);
+	/* TODO: handle errors from io_cancel()! */
+	return 0;
+}
+
+/* submit an aio disk IO event
+
+   copies the caller's iocb, tags it with the aio_event as completion
+   key and submits it.  The handler is invoked (and the aio_event
+   freed) by aio_event_loop() when the IO completes; freeing the
+   returned aio_event before then cancels the request. */
+static struct aio_event *aio_event_add_aio(struct event_context *ev,
+					   TALLOC_CTX *mem_ctx,
+					   struct iocb *iocb,
+					   event_aio_handler_t handler,
+					   void *private_data)
+{
+	struct aio_event_context *aio_ev = talloc_get_type(ev->additional_data,
+							   struct aio_event_context);
+	struct iocb *iocbp;
+	struct aio_event *ae = talloc(mem_ctx?mem_ctx:ev, struct aio_event);
+	if (ae == NULL) return NULL;
+
+	ae->event_ctx    = ev;
+	ae->iocb         = *iocb;
+	ae->handler      = handler;
+	ae->private_data = private_data;
+	/* set the completion key BEFORE io_submit(): the kernel may
+	   complete the request at any point after submission, and
+	   aio_event_loop() looks the aio_event up via finished->data */
+	ae->iocb.data    = ae;
+	iocbp = &ae->iocb;
+
+	if (io_submit(aio_ev->ioctx, 1, &iocbp) != 1) {
+		talloc_free(ae);
+		return NULL;
+	}
+	talloc_set_destructor(ae, aio_destructor);
+
+	return ae;
+}
+
+/* dispatch table for the "aio" backend; timed and signal events are
+   handled by the shared common_* implementations */
+static const struct event_ops aio_event_ops = {
+	.context_init	= aio_event_context_init,
+	.add_fd		= aio_event_add_fd,
+	.add_aio	= aio_event_add_aio,
+	.get_fd_flags	= aio_event_get_fd_flags,
+	.set_fd_flags	= aio_event_set_fd_flags,
+	.add_timed	= common_event_add_timed,
+	.add_signal	= common_event_add_signal,
+	.loop_once	= aio_event_loop_once,
+	.loop_wait	= aio_event_loop_wait,
+};
+
+/* register this backend under the name "aio" */
+bool events_aio_init(void)
+{
+	return event_register_backend("aio", &aio_event_ops);
+}
+
+#if _SAMBA_BUILD_
+/* samba module entry point (see INIT_FUNCTION in config.mk) */
+NTSTATUS s4_events_aio_init(void)
+{
+	if (!events_aio_init()) {
+		return NT_STATUS_INTERNAL_ERROR;
+	}
+	return NT_STATUS_OK;
+}
+#endif
--- /dev/null
+/*
+ Unix SMB/CIFS implementation.
+
+ main select loop and event handling - epoll implementation
+
+ Copyright (C) Andrew Tridgell 2003-2005
+ Copyright (C) Stefan Metzmacher 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "system/network.h"
+#include "lib/util/dlinklist.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+#include <sys/epoll.h>
+
+/* backend-private state, stored in event_context->additional_data */
+struct epoll_event_context {
+	/* a pointer back to the generic event_context */
+	struct event_context *ev;
+
+	/* list of filedescriptor events */
+	struct fd_event *fd_events;
+
+	/* number of registered fd event handlers */
+	int num_fd_events;
+
+	/* this is changed by the destructors for the fd event
+	   type. It is used to detect event destruction by event
+	   handlers, which means the code that is calling the event
+	   handler needs to assume that the linked list is no longer
+	   valid
+	*/
+	uint32_t destruction_count;
+
+	/* when using epoll this is the handle from epoll_create */
+	int epoll_fd;
+
+	/* pid that created epoll_fd; used to detect forks */
+	pid_t pid;
+};
+
+/*
+  called when a epoll call fails, and we should fallback
+  to using select
+
+  closing epoll_fd and clearing the destructor puts the context into
+  the epoll_fd == -1 state that all other functions check for
+*/
+static void epoll_fallback_to_select(struct epoll_event_context *epoll_ev, const char *reason)
+{
+	DEBUG(0,("%s (%s) - falling back to select()\n", reason, strerror(errno)));
+	close(epoll_ev->epoll_fd);
+	epoll_ev->epoll_fd = -1;
+	talloc_set_destructor(epoll_ev, NULL);
+}
+
+/*
+  map from EVENT_FD_* to EPOLLIN/EPOLLOUT
+
+  EPOLLERR/EPOLLHUP are always requested alongside, since epoll
+  reports them regardless
+*/
+static uint32_t epoll_map_flags(uint16_t flags)
+{
+	uint32_t ret = 0;
+	if (flags & EVENT_FD_READ) ret |= (EPOLLIN | EPOLLERR | EPOLLHUP);
+	if (flags & EVENT_FD_WRITE) ret |= (EPOLLOUT | EPOLLERR | EPOLLHUP);
+	return ret;
+}
+
+/*
+  talloc destructor: free the epoll fd
+*/
+static int epoll_ctx_destructor(struct epoll_event_context *epoll_ev)
+{
+	close(epoll_ev->epoll_fd);
+	epoll_ev->epoll_fd = -1;
+	return 0;
+}
+
+/*
+  init the epoll fd
+
+  a failed epoll_create() leaves epoll_fd == -1, which every other
+  function in this file treats as "fall back to select"
+*/
+static void epoll_init_ctx(struct epoll_event_context *epoll_ev)
+{
+	unsigned v;
+
+	epoll_ev->epoll_fd = epoll_create(64);
+
+	/* only manipulate the fd if epoll_create() actually succeeded;
+	   previously fcntl() was called on -1 after a failure */
+	if (epoll_ev->epoll_fd != -1) {
+		/* on exec, don't inherit the fd */
+		v = fcntl(epoll_ev->epoll_fd, F_GETFD, 0);
+		fcntl(epoll_ev->epoll_fd, F_SETFD, v | FD_CLOEXEC);
+	}
+
+	epoll_ev->pid = getpid();
+	talloc_set_destructor(epoll_ev, epoll_ctx_destructor);
+}
+
+static void epoll_add_event(struct epoll_event_context *epoll_ev, struct fd_event *fde);
+
+/*
+  reopen the epoll handle when our pid changes
+  see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for a
+  demonstration of why this is needed (epoll fds are shared with the
+  parent after fork, so events would be stolen between processes)
+ */
+static void epoll_check_reopen(struct epoll_event_context *epoll_ev)
+{
+	struct fd_event *fde;
+	unsigned v;
+
+	if (epoll_ev->pid == getpid()) {
+		return;
+	}
+
+	close(epoll_ev->epoll_fd);
+	epoll_ev->epoll_fd = epoll_create(64);
+	if (epoll_ev->epoll_fd == -1) {
+		DEBUG(0,("Failed to recreate epoll handle after fork\n"));
+		return;
+	}
+
+	/* on exec, don't inherit the fd */
+	v = fcntl(epoll_ev->epoll_fd, F_GETFD, 0);
+	fcntl(epoll_ev->epoll_fd, F_SETFD, v | FD_CLOEXEC);
+
+	epoll_ev->pid = getpid();
+	/* re-register every known fd with the fresh epoll handle */
+	for (fde=epoll_ev->fd_events;fde;fde=fde->next) {
+		epoll_add_event(epoll_ev, fde);
+	}
+}
+
+/* per-fde state bits kept in fde->additional_flags */
+#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT	(1<<0)
+#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR	(1<<1)
+#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR	(1<<2)
+
+/*
+  add the epoll event to the given fd_event
+*/
+static void epoll_add_event(struct epoll_event_context *epoll_ev, struct fd_event *fde)
+{
+	struct epoll_event event;
+
+	if (epoll_ev->epoll_fd == -1) return;
+
+	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+	/* if we don't want events yet, don't add an epoll_event */
+	if (fde->flags == 0) return;
+
+	ZERO_STRUCT(event);
+	event.events = epoll_map_flags(fde->flags);
+	event.data.ptr = fde;
+	if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event) != 0) {
+		epoll_fallback_to_select(epoll_ev, "EPOLL_CTL_ADD failed");
+	}
+	fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
+
+	/* only if we want to read we want to tell the event handler about errors */
+	if (fde->flags & EVENT_FD_READ) {
+		fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+	}
+}
+
+/*
+  delete the epoll event for given fd_event
+*/
+static void epoll_del_event(struct epoll_event_context *epoll_ev, struct fd_event *fde)
+{
+	struct epoll_event event;
+
+	DLIST_REMOVE(epoll_ev->fd_events, fde);
+
+	if (epoll_ev->epoll_fd == -1) return;
+
+	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+	/* if there's no epoll_event, we don't need to delete it */
+	if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT)) return;
+
+	ZERO_STRUCT(event);
+	event.events = epoll_map_flags(fde->flags);
+	event.data.ptr = fde;
+	if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event) != 0) {
+		/* a DEL failure usually means the fd was closed before the
+		   fde was freed */
+		DEBUG(0,("epoll_del_event failed! probable early close bug (%s)\n", strerror(errno)));
+	}
+	fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
+}
+
+/*
+ change the epoll event to the given fd_event
+*/
+static void epoll_mod_event(struct epoll_event_context *epoll_ev, struct fd_event *fde)
+{
+ struct epoll_event event;
+ if (epoll_ev->epoll_fd == -1) return;
+
+ fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+ ZERO_STRUCT(event);
+ event.events = epoll_map_flags(fde->flags);
+ event.data.ptr = fde;
+ if (epoll_ctl(epoll_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event) != 0) {
+ epoll_fallback_to_select(epoll_ev, "EPOLL_CTL_MOD failed");
+ }
+
+ /* only if we want to read we want to tell the event handler about errors */
+ if (fde->flags & EVENT_FD_READ) {
+ fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+ }
+}
+
+/*
+ dispatch a flags change to add/mod/del so the kernel state matches
+ the caller's interest; a write-only fde that has already seen an
+ error is dropped entirely to mimic select() semantics
+*/
+static void epoll_change_event(struct epoll_event_context *epoll_ev, struct fd_event *fde)
+{
+ bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR);
+ bool want_read = (fde->flags & EVENT_FD_READ);
+ bool want_write= (fde->flags & EVENT_FD_WRITE);
+
+ if (epoll_ev->epoll_fd == -1) return;
+
+ fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+ /* there's already an event */
+ if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) {
+ if (want_read || (want_write && !got_error)) {
+ epoll_mod_event(epoll_ev, fde);
+ return;
+ }
+ /*
+ * if we want to match the select behavior, we need to remove the epoll_event
+ * when the caller isn't interested in events.
+ *
+ * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
+ */
+ epoll_del_event(epoll_ev, fde);
+ return;
+ }
+
+ /* there's no epoll_event attached to the fde */
+ if (want_read || (want_write && !got_error)) {
+ DLIST_ADD(epoll_ev->fd_events, fde);
+ epoll_add_event(epoll_ev, fde);
+ return;
+ }
+}
+
+/*
+ event loop handling using epoll
+
+ waits for fd events (bounded by the next timed event, if tvalp is
+ given), lets pending signals pre-empt the wait, and dispatches each
+ ready fd to its handler. Returns 0 on success, -1 on fatal error.
+*/
+static int epoll_event_loop(struct epoll_event_context *epoll_ev, struct timeval *tvalp)
+{
+ int ret, i;
+#define MAXEVENTS 32
+ struct epoll_event events[MAXEVENTS];
+ /* a handler that adds/removes events bumps destruction_count;
+    we stop iterating over a possibly stale events[] array then */
+ uint32_t destruction_count = ++epoll_ev->destruction_count;
+ int timeout = -1;
+
+ if (epoll_ev->epoll_fd == -1) return -1;
+
+ if (tvalp) {
+ /* it's better to trigger timed events a bit later than too early;
+    hence the round-up to whole milliseconds */
+ timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000);
+ }
+
+ if (epoll_ev->ev->num_signal_handlers &&
+ common_event_check_signal(epoll_ev->ev)) {
+ return 0;
+ }
+
+ ret = epoll_wait(epoll_ev->epoll_fd, events, MAXEVENTS, timeout);
+
+ if (ret == -1 && errno == EINTR && epoll_ev->ev->num_signal_handlers) {
+ if (common_event_check_signal(epoll_ev->ev)) {
+ return 0;
+ }
+ }
+
+ if (ret == -1 && errno != EINTR) {
+ epoll_fallback_to_select(epoll_ev, "epoll_wait() failed");
+ return -1;
+ }
+
+ if (ret == 0 && tvalp) {
+ /* we don't care about a possible delay here */
+ common_event_loop_timer_delay(epoll_ev->ev);
+ return 0;
+ }
+
+ for (i=0;i<ret;i++) {
+ struct fd_event *fde = talloc_get_type(events[i].data.ptr,
+ struct fd_event);
+ uint16_t flags = 0;
+
+ if (fde == NULL) {
+ epoll_fallback_to_select(epoll_ev, "epoll_wait() gave bad data");
+ return -1;
+ }
+ if (events[i].events & (EPOLLHUP|EPOLLERR)) {
+ fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
+ /*
+ * if we only wait for EVENT_FD_WRITE, we should not tell the
+ * event handler about it, and remove the epoll_event,
+ * as we only report errors when waiting for read events,
+ * to match the select() behavior
+ */
+ if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
+ epoll_del_event(epoll_ev, fde);
+ continue;
+ }
+ /* errors are surfaced to the handler as readability */
+ flags |= EVENT_FD_READ;
+ }
+ if (events[i].events & EPOLLIN) flags |= EVENT_FD_READ;
+ if (events[i].events & EPOLLOUT) flags |= EVENT_FD_WRITE;
+ if (flags) {
+ fde->handler(epoll_ev->ev, fde, flags, fde->private_data);
+ if (destruction_count != epoll_ev->destruction_count) {
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ create a epoll_event_context structure.
+ (backend hook for event_ops.context_init; stores the private
+ context in ev->additional_data. Returns 0 on success, -1 on OOM)
+*/
+static int epoll_event_context_init(struct event_context *ev)
+{
+ struct epoll_event_context *epoll_ev;
+
+ epoll_ev = talloc_zero(ev, struct epoll_event_context);
+ if (!epoll_ev) return -1;
+ epoll_ev->ev = ev;
+ epoll_ev->epoll_fd = -1;
+
+ epoll_init_ctx(epoll_ev);
+
+ ev->additional_data = epoll_ev;
+ return 0;
+}
+
+/*
+ destroy an fd_event
+ (talloc destructor: deregisters the fd from epoll and, with
+ EVENT_FD_AUTOCLOSE, closes the underlying fd as well)
+*/
+static int epoll_event_fd_destructor(struct fd_event *fde)
+{
+ struct event_context *ev = fde->event_ctx;
+ struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
+ struct epoll_event_context);
+
+ /* make sure we operate on the handle owned by this process */
+ epoll_check_reopen(epoll_ev);
+
+ epoll_ev->num_fd_events--;
+ epoll_ev->destruction_count++;
+
+ epoll_del_event(epoll_ev, fde);
+
+ if (fde->flags & EVENT_FD_AUTOCLOSE) {
+ close(fde->fd);
+ fde->fd = -1;
+ }
+
+ return 0;
+}
+
+/*
+ add a fd based event
+ return NULL on failure (memory allocation error)
+
+ the returned fde is a talloc child of mem_ctx (or ev); freeing it
+ removes the event via epoll_event_fd_destructor
+*/
+static struct fd_event *epoll_event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
+ int fd, uint16_t flags,
+ event_fd_handler_t handler,
+ void *private_data)
+{
+ struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
+ struct epoll_event_context);
+ struct fd_event *fde;
+
+ /* re-create the epoll handle if we forked since it was made */
+ epoll_check_reopen(epoll_ev);
+
+ fde = talloc(mem_ctx?mem_ctx:ev, struct fd_event);
+ if (!fde) return NULL;
+
+ fde->event_ctx = ev;
+ fde->fd = fd;
+ fde->flags = flags;
+ fde->handler = handler;
+ fde->private_data = private_data;
+ fde->additional_flags = 0;
+ fde->additional_data = NULL;
+
+ epoll_ev->num_fd_events++;
+ talloc_set_destructor(fde, epoll_event_fd_destructor);
+
+ DLIST_ADD(epoll_ev->fd_events, fde);
+ epoll_add_event(epoll_ev, fde);
+
+ return fde;
+}
+
+
+/*
+ return the fd event flags
+*/
+static uint16_t epoll_event_get_fd_flags(struct fd_event *fde)
+{
+ return fde->flags;
+}
+
+/*
+ set the fd event flags
+ (propagates the new interest set to the kernel via
+ epoll_change_event; no-op if the flags are unchanged)
+*/
+static void epoll_event_set_fd_flags(struct fd_event *fde, uint16_t flags)
+{
+ struct event_context *ev;
+ struct epoll_event_context *epoll_ev;
+
+ if (fde->flags == flags) return;
+
+ ev = fde->event_ctx;
+ epoll_ev = talloc_get_type(ev->additional_data, struct epoll_event_context);
+
+ fde->flags = flags;
+
+ epoll_check_reopen(epoll_ev);
+
+ epoll_change_event(epoll_ev, fde);
+}
+
+/*
+ do a single event loop using the events defined in ev
+ (fires any timed event that is already due; otherwise waits in
+ epoll until the next fd event or timer deadline)
+*/
+static int epoll_event_loop_once(struct event_context *ev)
+{
+ struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
+ struct epoll_event_context);
+ struct timeval tval;
+
+ tval = common_event_loop_timer_delay(ev);
+ if (timeval_is_zero(&tval)) {
+ /* a timed event was run directly; count that as one iteration */
+ return 0;
+ }
+
+ epoll_check_reopen(epoll_ev);
+
+ return epoll_event_loop(epoll_ev, &tval);
+}
+
+/*
+ return on failure or (with 0) if all fd events are removed
+*/
+static int epoll_event_loop_wait(struct event_context *ev)
+{
+ struct epoll_event_context *epoll_ev = talloc_get_type(ev->additional_data,
+ struct epoll_event_context);
+ while (epoll_ev->num_fd_events) {
+ if (epoll_event_loop_once(ev) != 0) {
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* backend vtable; timed and signal events use the common helpers */
+static const struct event_ops epoll_event_ops = {
+ .context_init = epoll_event_context_init,
+ .add_fd = epoll_event_add_fd,
+ .get_fd_flags = epoll_event_get_fd_flags,
+ .set_fd_flags = epoll_event_set_fd_flags,
+ .add_timed = common_event_add_timed,
+ .add_signal = common_event_add_signal,
+ .loop_once = epoll_event_loop_once,
+ .loop_wait = epoll_event_loop_wait,
+};
+
+/* register this backend under the name "epoll" */
+bool events_epoll_init(void)
+{
+ return event_register_backend("epoll", &epoll_event_ops);
+}
+
+/* Samba4 module entry point wrapping events_epoll_init() in NTSTATUS.
+   NOTE(review): _SAMBA_BUILD_ is a reserved identifier (leading
+   underscore + capital); presumably defined by the Samba build system */
+#if _SAMBA_BUILD_
+NTSTATUS s4_events_epoll_init(void)
+{
+ if (!events_epoll_init()) {
+ return NT_STATUS_INTERNAL_ERROR;
+ }
+ return NT_STATUS_OK;
+}
+#endif
--- /dev/null
+/*
+ Unix SMB/CIFS implementation.
+
+ generalised event loop handling
+
+ Internal structs
+
+ Copyright (C) Stefan Metzmacher 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* backend vtable: every event backend (select/epoll/aio/liboop)
+   fills one of these and registers it via event_register_backend() */
+struct event_ops {
+ /* context init */
+ int (*context_init)(struct event_context *ev);
+
+ /* fd_event functions */
+ struct fd_event *(*add_fd)(struct event_context *ev,
+ TALLOC_CTX *mem_ctx,
+ int fd, uint16_t flags,
+ event_fd_handler_t handler,
+ void *private_data);
+ uint16_t (*get_fd_flags)(struct fd_event *fde);
+ void (*set_fd_flags)(struct fd_event *fde, uint16_t flags);
+
+ /* timed_event functions */
+ struct timed_event *(*add_timed)(struct event_context *ev,
+ TALLOC_CTX *mem_ctx,
+ struct timeval next_event,
+ event_timed_handler_t handler,
+ void *private_data);
+ /* disk aio event functions */
+ struct aio_event *(*add_aio)(struct event_context *ev,
+ TALLOC_CTX *mem_ctx,
+ struct iocb *iocb,
+ event_aio_handler_t handler,
+ void *private_data);
+ /* signal functions */
+ struct signal_event *(*add_signal)(struct event_context *ev,
+ TALLOC_CTX *mem_ctx,
+ int signum, int sa_flags,
+ event_signal_handler_t handler,
+ void *private_data);
+
+ /* loop functions */
+ int (*loop_once)(struct event_context *ev);
+ int (*loop_wait)(struct event_context *ev);
+};
+
+/* one registered fd of interest; linked into the backend's list */
+struct fd_event {
+ struct fd_event *prev, *next;
+ struct event_context *event_ctx;
+ int fd;
+ uint16_t flags; /* see EVENT_FD_* flags */
+ event_fd_handler_t handler;
+ /* this is private for the specific handler */
+ void *private_data;
+ /* this is private for the events_ops implementation */
+ uint16_t additional_flags;
+ void *additional_data;
+};
+
+/* one registered signal handler; linked into sig_state->sig_handlers */
+struct signal_event {
+ struct signal_event *prev, *next;
+ struct event_context *event_ctx;
+ event_signal_handler_t handler;
+ void *private_data;
+ int signum;
+ int sa_flags;
+};
+
+/* aio event is private to the aio backend */
+struct aio_event;
+
+/* the generic event context handed to all public event_* calls */
+struct event_context {
+ /* the specific events implementation */
+ const struct event_ops *ops;
+
+ /* list of timed events - used by common code */
+ struct timed_event *timed_events;
+
+ /* this is private for the events_ops implementation */
+ void *additional_data;
+
+ /* number of signal event handlers */
+ int num_signal_handlers;
+
+ /* pipe hack used with signal handlers */
+ struct fd_event *pipe_fde;
+};
+
+
+bool event_register_backend(const char *name, const struct event_ops *ops);
+
+/* shared timed/signal implementations used by most backends */
+struct timed_event *common_event_add_timed(struct event_context *, TALLOC_CTX *,
+ struct timeval, event_timed_handler_t, void *);
+struct timeval common_event_loop_timer_delay(struct event_context *);
+
+struct signal_event *common_event_add_signal(struct event_context *ev,
+ TALLOC_CTX *mem_ctx,
+ int signum,
+ int sa_flags,
+ event_signal_handler_t handler,
+ void *private_data);
+int common_event_check_signal(struct event_context *ev);
+
--- /dev/null
+/*
+ Unix SMB/CIFS implementation.
+ main select loop and event handling
+ wrapper for http://liboop.org/
+
+ Copyright (C) Stefan Metzmacher 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+
+#include <oop.h>
+
+/*
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ NOTE: this code compiles fine, but is completely *UNTESTED*
+ and is only committed as an example
+
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+*/
+
+/*
+ talloc destructor: tear down the liboop system source when the
+ event context that owns it is freed
+*/
+static int oop_event_context_destructor(struct event_context *ev)
+{
+ oop_source_sys *oop_sys = ev->additional_data;
+
+ oop_sys_delete(oop_sys);
+
+ return 0;
+}
+
+/*
+ create a oop_event_context structure.
+
+ NOTE(review): struct event_ops.context_init takes only the event
+ context, so the previous (ev, private_data) signature could never
+ be invoked through the ops table; the oop_source_sys is now always
+ created here and owned by ev.
+*/
+static int oop_event_context_init(struct event_context *ev)
+{
+ oop_source_sys *oop_sys = oop_sys_new();
+
+ if (!oop_sys) {
+ return -1;
+ }
+
+ talloc_set_destructor(ev, oop_event_context_destructor);
+
+ ev->additional_data = oop_sys;
+
+ return 0;
+}
+
+/*
+ liboop fd callback: translate the oop event type back to our
+ EVENT_FD_* flag and invoke the registered handler
+*/
+static void *oop_event_fd_handler(oop_source *oop, int fd, oop_event oop_type, void *ptr)
+{
+ struct fd_event *fde = ptr;
+
+ /* sanity check: liboop must hand back the fd we registered */
+ if (fd != fde->fd) return OOP_ERROR;
+
+ switch(oop_type) {
+ case OOP_READ:
+ fde->handler(fde->event_ctx, fde, EVENT_FD_READ, fde->private_data);
+ return OOP_CONTINUE;
+ case OOP_WRITE:
+ fde->handler(fde->event_ctx, fde, EVENT_FD_WRITE, fde->private_data);
+ return OOP_CONTINUE;
+ case OOP_EXCEPTION:
+ return OOP_ERROR;
+ case OOP_NUM_EVENTS:
+ return OOP_ERROR;
+ }
+
+ return OOP_ERROR;
+}
+
+/*
+ destroy an fd_event
+ (talloc destructor: cancels the liboop registrations and, with
+ EVENT_FD_AUTOCLOSE, closes the underlying fd)
+*/
+static int oop_event_fd_destructor(struct fd_event *fde)
+{
+ struct event_context *ev = fde->event_ctx;
+ oop_source_sys *oop_sys = ev->additional_data;
+ oop_source *oop = oop_sys_source(oop_sys);
+
+ if (fde->flags & EVENT_FD_READ)
+ oop->cancel_fd(oop, fde->fd, OOP_READ);
+ if (fde->flags & EVENT_FD_WRITE)
+ oop->cancel_fd(oop, fde->fd, OOP_WRITE);
+
+ if (fde->flags & EVENT_FD_AUTOCLOSE) {
+ close(fde->fd);
+ fde->fd = -1;
+ }
+
+ return 0;
+}
+
+/*
+ add a fd based event
+ return NULL on failure (memory allocation error)
+*/
+static struct fd_event *oop_event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
+ int fd, uint16_t flags,
+ event_fd_handler_t handler,
+ void *private_data)
+{
+ struct fd_event *fde;
+ oop_source_sys *oop_sys = ev->additional_data;
+ oop_source *oop = oop_sys_source(oop_sys);
+
+ fde = talloc(mem_ctx?mem_ctx:ev, struct fd_event);
+ if (!fde) return NULL;
+
+ fde->event_ctx = ev;
+ fde->fd = fd;
+ fde->flags = flags;
+ fde->handler = handler;
+ fde->private_data = private_data;
+ fde->additional_flags = 0;
+ fde->additional_data = NULL;
+
+ /* register separately for read and write interest with liboop */
+ if (fde->flags & EVENT_FD_READ)
+ oop->on_fd(oop, fde->fd, OOP_READ, oop_event_fd_handler, fde);
+ if (fde->flags & EVENT_FD_WRITE)
+ oop->on_fd(oop, fde->fd, OOP_WRITE, oop_event_fd_handler, fde);
+
+ talloc_set_destructor(fde, oop_event_fd_destructor);
+
+ return fde;
+}
+
+/*
+ return the fd event flags
+*/
+static uint16_t oop_event_get_fd_flags(struct fd_event *fde)
+{
+ return fde->flags;
+}
+
+/*
+ set the fd event flags
+ (diffs the old and new interest bits and cancels/registers the
+ corresponding liboop callbacks so only the delta is applied)
+*/
+static void oop_event_set_fd_flags(struct fd_event *fde, uint16_t flags)
+{
+ oop_source_sys *oop_sys;
+ oop_source *oop;
+
+ oop_sys = fde->event_ctx->additional_data;
+ oop = oop_sys_source(oop_sys);
+
+ if ((fde->flags & EVENT_FD_READ)&&(!(flags & EVENT_FD_READ)))
+ oop->cancel_fd(oop, fde->fd, OOP_READ);
+
+ if ((!(fde->flags & EVENT_FD_READ))&&(flags & EVENT_FD_READ))
+ oop->on_fd(oop, fde->fd, OOP_READ, oop_event_fd_handler, fde);
+
+ if ((fde->flags & EVENT_FD_WRITE)&&(!(flags & EVENT_FD_WRITE)))
+ oop->cancel_fd(oop, fde->fd, OOP_WRITE);
+
+ if ((!(fde->flags & EVENT_FD_WRITE))&&(flags & EVENT_FD_WRITE))
+ oop->on_fd(oop, fde->fd, OOP_WRITE, oop_event_fd_handler, fde);
+
+ fde->flags = flags;
+}
+
+static int oop_event_timed_destructor(struct timed_event *te);
+
+/* destructor that refuses deallocation (returns -1) -- used to keep
+   the timed event alive while its handler is running */
+static int oop_event_timed_deny_destructor(struct timed_event *te)
+{
+ return -1;
+}
+
+/*
+ liboop time callback: run the handler, then free the one-shot
+ timed event
+*/
+static void *oop_event_timed_handler(oop_source *oop, struct timeval t, void *ptr)
+{
+ struct timed_event *te = ptr;
+
+ /* deny the handler to free the event */
+ talloc_set_destructor(te, oop_event_timed_deny_destructor);
+ te->handler(te->event_ctx, te, t, te->private_data);
+
+ talloc_set_destructor(te, oop_event_timed_destructor);
+ talloc_free(te);
+
+ return OOP_CONTINUE;
+}
+
+/*
+ destroy a timed event
+ (talloc destructor: cancel the pending liboop timer registration)
+*/
+static int oop_event_timed_destructor(struct timed_event *te)
+{
+ struct event_context *ev = te->event_ctx;
+ oop_source_sys *oop_sys = ev->additional_data;
+ oop_source *oop = oop_sys_source(oop_sys);
+
+ oop->cancel_time(oop, te->next_event, oop_event_timed_handler, te);
+
+ return 0;
+}
+
+/*
+ add a timed event
+ return NULL on failure (memory allocation error)
+*/
+static struct timed_event *oop_event_add_timed(struct event_context *ev, TALLOC_CTX *mem_ctx,
+ struct timeval next_event,
+ event_timed_handler_t handler,
+ void *private_data)
+{
+ oop_source_sys *oop_sys = ev->additional_data;
+ oop_source *oop = oop_sys_source(oop_sys);
+ struct timed_event *te;
+
+ te = talloc(mem_ctx?mem_ctx:ev, struct timed_event);
+ if (te == NULL) return NULL;
+
+ te->event_ctx = ev;
+ te->next_event = next_event;
+ te->handler = handler;
+ te->private_data = private_data;
+ te->additional_data = NULL;
+
+ oop->on_time(oop, te->next_event, oop_event_timed_handler, te);
+
+ talloc_set_destructor(te, oop_event_timed_destructor);
+
+ return te;
+}
+
+/*
+ do a single event loop using the events defined in ev
+ (delegates one iteration to liboop; non-OOP_CONTINUE results are
+ mapped to -1)
+*/
+static int oop_event_loop_once(struct event_context *ev)
+{
+ void *oop_ret;
+ oop_source_sys *oop_sys = ev->additional_data;
+
+ oop_ret = oop_sys_run_once(oop_sys);
+ if (oop_ret == OOP_CONTINUE) {
+ return 0;
+ }
+
+ return -1;
+}
+
+/*
+ return on failure or (with 0) if all fd events are removed
+*/
+static int oop_event_loop_wait(struct event_context *ev)
+{
+ void *oop_ret;
+ oop_source_sys *oop_sys = ev->additional_data;
+
+ oop_ret = oop_sys_run(oop_sys);
+ if (oop_ret == OOP_CONTINUE) {
+ return 0;
+ }
+
+ return -1;
+}
+
+/* backend vtable; timed events go through liboop, signals through
+   the common implementation */
+static const struct event_ops event_oop_ops = {
+ .context_init = oop_event_context_init,
+ .add_fd = oop_event_add_fd,
+ .get_fd_flags = oop_event_get_fd_flags,
+ .set_fd_flags = oop_event_set_fd_flags,
+ .add_timed = oop_event_add_timed,
+ .add_signal = common_event_add_signal,
+ .loop_once = oop_event_loop_once,
+ .loop_wait = oop_event_loop_wait,
+};
+
+/* accessor for the (experimental) liboop backend vtable */
+const struct event_ops *event_liboop_get_ops(void)
+{
+ return &event_oop_ops;
+}
--- /dev/null
+/*
+ Unix SMB/CIFS implementation.
+ main select loop and event handling
+ Copyright (C) Andrew Tridgell 2003-2005
+ Copyright (C) Stefan Metzmacher 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+ This is SAMBA's default event loop code
+
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "system/select.h"
+#include "lib/util/dlinklist.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+
+/* private state for the select() backend, stored in
+   event_context.additional_data */
+struct select_event_context {
+ /* a pointer back to the generic event_context */
+ struct event_context *ev;
+
+ /* list of filedescriptor events */
+ struct fd_event *fd_events;
+
+ /* list of timed events */
+ struct timed_event *timed_events;
+
+ /* the maximum file descriptor number in fd_events */
+ int maxfd;
+
+ /* information for exiting from the event loop */
+ int exit_code;
+
+ /* this is incremented when the loop over events causes something which
+ could change the events yet to be processed */
+ uint32_t destruction_count;
+};
+
+/*
+ create a select_event_context structure.
+ (backend hook for event_ops.context_init; returns 0 on success,
+ -1 on OOM)
+*/
+static int select_event_context_init(struct event_context *ev)
+{
+ struct select_event_context *select_ev;
+
+ select_ev = talloc_zero(ev, struct select_event_context);
+ if (!select_ev) return -1;
+ select_ev->ev = ev;
+
+ ev->additional_data = select_ev;
+ return 0;
+}
+
+/*
+ recalculate the maxfd
+ (walks the whole fd_events list; called lazily after maxfd was
+ invalidated by a removal)
+*/
+static void calc_maxfd(struct select_event_context *select_ev)
+{
+ struct fd_event *fde;
+
+ select_ev->maxfd = 0;
+ for (fde = select_ev->fd_events; fde; fde = fde->next) {
+ if (fde->fd > select_ev->maxfd) {
+ select_ev->maxfd = fde->fd;
+ }
+ }
+}
+
+
+/* to mark the ev->maxfd invalid
+ * this means we need to recalculate it
+ */
+#define EVENT_INVALID_MAXFD (-1)
+
+/*
+ destroy an fd_event
+ (talloc destructor: unlinks the fde, invalidates maxfd if it was
+ this fd, and with EVENT_FD_AUTOCLOSE closes the fd)
+*/
+static int select_event_fd_destructor(struct fd_event *fde)
+{
+ struct event_context *ev = fde->event_ctx;
+ struct select_event_context *select_ev = talloc_get_type(ev->additional_data,
+ struct select_event_context);
+
+ if (select_ev->maxfd == fde->fd) {
+ select_ev->maxfd = EVENT_INVALID_MAXFD;
+ }
+
+ DLIST_REMOVE(select_ev->fd_events, fde);
+ select_ev->destruction_count++;
+
+ if (fde->flags & EVENT_FD_AUTOCLOSE) {
+ close(fde->fd);
+ fde->fd = -1;
+ }
+
+ return 0;
+}
+
+/*
+ add a fd based event
+ return NULL on failure (memory allocation error)
+
+ the returned fde is a talloc child of mem_ctx (or ev); freeing it
+ removes the event via select_event_fd_destructor
+*/
+static struct fd_event *select_event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
+ int fd, uint16_t flags,
+ event_fd_handler_t handler,
+ void *private_data)
+{
+ struct select_event_context *select_ev = talloc_get_type(ev->additional_data,
+ struct select_event_context);
+ struct fd_event *fde;
+
+ fde = talloc(mem_ctx?mem_ctx:ev, struct fd_event);
+ if (!fde) return NULL;
+
+ fde->event_ctx = ev;
+ fde->fd = fd;
+ fde->flags = flags;
+ fde->handler = handler;
+ fde->private_data = private_data;
+ fde->additional_flags = 0;
+ fde->additional_data = NULL;
+
+ DLIST_ADD(select_ev->fd_events, fde);
+ if (fde->fd > select_ev->maxfd) {
+ select_ev->maxfd = fde->fd;
+ }
+ talloc_set_destructor(fde, select_event_fd_destructor);
+
+ return fde;
+}
+
+
+/*
+ return the fd event flags
+*/
+static uint16_t select_event_get_fd_flags(struct fd_event *fde)
+{
+ return fde->flags;
+}
+
+/*
+ set the fd event flags
+
+ the select backend rebuilds its fd_sets from fde->flags on every
+ loop iteration, so storing the new flags is all that is needed;
+ the previously computed (and unused) context locals are gone
+*/
+static void select_event_set_fd_flags(struct fd_event *fde, uint16_t flags)
+{
+ fde->flags = flags;
+}
+
+/*
+ event loop handling using select()
+
+ builds read/write fd_sets from the registered fd events, waits at
+ most until the next timed event, then dispatches ready fds.
+ Returns 0 on success, -1 on fatal error (exit_code is set).
+
+ NOTE(review): fds are FD_SET without checking against FD_SETSIZE;
+ presumably callers never exceed it -- verify for large deployments
+*/
+static int select_event_loop_select(struct select_event_context *select_ev, struct timeval *tvalp)
+{
+ fd_set r_fds, w_fds;
+ struct fd_event *fde;
+ int selrtn;
+ /* a handler that adds/removes events bumps destruction_count;
+    we stop walking a possibly stale list then */
+ uint32_t destruction_count = ++select_ev->destruction_count;
+
+ /* we maybe need to recalculate the maxfd */
+ if (select_ev->maxfd == EVENT_INVALID_MAXFD) {
+ calc_maxfd(select_ev);
+ }
+
+ FD_ZERO(&r_fds);
+ FD_ZERO(&w_fds);
+
+ /* setup any fd events */
+ for (fde = select_ev->fd_events; fde; fde = fde->next) {
+ if (fde->flags & EVENT_FD_READ) {
+ FD_SET(fde->fd, &r_fds);
+ }
+ if (fde->flags & EVENT_FD_WRITE) {
+ FD_SET(fde->fd, &w_fds);
+ }
+ }
+
+ if (select_ev->ev->num_signal_handlers &&
+ common_event_check_signal(select_ev->ev)) {
+ return 0;
+ }
+
+ selrtn = select(select_ev->maxfd+1, &r_fds, &w_fds, NULL, tvalp);
+
+ if (selrtn == -1 && errno == EINTR &&
+ select_ev->ev->num_signal_handlers) {
+ common_event_check_signal(select_ev->ev);
+ return 0;
+ }
+
+ if (selrtn == -1 && errno == EBADF) {
+ /* the socket is dead! this should never
+ happen as the socket should have first been
+ made readable and that should have removed
+ the event, so this must be a bug. This is a
+ fatal error. */
+ DEBUG(0,("ERROR: EBADF on select_event_loop_once\n"));
+ select_ev->exit_code = EBADF;
+ return -1;
+ }
+
+ if (selrtn == 0 && tvalp) {
+ /* we don't care about a possible delay here */
+ common_event_loop_timer_delay(select_ev->ev);
+ return 0;
+ }
+
+ if (selrtn > 0) {
+ /* at least one file descriptor is ready - check
+ which ones and call the handler, being careful to allow
+ the handler to remove itself when called */
+ for (fde = select_ev->fd_events; fde; fde = fde->next) {
+ uint16_t flags = 0;
+
+ if (FD_ISSET(fde->fd, &r_fds)) flags |= EVENT_FD_READ;
+ if (FD_ISSET(fde->fd, &w_fds)) flags |= EVENT_FD_WRITE;
+ if (flags) {
+ fde->handler(select_ev->ev, fde, flags, fde->private_data);
+ if (destruction_count != select_ev->destruction_count) {
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ do a single event loop using the events defined in ev
+ (fires any timed event that is already due; otherwise blocks in
+ select() until the next fd event or timer deadline)
+*/
+static int select_event_loop_once(struct event_context *ev)
+{
+ struct select_event_context *select_ev = talloc_get_type(ev->additional_data,
+ struct select_event_context);
+ struct timeval tval;
+
+ tval = common_event_loop_timer_delay(ev);
+ if (timeval_is_zero(&tval)) {
+ /* a timed event was run directly; count that as one iteration */
+ return 0;
+ }
+
+ return select_event_loop_select(select_ev, &tval);
+}
+
+/*
+ return on failure or (with 0) if all fd events are removed
+*/
+static int select_event_loop_wait(struct event_context *ev)
+{
+ struct select_event_context *select_ev = talloc_get_type(ev->additional_data,
+ struct select_event_context);
+ select_ev->exit_code = 0;
+
+ while (select_ev->fd_events && select_ev->exit_code == 0) {
+ if (select_event_loop_once(ev) != 0) {
+ break;
+ }
+ }
+
+ return select_ev->exit_code;
+}
+
+/* backend vtable; timed and signal events use the common helpers */
+static const struct event_ops select_event_ops = {
+ .context_init = select_event_context_init,
+ .add_fd = select_event_add_fd,
+ .get_fd_flags = select_event_get_fd_flags,
+ .set_fd_flags = select_event_set_fd_flags,
+ .add_timed = common_event_add_timed,
+ .add_signal = common_event_add_signal,
+ .loop_once = select_event_loop_once,
+ .loop_wait = select_event_loop_wait,
+};
+
+/* register this backend under the name "select" */
+bool events_select_init(void)
+{
+ return event_register_backend("select", &select_event_ops);
+}
+
+/* Samba4 module entry point wrapping events_select_init() in NTSTATUS */
+#if _SAMBA_BUILD_
+NTSTATUS s4_events_select_init(void)
+{
+ if (!events_select_init()) {
+ return NT_STATUS_INTERNAL_ERROR;
+ }
+ return NT_STATUS_OK;
+}
+#endif
--- /dev/null
+/*
+ Unix SMB/CIFS implementation.
+
+ common events code for signal events
+
+ Copyright (C) Andrew Tridgell 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "system/select.h"
+#include "system/wait.h"
+#include "lib/util/dlinklist.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+
+/* highest signal number handled by this table */
+#define NUM_SIGNALS 64
+
+/* maximum number of SA_SIGINFO signals to hold in the queue */
+#define SA_INFO_QUEUE_COUNT 10
+
+/* count/seen pair; the difference is the number of unprocessed
+   deliveries. Both wrap naturally as uint32_t */
+struct sigcounter {
+ uint32_t count;
+ uint32_t seen;
+};
+
+#define SIG_INCREMENT(s) (s).count++
+#define SIG_SEEN(s, n) (s).seen += (n)
+#define SIG_PENDING(s) ((s).seen != (s).count)
+
+
+/*
+ the poor design of signals means that this table must be static global
+*/
+static struct sig_state {
+ struct signal_event *sig_handlers[NUM_SIGNALS+1];
+ struct sigaction *oldact[NUM_SIGNALS+1];
+ struct sigcounter signal_count[NUM_SIGNALS+1];
+ struct sigcounter got_signal;
+ int pipe_hack[2];
+#ifdef SA_SIGINFO
+ /* with SA_SIGINFO we get quite a lot of info per signal */
+ siginfo_t *sig_info[NUM_SIGNALS+1];
+ struct sigcounter sig_blocked[NUM_SIGNALS+1];
+#endif
+} *sig_state;
+
+/*
+ return number of sigcounter events not processed yet
+ (handles the wrap-around case where count has overflowed past seen)
+*/
+static uint32_t sig_count(struct sigcounter s)
+{
+ if (s.count >= s.seen) {
+ return s.count - s.seen;
+ }
+ return 1 + (0xFFFFFFFF & ~(s.seen - s.count));
+}
+
+/*
+ signal handler - redirects to registered signals
+ (async-signal context: only bumps counters and pokes the wakeup
+ pipe so the event loop notices the delivery)
+*/
+static void signal_handler(int signum)
+{
+ char c = 0;
+ SIG_INCREMENT(sig_state->signal_count[signum]);
+ SIG_INCREMENT(sig_state->got_signal);
+ /* doesn't matter if this pipe overflows; the write() result is
+    deliberately ignored (may trip warn_unused_result warnings) */
+ write(sig_state->pipe_hack[1], &c, 1);
+}
+
+#ifdef SA_SIGINFO
+/*
+ signal handler with SA_SIGINFO - redirects to registered signals
+ (queues the siginfo_t and blocks the signal once the per-signal
+ queue of SA_INFO_QUEUE_COUNT entries is full; unblocked again by
+ the consumer side)
+*/
+static void signal_handler_info(int signum, siginfo_t *info, void *uctx)
+{
+ uint32_t count = sig_count(sig_state->signal_count[signum]);
+ sig_state->sig_info[signum][count] = *info;
+
+ signal_handler(signum);
+
+ /* handle SA_SIGINFO */
+ if (count+1 == SA_INFO_QUEUE_COUNT) {
+ /* we've filled the info array - block this signal until
+ these ones are delivered */
+ sigset_t set;
+ sigemptyset(&set);
+ sigaddset(&set, signum);
+ sigprocmask(SIG_BLOCK, &set, NULL);
+ SIG_INCREMENT(sig_state->sig_blocked[signum]);
+ }
+}
+#endif
+
+/*
+ destroy a signal event
+ (talloc destructor: unlinks the handler and, when it was the last
+ one for this signum, restores the previous sigaction and frees the
+ SA_SIGINFO queue)
+*/
+static int signal_event_destructor(struct signal_event *se)
+{
+ se->event_ctx->num_signal_handlers--;
+ DLIST_REMOVE(sig_state->sig_handlers[se->signum], se);
+ if (sig_state->sig_handlers[se->signum] == NULL) {
+ /* restore old handler, if any */
+ sigaction(se->signum, sig_state->oldact[se->signum], NULL);
+ sig_state->oldact[se->signum] = NULL;
+#ifdef SA_SIGINFO
+ if (se->sa_flags & SA_SIGINFO) {
+ talloc_free(sig_state->sig_info[se->signum]);
+ sig_state->sig_info[se->signum] = NULL;
+ }
+#endif
+ }
+ return 0;
+}
+
+/*
+ this is part of the pipe hack needed to avoid the signal race condition
+ (drains the wakeup pipe; the actual signal dispatch happens in
+ common_event_check_signal)
+*/
+static void signal_pipe_handler(struct event_context *ev, struct fd_event *fde,
+ uint16_t flags, void *private)
+{
+ char c[16];
+ /* it's non-blocking, doesn't matter if we read too much */
+ read(sig_state->pipe_hack[0], c, sizeof(c));
+}
+
+/*
+ add a signal event
+ return NULL on failure (memory allocation error)
+
+ installs a process-wide sigaction for signum on first registration
+ and multiplexes all event contexts through the global sig_state.
+ The returned event is removed by freeing it (talloc destructor).
+*/
+struct signal_event *common_event_add_signal(struct event_context *ev,
+ TALLOC_CTX *mem_ctx,
+ int signum,
+ int sa_flags,
+ event_signal_handler_t handler,
+ void *private_data)
+{
+ struct signal_event *se;
+
+ if (signum >= NUM_SIGNALS) {
+ return NULL;
+ }
+
+ /* the sig_state needs to be on a global context as it can last across
+ multiple event contexts */
+ if (sig_state == NULL) {
+ sig_state = talloc_zero(talloc_autofree_context(), struct sig_state);
+ if (sig_state == NULL) {
+ return NULL;
+ }
+ }
+
+ se = talloc(mem_ctx?mem_ctx:ev, struct signal_event);
+ if (se == NULL) return NULL;
+
+ se->event_ctx = ev;
+ se->handler = handler;
+ se->private_data = private_data;
+ se->signum = signum;
+ se->sa_flags = sa_flags;
+
+ /* Ensure, no matter the destruction order, that we always have a handle on the global sig_state */
+ if (!talloc_reference(se, sig_state)) {
+ return NULL;
+ }
+
+ /* only install a signal handler if not already installed */
+ if (sig_state->sig_handlers[signum] == NULL) {
+ struct sigaction act;
+ ZERO_STRUCT(act);
+ act.sa_handler = signal_handler;
+ act.sa_flags = sa_flags;
+#ifdef SA_SIGINFO
+ if (sa_flags & SA_SIGINFO) {
+ act.sa_handler = NULL;
+ act.sa_sigaction = signal_handler_info;
+ if (sig_state->sig_info[signum] == NULL) {
+ sig_state->sig_info[signum] = talloc_array(sig_state, siginfo_t, SA_INFO_QUEUE_COUNT);
+ if (sig_state->sig_info[signum] == NULL) {
+ talloc_free(se);
+ return NULL;
+ }
+ }
+ }
+#endif
+ sig_state->oldact[signum] = talloc(sig_state, struct sigaction);
+ if (sig_state->oldact[signum] == NULL) {
+ talloc_free(se);
+ return NULL;
+ }
+ if (sigaction(signum, &act, sig_state->oldact[signum]) == -1) {
+ talloc_free(se);
+ return NULL;
+ }
+ }
+
+ DLIST_ADD(sig_state->sig_handlers[signum], se);
+
+ talloc_set_destructor(se, signal_event_destructor);
+
+ /* we need to setup the pipe hack handler if not already
+ setup */
+ if (ev->pipe_fde == NULL) {
+ /* NOTE(review): "not yet created" is detected by both fds being
+    0, but 0 is a valid fd and pipe() / event_add_fd results are
+    unchecked here -- worth initialising pipe_hack to {-1,-1} and
+    checking for failure */
+ if (sig_state->pipe_hack[0] == 0 &&
+ sig_state->pipe_hack[1] == 0) {
+ pipe(sig_state->pipe_hack);
+ set_blocking(sig_state->pipe_hack[0], False);
+ set_blocking(sig_state->pipe_hack[1], False);
+ }
+ ev->pipe_fde = event_add_fd(ev, ev, sig_state->pipe_hack[0],
+ EVENT_FD_READ, signal_pipe_handler, NULL);
+ }
+ ev->num_signal_handlers++;
+
+ return se;
+}
+
+
+/*
+  check if a signal is pending
+  return != 0 if a signal was pending
+
+  delivers every signal that fired since the last call to all
+  registered handlers, using the counters maintained by the async
+  signal handlers
+*/
+int common_event_check_signal(struct event_context *ev)
+{
+ int i;
+
+ if (!sig_state || !SIG_PENDING(sig_state->got_signal)) {
+ return 0;
+ }
+
+ /* NOTE(review): iterates to NUM_SIGNALS inclusive - assumes the
+    sig_state arrays are sized NUM_SIGNALS+1; confirm against the
+    struct sig_state declaration */
+ for (i=0;i<NUM_SIGNALS+1;i++) {
+ struct signal_event *se, *next;
+ struct sigcounter counter = sig_state->signal_count[i];
+ uint32_t count = sig_count(counter);
+
+ if (count == 0) {
+ continue;
+ }
+ for (se=sig_state->sig_handlers[i];se;se=next) {
+ /* the SA_RESETHAND case below may free se, so grab the
+    next pointer before invoking the handler */
+ next = se->next;
+#ifdef SA_SIGINFO
+ if (se->sa_flags & SA_SIGINFO) {
+ int j;
+ /* one callback per queued siginfo entry */
+ for (j=0;j<count;j++) {
+ /* note the use of the sig_info array as a
+ ring buffer */
+ int ofs = (counter.count + j) % SA_INFO_QUEUE_COUNT;
+ se->handler(ev, se, i, 1,
+ (void*)&sig_state->sig_info[i][ofs],
+ se->private_data);
+ }
+ if (SIG_PENDING(sig_state->sig_blocked[i])) {
+ /* we'd filled the queue, unblock the
+ signal now */
+ sigset_t set;
+ sigemptyset(&set);
+ sigaddset(&set, i);
+ SIG_SEEN(sig_state->sig_blocked[i],
+ sig_count(sig_state->sig_blocked[i]));
+ sigprocmask(SIG_UNBLOCK, &set, NULL);
+ }
+ if (se->sa_flags & SA_RESETHAND) {
+ /* one-shot handler: remove after delivery */
+ talloc_free(se);
+ }
+ continue;
+ }
+#endif
+ /* non-SA_SIGINFO handlers get the aggregate count in one call */
+ se->handler(ev, se, i, count, NULL, se->private_data);
+ if (se->sa_flags & SA_RESETHAND) {
+ talloc_free(se);
+ }
+ }
+ SIG_SEEN(sig_state->signal_count[i], count);
+ SIG_SEEN(sig_state->got_signal, count);
+ }
+
+ return 1;
+}
--- /dev/null
+/*
+ Unix SMB/CIFS implementation.
+ main select loop and event handling
+ Copyright (C) Andrew Tridgell 2003-2005
+ Copyright (C) Stefan Metzmacher 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+ This is SAMBA's default event loop code
+
+ - we try to use epoll if configure detected support for it
+ otherwise we use select()
+ - if epoll is broken on the system or the kernel doesn't support it
+ at runtime we fallback to select()
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "system/network.h"
+#include "system/select.h" /* needed for HAVE_EVENTS_EPOLL */
+#include "lib/util/dlinklist.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+
+/* private state of the standard (select/epoll) backend, hung off
+   event_context->additional_data */
+struct std_event_context {
+ /* a pointer back to the generic event_context */
+ struct event_context *ev;
+
+ /* list of filedescriptor events */
+ struct fd_event *fd_events;
+
+ /* the maximum file descriptor number in fd_events */
+ int maxfd;
+
+ /* information for exiting from the event loop */
+ int exit_code;
+
+ /* this is changed by the destructors for the fd event
+ type. It is used to detect event destruction by event
+ handlers, which means the code that is calling the event
+ handler needs to assume that the linked list is no longer
+ valid
+ */
+ uint32_t destruction_count;
+
+ /* when using epoll this is the handle from epoll_create;
+ -1 means epoll is unavailable and select() is used */
+ int epoll_fd;
+
+ /* our pid at the time the epoll_fd was created */
+ pid_t pid;
+};
+
+/* use epoll if it is available */
+#if HAVE_EVENTS_EPOLL
+/*
+ called when a epoll call fails, and we should fallback
+ to using select
+
+ closes the epoll fd and removes the context destructor (which only
+ exists to close that fd); from here on every epoll_* helper becomes
+ a no-op because epoll_fd is -1
+*/
+static void epoll_fallback_to_select(struct std_event_context *std_ev, const char *reason)
+{
+ DEBUG(0,("%s (%s) - falling back to select()\n", reason, strerror(errno)));
+ close(std_ev->epoll_fd);
+ std_ev->epoll_fd = -1;
+ talloc_set_destructor(std_ev, NULL);
+}
+
+/*
+  translate EVENT_FD_* interest flags into an epoll event mask.
+  EPOLLERR/EPOLLHUP are folded in whenever any interest exists,
+  mirroring what epoll reports unconditionally.
+*/
+static uint32_t epoll_map_flags(uint16_t flags)
+{
+    uint32_t events = 0;
+
+    if (flags & EVENT_FD_READ) {
+        events |= EPOLLIN | EPOLLERR | EPOLLHUP;
+    }
+    if (flags & EVENT_FD_WRITE) {
+        events |= EPOLLOUT | EPOLLERR | EPOLLHUP;
+    }
+
+    return events;
+}
+
+/*
+  talloc destructor: release the epoll fd (if still open) when the
+  std_event_context is freed
+*/
+static int epoll_ctx_destructor(struct std_event_context *std_ev)
+{
+    int fd = std_ev->epoll_fd;
+
+    std_ev->epoll_fd = -1;
+    if (fd != -1) {
+        close(fd);
+    }
+    return 0;
+}
+
+/*
+ init the epoll fd
+
+ if epoll_create() fails, epoll_fd stays -1 and all epoll helpers
+ become no-ops, so the backend transparently uses select() instead
+*/
+static void epoll_init_ctx(struct std_event_context *std_ev)
+{
+ std_ev->epoll_fd = epoll_create(64);
+ std_ev->pid = getpid();
+ talloc_set_destructor(std_ev, epoll_ctx_destructor);
+}
+
+static void epoll_add_event(struct std_event_context *std_ev, struct fd_event *fde);
+
+/*
+  reopen the epoll handle when our pid changes
+  see http://junkcode.samba.org/ftp/unpacked/junkcode/epoll_fork.c for an
+  demonstration of why this is needed
+ */
+static void epoll_check_reopen(struct std_event_context *std_ev)
+{
+    struct fd_event *fde;
+
+    /* once we have fallen back to select() never resurrect epoll:
+       the context destructor was removed in epoll_fallback_to_select(),
+       so a recreated fd would be leaked, and we would also be calling
+       close(-1) below */
+    if (std_ev->epoll_fd == -1) {
+        return;
+    }
+
+    if (std_ev->pid == getpid()) {
+        return;
+    }
+
+    close(std_ev->epoll_fd);
+    std_ev->epoll_fd = epoll_create(64);
+    if (std_ev->epoll_fd == -1) {
+        DEBUG(0,("Failed to recreate epoll handle after fork\n"));
+        return;
+    }
+    std_ev->pid = getpid();
+
+    /* re-register every fd event with the fresh epoll instance */
+    for (fde=std_ev->fd_events;fde;fde=fde->next) {
+        epoll_add_event(std_ev, fde);
+    }
+}
+
+#define EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT (1<<0)
+#define EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR (1<<1)
+#define EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR (1<<2)
+
+/*
+  add the epoll event to the given fd_event
+*/
+static void epoll_add_event(struct std_event_context *std_ev, struct fd_event *fde)
+{
+    struct epoll_event event;
+    if (std_ev->epoll_fd == -1) return;
+
+    fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+    /* if we don't want events yet, don't add an epoll_event */
+    if (fde->flags == 0) return;
+
+    ZERO_STRUCT(event);
+    event.events = epoll_map_flags(fde->flags);
+    event.data.ptr = fde;
+    if (epoll_ctl(std_ev->epoll_fd, EPOLL_CTL_ADD, fde->fd, &event) != 0) {
+        epoll_fallback_to_select(std_ev, "EPOLL_CTL_ADD failed");
+        /* the fd was never registered - returning here avoids
+           marking it with HAS_EVENT and later trying to
+           EPOLL_CTL_DEL an event that does not exist */
+        return;
+    }
+    fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
+
+    /* only if we want to read we want to tell the event handler about errors */
+    if (fde->flags & EVENT_FD_READ) {
+        fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+    }
+}
+
+/*
+ delete the epoll event for given fd_event
+
+ no-op when epoll is disabled or when the fde was never registered
+ with epoll (HAS_EVENT not set)
+*/
+static void epoll_del_event(struct std_event_context *std_ev, struct fd_event *fde)
+{
+ struct epoll_event event;
+ if (std_ev->epoll_fd == -1) return;
+
+ fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+ /* if there's no epoll_event, we don't need to delete it */
+ if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT)) return;
+
+ ZERO_STRUCT(event);
+ event.events = epoll_map_flags(fde->flags);
+ event.data.ptr = fde;
+ /* failure is deliberately ignored: the fd may already be closed */
+ epoll_ctl(std_ev->epoll_fd, EPOLL_CTL_DEL, fde->fd, &event);
+ fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT;
+}
+
+/*
+  change the epoll event to the given fd_event
+*/
+static void epoll_mod_event(struct std_event_context *std_ev, struct fd_event *fde)
+{
+    struct epoll_event event;
+    if (std_ev->epoll_fd == -1) return;
+
+    fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+    ZERO_STRUCT(event);
+    event.events = epoll_map_flags(fde->flags);
+    event.data.ptr = fde;
+    if (epoll_ctl(std_ev->epoll_fd, EPOLL_CTL_MOD, fde->fd, &event) != 0) {
+        epoll_fallback_to_select(std_ev, "EPOLL_CTL_MOD failed");
+        /* epoll is gone now; don't touch the epoll bookkeeping
+           flags on the fde any further */
+        return;
+    }
+
+    /* only if we want to read we want to tell the event handler about errors */
+    if (fde->flags & EVENT_FD_READ) {
+        fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+    }
+}
+
+/* reconcile the epoll registration with the fde's current flags:
+   add, modify or remove the epoll_event as needed */
+static void epoll_change_event(struct std_event_context *std_ev, struct fd_event *fde)
+{
+ bool got_error = (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR);
+ bool want_read = (fde->flags & EVENT_FD_READ);
+ bool want_write= (fde->flags & EVENT_FD_WRITE);
+
+ if (std_ev->epoll_fd == -1) return;
+
+ fde->additional_flags &= ~EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR;
+
+ /* there's already an event */
+ if (fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_HAS_EVENT) {
+ /* write-only interest on an fd that already errored is
+    pointless - it would just spin on EPOLLERR/EPOLLHUP */
+ if (want_read || (want_write && !got_error)) {
+ epoll_mod_event(std_ev, fde);
+ return;
+ }
+ /*
+ * if we want to match the select behavior, we need to remove the epoll_event
+ * when the caller isn't interested in events.
+ *
+ * this is because epoll reports EPOLLERR and EPOLLHUP, even without asking for them
+ */
+ epoll_del_event(std_ev, fde);
+ return;
+ }
+
+ /* there's no epoll_event attached to the fde */
+ if (want_read || (want_write && !got_error)) {
+ epoll_add_event(std_ev, fde);
+ return;
+ }
+}
+
+/*
+ event loop handling using epoll
+
+ returns 0 on success (events, timeout or signals handled), -1 when
+ epoll is unavailable or broke, in which case the caller falls back
+ to the select() loop
+*/
+static int epoll_event_loop(struct std_event_context *std_ev, struct timeval *tvalp)
+{
+ int ret, i;
+#define MAXEVENTS 8
+ struct epoll_event events[MAXEVENTS];
+ /* snapshot to detect handlers destroying fd events under us */
+ uint32_t destruction_count = ++std_ev->destruction_count;
+ int timeout = -1;
+
+ if (std_ev->epoll_fd == -1) return -1;
+
+ if (tvalp) {
+ /* it's better to trigger timed events a bit later than too early */
+ timeout = ((tvalp->tv_usec+999) / 1000) + (tvalp->tv_sec*1000);
+ }
+
+ /* handle signals that arrived before we block in epoll_wait */
+ if (std_ev->ev->num_signal_handlers &&
+ common_event_check_signal(std_ev->ev)) {
+ return 0;
+ }
+
+ ret = epoll_wait(std_ev->epoll_fd, events, MAXEVENTS, timeout);
+
+ if (ret == -1 && errno == EINTR && std_ev->ev->num_signal_handlers) {
+ if (common_event_check_signal(std_ev->ev)) {
+ return 0;
+ }
+ }
+
+ if (ret == -1 && errno != EINTR) {
+ epoll_fallback_to_select(std_ev, "epoll_wait() failed");
+ return -1;
+ }
+
+ if (ret == 0 && tvalp) {
+ /* we don't care about a possible delay here */
+ common_event_loop_timer_delay(std_ev->ev);
+ return 0;
+ }
+
+ for (i=0;i<ret;i++) {
+ struct fd_event *fde = talloc_get_type(events[i].data.ptr,
+ struct fd_event);
+ uint16_t flags = 0;
+
+ if (fde == NULL) {
+ epoll_fallback_to_select(std_ev, "epoll_wait() gave bad data");
+ return -1;
+ }
+ if (events[i].events & (EPOLLHUP|EPOLLERR)) {
+ fde->additional_flags |= EPOLL_ADDITIONAL_FD_FLAG_GOT_ERROR;
+ /*
+ * if we only wait for EVENT_FD_WRITE, we should not tell the
+ * event handler about it, and remove the epoll_event,
+ * as we only report errors when waiting for read events,
+ * to match the select() behavior
+ */
+ if (!(fde->additional_flags & EPOLL_ADDITIONAL_FD_FLAG_REPORT_ERROR)) {
+ epoll_del_event(std_ev, fde);
+ continue;
+ }
+ /* errors are surfaced to the caller as readability */
+ flags |= EVENT_FD_READ;
+ }
+ if (events[i].events & EPOLLIN) flags |= EVENT_FD_READ;
+ if (events[i].events & EPOLLOUT) flags |= EVENT_FD_WRITE;
+ if (flags) {
+ fde->handler(std_ev->ev, fde, flags, fde->private_data);
+ if (destruction_count != std_ev->destruction_count) {
+ /* a handler destroyed an fd event; the remaining
+    events[] entries may dangle, so stop here */
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+#else
+#define epoll_init_ctx(std_ev)
+#define epoll_add_event(std_ev,fde)
+#define epoll_del_event(std_ev,fde)
+#define epoll_change_event(std_ev,fde)
+#define epoll_event_loop(std_ev,tvalp) (-1)
+#define epoll_check_reopen(std_ev)
+#endif
+
+/*
+  create the private std_event_context state and attach it to the
+  generic event_context; epoll is probed here and select() remains
+  the fallback
+*/
+static int std_event_context_init(struct event_context *ev)
+{
+    struct std_event_context *std_ev = talloc_zero(ev, struct std_event_context);
+
+    if (std_ev == NULL) {
+        return -1;
+    }
+
+    std_ev->ev = ev;
+    std_ev->epoll_fd = -1;
+    epoll_init_ctx(std_ev);
+
+    ev->additional_data = std_ev;
+    return 0;
+}
+
+/*
+  recalculate maxfd by scanning every registered fd event
+*/
+static void calc_maxfd(struct std_event_context *std_ev)
+{
+    struct fd_event *cur;
+    int highest = 0;
+
+    for (cur = std_ev->fd_events; cur != NULL; cur = cur->next) {
+        if (cur->fd > highest) {
+            highest = cur->fd;
+        }
+    }
+
+    std_ev->maxfd = highest;
+}
+
+
+/* to mark the ev->maxfd invalid
+ * this means we need to recalculate it
+ */
+#define EVENT_INVALID_MAXFD (-1)
+
+/*
+ destroy an fd_event
+
+ unlinks it from the backend state, drops the epoll registration and
+ bumps destruction_count so running event loops know the list changed
+*/
+static int std_event_fd_destructor(struct fd_event *fde)
+{
+ struct event_context *ev = fde->event_ctx;
+ struct std_event_context *std_ev = talloc_get_type(ev->additional_data,
+ struct std_event_context);
+
+ epoll_check_reopen(std_ev);
+
+ if (std_ev->maxfd == fde->fd) {
+ /* lazily recalculated on the next select() pass */
+ std_ev->maxfd = EVENT_INVALID_MAXFD;
+ }
+
+ DLIST_REMOVE(std_ev->fd_events, fde);
+ std_ev->destruction_count++;
+
+ epoll_del_event(std_ev, fde);
+
+ if (fde->flags & EVENT_FD_AUTOCLOSE) {
+ close(fde->fd);
+ fde->fd = -1;
+ }
+
+ return 0;
+}
+
+/*
+ add a fd based event
+ return NULL on failure (memory allocation error)
+
+ the returned event is owned by mem_ctx (or ev when mem_ctx is NULL);
+ freeing it unregisters the fd via std_event_fd_destructor
+*/
+static struct fd_event *std_event_add_fd(struct event_context *ev, TALLOC_CTX *mem_ctx,
+ int fd, uint16_t flags,
+ event_fd_handler_t handler,
+ void *private_data)
+{
+ struct std_event_context *std_ev = talloc_get_type(ev->additional_data,
+ struct std_event_context);
+ struct fd_event *fde;
+
+ epoll_check_reopen(std_ev);
+
+ fde = talloc(mem_ctx?mem_ctx:ev, struct fd_event);
+ if (!fde) return NULL;
+
+ fde->event_ctx = ev;
+ fde->fd = fd;
+ fde->flags = flags;
+ fde->handler = handler;
+ fde->private_data = private_data;
+ fde->additional_flags = 0;
+ fde->additional_data = NULL;
+
+ DLIST_ADD(std_ev->fd_events, fde);
+ /* keep maxfd current unless it is already marked invalid */
+ if ((std_ev->maxfd != EVENT_INVALID_MAXFD)
+ && (fde->fd > std_ev->maxfd)) {
+ std_ev->maxfd = fde->fd;
+ }
+ talloc_set_destructor(fde, std_event_fd_destructor);
+
+ epoll_add_event(std_ev, fde);
+
+ return fde;
+}
+
+
+/*
+ return the fd event flags (the EVENT_FD_* interest mask currently
+ registered for this event)
+*/
+static uint16_t std_event_get_fd_flags(struct fd_event *fde)
+{
+ return fde->flags;
+}
+
+/*
+  set the fd event flags, keeping the epoll registration in sync
+  with the new EVENT_FD_* interest mask
+*/
+static void std_event_set_fd_flags(struct fd_event *fde, uint16_t flags)
+{
+    struct std_event_context *std_ev;
+
+    if (flags == fde->flags) {
+        return;
+    }
+
+    std_ev = talloc_get_type(fde->event_ctx->additional_data,
+                             struct std_event_context);
+
+    fde->flags = flags;
+
+    epoll_check_reopen(std_ev);
+    epoll_change_event(std_ev, fde);
+}
+
+/*
+ event loop handling using select()
+
+ returns 0 on success, -1 on fatal error (exit_code is set)
+
+ NOTE(review): fds are passed to FD_SET without a bound check -
+ an fd >= FD_SETSIZE would be undefined behavior; presumably callers
+ never exceed it here, but worth confirming
+*/
+static int std_event_loop_select(struct std_event_context *std_ev, struct timeval *tvalp)
+{
+ fd_set r_fds, w_fds;
+ struct fd_event *fde;
+ int selrtn;
+ /* snapshot to detect handlers destroying fd events under us */
+ uint32_t destruction_count = ++std_ev->destruction_count;
+
+ /* we maybe need to recalculate the maxfd */
+ if (std_ev->maxfd == EVENT_INVALID_MAXFD) {
+ calc_maxfd(std_ev);
+ }
+
+ FD_ZERO(&r_fds);
+ FD_ZERO(&w_fds);
+
+ /* setup any fd events */
+ for (fde = std_ev->fd_events; fde; fde = fde->next) {
+ if (fde->flags & EVENT_FD_READ) {
+ FD_SET(fde->fd, &r_fds);
+ }
+ if (fde->flags & EVENT_FD_WRITE) {
+ FD_SET(fde->fd, &w_fds);
+ }
+ }
+
+ /* handle signals that arrived before we block in select */
+ if (std_ev->ev->num_signal_handlers &&
+ common_event_check_signal(std_ev->ev)) {
+ return 0;
+ }
+
+ selrtn = select(std_ev->maxfd+1, &r_fds, &w_fds, NULL, tvalp);
+
+ if (selrtn == -1 && errno == EINTR &&
+ std_ev->ev->num_signal_handlers) {
+ common_event_check_signal(std_ev->ev);
+ return 0;
+ }
+
+ if (selrtn == -1 && errno == EBADF) {
+ /* the socket is dead! this should never
+ happen as the socket should have first been
+ made readable and that should have removed
+ the event, so this must be a bug. This is a
+ fatal error. */
+ DEBUG(0,("ERROR: EBADF on std_event_loop_once\n"));
+ std_ev->exit_code = EBADF;
+ return -1;
+ }
+
+ if (selrtn == 0 && tvalp) {
+ /* we don't care about a possible delay here */
+ common_event_loop_timer_delay(std_ev->ev);
+ return 0;
+ }
+
+ if (selrtn > 0) {
+ /* at least one file descriptor is ready - check
+ which ones and call the handler, being careful to allow
+ the handler to remove itself when called */
+ for (fde = std_ev->fd_events; fde; fde = fde->next) {
+ uint16_t flags = 0;
+
+ if (FD_ISSET(fde->fd, &r_fds)) flags |= EVENT_FD_READ;
+ if (FD_ISSET(fde->fd, &w_fds)) flags |= EVENT_FD_WRITE;
+ if (flags) {
+ fde->handler(std_ev->ev, fde, flags, fde->private_data);
+ if (destruction_count != std_ev->destruction_count) {
+ /* list may be invalid now; stop iterating */
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ do a single event loop using the events defined in ev
+
+ first fires any due timed event, then tries the epoll loop and
+ falls back to select() if epoll is unavailable or failed
+*/
+static int std_event_loop_once(struct event_context *ev)
+{
+ struct std_event_context *std_ev = talloc_get_type(ev->additional_data,
+ struct std_event_context);
+ struct timeval tval;
+
+ tval = common_event_loop_timer_delay(ev);
+ if (timeval_is_zero(&tval)) {
+ /* a timed event was triggered - that counts as this
+ iteration's work */
+ return 0;
+ }
+
+ epoll_check_reopen(std_ev);
+
+ if (epoll_event_loop(std_ev, &tval) == 0) {
+ return 0;
+ }
+
+ return std_event_loop_select(std_ev, &tval);
+}
+
+/*
+  run the event loop until no fd events remain, the exit code is set,
+  or an iteration fails; returns the exit code (0 on clean finish)
+*/
+static int std_event_loop_wait(struct event_context *ev)
+{
+    struct std_event_context *std_ev = talloc_get_type(ev->additional_data,
+                                                       struct std_event_context);
+
+    std_ev->exit_code = 0;
+
+    for (;;) {
+        if (std_ev->fd_events == NULL || std_ev->exit_code != 0) {
+            break;
+        }
+        if (std_event_loop_once(ev) != 0) {
+            break;
+        }
+    }
+
+    return std_ev->exit_code;
+}
+
+/* backend dispatch table registered under the name "standard";
+   timed and signal events are shared with the other backends via
+   the common_event_* helpers */
+static const struct event_ops std_event_ops = {
+ .context_init = std_event_context_init,
+ .add_fd = std_event_add_fd,
+ .get_fd_flags = std_event_get_fd_flags,
+ .set_fd_flags = std_event_set_fd_flags,
+ .add_timed = common_event_add_timed,
+ .add_signal = common_event_add_signal,
+ .loop_once = std_event_loop_once,
+ .loop_wait = std_event_loop_wait,
+};
+
+
+/* register the "standard" backend; returns false if registration fails */
+bool events_standard_init(void)
+{
+ return event_register_backend("standard", &std_event_ops);
+}
+
+#if _SAMBA_BUILD_
+/* samba4 module init wrapper: maps the bool result onto NTSTATUS */
+NTSTATUS s4_events_standard_init(void)
+{
+ if (!events_standard_init()) {
+ return NT_STATUS_INTERNAL_ERROR;
+ }
+ return NT_STATUS_OK;
+}
+#endif
--- /dev/null
+/*
+ Unix SMB/CIFS implementation.
+
+ common events code for timed events
+
+ Copyright (C) Andrew Tridgell 2003-2006
+ Copyright (C) Stefan Metzmacher 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "system/select.h"
+#include "lib/util/dlinklist.h"
+#include "lib/events/events.h"
+#include "lib/events/events_internal.h"
+
+/* a single timed event; kept in event_context->timed_events, ordered
+   by next_event (earliest first) */
+struct timed_event {
+ struct timed_event *prev, *next;
+ struct event_context *event_ctx;
+ /* absolute time at which the handler should fire */
+ struct timeval next_event;
+ event_timed_handler_t handler;
+ /* this is private for the specific handler */
+ void *private_data;
+ /* this is private for the events_ops implementation */
+ void *additional_data;
+};
+
+/*
+  talloc destructor for a timed event: unlink it from the event
+  context's ordered list
+*/
+static int common_event_timed_destructor(struct timed_event *te)
+{
+    struct event_context *ctx = talloc_get_type(te->event_ctx,
+                                                struct event_context);
+
+    DLIST_REMOVE(ctx->timed_events, te);
+    return 0;
+}
+
+/* destructor that refuses destruction (returns -1); installed
+   temporarily while a timed event's handler runs so the handler
+   cannot free the event out from under the loop */
+static int common_event_timed_deny_destructor(struct timed_event *te)
+{
+ return -1;
+}
+
+/*
+ add a timed event
+ return NULL on failure (memory allocation error)
+
+ the event fires once at next_event; it is owned by mem_ctx (or ev
+ when mem_ctx is NULL) and freeing it cancels it
+*/
+struct timed_event *common_event_add_timed(struct event_context *ev, TALLOC_CTX *mem_ctx,
+ struct timeval next_event,
+ event_timed_handler_t handler,
+ void *private_data)
+{
+ struct timed_event *te, *last_te, *cur_te;
+
+ te = talloc(mem_ctx?mem_ctx:ev, struct timed_event);
+ if (te == NULL) return NULL;
+
+ te->event_ctx = ev;
+ te->next_event = next_event;
+ te->handler = handler;
+ te->private_data = private_data;
+ te->additional_data = NULL;
+
+ /* keep the list ordered */
+ last_te = NULL;
+ for (cur_te = ev->timed_events; cur_te; cur_te = cur_te->next) {
+ /* if the new event comes before the current one break */
+ if (timeval_compare(&te->next_event, &cur_te->next_event) < 0) {
+ break;
+ }
+
+ last_te = cur_te;
+ }
+
+ /* insert after the last event that is not later than us
+ (NULL means list head) */
+ DLIST_ADD_AFTER(ev->timed_events, te, last_te);
+
+ talloc_set_destructor(te, common_event_timed_destructor);
+
+ return te;
+}
+
+/*
+  do a single event loop using the events defined in ev
+
+  return the delay until the next timed event,
+  or zero if a timed event was triggered
+*/
+struct timeval common_event_loop_timer_delay(struct event_context *ev)
+{
+    struct timeval current_time = timeval_zero();
+    struct timed_event *te = ev->timed_events;
+
+    if (!te) {
+        /* have a default tick time of 30 seconds. This guarantees
+           that code that uses its own timeout checking will be
+           able to proceed eventually */
+        return timeval_set(30, 0);
+    }
+
+    /*
+     * work out the right timeout for the next timed event
+     *
+     * avoid the syscall to gettimeofday() if the timed event should
+     * be triggered directly
+     *
+     * if there's a delay till the next timed event, we're done
+     * with just returning the delay
+     */
+    if (!timeval_is_zero(&te->next_event)) {
+        struct timeval delay;
+
+        current_time = timeval_current();
+
+        /* fixed: "&current_time" had been mangled into the
+           "&curren" HTML entity in the previous revision */
+        delay = timeval_until(&current_time, &te->next_event);
+        if (!timeval_is_zero(&delay)) {
+            return delay;
+        }
+    }
+
+    /*
+     * ok, we have a timed event that we'll process ...
+     */
+
+    /* deny the handler to free the event */
+    talloc_set_destructor(te, common_event_timed_deny_destructor);
+
+    /* We need to remove the timer from the list before calling the
+     * handler because in a semi-async inner event loop called from the
+     * handler we don't want to come across this event again -- vl */
+    DLIST_REMOVE(ev->timed_events, te);
+
+    /*
+     * If the timed event was registered for a zero current_time,
+     * then we pass a zero timeval here too! To avoid the
+     * overhead of gettimeofday() calls.
+     *
+     * otherwise we pass the current time
+     */
+    te->handler(ev, te, current_time, te->private_data);
+
+    /* The destructor isn't necessary anymore, we've already removed the
+     * event from the list. */
+    talloc_set_destructor(te, NULL);
+
+    talloc_free(te);
+
+    return timeval_zero();
+}
+
--- /dev/null
+dnl object list for the events library; epoll support is added below
+dnl when both the header and the syscall wrapper are available
+EVENTS_OBJ="lib/events/events.o lib/events/events_select.o lib/events/events_signal.o lib/events/events_timed.o lib/events/events_standard.o"
+
+AC_CHECK_HEADERS(sys/epoll.h)
+AC_CHECK_FUNCS(epoll_create)
+
+if test x"$ac_cv_header_sys_epoll_h" = x"yes" -a x"$ac_cv_func_epoll_create" = x"yes"; then
+	EVENTS_OBJ="$EVENTS_OBJ lib/events/events_epoll.o"
+	AC_DEFINE(HAVE_EVENTS_EPOLL, 1, [Whether epoll is available])
+fi
+
+AC_SUBST(EVENTS_OBJ)