2 Unix SMB/CIFS implementation.
4 testing of the events subsystem
6 Copyright (C) Stefan Metzmacher 2006-2009
7 Copyright (C) Jeremy Allison 2013
9 ** NOTE! The following LGPL license applies to the tevent
10 ** library. This does NOT imply that all of Samba is released
13 This library is free software; you can redistribute it and/or
14 modify it under the terms of the GNU Lesser General Public
15 License as published by the Free Software Foundation; either
16 version 3 of the License, or (at your option) any later version.
18 This library is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 Lesser General Public License for more details.
23 You should have received a copy of the GNU Lesser General Public
24 License along with this library; if not, see <http://www.gnu.org/licenses/>.
28 #include "lib/tevent/tevent.h"
29 #include "system/filesys.h"
30 #include "system/select.h"
31 #include "system/network.h"
32 #include "torture/torture.h"
33 #include "torture/local/proto.h"
/*
 * Read up to count bytes from fd into buf, retrying the read() while it
 * fails with EINTR — the signal-heavy tests below interrupt syscalls
 * frequently.  Other errors and short reads are deliberately ignored:
 * this helper only moves single bytes through test pipes/sockets.
 */
static void do_read(int fd, void *buf, size_t count)
{
	ssize_t ret;

	do {
		ret = read(fd, buf, count);
	} while (ret == -1 && errno == EINTR);
}
/*
 * fd event handler for the read end of the test pipe: raises SIGUSR1 and
 * SIGALRM against our own pid (driving the signal handlers registered in
 * test_event_context), then consumes one byte from fd[0].
 * NOTE(review): this extract appears line-sampled — braces and some
 * declarations (e.g. of 'c') are not visible here; confirm against the
 * full source before editing.
 */
50 static void fde_handler_read(struct tevent_context *ev_ctx, struct tevent_fd *f,
51 uint16_t flags, void *private_data)
53 int *fd = (int *)private_data;
/* presumably only raised when SA_SIGINFO is available — TODO confirm */
56 kill(getpid(), SIGUSR1);
58 kill(getpid(), SIGALRM);
60 do_read(fd[0], &c, 1);
/*
 * Write count bytes from buf to fd, retrying the write() while it fails
 * with EINTR.  Mirrors do_read(); other errors and short writes are
 * ignored by design — callers only ever move single bytes.
 */
static void do_write(int fd, void *buf, size_t count)
{
	ssize_t ret;

	do {
		ret = write(fd, buf, count);
	} while (ret == -1 && errno == EINTR);
}
/*
 * fd event handler for the write end of the test pipe: pushes one byte
 * into fd[1], which in turn makes the read handler fire.
 * NOTE(review): extract appears line-sampled — braces and the 'c'
 * declaration are not visible here.
 */
73 static void fde_handler_write(struct tevent_context *ev_ctx, struct tevent_fd *f,
74 uint16_t flags, void *private_data)
76 int *fd = (int *)private_data;
79 do_write(fd[1], &c, 1);
83 /* This will only fire if the fd's returned from pipe() are bi-directional. */
/*
 * Same as fde_handler_read but reads from fd[1] instead of fd[0]; only
 * relevant on platforms where pipe() yields bi-directional descriptors.
 * NOTE(review): extract appears line-sampled — braces and some
 * declarations are not visible here.
 */
84 static void fde_handler_read_1(struct tevent_context *ev_ctx, struct tevent_fd *f,
85 uint16_t flags, void *private_data)
87 int *fd = (int *)private_data;
90 kill(getpid(), SIGUSR1);
92 kill(getpid(), SIGALRM);
94 do_read(fd[1], &c, 1);
98 /* This will only fire if the fd's returned from pipe() are bi-directional. */
/*
 * Counterpart of fde_handler_write for the reverse direction: writes one
 * byte to fd[0].  Only fires with bi-directional pipe() descriptors.
 * NOTE(review): extract appears line-sampled — braces and the 'c'
 * declaration are not visible here.
 */
99 static void fde_handler_write_1(struct tevent_context *ev_ctx, struct tevent_fd *f,
100 uint16_t flags, void *private_data)
102 int *fd = (int *)private_data;
104 do_write(fd[0], &c, 1);
/*
 * Timer handler: flags the caller's loop-termination variable so the
 * main test loop in test_event_context() stops.  private_data points at
 * the caller's int flag.
 */
static void finished_handler(struct tevent_context *ev_ctx, struct tevent_timer *te,
			     struct timeval tval, void *private_data)
{
	int *finished = (int *)private_data;

	(*finished) = 1;
}
/*
 * Signal handler: accumulates the number of signal deliveries into the
 * caller's counter.  private_data points at the caller's int counter;
 * 'count' is the number of coalesced deliveries tevent reports.
 */
static void count_handler(struct tevent_context *ev_ctx, struct tevent_signal *te,
			  int signum, int count, void *info, void *private_data)
{
	int *countp = (int *)private_data;

	(*countp) += count;
}
/*
 * End-to-end test of one tevent backend (named by test_data): wires four
 * fd events onto a pipe, three signal events (SIGALRM x2, SIGUSR1) and a
 * 2s finish timer, then pumps the loop and checks the event counts.
 * NOTE(review): this extract appears line-sampled — braces, several
 * declarations (ret, t, finished, fde_count) and some statements are not
 * visible here; confirm against the full source before editing.
 */
121 static bool test_event_context(struct torture_context *test,
122 const void *test_data)
124 struct tevent_context *ev_ctx;
125 int fd[2] = { -1, -1 };
126 const char *backend = (const char *)test_data;
127 int alarm_count=0, info_count=0;
128 struct tevent_fd *fde_read;
129 struct tevent_fd *fde_read_1;
130 struct tevent_fd *fde_write;
131 struct tevent_fd *fde_write_1;
133 struct tevent_signal *se1 = NULL;
136 struct tevent_signal *se2 = NULL;
139 struct tevent_signal *se3 = NULL;
/* Unsupported backends are skipped, not failed. */
145 ev_ctx = tevent_context_init_byname(test, backend);
146 if (ev_ctx == NULL) {
147 torture_comment(test, "event backend '%s' not supported\n", backend);
151 torture_comment(test, "backend '%s' - %s\n",
152 backend, __FUNCTION__);
159 torture_assert_int_equal(test, ret, 0, "pipe failed");
/* Both READ and WRITE watchers on both pipe ends; the *_1 handlers only
 * fire where pipe() descriptors are bi-directional. */
161 fde_read = tevent_add_fd(ev_ctx, ev_ctx, fd[0], TEVENT_FD_READ,
162 fde_handler_read, fd);
163 fde_write_1 = tevent_add_fd(ev_ctx, ev_ctx, fd[0], TEVENT_FD_WRITE,
164 fde_handler_write_1, fd);
166 fde_write = tevent_add_fd(ev_ctx, ev_ctx, fd[1], TEVENT_FD_WRITE,
167 fde_handler_write, fd);
168 fde_read_1 = tevent_add_fd(ev_ctx, ev_ctx, fd[1], TEVENT_FD_READ,
169 fde_handler_read_1, fd);
171 tevent_fd_set_auto_close(fde_read);
172 tevent_fd_set_auto_close(fde_write);
174 tevent_add_timer(ev_ctx, ev_ctx, timeval_current_ofs(2,0),
175 finished_handler, &finished);
/* se1/se2 both count SIGALRM into alarm_count; se2 is one-shot
 * (SA_RESETHAND), se3 counts SIGUSR1 into info_count. */
178 se1 = tevent_add_signal(ev_ctx, ev_ctx, SIGALRM, SA_RESTART, count_handler, &alarm_count);
179 torture_assert(test, se1 != NULL, "failed to setup se1");
182 se2 = tevent_add_signal(ev_ctx, ev_ctx, SIGALRM, SA_RESETHAND, count_handler, &alarm_count);
183 torture_assert(test, se2 != NULL, "failed to setup se2");
186 se3 = tevent_add_signal(ev_ctx, ev_ctx, SIGUSR1, SA_SIGINFO, count_handler, &info_count);
187 torture_assert(test, se3 != NULL, "failed to setup se3");
190 t = timeval_current();
193 if (tevent_loop_once(ev_ctx) == -1) {
195 torture_fail(test, talloc_asprintf(test, "Failed event loop %s\n", strerror(errno)));
/* Drop the fd events, then drain remaining pending signal events. */
199 talloc_free(fde_read_1);
200 talloc_free(fde_write_1);
201 talloc_free(fde_read);
202 talloc_free(fde_write);
204 while (alarm_count < fde_count+1) {
205 if (tevent_loop_once(ev_ctx) == -1) {
210 torture_comment(test, "Got %.2f pipe events/sec\n", fde_count/timeval_elapsed(&t));
/* +1 accounts for the one-shot se2 delivery — TODO confirm */
216 torture_assert_int_equal(test, alarm_count, 1+fde_count, "alarm count mismatch");
220 * we do not call talloc_free(se2)
221 * because it is already gone,
222 * after triggering the event handler.
228 torture_assert_int_equal(test, info_count, fde_count, "info count mismatch");
/*
 * Shared state for the fd1 test: the torture/tevent contexts plus the
 * timer and the two fd events under test.
 * NOTE(review): extract appears line-sampled — further members used by
 * the handlers (sock[2], loop_count, got_write/got_read, drain_done,
 * finished, error, backend) are not visible here.
 */
236 struct test_event_fd1_state {
237 struct torture_context *tctx;
239 struct tevent_context *ev;
241 struct tevent_timer *te;
242 struct tevent_fd *fde0;
243 struct tevent_fd *fde1;
/*
 * fd handler for test_event_fd1.  Runs one of three phases, flagging the
 * test as failed (finished + error) whenever the reported flags don't
 * match the phase's expectation:
 *  - drain phase: only TEVENT_FD_READ is legal; read bytes off sock[0]
 *    until EOF/error, then disable the event and mark drain_done.
 *  - write phase: expect exactly TEVENT_FD_WRITE, write one byte to the
 *    other socket and switch the event to READ.
 *  - read phase: expect exactly TEVENT_FD_READ, then disable READ.
 * NOTE(review): extract appears line-sampled — braces, returns and some
 * declarations are not visible here.
 */
253 static void test_event_fd1_fde_handler(struct tevent_context *ev_ctx,
254 struct tevent_fd *fde,
258 struct test_event_fd1_state *state =
259 (struct test_event_fd1_state *)private_data;
261 if (state->drain_done) {
262 state->finished = true;
263 state->error = __location__;
271 if (!(flags & TEVENT_FD_READ)) {
272 state->finished = true;
273 state->error = __location__;
277 ret = read(state->sock[0], &c, 1);
/* EOF/error on the drained socket: stop watching and finish draining. */
285 tevent_fd_set_flags(fde, 0);
286 state->drain_done = true;
290 if (!state->got_write) {
293 if (flags != TEVENT_FD_WRITE) {
294 state->finished = true;
295 state->error = __location__;
298 state->got_write = true;
301 * we write to the other socket...
303 do_write(state->sock[1], &c, 1);
304 TEVENT_FD_NOT_WRITEABLE(fde);
305 TEVENT_FD_READABLE(fde);
309 if (!state->got_read) {
310 if (flags != TEVENT_FD_READ) {
311 state->finished = true;
312 state->error = __location__;
315 state->got_read = true;
317 TEVENT_FD_NOT_READABLE(fde);
/* Falling through here means an unexpected event: fail the test. */
321 state->finished = true;
322 state->error = __location__;
/*
 * Timer callback for test_event_fd1: validates that the write and read
 * phases both happened this iteration, then either finishes (after the
 * drain, or too many iterations) or resets the per-iteration flags,
 * re-arms fde0 for WRITE and reschedules itself.  On the 3rd+ loop it
 * frees fde1, which auto-closes sock[1] and triggers the drain phase.
 * NOTE(review): extract appears line-sampled — braces, returns and the
 * loop_count increment are not visible here.
 */
326 static void test_event_fd1_finished(struct tevent_context *ev_ctx,
327 struct tevent_timer *te,
331 struct test_event_fd1_state *state =
332 (struct test_event_fd1_state *)private_data;
334 if (state->drain_done) {
335 state->finished = true;
339 if (!state->got_write) {
340 state->finished = true;
341 state->error = __location__;
345 if (!state->got_read) {
346 state->finished = true;
347 state->error = __location__;
352 if (state->loop_count > 3) {
353 state->finished = true;
354 state->error = __location__;
358 state->got_write = false;
359 state->got_read = false;
361 tevent_fd_set_flags(state->fde0, TEVENT_FD_WRITE);
363 if (state->loop_count > 2) {
/* Freeing fde1 auto-closes sock[1] and starts the drain phase. */
365 TALLOC_FREE(state->fde1);
366 TEVENT_FD_READABLE(state->fde0);
369 state->te = tevent_add_timer(state->ev, state->ev,
370 timeval_current_ofs(0,2000),
371 test_event_fd1_finished, state);
/*
 * fd1 torture test: drives the phase machine in
 * test_event_fd1_fde_handler/test_event_fd1_finished over a socketpair,
 * on the backend named by test_data.  See the in-body comment block for
 * the exact sequence being verified.
 * NOTE(review): extract appears line-sampled — braces, ZERO_STRUCT-style
 * initialisation of 'state' and some arguments are not visible here.
 */
374 static bool test_event_fd1(struct torture_context *tctx,
375 const void *test_data)
377 struct test_event_fd1_state state;
381 state.backend = (const char *)test_data;
383 state.ev = tevent_context_init_byname(tctx, state.backend);
384 if (state.ev == NULL) {
385 torture_skip(tctx, talloc_asprintf(tctx,
386 "event backend '%s' not supported\n",
391 tevent_set_debug_stderr(state.ev);
392 torture_comment(tctx, "backend '%s' - %s\n",
393 state.backend, __FUNCTION__);
396 * This tests the following:
398 * It monitors the state of state.sock[0]
399 * with tevent_fd, but we never read/write on state.sock[0]
400 * while state.sock[1] * is only used to write a few bytes.
403 * - we wait only for TEVENT_FD_WRITE on state.sock[0]
404 * - we write 1 byte to state.sock[1]
405 * - we wait only for TEVENT_FD_READ on state.sock[0]
406 * - we disable events on state.sock[0]
407 * - the timer event restarts the loop
408 * Then we close state.sock[1]
410 * - we wait for TEVENT_FD_READ/WRITE on state.sock[0]
411 * - we try to read 1 byte
412 * - if the read gets an error of returns 0
413 * we disable the event handler
414 * - the timer finishes the test
418 socketpair(AF_UNIX, SOCK_STREAM, 0, state.sock);
420 state.te = tevent_add_timer(state.ev, state.ev,
421 timeval_current_ofs(0,1000),
422 test_event_fd1_finished, &state);
423 state.fde0 = tevent_add_fd(state.ev, state.ev,
424 state.sock[0], TEVENT_FD_WRITE,
425 test_event_fd1_fde_handler, &state);
426 /* state.fde1 is only used to auto close */
427 state.fde1 = tevent_add_fd(state.ev, state.ev,
429 test_event_fd1_fde_handler, &state);
431 tevent_fd_set_auto_close(state.fde0);
432 tevent_fd_set_auto_close(state.fde1);
434 while (!state.finished) {
436 if (tevent_loop_once(state.ev) == -1) {
437 talloc_free(state.ev);
438 torture_fail(tctx, talloc_asprintf(tctx,
439 "Failed event loop %s\n",
444 talloc_free(state.ev);
446 torture_assert(tctx, state.error == NULL, talloc_asprintf(tctx,
/*
 * Shared state for the fd2 test.  The nested test_event_fd2_sock carries
 * the per-direction bookkeeping for one end of the socketpair.
 * NOTE(review): extract appears line-sampled — further members used by
 * the handlers (fd, num_written, num_read, got_full on the sock struct;
 * sock0/sock1, finished, error, backend on the state struct) are not
 * visible here.
 */
452 struct test_event_fd2_state {
453 struct torture_context *tctx;
455 struct tevent_context *ev;
456 struct tevent_timer *te;
457 struct test_event_fd2_sock {
458 struct test_event_fd2_state *state;
460 struct tevent_fd *fde;
/*
 * Per-socket handler for test_event_fd2.  Each call validates the flags
 * against what the byte counters say must be pending, writes one byte
 * while the send buffer has room (until got_full), then reads one byte
 * per event until everything the peer wrote has been consumed.  Any
 * mismatch sets finished + error and aborts the test.
 * NOTE(review): extract appears line-sampled — braces, returns and some
 * declarations (ret, v, c) are not visible here.
 */
469 static void test_event_fd2_sock_handler(struct tevent_context *ev_ctx,
470 struct tevent_fd *fde,
474 struct test_event_fd2_sock *cur_sock =
475 (struct test_event_fd2_sock *)private_data;
476 struct test_event_fd2_state *state = cur_sock->state;
477 struct test_event_fd2_sock *oth_sock = NULL;
/* Identify which end we are and which is the peer. */
481 if (cur_sock == &state->sock0) {
482 oth_sock = &state->sock1;
484 oth_sock = &state->sock0;
/* Right after the initial priming byte both READ and WRITE must be set. */
487 if (oth_sock->num_written == 1) {
488 if (flags != (TEVENT_FD_READ | TEVENT_FD_WRITE)) {
489 state->finished = true;
490 state->error = __location__;
/* If we already consumed everything the peer wrote, no event is legal. */
495 if (cur_sock->num_read == oth_sock->num_written) {
496 state->finished = true;
497 state->error = __location__;
501 if (!(flags & TEVENT_FD_READ)) {
502 state->finished = true;
503 state->error = __location__;
507 if (oth_sock->num_read >= PIPE_BUF) {
509 * On Linux we become writable once we've read
510 * one byte. On Solaris we only become writable
511 * again once we've read 4096 bytes. PIPE_BUF
512 * is probably a safe bet to test against.
514 * There should be room to write a byte again
516 if (!(flags & TEVENT_FD_WRITE)) {
517 state->finished = true;
518 state->error = __location__;
/* Fill phase: keep writing one byte per WRITE event until full. */
523 if ((flags & TEVENT_FD_WRITE) && !cur_sock->got_full) {
524 v = (uint8_t)cur_sock->num_written;
525 ret = write(cur_sock->fd, &v, 1);
527 state->finished = true;
528 state->error = __location__;
531 cur_sock->num_written++;
/* Sanity cap — the socket buffer can never absorb this many bytes. */
532 if (cur_sock->num_written > 0x80000000) {
533 state->finished = true;
534 state->error = __location__;
540 if (!cur_sock->got_full) {
541 cur_sock->got_full = true;
543 if (!oth_sock->got_full) {
546 * lets wait for oth_sock
549 tevent_fd_set_flags(cur_sock->fde, 0);
554 * oth_sock waited for cur_sock,
557 tevent_fd_set_flags(oth_sock->fde,
558 TEVENT_FD_READ|TEVENT_FD_WRITE);
/* Drain phase: read one byte and check its sequence value. */
561 ret = read(cur_sock->fd, &v, 1);
563 state->finished = true;
564 state->error = __location__;
567 c = (uint8_t)cur_sock->num_read;
569 state->finished = true;
570 state->error = __location__;
573 cur_sock->num_read++;
575 if (cur_sock->num_read < oth_sock->num_written) {
576 /* there is more to read */
580 * we read everything, we need to remove TEVENT_FD_WRITE
583 TEVENT_FD_NOT_WRITEABLE(cur_sock->fde);
585 if (oth_sock->num_read == cur_sock->num_written) {
587 * both directions are finished
589 state->finished = true;
595 static void test_event_fd2_finished(struct tevent_context *ev_ctx,
596 struct tevent_timer *te,
600 struct test_event_fd2_state *state =
601 (struct test_event_fd2_state *)private_data;
604 * this should never be triggered
606 state->finished = true;
607 state->error = __location__;
/*
 * fd2 torture test: bidirectional byte pump over a socketpair on the
 * backend named by test_data.  Primes each end with one byte, then lets
 * test_event_fd2_sock_handler fill both send buffers and drain them,
 * guarded by a 600s never-expected watchdog timer.
 * NOTE(review): extract appears line-sampled — braces, ZERO_STRUCT-style
 * initialisation, the 'sock'/'c' declarations and some arguments are not
 * visible here.
 */
610 static bool test_event_fd2(struct torture_context *tctx,
611 const void *test_data)
613 struct test_event_fd2_state state;
619 state.backend = (const char *)test_data;
621 state.ev = tevent_context_init_byname(tctx, state.backend);
622 if (state.ev == NULL) {
623 torture_skip(tctx, talloc_asprintf(tctx,
624 "event backend '%s' not supported\n",
629 tevent_set_debug_stderr(state.ev);
630 torture_comment(tctx, "backend '%s' - %s\n",
631 state.backend, __FUNCTION__);
634 * This tests the following
636 * - We write 1 byte to each socket
637 * - We wait for TEVENT_FD_READ/WRITE on both sockets
638 * - When we get TEVENT_FD_WRITE we write 1 byte
639 * until both socket buffers are full, which
640 * means both sockets only get TEVENT_FD_READ.
641 * - Then we read 1 byte until we have consumed
642 * all bytes the other end has written.
646 socketpair(AF_UNIX, SOCK_STREAM, 0, sock);
649 * the timer should never expire
651 state.te = tevent_add_timer(state.ev, state.ev,
652 timeval_current_ofs(600, 0),
653 test_event_fd2_finished, &state);
654 state.sock0.state = &state;
655 state.sock0.fd = sock[0];
656 state.sock0.fde = tevent_add_fd(state.ev, state.ev,
658 TEVENT_FD_READ | TEVENT_FD_WRITE,
659 test_event_fd2_sock_handler,
661 state.sock1.state = &state;
662 state.sock1.fd = sock[1];
663 state.sock1.fde = tevent_add_fd(state.ev, state.ev,
665 TEVENT_FD_READ | TEVENT_FD_WRITE,
666 test_event_fd2_sock_handler,
669 tevent_fd_set_auto_close(state.sock0.fde);
670 tevent_fd_set_auto_close(state.sock1.fde);
/* Prime both directions with one byte each. */
672 do_write(state.sock0.fd, &c, 1);
673 state.sock0.num_written++;
674 do_write(state.sock1.fd, &c, 1);
675 state.sock1.num_written++;
677 while (!state.finished) {
679 if (tevent_loop_once(state.ev) == -1) {
680 talloc_free(state.ev);
681 torture_fail(tctx, talloc_asprintf(tctx,
682 "Failed event loop %s\n",
687 talloc_free(state.ev);
689 torture_assert(tctx, state.error == NULL, talloc_asprintf(tctx,
/*
 * Mutex serialising access to the poll_mt event context between the main
 * thread and the poller thread; do_shutdown tells the poller to exit.
 */
static pthread_mutex_t threaded_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool do_shutdown = false;

/* Take the shared lock; test code, so a failed lock simply aborts. */
static void test_event_threaded_lock(void)
{
	int ret;
	ret = pthread_mutex_lock(&threaded_mutex);
	assert(ret == 0);
}

/* Release the shared lock; aborts on failure. */
static void test_event_threaded_unlock(void)
{
	int ret;
	ret = pthread_mutex_unlock(&threaded_mutex);
	assert(ret == 0);
}
/*
 * tevent trace callback: drops the shared mutex while the loop sleeps in
 * its wait (so the main thread may mutate the event context) and
 * reacquires it as soon as the wait returns.
 * NOTE(review): extract appears line-sampled — the switch statement,
 * braces and break statements are not visible here.
 */
714 static void test_event_threaded_trace(enum tevent_trace_point point,
718 case TEVENT_TRACE_BEFORE_WAIT:
719 test_event_threaded_unlock();
721 case TEVENT_TRACE_AFTER_WAIT:
722 test_event_threaded_lock();
724 case TEVENT_TRACE_BEFORE_LOOP_ONCE:
725 case TEVENT_TRACE_AFTER_LOOP_ONCE:
/*
 * 5s safety timer for the threaded test.
 * NOTE(review): the body is not visible in this extract — presumably it
 * sets do_shutdown so test_event_poll_thread exits; confirm against the
 * full source.
 */
730 static void test_event_threaded_timer(struct tevent_context *ev,
731 struct tevent_timer *te,
732 struct timeval current_time,
/*
 * Poller thread: runs the poll_mt event loop while holding the shared
 * mutex (the trace callback releases it around each wait), until
 * do_shutdown is set.
 * NOTE(review): extract appears line-sampled — the loop construct,
 * braces, 'ret' declaration and return are not visible here.
 */
738 static void *test_event_poll_thread(void *private_data)
740 struct tevent_context *ev = (struct tevent_context *)private_data;
742 test_event_threaded_lock();
746 ret = tevent_loop_once(ev);
749 test_event_threaded_unlock();
/*
 * Read handler for the threaded test: ignores non-READ events, then
 * consumes one byte from the pipe, retrying on EINTR.
 * NOTE(review): extract appears line-sampled — braces, the do-loop
 * opening and the 'nread'/'c' declarations are not visible here.
 */
756 static void test_event_threaded_read_handler(struct tevent_context *ev,
757 struct tevent_fd *fde,
761 int *pfd = (int *)private_data;
765 if ((flags & TEVENT_FD_READ) == 0) {
770 nread = read(*pfd, &c, 1);
771 } while ((nread == -1) && (errno == EINTR));
/*
 * Tests the poll_mt backend: a dedicated poller thread runs the loop
 * while the main thread adds an fd event under the shared mutex, then
 * writes bytes into the pipe to make the handler fire, and finally joins
 * the poller.  The trace callback provides the lock hand-off.
 * NOTE(review): extract appears line-sampled — braces, the fds/ret/c
 * declarations, the pipe() call and some statements are not visible here.
 */
776 static bool test_event_context_threaded(struct torture_context *test,
777 const void *test_data)
779 struct tevent_context *ev;
780 struct tevent_timer *te;
781 struct tevent_fd *fde;
782 pthread_t poll_thread;
787 ev = tevent_context_init_byname(test, "poll_mt");
788 torture_assert(test, ev != NULL, "poll_mt not supported");
790 tevent_set_trace_callback(ev, test_event_threaded_trace, NULL);
792 te = tevent_add_timer(ev, ev, timeval_current_ofs(5, 0),
793 test_event_threaded_timer, NULL);
794 torture_assert(test, te != NULL, "Could not add timer");
796 ret = pthread_create(&poll_thread, NULL, test_event_poll_thread, ev);
797 torture_assert(test, ret == 0, "Could not create poll thread");
800 torture_assert(test, ret == 0, "Could not create pipe");
/* Mutating the event context requires the shared lock. */
804 test_event_threaded_lock();
806 fde = tevent_add_fd(ev, ev, fds[0], TEVENT_FD_READ,
807 test_event_threaded_read_handler, &fds[0]);
808 torture_assert(test, fde != NULL, "Could not add fd event");
810 test_event_threaded_unlock();
814 do_write(fds[1], &c, 1);
818 test_event_threaded_lock();
820 test_event_threaded_unlock();
822 do_write(fds[1], &c, 1);
824 ret = pthread_join(poll_thread, NULL);
825 torture_assert(test, ret == 0, "pthread_join failed");
/* Number of worker threads spawned by each multi-threaded test. */
830 #define NUM_TEVENT_THREADS 100
832 /* Ugly, but needed for torture_comment... */
833 static struct torture_context *thread_test_ctx;
/* Thread ids of all workers, so callbacks can map a tid to an index. */
834 static pthread_t thread_map[NUM_TEVENT_THREADS];
/* Callbacks processed so far; driven by the master event loop. */
835 static unsigned thread_counter;
837 /* Called in master thread context */
/*
 * Immediate callback scheduled by thread_fn_nowait: looks up which
 * worker thread sent it (by comparing the carried pthread_t against
 * thread_map) and logs it.
 * NOTE(review): extract appears line-sampled — braces, the 'i'
 * declaration, loop break and the thread_counter update are not visible
 * here.
 */
838 static void callback_nowait(struct tevent_context *ev,
839 struct tevent_immediate *im,
842 pthread_t *thread_id_ptr =
843 talloc_get_type_abort(private_ptr, pthread_t);
846 for (i = 0; i < NUM_TEVENT_THREADS; i++) {
847 if (pthread_equal(*thread_id_ptr,
852 torture_comment(thread_test_ctx,
853 "Callback %u from thread %u\n",
860 static void *thread_fn_nowait(void *private_ptr)
862 struct tevent_thread_proxy *master_tp =
863 talloc_get_type_abort(private_ptr, struct tevent_thread_proxy);
864 struct tevent_immediate *im;
865 pthread_t *thread_id_ptr;
867 im = tevent_create_immediate(NULL);
871 thread_id_ptr = talloc(NULL, pthread_t);
872 if (thread_id_ptr == NULL) {
875 *thread_id_ptr = pthread_self();
877 tevent_thread_proxy_schedule(master_tp,
884 static void timeout_fn(struct tevent_context *ev,
885 struct tevent_timer *te,
886 struct timeval tv, void *p)
888 thread_counter = NUM_TEVENT_THREADS * 10;
/*
 * Spawns NUM_TEVENT_THREADS workers which each fire one immediate
 * callback at the master loop through a tevent_thread_proxy, then pumps
 * the master loop until all callbacks arrived (10s watchdog via
 * timeout_fn).
 * NOTE(review): extract appears line-sampled — braces, the 'i'
 * declaration, thread_counter reset, pthread_create arguments and the
 * final return are not visible here.
 */
891 static bool test_multi_tevent_threaded(struct torture_context *test,
892 const void *test_data)
895 struct tevent_context *master_ev;
896 struct tevent_thread_proxy *tp;
/* Threads allocate on the NULL context; disable tracking for that. */
898 talloc_disable_null_tracking();
900 /* Ugly global stuff. */
901 thread_test_ctx = test;
904 master_ev = tevent_context_init(NULL);
905 if (master_ev == NULL) {
908 tevent_set_debug_stderr(master_ev);
910 tp = tevent_thread_proxy_create(master_ev);
913 talloc_asprintf(test,
914 "tevent_thread_proxy_create failed\n"));
915 talloc_free(master_ev);
919 for (i = 0; i < NUM_TEVENT_THREADS; i++) {
920 int ret = pthread_create(&thread_map[i],
926 talloc_asprintf(test,
927 "Failed to create thread %i, %d\n",
933 /* Ensure we don't wait more than 10 seconds. */
934 tevent_add_timer(master_ev,
936 timeval_current_ofs(10,0),
940 while (thread_counter < NUM_TEVENT_THREADS) {
941 int ret = tevent_loop_once(master_ev);
942 torture_assert(test, ret == 0, "tevent_loop_once failed");
/* If the watchdog fired, thread_counter overshoots and this fails. */
945 torture_assert(test, thread_counter == NUM_TEVENT_THREADS,
946 "thread_counter fail\n");
948 talloc_free(master_ev);
/* NOTE(review): member of struct reply_state — the surrounding struct
 * definition (thread_id, p_finished, ...) is not visible in this
 * extract.  reply_tp is the proxy the master uses to answer the worker. */
953 struct tevent_thread_proxy *reply_tp;
/*
 * Per-worker watchdog for test_multi_tevent_threaded_1: marks the
 * worker's 'finished' flag so its loop exits even if the master never
 * replies.
 * NOTE(review): the body's assignment (and its sentinel value) is not
 * visible in this extract — confirm against the full source.
 */
958 static void thread_timeout_fn(struct tevent_context *ev,
959 struct tevent_timer *te,
960 struct timeval tv, void *p)
962 int *p_finished = (int *)p;
967 /* Called in child-thread context */
/*
 * Reply handler running in the worker's own event loop: takes ownership
 * of the reply_state back from the proxy and flags the worker as
 * finished (success).
 */
968 static void thread_callback(struct tevent_context *ev,
969 struct tevent_immediate *im,
972 struct reply_state *rsp =
973 talloc_get_type_abort(private_ptr, struct reply_state);
975 talloc_steal(ev, rsp);
976 *rsp->p_finished = 1;
979 /* Called in master thread context */
/*
 * Request handler in the master loop: adopts the reply_state, maps the
 * sender's pthread_t to its index for logging, then schedules
 * thread_callback back on the worker's proxy as the reply.
 * NOTE(review): extract appears line-sampled — braces, the 'i'
 * declaration, loop break, thread_counter update and the schedule
 * arguments are not visible here.
 */
980 static void master_callback(struct tevent_context *ev,
981 struct tevent_immediate *im,
984 struct reply_state *rsp =
985 talloc_get_type_abort(private_ptr, struct reply_state);
988 talloc_steal(ev, rsp);
990 for (i = 0; i < NUM_TEVENT_THREADS; i++) {
991 if (pthread_equal(rsp->thread_id,
996 torture_comment(thread_test_ctx,
997 "Callback %u from thread %u\n",
1000 /* Now reply to the thread ! */
1001 tevent_thread_proxy_schedule(rsp->reply_tp,
/*
 * Worker body for the request/reply test: builds its own event context
 * and proxy, sends a reply_state (carrying its tid and reply proxy) to
 * the master, then loops until the master's reply — routed back through
 * thread_callback — sets 'finished'.  A 10s thread_timeout_fn guards
 * against a lost reply.
 * NOTE(review): extract appears line-sampled — braces, NULL-check
 * bodies, the 'finished'/'ret' declarations, rsp->reply_tp assignment
 * and the final return are not visible here.
 */
1009 static void *thread_fn_1(void *private_ptr)
1011 struct tevent_thread_proxy *master_tp =
1012 talloc_get_type_abort(private_ptr, struct tevent_thread_proxy);
1013 struct tevent_thread_proxy *tp;
1014 struct tevent_immediate *im;
1015 struct tevent_context *ev;
1016 struct reply_state *rsp;
1020 ev = tevent_context_init(NULL);
1025 tp = tevent_thread_proxy_create(ev);
1031 im = tevent_create_immediate(ev);
1037 rsp = talloc(ev, struct reply_state);
1043 rsp->thread_id = pthread_self();
1045 rsp->p_finished = &finished;
1047 /* Introduce a little randomness into the mix.. */
1048 usleep(random() % 7000);
1050 tevent_thread_proxy_schedule(master_tp,
1055 /* Ensure we don't wait more than 10 seconds. */
1056 tevent_add_timer(ev,
1058 timeval_current_ofs(10,0),
1062 while (finished == 0) {
1063 ret = tevent_loop_once(ev);
1073 * NB. We should talloc_free(ev) here, but if we do
1074 * we currently get hit by helgrind Fix #323432
1075 * "When calling pthread_cond_destroy or pthread_mutex_destroy
1076 * with initializers as argument Helgrind (incorrectly) reports errors."
1078 * http://valgrind.10908.n7.nabble.com/Helgrind-3-9-0-false-positive-
1079 * with-pthread-mutex-destroy-td47757.html
1081 * Helgrind doesn't understand that the request/reply
1082 * messages provide synchronization between the lock/unlock
1083 * in tevent_thread_proxy_schedule(), and the pthread_destroy()
1084 * when the struct tevent_thread_proxy object is talloc_free'd.
1086 * As a work-around for now return ev for the parent thread to free.
/*
 * Request/reply variant of the multi-thread test: each worker
 * (thread_fn_1) exchanges one round-trip with the master proxy; the
 * master pumps until all callbacks arrived, then joins every worker and
 * frees the event context each worker returned (see the helgrind note
 * in thread_fn_1).
 * NOTE(review): extract appears line-sampled — braces, 'i'/'ret'
 * declarations, thread_counter reset, pthread_create arguments and the
 * final return are not visible here.
 */
1091 static bool test_multi_tevent_threaded_1(struct torture_context *test,
1092 const void *test_data)
1095 struct tevent_context *master_ev;
1096 struct tevent_thread_proxy *master_tp;
1099 talloc_disable_null_tracking();
1101 /* Ugly global stuff. */
1102 thread_test_ctx = test;
1105 master_ev = tevent_context_init(NULL);
1106 if (master_ev == NULL) {
1109 tevent_set_debug_stderr(master_ev);
1111 master_tp = tevent_thread_proxy_create(master_ev);
1112 if (master_tp == NULL) {
1114 talloc_asprintf(test,
1115 "tevent_thread_proxy_create failed\n"));
1116 talloc_free(master_ev);
1120 for (i = 0; i < NUM_TEVENT_THREADS; i++) {
1121 ret = pthread_create(&thread_map[i],
1127 talloc_asprintf(test,
1128 "Failed to create thread %i, %d\n",
1134 while (thread_counter < NUM_TEVENT_THREADS) {
1135 ret = tevent_loop_once(master_ev);
1136 torture_assert(test, ret == 0, "tevent_loop_once failed");
1139 /* Wait for all the threads to finish - join 'em. */
1140 for (i = 0; i < NUM_TEVENT_THREADS; i++) {
1142 ret = pthread_join(thread_map[i], &retval);
1143 torture_assert(test, ret == 0, "pthread_join failed");
1144 /* Free the child thread event context. */
1145 talloc_free(retval);
1148 talloc_free(master_ev);
/*
 * Per-worker state for the tevent_threaded_context variant: the shared
 * threaded context, the worker's immediate event and its thread id
 * (filled in by thread_fn_2 for the callback to match against
 * thread_map).
 */
1152 struct threaded_test_2 {
1153 struct tevent_threaded_context *tctx;
1154 struct tevent_immediate *im;
1155 pthread_t thread_id;
/* Forward declaration: scheduled from thread_fn_2, defined below. */
1158 static void master_callback_2(struct tevent_context *ev,
1159 struct tevent_immediate *im,
1160 void *private_data);
/*
 * Worker body for test_multi_tevent_threaded_2: records its own thread
 * id, sleeps a random few ms to shuffle scheduling, then fires
 * master_callback_2 at the master loop via
 * tevent_threaded_schedule_immediate().
 * NOTE(review): extract appears line-sampled — braces and the return
 * are not visible here.
 */
1162 static void *thread_fn_2(void *private_data)
1164 struct threaded_test_2 *state = private_data;
1166 state->thread_id = pthread_self();
1168 usleep(random() % 7000);
1170 tevent_threaded_schedule_immediate(
1171 state->tctx, state->im, master_callback_2, state);
/*
 * Master-loop callback for the threaded_context test: maps the sender's
 * thread id to its index in thread_map and logs it.
 * NOTE(review): extract appears line-sampled — braces, the 'i'
 * declaration, loop break and the thread_counter update are not visible
 * here.
 */
1176 static void master_callback_2(struct tevent_context *ev,
1177 struct tevent_immediate *im,
1180 struct threaded_test_2 *state = private_data;
1183 for (i = 0; i < NUM_TEVENT_THREADS; i++) {
1184 if (pthread_equal(state->thread_id, thread_map[i])) {
1188 torture_comment(thread_test_ctx,
1189 "Callback_2 %u from thread %u\n",
/*
 * Same fan-in pattern as test_multi_tevent_threaded but using the newer
 * tevent_threaded_context_create()/tevent_threaded_schedule_immediate()
 * API instead of thread proxies: each worker fires one immediate at the
 * master loop, which pumps until all arrived, then joins the workers.
 * NOTE(review): extract appears line-sampled — braces, 'i'/'ret'
 * declarations, thread_counter reset, the state->tctx assignment, the
 * final talloc_free/return are not visible here.
 */
1195 static bool test_multi_tevent_threaded_2(struct torture_context *test,
1196 const void *test_data)
1200 struct tevent_context *ev;
1201 struct tevent_threaded_context *tctx;
1204 thread_test_ctx = test;
1207 ev = tevent_context_init(test);
1208 torture_assert(test, ev != NULL, "tevent_context_init failed");
1210 tctx = tevent_threaded_context_create(ev, ev);
1211 torture_assert(test, tctx != NULL,
1212 "tevent_threaded_context_create failed");
1214 for (i=0; i<NUM_TEVENT_THREADS; i++) {
1215 struct threaded_test_2 *state;
1217 state = talloc(ev, struct threaded_test_2);
1218 torture_assert(test, state != NULL, "talloc failed");
1221 state->im = tevent_create_immediate(state);
1222 torture_assert(test, state->im != NULL,
1223 "tevent_create_immediate failed");
1225 ret = pthread_create(&thread_map[i], NULL, thread_fn_2, state);
1226 torture_assert(test, ret == 0, "pthread_create failed");
1229 while (thread_counter < NUM_TEVENT_THREADS) {
1230 ret = tevent_loop_once(ev);
1231 torture_assert(test, ret == 0, "tevent_loop_once failed");
1234 /* Wait for all the threads to finish - join 'em. */
1235 for (i = 0; i < NUM_TEVENT_THREADS; i++) {
1237 ret = pthread_join(thread_map[i], &retval);
1238 torture_assert(test, ret == 0, "pthread_join failed");
1239 /* Free the child thread event context. */
1248 struct torture_suite *torture_local_event(TALLOC_CTX *mem_ctx)
1250 struct torture_suite *suite = torture_suite_create(mem_ctx, "event");
1251 const char **list = tevent_backend_list(suite);
1254 for (i=0;list && list[i];i++) {
1255 struct torture_suite *backend_suite;
1257 backend_suite = torture_suite_create(mem_ctx, list[i]);
1259 torture_suite_add_simple_tcase_const(backend_suite,
1262 (const void *)list[i]);
1263 torture_suite_add_simple_tcase_const(backend_suite,
1266 (const void *)list[i]);
1267 torture_suite_add_simple_tcase_const(backend_suite,
1270 (const void *)list[i]);
1272 torture_suite_add_suite(suite, backend_suite);
1276 torture_suite_add_simple_tcase_const(suite, "threaded_poll_mt",
1277 test_event_context_threaded,
1280 torture_suite_add_simple_tcase_const(suite, "multi_tevent_threaded",
1281 test_multi_tevent_threaded,
1284 torture_suite_add_simple_tcase_const(suite, "multi_tevent_threaded_1",
1285 test_multi_tevent_threaded_1,
1288 torture_suite_add_simple_tcase_const(suite, "multi_tevent_threaded_2",
1289 test_multi_tevent_threaded_2,