2 Unix SMB/CIFS implementation.
3 Samba internal messaging functions
4 Copyright (C) Andrew Tridgell 2000
5 Copyright (C) 2001 by Martin Pool
6 Copyright (C) 2002 by Jeremy Allison
7 Copyright (C) 2007 by Volker Lendecke
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>.
24 @defgroup messages Internal messaging framework
28 @brief Module for internal messaging between Samba daemons.
30 The idea is that if a part of Samba wants to do communication with
31 another Samba process then it will do a message_register() of a
32 dispatch function, and use message_send_pid() to send messages to
35 The dispatch function is given the pid of the sender, and it can
36 use that to reply by message_send_pid(). See ping_message() for a
39 @caution Dispatch functions must be able to cope with incoming
40 messages on an *odd* byte boundary.
42 This system doesn't have any inherent size limitations but is not
43 very efficient for large messages or when messages are sent in very
49 #include "dbwrap/dbwrap.h"
52 #include "lib/util/tevent_unix.h"
53 #include "lib/background.h"
/*
 * One registered message handler: a doubly-linked list node holding the
 * dispatch function.  NOTE(review): this listing is line-sampled; members
 * such as msg_type (referenced by messaging_register/dispatch below) are
 * not visible here — confirm against the full source.
 */
55 struct messaging_callback {
56 struct messaging_callback *prev, *next;
58 void (*fn)(struct messaging_context *msg, void *private_data,
60 struct server_id server_id, DATA_BLOB *data);
64 /****************************************************************************
65 A useful function for testing the message system.
66 ****************************************************************************/
/*
 * MSG_PING handler: logs the sender's pid and echoes the received payload
 * back as MSG_PONG.  Registered in messaging_init().
 */
68 static void ping_message(struct messaging_context *msg_ctx,
74 const char *msg = "none";
/* If the ping carried a payload, duplicate it for logging (payload may
 * not be NUL-terminated, hence talloc_strndup). */
77 if (data->data != NULL) {
78 free_me = talloc_strndup(talloc_tos(), (char *)data->data,
82 DEBUG(1,("INFO: Received PING message from PID %s [%s]\n",
83 procid_str_static(&src), msg));
/* Reply to the sender with the same payload. */
85 messaging_send(msg_ctx, src, MSG_PONG, data);
88 /****************************************************************************
89 Register/replace a dispatch function for a particular message type.
90 JRA changed Dec 13 2006. Only one message handler now permitted per type.
91 *NOTE*: Dispatch functions must be able to cope with incoming
92 messages on an *odd* byte boundary.
93 ****************************************************************************/
96 struct messaging_context *msg_ctx;
104 /****************************************************************************
105 Send one of the messages for the broadcast.
106 ****************************************************************************/
/*
 * Per-record callback for serverid_traverse(), used by message_send_all():
 * sends msg_all->msg_type to one server id if that process registered
 * interest in the matching message-flag class.
 */
108 static int traverse_fn(struct db_record *rec, const struct server_id *id,
109 uint32_t msg_flags, void *state)
111 struct msg_all *msg_all = (struct msg_all *)state;
114 /* Don't send if the receiver hasn't registered an interest. */
116 if((msg_flags & msg_all->msg_flag) == 0) {
120 /* If the msg send fails because the pid was not found (i.e. smbd died),
121 * the msg has already been deleted from the messages.tdb.*/
123 status = messaging_send_buf(msg_all->msg_ctx, *id, msg_all->msg_type,
124 (const uint8_t *)msg_all->buf, msg_all->len);
/* Stale serverid entry (process died): drop it from the database. */
126 if (NT_STATUS_EQUAL(status, NT_STATUS_INVALID_HANDLE)) {
129 * If the pid was not found delete the entry from
133 DEBUG(2, ("pid %s doesn't exist\n", procid_str_static(id)));
135 dbwrap_record_delete(rec);
142 * Send a message to all smbd processes.
144 * It isn't very efficient, but should be OK for the sorts of
145 * applications that use it. When we need efficient broadcast we can add
148 * @param n_sent Set to the number of messages sent. This should be
149 * equal to the number of processes, but be careful for races.
151 * @retval True for success.
/*
 * Broadcast a message to every registered server process, mapping the
 * msg_type to the daemon-class flag checked in traverse_fn().
 * On return *n_sent holds the number of messages sent (racy by nature).
 */
153 bool message_send_all(struct messaging_context *msg_ctx,
155 const void *buf, size_t len,
158 struct msg_all msg_all;
160 msg_all.msg_type = msg_type;
/*
 * NOTE(review): the range tests below use strict '>' so the boundary
 * values 0x100, 0x200, 0x300 and 0x400 match no branch here, and the
 * last class uses decimal 4000-5000 unlike the hex ranges above —
 * confirm these are the intended message-number ranges.
 */
161 if (msg_type < 0x100) {
162 msg_all.msg_flag = FLAG_MSG_GENERAL;
163 } else if (msg_type > 0x100 && msg_type < 0x200) {
164 msg_all.msg_flag = FLAG_MSG_NMBD;
165 } else if (msg_type > 0x200 && msg_type < 0x300) {
166 msg_all.msg_flag = FLAG_MSG_PRINT_GENERAL;
167 } else if (msg_type > 0x300 && msg_type < 0x400) {
168 msg_all.msg_flag = FLAG_MSG_SMBD;
169 } else if (msg_type > 0x400 && msg_type < 0x600) {
170 msg_all.msg_flag = FLAG_MSG_WINBIND;
171 } else if (msg_type > 4000 && msg_type < 5000) {
172 msg_all.msg_flag = FLAG_MSG_DBWRAP;
180 msg_all.msg_ctx = msg_ctx;
/* Walk all registered server ids, sending to each via traverse_fn. */
182 serverid_traverse(traverse_fn, &msg_all);
184 *n_sent = msg_all.n_sent;
/*
 * Create a messaging context on mem_ctx bound to event context ev:
 * zero-allocates the context, records our own server id, initializes the
 * local (datagram) transport, optionally the remote (ctdbd) transport when
 * clustering is enabled, and registers the built-in PING and debug handlers.
 * Returns NULL on failure (talloc or transport init failure — error paths
 * are in lines elided from this listing).
 */
188 struct messaging_context *messaging_init(TALLOC_CTX *mem_ctx,
189 struct tevent_context *ev)
191 struct messaging_context *ctx;
194 if (!(ctx = talloc_zero(mem_ctx, struct messaging_context))) {
198 ctx->id = procid_self();
/* Local transport: unix-datagram based messaging. */
201 status = messaging_dgm_init(ctx, ctx, &ctx->local);
203 if (!NT_STATUS_IS_OK(status)) {
204 DEBUG(2, ("messaging_dgm_init failed: %s\n",
/* Remote transport only exists in clustered (ctdb) setups. */
210 if (lp_clustering()) {
211 status = messaging_ctdbd_init(ctx, ctx, &ctx->remote);
213 if (!NT_STATUS_IS_OK(status)) {
214 DEBUG(2, ("messaging_ctdbd_init failed: %s\n",
220 ctx->id.vnn = get_my_vnn();
222 messaging_register(ctx, NULL, MSG_PING, ping_message);
224 /* Register some debugging related messages */
226 register_msg_pool_usage(ctx);
227 register_dmalloc_msgs(ctx);
228 debug_register_msgs(ctx);
233 struct server_id messaging_server_id(const struct messaging_context *msg_ctx)
239 * re-init after a fork: tear down the inherited transports and rebuild
 * them for the child's new pid, so the child gets its own messaging
 * endpoint rather than sharing the parent's.
241 NTSTATUS messaging_reinit(struct messaging_context *msg_ctx)
/* Drop the parent's local transport before re-creating our own. */
245 TALLOC_FREE(msg_ctx->local);
247 msg_ctx->id = procid_self();
249 status = messaging_dgm_init(msg_ctx, msg_ctx, &msg_ctx->local);
250 if (!NT_STATUS_IS_OK(status)) {
251 DEBUG(0, ("messaging_dgm_init failed: %s\n",
256 TALLOC_FREE(msg_ctx->remote);
/* Recreate the ctdbd transport in clustered setups only. */
258 if (lp_clustering()) {
259 status = messaging_ctdbd_init(msg_ctx, msg_ctx,
262 if (!NT_STATUS_IS_OK(status)) {
263 DEBUG(1, ("messaging_ctdbd_init failed: %s\n",
274 * Register a dispatch function for a particular message type. Allow multiple
/*
 * Register a handler for msg_type.  A (msg_type, private_data) pair is
 * unique: re-registering the same pair overrides the existing function
 * pointer in place; a different private_data adds a second handler for
 * the same type (needed e.g. by the internal notify code).
 */
277 NTSTATUS messaging_register(struct messaging_context *msg_ctx,
280 void (*fn)(struct messaging_context *msg,
283 struct server_id server_id,
286 struct messaging_callback *cb;
288 DEBUG(5, ("Registering messaging pointer for type %u - "
290 (unsigned)msg_type, private_data));
293 * Only one callback per type
/* Scan existing callbacks for an exact (type, private_data) match. */
296 for (cb = msg_ctx->callbacks; cb != NULL; cb = cb->next) {
297 /* we allow a second registration of the same message
298 type if it has a different private pointer. This is
299 needed in, for example, the internal notify code,
300 which creates a new notify context for each tree
301 connect, and expects to receive messages to each of
303 if (cb->msg_type == msg_type && private_data == cb->private_data) {
304 DEBUG(5,("Overriding messaging pointer for type %u - private_data=%p\n",
305 (unsigned)msg_type, private_data));
307 cb->private_data = private_data;
/* No match: allocate a fresh callback node owned by msg_ctx. */
312 if (!(cb = talloc(msg_ctx, struct messaging_callback))) {
313 return NT_STATUS_NO_MEMORY;
316 cb->msg_type = msg_type;
318 cb->private_data = private_data;
320 DLIST_ADD(msg_ctx->callbacks, cb);
325 De-register the function for a particular message type.
/*
 * Remove every handler matching (msg_type, private_data).  Iterates with
 * a saved 'next' pointer so removal during the walk is safe.
 */
327 void messaging_deregister(struct messaging_context *ctx, uint32_t msg_type,
330 struct messaging_callback *cb, *next;
332 for (cb = ctx->callbacks; cb; cb = next) {
334 if ((cb->msg_type == msg_type)
335 && (cb->private_data == private_data)) {
336 DEBUG(5,("Deregistering messaging pointer for type %u - private_data=%p\n",
337 (unsigned)msg_type, private_data));
338 DLIST_REMOVE(ctx->callbacks, cb);
/*
 * State for a deferred send-to-self: holds the context and the record to
 * be dispatched from the immediate event (see messaging_send /
 * messaging_trigger_self).
 */
344 struct messaging_selfsend_state {
345 struct messaging_context *msg;
346 struct messaging_rec rec;
349 static void messaging_trigger_self(struct tevent_context *ev,
350 struct tevent_immediate *im,
354 Send a message to a particular server
/*
 * Send a message to 'server'.  Routing:
 *  - disconnected ids are rejected with NT_STATUS_INVALID_PARAMETER_MIX;
 *  - non-local ids go through the remote (ctdbd) transport;
 *  - a send to our own id is dispatched via a deferred tevent immediate
 *    (copying the payload first), avoiding recursion into handlers;
 *  - otherwise the local datagram transport is used.
 */
356 NTSTATUS messaging_send(struct messaging_context *msg_ctx,
357 struct server_id server, uint32_t msg_type,
358 const DATA_BLOB *data)
360 if (server_id_is_disconnected(&server)) {
361 return NT_STATUS_INVALID_PARAMETER_MIX;
364 if (!procid_is_local(&server)) {
365 return msg_ctx->remote->send_fn(msg_ctx, server,
/* Self-send: defer delivery to the event loop instead of calling the
 * handlers directly from here. */
370 if (server_id_equal(&msg_ctx->id, &server)) {
371 struct messaging_selfsend_state *state;
372 struct tevent_immediate *im;
/* Pooled object: state plus room for the copied payload in one
 * allocation. */
374 state = talloc_pooled_object(
375 msg_ctx, struct messaging_selfsend_state,
378 return NT_STATUS_NO_MEMORY;
380 state->msg = msg_ctx;
381 state->rec.msg_version = MESSAGE_VERSION;
382 state->rec.msg_type = msg_type & MSG_TYPE_MASK;
383 state->rec.dest = server;
384 state->rec.src = msg_ctx->id;
386 /* Can't fail, it's a pooled_object */
387 state->rec.buf = data_blob_talloc(
388 state, data->data, data->length);
390 im = tevent_create_immediate(state);
393 return NT_STATUS_NO_MEMORY;
/* Dispatch happens in messaging_trigger_self on the next loop
 * iteration. */
396 tevent_schedule_immediate(im, msg_ctx->event_ctx,
397 messaging_trigger_self, state);
401 return msg_ctx->local->send_fn(msg_ctx, server, msg_type, data,
/*
 * Immediate-event callback for a deferred self-send: dispatches the
 * buffered record into our own handlers.  Freeing of 'state' happens in
 * lines elided from this listing — confirm against the full source.
 */
405 static void messaging_trigger_self(struct tevent_context *ev,
406 struct tevent_immediate *im,
409 struct messaging_selfsend_state *state = talloc_get_type_abort(
410 private_data, struct messaging_selfsend_state);
411 messaging_dispatch_rec(state->msg, &state->rec);
/*
 * Convenience wrapper: wrap (buf, len) in a non-owning DATA_BLOB and
 * forward to messaging_send().
 */
415 NTSTATUS messaging_send_buf(struct messaging_context *msg_ctx,
416 struct server_id server, uint32_t msg_type,
417 const uint8_t *buf, size_t len)
419 DATA_BLOB blob = data_blob_const(buf, len);
420 return messaging_send(msg_ctx, server, msg_type, &blob);
/*
 * Send a scatter/gather payload: flatten the iovec into one talloc buffer
 * on the talloc stackframe, then send it via messaging_send_buf().
 */
423 NTSTATUS messaging_send_iov(struct messaging_context *msg_ctx,
424 struct server_id server, uint32_t msg_type,
425 const struct iovec *iov, int iovlen)
430 buf = iov_buf(talloc_tos(), iov, iovlen);
432 return NT_STATUS_NO_MEMORY;
435 status = messaging_send_buf(msg_ctx, server, msg_type,
436 buf, talloc_get_size(buf));
/*
 * Deep-copy a messaging_rec onto mem_ctx, duplicating the payload buffer.
 * Uses a pooled object sized for struct plus payload so the inner
 * talloc_memdup cannot fail.  Returns NULL only on the outer allocation
 * failure.
 */
442 static struct messaging_rec *messaging_rec_dup(TALLOC_CTX *mem_ctx,
443 struct messaging_rec *rec)
445 struct messaging_rec *result;
447 result = talloc_pooled_object(mem_ctx, struct messaging_rec,
449 if (result == NULL) {
454 /* Doesn't fail, see talloc_pooled_object */
456 result->buf.data = talloc_memdup(result, rec->buf.data,
/*
 * Per-request state for messaging_filtered_read_send: the event context
 * the caller waits on, the messaging context, the caller's filter
 * predicate, and the duplicated record delivered on completion.
 */
461 struct messaging_filtered_read_state {
462 struct tevent_context *ev;
463 struct messaging_context *msg_ctx;
466 bool (*filter)(struct messaging_rec *rec, void *private_data);
469 struct messaging_rec *rec;
472 static void messaging_filtered_read_cleanup(struct tevent_req *req,
473 enum tevent_req_state req_state);
/*
 * Async request: wait for the first incoming message for which
 * filter(rec, private_data) returns true.  The request is parked in the
 * "new_waiters" array (not "waiters") so that registration from within a
 * running dispatch loop cannot corrupt the array being iterated.
 */
475 struct tevent_req *messaging_filtered_read_send(
476 TALLOC_CTX *mem_ctx, struct tevent_context *ev,
477 struct messaging_context *msg_ctx,
478 bool (*filter)(struct messaging_rec *rec, void *private_data),
481 struct tevent_req *req;
482 struct messaging_filtered_read_state *state;
483 size_t new_waiters_len;
485 req = tevent_req_create(mem_ctx, &state,
486 struct messaging_filtered_read_state);
491 state->msg_ctx = msg_ctx;
492 state->filter = filter;
493 state->private_data = private_data;
496 * We have to defer the callback here, as we might be called from
497 * within a different tevent_context than state->ev
499 tevent_req_defer_callback(req, state->ev);
501 state->tevent_handle = messaging_dgm_register_tevent_context(
/* NOTE(review): nomem-checking 'state' (which tevent_req_create already
 * guaranteed non-NULL) looks odd — upstream checks
 * state->tevent_handle here; confirm against the full source. */
503 if (tevent_req_nomem(state, req)) {
504 return tevent_req_post(req, ev);
508 * We add ourselves to the "new_waiters" array, not the "waiters"
509 * array. If we are called from within messaging_read_done,
510 * messaging_dispatch_rec will be in an active for-loop on
511 * "waiters". We must be careful not to mess with this array, because
512 * it could mean that a single event is being delivered twice.
515 new_waiters_len = talloc_array_length(msg_ctx->new_waiters);
/* Grow new_waiters by one slot when it is full. */
517 if (new_waiters_len == msg_ctx->num_new_waiters) {
518 struct tevent_req **tmp;
520 tmp = talloc_realloc(msg_ctx, msg_ctx->new_waiters,
521 struct tevent_req *, new_waiters_len+1);
522 if (tevent_req_nomem(tmp, req)) {
523 return tevent_req_post(req, ev);
525 msg_ctx->new_waiters = tmp;
528 msg_ctx->new_waiters[msg_ctx->num_new_waiters] = req;
529 msg_ctx->num_new_waiters += 1;
/* Cleanup fn NULLs our slot out of [new_]waiters on teardown. */
530 tevent_req_set_cleanup_fn(req, messaging_filtered_read_cleanup);
/*
 * Request teardown: unregister the dgm tevent handle and blank (set to
 * NULL, never remove/compact) our slot in both waiter arrays, because we
 * may be called from inside messaging_dispatch_rec's loop over "waiters".
 * Compaction of NULL slots happens later in the dispatch loop itself.
 */
535 static void messaging_filtered_read_cleanup(struct tevent_req *req,
536 enum tevent_req_state req_state)
538 struct messaging_filtered_read_state *state = tevent_req_data(
539 req, struct messaging_filtered_read_state);
540 struct messaging_context *msg_ctx = state->msg_ctx;
/* Avoid re-entry: detach the cleanup fn before doing anything else. */
543 tevent_req_set_cleanup_fn(req, NULL);
545 TALLOC_FREE(state->tevent_handle);
548 * Just set the [new_]waiters entry to NULL, be careful not to mess
549 * with the other "waiters" array contents. We are often called from
550 * within "messaging_dispatch_rec", which loops over
551 * "waiters". Messing with the "waiters" array will mess up that
555 for (i=0; i<msg_ctx->num_waiters; i++) {
556 if (msg_ctx->waiters[i] == req) {
557 msg_ctx->waiters[i] = NULL;
562 for (i=0; i<msg_ctx->num_new_waiters; i++) {
563 if (msg_ctx->new_waiters[i] == req) {
564 msg_ctx->new_waiters[i] = NULL;
/*
 * Complete a filtered-read request: deep-copy the matched record into the
 * request state (rec's lifetime ends with the dispatch) and mark the
 * request done.
 */
570 static void messaging_filtered_read_done(struct tevent_req *req,
571 struct messaging_rec *rec)
573 struct messaging_filtered_read_state *state = tevent_req_data(
574 req, struct messaging_filtered_read_state);
576 state->rec = messaging_rec_dup(state, rec);
577 if (tevent_req_nomem(state->rec, req)) {
580 tevent_req_done(req);
/*
 * Receive side of messaging_filtered_read_send: on success moves the
 * duplicated record onto mem_ctx (caller owns it) and returns 0; on
 * failure returns the unix error from the request.
 */
583 int messaging_filtered_read_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
584 struct messaging_rec **presult)
586 struct messaging_filtered_read_state *state = tevent_req_data(
587 req, struct messaging_filtered_read_state);
590 if (tevent_req_is_unix_error(req, &err)) {
591 tevent_req_received(req);
594 *presult = talloc_move(mem_ctx, &state->rec);
/*
 * State for the simpler messaging_read_send wrapper: the wanted msg_type
 * (member elided in this listing) and the received record.
 */
598 struct messaging_read_state {
600 struct messaging_rec *rec;
603 static bool messaging_read_filter(struct messaging_rec *rec,
605 static void messaging_read_done(struct tevent_req *subreq);
/*
 * Async request: wait for the next message of exactly 'msg_type'.
 * Implemented as a filtered read with messaging_read_filter comparing the
 * type.
 */
607 struct tevent_req *messaging_read_send(TALLOC_CTX *mem_ctx,
608 struct tevent_context *ev,
609 struct messaging_context *msg,
612 struct tevent_req *req, *subreq;
613 struct messaging_read_state *state;
615 req = tevent_req_create(mem_ctx, &state,
616 struct messaging_read_state);
620 state->msg_type = msg_type;
622 subreq = messaging_filtered_read_send(state, ev, msg,
623 messaging_read_filter, state);
624 if (tevent_req_nomem(subreq, req)) {
625 return tevent_req_post(req, ev);
627 tevent_req_set_callback(subreq, messaging_read_done, req);
/*
 * Filter predicate for messaging_read_send: match on exact msg_type.
 */
631 static bool messaging_read_filter(struct messaging_rec *rec,
634 struct messaging_read_state *state = talloc_get_type_abort(
635 private_data, struct messaging_read_state);
637 return rec->msg_type == state->msg_type;
/*
 * Subrequest callback: collect the record from the filtered read and
 * propagate result/error to the outer messaging_read request.
 */
640 static void messaging_read_done(struct tevent_req *subreq)
642 struct tevent_req *req = tevent_req_callback_data(
643 subreq, struct tevent_req);
644 struct messaging_read_state *state = tevent_req_data(
645 req, struct messaging_read_state);
648 ret = messaging_filtered_read_recv(subreq, state, &state->rec);
650 if (tevent_req_error(req, ret)) {
653 tevent_req_done(req);
/*
 * Receive side of messaging_read_send: moves the record to mem_ctx if the
 * caller asked for it (presult may be NULL).  Returns 0 on success, a
 * unix error otherwise.
 */
656 int messaging_read_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
657 struct messaging_rec **presult)
659 struct messaging_read_state *state = tevent_req_data(
660 req, struct messaging_read_state);
663 if (tevent_req_is_unix_error(req, &err)) {
666 if (presult != NULL) {
667 *presult = talloc_move(mem_ctx, &state->rec);
/*
 * Merge the "new_waiters" array into "waiters" before a dispatch loop:
 * grows "waiters" if needed, bulk-copies the pointers, and resets the
 * new-waiter count.  Returns false on allocation failure.
 */
672 static bool messaging_append_new_waiters(struct messaging_context *msg_ctx)
674 if (msg_ctx->num_new_waiters == 0) {
/* Ensure capacity for the combined count. */
678 if (talloc_array_length(msg_ctx->waiters) <
679 (msg_ctx->num_waiters + msg_ctx->num_new_waiters)) {
680 struct tevent_req **tmp;
681 tmp = talloc_realloc(
682 msg_ctx, msg_ctx->waiters, struct tevent_req *,
683 msg_ctx->num_waiters + msg_ctx->num_new_waiters);
685 DEBUG(1, ("%s: talloc failed\n", __func__));
688 msg_ctx->waiters = tmp;
691 memcpy(&msg_ctx->waiters[msg_ctx->num_waiters], msg_ctx->new_waiters,
692 sizeof(struct tevent_req *) * msg_ctx->num_new_waiters);
694 msg_ctx->num_waiters += msg_ctx->num_new_waiters;
695 msg_ctx->num_new_waiters = 0;
701 Dispatch one messaging_rec
/*
 * Deliver one incoming record: first to every registered callback whose
 * msg_type matches (all of them, not just the first), then to the async
 * waiters whose filter accepts the record.  NULL waiter slots (blanked by
 * messaging_filtered_read_cleanup) are compacted in-place here, preserving
 * waiter order.
 */
703 void messaging_dispatch_rec(struct messaging_context *msg_ctx,
704 struct messaging_rec *rec)
706 struct messaging_callback *cb, *next;
709 for (cb = msg_ctx->callbacks; cb != NULL; cb = next) {
711 if (cb->msg_type == rec->msg_type) {
712 cb->fn(msg_ctx, cb->private_data, rec->msg_type,
713 rec->src, &rec->buf);
714 /* we continue looking for matching messages
715 after finding one. This matters for
716 subsystems like the internal notify code
717 which register more than one handler for
718 the same message type */
/* Fold requests registered during the callbacks into "waiters". */
722 if (!messaging_append_new_waiters(msg_ctx)) {
727 while (i < msg_ctx->num_waiters) {
728 struct tevent_req *req;
729 struct messaging_filtered_read_state *state;
731 req = msg_ctx->waiters[i];
734 * This got cleaned up. In the meantime,
735 * move everything down one. We need
736 * to keep the order of waiters, as
737 * other code may depend on this.
/* Shift the tail left over the NULL slot, keeping order. */
739 if (i < msg_ctx->num_waiters - 1) {
740 memmove(&msg_ctx->waiters[i],
741 &msg_ctx->waiters[i+1],
742 sizeof(struct tevent_req *) *
743 (msg_ctx->num_waiters - i - 1));
745 msg_ctx->num_waiters -= 1;
749 state = tevent_req_data(
750 req, struct messaging_filtered_read_state);
751 if (state->filter(rec, state->private_data)) {
752 messaging_filtered_read_done(req, rec);
760 static int mess_parent_dgm_cleanup(void *private_data);
761 static void mess_parent_dgm_cleanup_done(struct tevent_req *req);
/*
 * Start the periodic datagram-socket cleanup as a background job in the
 * parent process.  The interval comes from the "messaging:messaging dgm
 * cleanup interval" parametric option; completion re-arms via
 * mess_parent_dgm_cleanup_done.  Returns false on job-send failure
 * (error path elided in this listing).
 */
763 bool messaging_parent_dgm_cleanup_init(struct messaging_context *msg)
765 struct tevent_req *req;
767 req = background_job_send(
768 msg, msg->event_ctx, msg, NULL, 0,
769 lp_parm_int(-1, "messaging", "messaging dgm cleanup interval",
771 mess_parent_dgm_cleanup, msg);
775 tevent_req_set_callback(req, mess_parent_dgm_cleanup_done, msg);
/*
 * Background-job body: wipe stale messaging datagram sockets, then return
 * the configured interval in seconds so the job reschedules itself.
 */
779 static int mess_parent_dgm_cleanup(void *private_data)
781 struct messaging_context *msg_ctx = talloc_get_type_abort(
782 private_data, struct messaging_context);
785 status = messaging_dgm_wipe(msg_ctx);
786 DEBUG(10, ("messaging_dgm_wipe returned %s\n", nt_errstr(status)));
787 return lp_parm_int(-1, "messaging", "messaging dgm cleanup interval",
/*
 * Completion callback for the cleanup job: log how it ended and restart
 * an identical background job so the periodic cleanup keeps running for
 * the life of the parent process.
 */
791 static void mess_parent_dgm_cleanup_done(struct tevent_req *req)
793 struct messaging_context *msg = tevent_req_callback_data(
794 req, struct messaging_context);
797 status = background_job_recv(req);
799 DEBUG(1, ("messaging dgm cleanup job ended with %s\n",
/* Re-arm: schedule the next cleanup run. */
802 req = background_job_send(
803 msg, msg->event_ctx, msg, NULL, 0,
804 lp_parm_int(-1, "messaging", "messaging dgm cleanup interval",
806 mess_parent_dgm_cleanup, msg);
808 DEBUG(1, ("background_job_send failed\n"));
810 tevent_req_set_callback(req, mess_parent_dgm_cleanup_done, msg);