2 * Unix SMB/CIFS implementation.
4 * Copyright (C) Volker Lendecke 2014
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include "lib/util/server_id.h"
23 #include "lib/util/data_blob.h"
24 #include "librpc/gen_ndr/notify.h"
25 #include "librpc/gen_ndr/messaging.h"
26 #include "librpc/gen_ndr/server_id.h"
27 #include "lib/dbwrap/dbwrap.h"
28 #include "lib/dbwrap/dbwrap_rbt.h"
33 #include "lib/util/server_id_db.h"
34 #include "lib/util/tevent_unix.h"
35 #include "lib/util/tevent_ntstatus.h"
36 #include "ctdbd_conn.h"
37 #include "ctdb_srvids.h"
38 #include "server_id_db_util.h"
39 #include "lib/util/iov_buf.h"
40 #include "messages_util.h"
42 #ifdef CLUSTER_SUPPORT
43 #include "ctdb_protocol.h"
49 * All of notifyd's state
52 struct notifyd_state {
53 struct tevent_context *ev;
54 struct messaging_context *msg_ctx;
55 struct ctdbd_connection *ctdbd_conn;
58 * Database of everything clients show interest in. Indexed by
59 * absolute path. The database keys are not 0-terminated
60 * because the criticial operation, notifyd_trigger, can walk
61 * the structure from the top without adding intermediate 0s.
62 * The database records contain an array of
64 * struct notifyd_instance
66 * to be maintained and parsed by notifyd_entry_parse()
68 struct db_context *entries;
71 * In the cluster case, this is the place where we store a log
72 * of all MSG_SMB_NOTIFY_REC_CHANGE messages. We just 1:1
73 * forward them to our peer notifyd's in the cluster once a
74 * second or when the log grows too large.
77 struct messaging_reclog *log;
80 * Array of companion notifyd's in a cluster. Every notifyd
81 * broadcasts its messaging_reclog to every other notifyd in
82 * the cluster. This is done by making ctdb send a message to
83 * srvid CTDB_SRVID_SAMBA_NOTIFY_PROXY with destination node
84 * number CTDB_BROADCAST_VNNMAP. Everybody in the cluster who
85 * had called register_with_ctdbd this srvid will receive the
88 * Database replication happens via these broadcasts. Also,
89 * they serve as liveness indication. If a notifyd receives a
90 * broadcast from an unknown peer, it will create one for this
91 * srvid. Also when we don't hear anything from a peer for a
92 * while, we will discard it.
95 struct notifyd_peer **peers;
98 sys_notify_watch_fn sys_notify_watch;
99 struct sys_notify_context *sys_notify_ctx;
/*
 * One client's registered interest in one path: who asked (client),
 * what they asked for (instance), and the optional kernel watch handle.
 */
103 * notifyd's representation of a notify instance
105 struct notifyd_instance {
106 struct server_id client;
107 struct notify_instance instance;
109 void *sys_watch; /* inotify/fam/etc handle */
112 * Filters after sys_watch took responsibility of some bits
114 uint32_t internal_filter;
115 uint32_t internal_subdir_filter;
/*
 * A companion notifyd on another cluster node. "db" holds our local
 * replica of that peer's entries database (NULL until the peer sent
 * its database); last_broadcast drives the liveness timeout in
 * notifyd_clean_peers_next().
 */
118 struct notifyd_peer {
119 struct notifyd_state *state;
120 struct server_id pid;
122 struct db_context *db;
123 time_t last_broadcast;
/* Forward declarations for the message handlers and the async
 * cluster-replication helpers defined further down in this file. */
126 static void notifyd_rec_change(struct messaging_context *msg_ctx,
127 void *private_data, uint32_t msg_type,
128 struct server_id src, DATA_BLOB *data);
129 static bool notifyd_trigger(struct messaging_context *msg_ctx,
130 struct messaging_rec **prec,
132 static bool notifyd_get_db(struct messaging_context *msg_ctx,
133 struct messaging_rec **prec,
136 #ifdef CLUSTER_SUPPORT
137 static bool notifyd_got_db(struct messaging_context *msg_ctx,
138 struct messaging_rec **prec,
140 static void notifyd_broadcast_reclog(struct ctdbd_connection *ctdbd_conn,
141 struct server_id src,
142 struct messaging_reclog *log);
144 static void notifyd_sys_callback(struct sys_notify_context *ctx,
145 void *private_data, struct notify_event *ev,
148 #ifdef CLUSTER_SUPPORT
149 static struct tevent_req *notifyd_broadcast_reclog_send(
150 TALLOC_CTX *mem_ctx, struct tevent_context *ev,
151 struct ctdbd_connection *ctdbd_conn, struct server_id src,
152 struct messaging_reclog *log);
153 static int notifyd_broadcast_reclog_recv(struct tevent_req *req);
155 static struct tevent_req *notifyd_clean_peers_send(
156 TALLOC_CTX *mem_ctx, struct tevent_context *ev,
157 struct notifyd_state *notifyd);
158 static int notifyd_clean_peers_recv(struct tevent_req *req);
/*
 * No-op sys_notify backend used when notifyd_send() is given no real
 * watch function; presumably it only clears *handle_p and succeeds —
 * interior lines are missing from this listing, confirm against the
 * full source.
 */
161 static int sys_notify_watch_dummy(
163 struct sys_notify_context *ctx,
166 uint32_t *subdir_filter,
167 void (*callback)(struct sys_notify_context *ctx,
169 struct notify_event *ev,
174 void **handle = handle_p;
/* Completion callbacks for the long-running subrequests started in
 * notifyd_send(), plus the ctdb broadcast snooper (cluster only). */
179 static void notifyd_handler_done(struct tevent_req *subreq);
181 #ifdef CLUSTER_SUPPORT
182 static void notifyd_broadcast_reclog_finished(struct tevent_req *subreq);
183 static void notifyd_clean_peers_finished(struct tevent_req *subreq);
184 static int notifyd_snoop_broadcast(uint32_t src_vnn, uint32_t dst_vnn,
186 const uint8_t *msg, size_t msglen,
/*
 * Start the notify daemon as an async tevent request.
 *
 * Sets up notifyd_state, opens the in-memory entries database,
 * registers the MSG_SMB_NOTIFY_REC_CHANGE / _TRIGGER / _GET_DB message
 * handlers, claims the "notify-daemon" name exclusively, and — when a
 * ctdb connection is present and CLUSTER_SUPPORT is compiled in —
 * starts the reclog broadcast and peer-cleanup loops and registers the
 * broadcast snooper with ctdb.
 *
 * The request only completes on error or shutdown; result via
 * notifyd_recv(). On any setup failure after messaging_register(), the
 * rec_change handler is deregistered via the common exit path below.
 */
190 struct tevent_req *notifyd_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
191 struct messaging_context *msg_ctx,
192 struct ctdbd_connection *ctdbd_conn,
193 sys_notify_watch_fn sys_notify_watch,
194 struct sys_notify_context *sys_notify_ctx)
196 struct tevent_req *req, *subreq;
197 struct notifyd_state *state;
198 struct server_id_db *names_db;
202 req = tevent_req_create(mem_ctx, &state, struct notifyd_state);
207 state->msg_ctx = msg_ctx;
208 state->ctdbd_conn = ctdbd_conn;
/* Fall back to the do-nothing watch backend if the caller has none. */
210 if (sys_notify_watch == NULL) {
211 sys_notify_watch = sys_notify_watch_dummy;
214 state->sys_notify_watch = sys_notify_watch;
215 state->sys_notify_ctx = sys_notify_ctx;
/* Path -> notifyd_instance[] database, in-memory red-black tree. */
217 state->entries = db_open_rbt(state);
218 if (tevent_req_nomem(state->entries, req)) {
219 return tevent_req_post(req, ev);
222 status = messaging_register(msg_ctx, state, MSG_SMB_NOTIFY_REC_CHANGE,
224 if (tevent_req_nterror(req, status)) {
225 return tevent_req_post(req, ev);
228 subreq = messaging_handler_send(state, ev, msg_ctx,
229 MSG_SMB_NOTIFY_TRIGGER,
230 notifyd_trigger, state);
231 if (tevent_req_nomem(subreq, req)) {
232 goto deregister_rec_change;
234 tevent_req_set_callback(subreq, notifyd_handler_done, req);
236 subreq = messaging_handler_send(state, ev, msg_ctx,
237 MSG_SMB_NOTIFY_GET_DB,
238 notifyd_get_db, state);
239 if (tevent_req_nomem(subreq, req)) {
240 goto deregister_rec_change;
242 tevent_req_set_callback(subreq, notifyd_handler_done, req);
/* Make sure only one notifyd runs under this messaging context. */
244 names_db = messaging_names_db(msg_ctx);
246 ret = server_id_db_set_exclusive(names_db, "notify-daemon");
248 DEBUG(10, ("%s: server_id_db_add failed: %s\n",
249 __func__, strerror(ret)));
250 tevent_req_error(req, ret);
251 goto deregister_rec_change;
254 if (ctdbd_conn == NULL) {
256 * No cluster around, skip the database replication
262 #ifdef CLUSTER_SUPPORT
263 subreq = messaging_handler_send(state, ev, msg_ctx,
265 notifyd_got_db, state);
266 if (tevent_req_nomem(subreq, req)) {
267 goto deregister_rec_change;
269 tevent_req_set_callback(subreq, notifyd_handler_done, req);
/* The reclog is parented on state; it accumulates REC_CHANGE
 * messages for periodic broadcast to peer notifyds. */
271 state->log = talloc_zero(state, struct messaging_reclog);
272 if (tevent_req_nomem(state->log, req)) {
273 goto deregister_rec_change;
276 subreq = notifyd_broadcast_reclog_send(
277 state->log, ev, ctdbd_conn,
278 messaging_server_id(msg_ctx),
280 if (tevent_req_nomem(subreq, req)) {
281 goto deregister_rec_change;
283 tevent_req_set_callback(subreq,
284 notifyd_broadcast_reclog_finished,
287 subreq = notifyd_clean_peers_send(state, ev, state);
288 if (tevent_req_nomem(subreq, req)) {
289 goto deregister_rec_change;
291 tevent_req_set_callback(subreq, notifyd_clean_peers_finished,
294 ret = register_with_ctdbd(ctdbd_conn,
295 CTDB_SRVID_SAMBA_NOTIFY_PROXY,
296 notifyd_snoop_broadcast, state);
298 tevent_req_error(req, ret);
299 goto deregister_rec_change;
/* Common error exit: undo the messaging_register() from above. */
305 deregister_rec_change:
306 messaging_deregister(msg_ctx, MSG_SMB_NOTIFY_REC_CHANGE, state);
307 return tevent_req_post(req, ev);
/*
 * A messaging_handler_send() subrequest finished. These handlers are
 * expected to run forever, so completion is treated as an error which
 * is propagated to the main notifyd request.
 */
310 static void notifyd_handler_done(struct tevent_req *subreq)
312 struct tevent_req *req = tevent_req_callback_data(
313 subreq, struct tevent_req);
316 ret = messaging_handler_recv(subreq);
318 tevent_req_error(req, ret);
321 #ifdef CLUSTER_SUPPORT
/*
 * The periodic reclog broadcast loop ended — it only does so on
 * failure, so report the error on the main notifyd request.
 */
323 static void notifyd_broadcast_reclog_finished(struct tevent_req *subreq)
325 struct tevent_req *req = tevent_req_callback_data(
326 subreq, struct tevent_req);
329 ret = notifyd_broadcast_reclog_recv(subreq);
331 tevent_req_error(req, ret);
/*
 * The peer-cleanup loop ended — like the broadcast loop, this only
 * happens on failure; propagate to the main notifyd request.
 */
334 static void notifyd_clean_peers_finished(struct tevent_req *subreq)
336 struct tevent_req *req = tevent_req_callback_data(
337 subreq, struct tevent_req);
340 ret = notifyd_clean_peers_recv(subreq);
342 tevent_req_error(req, ret);
/*
 * Collect the result of notifyd_send(): 0 on clean completion, an
 * errno-style value otherwise.
 */
347 int notifyd_recv(struct tevent_req *req)
349 return tevent_req_simple_recv_unix(req);
/*
 * Validate and expose a raw entries-database record as an array of
 * struct notifyd_instance. The record is valid iff its size is a
 * multiple of sizeof(struct notifyd_instance); no copy is made —
 * *instances points into the caller's buffer. Either out-parameter
 * may be NULL when the caller only needs the other.
 */
353 * Parse an entry in the notifyd_context->entries database
356 static bool notifyd_parse_entry(uint8_t *buf, size_t buflen,
357 struct notifyd_instance **instances,
358 size_t *num_instances)
360 if ((buflen % sizeof(struct notifyd_instance)) != 0) {
361 DEBUG(1, ("%s: invalid buffer size: %u\n",
362 __func__, (unsigned)buflen));
366 if (instances != NULL) {
367 *instances = (struct notifyd_instance *)buf;
369 if (num_instances != NULL) {
370 *num_instances = buflen / sizeof(struct notifyd_instance);
/*
 * Apply one MSG_SMB_NOTIFY_REC_CHANGE to an entries database.
 *
 * "path" must be 0-terminated (pathlen includes the terminator; the
 * database key excludes it). The (client, private_data) pair
 * identifies the instance: an existing match is updated in place, an
 * unknown pair is appended, and a change with both filters zero is a
 * delete request. Kernel watches are (re)registered via
 * sys_notify_watch and the record is stored or deleted accordingly.
 * Used for both the local entries db and replicated peer dbs.
 */
375 static bool notifyd_apply_rec_change(
376 const struct server_id *client,
377 const char *path, size_t pathlen,
378 const struct notify_instance *chg,
379 struct db_context *entries,
380 sys_notify_watch_fn sys_notify_watch,
381 struct sys_notify_context *sys_notify_ctx,
382 struct messaging_context *msg_ctx)
384 struct db_record *rec;
385 struct notifyd_instance *instances;
386 size_t num_instances;
388 struct notifyd_instance *instance;
394 DEBUG(1, ("%s: pathlen==0\n", __func__));
397 if (path[pathlen-1] != '\0') {
398 DEBUG(1, ("%s: path not 0-terminated\n", __func__));
402 DEBUG(10, ("%s: path=%s, filter=%u, subdir_filter=%u, "
403 "private_data=%p\n", __func__, path,
404 (unsigned)chg->filter, (unsigned)chg->subdir_filter,
/* Key is the path without the trailing 0, matching the comment on
 * notifyd_state.entries. */
407 rec = dbwrap_fetch_locked(
409 make_tdb_data((const uint8_t *)path, pathlen-1));
412 DEBUG(1, ("%s: dbwrap_fetch_locked failed\n", __func__));
417 value = dbwrap_record_get_value(rec);
419 if (value.dsize != 0) {
420 if (!notifyd_parse_entry(value.dptr, value.dsize, NULL,
427 * Overallocate by one instance to avoid a realloc when adding
429 instances = talloc_array(rec, struct notifyd_instance,
431 if (instances == NULL) {
432 DEBUG(1, ("%s: talloc failed\n", __func__));
436 if (value.dsize != 0) {
437 memcpy(instances, value.dptr, value.dsize);
/* Look for an existing instance from the same client with the same
 * private_data cookie. */
440 for (i=0; i<num_instances; i++) {
441 instance = &instances[i];
443 if (server_id_equal(&instance->client, client) &&
444 (instance->instance.private_data == chg->private_data)) {
449 if (i < num_instances) {
450 instance->instance = *chg;
453 * We've overallocated for one instance
455 instance = &instances[num_instances];
457 *instance = (struct notifyd_instance) {
460 .internal_filter = chg->filter,
461 .internal_subdir_filter = chg->subdir_filter
/* Non-empty filters: (re)arm the kernel-level watch. The backend may
 * clear bits in internal_*_filter that it handles itself. */
467 if ((instance->instance.filter != 0) ||
468 (instance->instance.subdir_filter != 0)) {
471 TALLOC_FREE(instance->sys_watch);
473 ret = sys_notify_watch(entries, sys_notify_ctx, path,
474 &instance->internal_filter,
475 &instance->internal_subdir_filter,
476 notifyd_sys_callback, msg_ctx,
477 &instance->sys_watch);
479 DEBUG(1, ("%s: inotify_watch returned %s\n",
480 __func__, strerror(errno)));
484 if ((instance->instance.filter == 0) &&
485 (instance->instance.subdir_filter == 0)) {
486 /* This is a delete request */
487 TALLOC_FREE(instance->sys_watch);
/* Swap-with-last removal; order of instances is not significant. */
488 *instance = instances[num_instances-1];
492 DEBUG(10, ("%s: %s has %u instances\n", __func__,
493 path, (unsigned)num_instances));
495 if (num_instances == 0) {
496 status = dbwrap_record_delete(rec);
497 if (!NT_STATUS_IS_OK(status)) {
498 DEBUG(1, ("%s: dbwrap_record_delete returned %s\n",
499 __func__, nt_errstr(status)));
503 value = make_tdb_data(
504 (uint8_t *)instances,
505 sizeof(struct notifyd_instance) * num_instances);
507 status = dbwrap_record_store(rec, value, 0);
508 if (!NT_STATUS_IS_OK(status)) {
509 DEBUG(1, ("%s: dbwrap_record_store returned %s\n",
510 __func__, nt_errstr(status)));
/*
 * Callback invoked by the sys_notify backend (inotify/fam) when the
 * kernel reports a change. Re-injects the event into notifyd itself
 * as a MSG_SMB_NOTIFY_TRIGGER message built from dir + "/" + path so
 * it flows through the same notifyd_trigger path as client-reported
 * changes.
 */
521 static void notifyd_sys_callback(struct sys_notify_context *ctx,
522 void *private_data, struct notify_event *ev,
525 struct messaging_context *msg_ctx = talloc_get_type_abort(
526 private_data, struct messaging_context);
527 struct notify_trigger_msg msg;
531 msg = (struct notify_trigger_msg) {
532 .when = timespec_current(),
533 .action = ev->action,
/* Wire format: fixed header up to "path", then dir, '/', path and its
 * terminating 0, assembled via iovec to avoid a copy. */
537 iov[0].iov_base = &msg;
538 iov[0].iov_len = offsetof(struct notify_trigger_msg, path);
539 iov[1].iov_base = discard_const_p(char, ev->dir);
540 iov[1].iov_len = strlen(ev->dir);
541 iov[2].iov_base = &slash;
543 iov[3].iov_base = discard_const_p(char, ev->path);
544 iov[3].iov_len = strlen(ev->path)+1;
/* Send to ourselves. */
547 msg_ctx, messaging_server_id(msg_ctx),
548 MSG_SMB_NOTIFY_TRIGGER, iov, ARRAY_SIZE(iov), NULL, 0);
/*
 * Validate an incoming MSG_SMB_NOTIFY_REC_CHANGE buffer and expose it
 * as a struct notify_rec_change_msg. No copy: *pmsg aliases buf.
 * *pathlen is the number of bytes following the fixed header
 * (the path including its 0-terminator, checked later by
 * notifyd_apply_rec_change).
 */
551 static bool notifyd_parse_rec_change(uint8_t *buf, size_t bufsize,
552 struct notify_rec_change_msg **pmsg,
555 struct notify_rec_change_msg *msg;
/* Need at least the fixed header plus one path byte. */
557 if (bufsize < offsetof(struct notify_rec_change_msg, path) + 1) {
558 DEBUG(1, ("%s: message too short, ignoring: %u\n", __func__,
563 *pmsg = msg = (struct notify_rec_change_msg *)buf;
564 *pathlen = bufsize - offsetof(struct notify_rec_change_msg, path);
566 DEBUG(10, ("%s: Got rec_change_msg filter=%u, subdir_filter=%u, "
567 "private_data=%p, path=%.*s\n",
568 __func__, (unsigned)msg->instance.filter,
569 (unsigned)msg->instance.subdir_filter,
570 msg->instance.private_data, (int)(*pathlen), msg->path));
/*
 * Handler for MSG_SMB_NOTIFY_REC_CHANGE: a client (smbd) adds, updates
 * or removes a notify registration. Applies the change to the local
 * entries database and, in the cluster case, appends the raw message
 * to the reclog for replication to peer notifyds, flushing early when
 * the log reaches 100 records.
 */
575 static void notifyd_rec_change(struct messaging_context *msg_ctx,
576 void *private_data, uint32_t msg_type,
577 struct server_id src, DATA_BLOB *data)
579 struct notifyd_state *state = talloc_get_type_abort(
580 private_data, struct notifyd_state);
581 struct server_id_buf idbuf;
582 struct notify_rec_change_msg *msg;
586 DBG_DEBUG("Got %zu bytes from %s\n", data->length,
587 server_id_str_buf(src, &idbuf));
589 ok = notifyd_parse_rec_change(data->data, data->length,
595 ok = notifyd_apply_rec_change(
596 &src, msg->path, pathlen, &msg->instance,
597 state->entries, state->sys_notify_watch, state->sys_notify_ctx,
600 DEBUG(1, ("%s: notifyd_apply_rec_change failed, ignoring\n",
/* Not clustered (or no ctdb connection): nothing to replicate. */
605 if ((state->log == NULL) || (state->ctdbd_conn == NULL)) {
609 #ifdef CLUSTER_SUPPORT
612 struct messaging_rec **tmp;
613 struct messaging_reclog *log;
614 struct iovec iov = { .iov_base = data->data, .iov_len = data->length };
618 tmp = talloc_realloc(log, log->recs, struct messaging_rec *,
621 DEBUG(1, ("%s: talloc_realloc failed, ignoring\n", __func__));
/* Store a copy of the message in the reclog for the next broadcast. */
626 log->recs[log->num_recs] = messaging_rec_create(
627 log->recs, src, messaging_server_id(msg_ctx),
628 msg_type, &iov, 1, NULL, 0);
630 if (log->recs[log->num_recs] == NULL) {
631 DBG_WARNING("messaging_rec_create failed, ignoring\n");
637 if (log->num_recs >= 100) {
639 * Don't let the log grow too large
641 notifyd_broadcast_reclog(state->ctdbd_conn,
642 messaging_server_id(msg_ctx), log);
/*
 * Per-trigger context handed to notifyd_trigger_parser() via
 * dbwrap_parse_record for every path prefix that is tried.
 */
649 struct notifyd_trigger_state {
650 struct messaging_context *msg_ctx;
651 struct notify_trigger_msg *msg;
653 bool covered_by_sys_notify;
656 static void notifyd_trigger_parser(TDB_DATA key, TDB_DATA data,
/*
 * Handler for MSG_SMB_NOTIFY_TRIGGER: something changed at an absolute
 * path. Walks every directory prefix of the path ("/a", "/a/b", ...)
 * and runs notifyd_trigger_parser() against the local entries db and,
 * for locally-originated events, against each peer's replicated db.
 * This prefix walk is why database keys are stored without a trailing
 * 0 (see notifyd_state.entries).
 */
659 static bool notifyd_trigger(struct messaging_context *msg_ctx,
660 struct messaging_rec **prec,
663 struct notifyd_state *state = talloc_get_type_abort(
664 private_data, struct notifyd_state);
665 struct server_id my_id = messaging_server_id(msg_ctx);
666 struct messaging_rec *rec = *prec;
667 struct notifyd_trigger_state tstate;
669 const char *p, *next_p;
671 if (rec->buf.length < offsetof(struct notify_trigger_msg, path) + 1) {
672 DEBUG(1, ("message too short, ignoring: %u\n",
673 (unsigned)rec->buf.length));
676 if (rec->buf.data[rec->buf.length-1] != 0) {
677 DEBUG(1, ("%s: path not 0-terminated, ignoring\n", __func__));
681 tstate.msg_ctx = msg_ctx;
/* Same node but not notifyd itself: the kernel backend already
 * covered the internal_* filter bits for local listeners. */
683 tstate.covered_by_sys_notify = (rec->src.vnn == my_id.vnn);
684 tstate.covered_by_sys_notify &= !server_id_equal(&rec->src, &my_id);
686 tstate.msg = (struct notify_trigger_msg *)rec->buf.data;
687 path = tstate.msg->path;
689 DEBUG(10, ("%s: Got trigger_msg action=%u, filter=%u, path=%s\n",
690 __func__, (unsigned)tstate.msg->action,
691 (unsigned)tstate.msg->filter, path));
693 if (path[0] != '/') {
694 DEBUG(1, ("%s: path %s does not start with /, ignoring\n",
/* Try each parent directory of the changed path in turn. */
699 for (p = strchr(path+1, '/'); p != NULL; p = next_p) {
700 ptrdiff_t path_len = p - path;
704 next_p = strchr(p+1, '/');
705 tstate.recursive = (next_p != NULL);
707 DEBUG(10, ("%s: Trying path %.*s\n", __func__,
708 (int)path_len, path));
710 key = (TDB_DATA) { .dptr = discard_const_p(uint8_t, path),
713 dbwrap_parse_record(state->entries, key,
714 notifyd_trigger_parser, &tstate);
716 if (state->peers == NULL) {
/* Only forward locally-originated events to peer databases;
 * remote events were already matched on their home node. */
720 if (rec->src.vnn != my_id.vnn) {
724 for (i=0; i<state->num_peers; i++) {
725 if (state->peers[i]->db == NULL) {
727 * Inactive peer, did not get a db yet
731 dbwrap_parse_record(state->peers[i]->db, key,
732 notifyd_trigger_parser, &tstate);
739 static void notifyd_send_delete(struct messaging_context *msg_ctx,
741 struct notifyd_instance *instance);
/*
 * dbwrap_parse_record callback: one entries-db record matched a
 * directory prefix of a triggered path. Builds a notify_event_msg and
 * sends MSG_PVFS_NOTIFY to every registered instance whose effective
 * filter (recursive vs. direct, internal vs. full — depending on
 * whether sys_notify already covered it) matches the event. Local
 * clients that have died get cleaned up via notifyd_send_delete().
 */
743 static void notifyd_trigger_parser(TDB_DATA key, TDB_DATA data,
747 struct notifyd_trigger_state *tstate = private_data;
748 struct notify_event_msg msg = { .action = tstate->msg->action,
749 .when = tstate->msg->when };
751 size_t path_len = key.dsize;
752 struct notifyd_instance *instances = NULL;
753 size_t num_instances = 0;
756 if (!notifyd_parse_entry(data.dptr, data.dsize, &instances,
758 DEBUG(1, ("%s: Could not parse notifyd_entry\n", __func__));
762 DEBUG(10, ("%s: Found %u instances for %.*s\n", __func__,
763 (unsigned)num_instances, (int)key.dsize,
/* Event payload: fixed header, then the path relative to the watched
 * directory (the bytes after the matched prefix plus the '/'). */
766 iov[0].iov_base = &msg;
767 iov[0].iov_len = offsetof(struct notify_event_msg, path);
768 iov[1].iov_base = tstate->msg->path + path_len + 1;
769 iov[1].iov_len = strlen((char *)(iov[1].iov_base)) + 1;
771 for (i=0; i<num_instances; i++) {
772 struct notifyd_instance *instance = &instances[i];
773 struct server_id_buf idbuf;
/* Pick the filter to match against: the internal_* filters are what
 * remains after the kernel backend took over some bits. */
777 if (tstate->covered_by_sys_notify) {
778 if (tstate->recursive) {
779 i_filter = instance->internal_subdir_filter;
781 i_filter = instance->internal_filter;
784 if (tstate->recursive) {
785 i_filter = instance->instance.subdir_filter;
787 i_filter = instance->instance.filter;
791 if ((i_filter & tstate->msg->filter) == 0) {
795 msg.private_data = instance->instance.private_data;
797 status = messaging_send_iov(
798 tstate->msg_ctx, instance->client,
799 MSG_PVFS_NOTIFY, iov, ARRAY_SIZE(iov), NULL, 0);
801 DEBUG(10, ("%s: messaging_send_iov to %s returned %s\n",
803 server_id_str_buf(instance->client, &idbuf),
806 if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND) &&
807 procid_is_local(&instance->client)) {
809 * That process has died
811 notifyd_send_delete(tstate->msg_ctx, key, instance);
815 if (!NT_STATUS_IS_OK(status)) {
816 DEBUG(1, ("%s: messaging_send_iov returned %s\n",
817 __func__, nt_errstr(status)));
/*
 * Send a delete request to ourselves to properly discard a notify
 * record for an smbd that has died. The rec_change is sent "from" the
 * dead client (messaging_send_iov_from) with zero filters, which
 * notifyd_apply_rec_change treats as a delete. The db key lacks the
 * trailing 0, so it is re-appended here as a third iovec.
 */
823 * Send a delete request to ourselves to properly discard a notify
824 * record for an smbd that has died.
827 static void notifyd_send_delete(struct messaging_context *msg_ctx,
829 struct notifyd_instance *instance)
831 struct notify_rec_change_msg msg = {
832 .instance.private_data = instance->instance.private_data
839 * Send a rec_change to ourselves to delete a dead entry
842 iov[0] = (struct iovec) {
844 .iov_len = offsetof(struct notify_rec_change_msg, path) };
845 iov[1] = (struct iovec) { .iov_base = key.dptr, .iov_len = key.dsize };
846 iov[2] = (struct iovec) { .iov_base = &nul, .iov_len = sizeof(nul) };
848 ret = messaging_send_iov_from(
849 msg_ctx, instance->client, messaging_server_id(msg_ctx),
850 MSG_SMB_NOTIFY_REC_CHANGE, iov, ARRAY_SIZE(iov), NULL, 0);
853 DEBUG(10, ("%s: messaging_send_iov_from returned %s\n",
854 __func__, strerror(ret)));
/*
 * Handler for MSG_SMB_NOTIFY_GET_DB: a peer notifyd asks for our full
 * entries database. Replies with MSG_SMB_NOTIFY_DB containing an
 * 8-byte little-endian reclog index (UINT64_MAX when not clustered)
 * followed by the dbwrap_marshall'ed database.
 */
858 static bool notifyd_get_db(struct messaging_context *msg_ctx,
859 struct messaging_rec **prec,
862 struct notifyd_state *state = talloc_get_type_abort(
863 private_data, struct notifyd_state);
864 struct messaging_rec *rec = *prec;
865 struct server_id_buf id1, id2;
867 uint64_t rec_index = UINT64_MAX;
868 uint8_t index_buf[sizeof(uint64_t)];
/* First call sizes the buffer, second call fills it. */
873 dbsize = dbwrap_marshall(state->entries, NULL, 0);
875 buf = talloc_array(rec, uint8_t, dbsize);
877 DEBUG(1, ("%s: talloc_array(%ju) failed\n",
878 __func__, (uintmax_t)dbsize));
882 dbsize = dbwrap_marshall(state->entries, buf, dbsize);
/* Sanity: the db must not have changed between the two calls. */
884 if (dbsize != talloc_get_size(buf)) {
885 DEBUG(1, ("%s: dbsize changed: %ju->%ju\n", __func__,
886 (uintmax_t)talloc_get_size(buf),
892 if (state->log != NULL) {
893 rec_index = state->log->rec_index;
895 SBVAL(index_buf, 0, rec_index);
897 iov[0] = (struct iovec) { .iov_base = index_buf,
898 .iov_len = sizeof(index_buf) };
899 iov[1] = (struct iovec) { .iov_base = buf,
902 DEBUG(10, ("%s: Sending %ju bytes to %s->%s\n", __func__,
903 (uintmax_t)iov_buflen(iov, ARRAY_SIZE(iov)),
904 server_id_str_buf(messaging_server_id(msg_ctx), &id1),
905 server_id_str_buf(rec->src, &id2)));
907 status = messaging_send_iov(msg_ctx, rec->src, MSG_SMB_NOTIFY_DB,
908 iov, ARRAY_SIZE(iov), NULL, 0);
910 if (!NT_STATUS_IS_OK(status)) {
911 DEBUG(1, ("%s: messaging_send_iov failed: %s\n",
912 __func__, nt_errstr(status)));
918 #ifdef CLUSTER_SUPPORT
920 static int notifyd_add_proxy_syswatches(struct db_record *rec,
/*
 * Handler for MSG_SMB_NOTIFY_DB: a peer sent us its full database in
 * response to our MSG_SMB_NOTIFY_GET_DB. Format matches
 * notifyd_get_db: 8-byte reclog index, then a marshalled db. The
 * unmarshalled copy is attached to the matching peer, and local
 * kernel watches are registered for every entry via
 * notifyd_add_proxy_syswatches().
 */
923 static bool notifyd_got_db(struct messaging_context *msg_ctx,
924 struct messaging_rec **prec,
927 struct notifyd_state *state = talloc_get_type_abort(
928 private_data, struct notifyd_state);
929 struct messaging_rec *rec = *prec;
930 struct notifyd_peer *p = NULL;
931 struct server_id_buf idbuf;
/* Find the peer this database belongs to. */
936 for (i=0; i<state->num_peers; i++) {
937 if (server_id_equal(&rec->src, &state->peers[i]->pid)) {
944 DEBUG(10, ("%s: Did not find peer for db from %s\n",
945 __func__, server_id_str_buf(rec->src, &idbuf)));
949 if (rec->buf.length < 8) {
950 DEBUG(10, ("%s: Got short db length %u from %s\n", __func__,
951 (unsigned)rec->buf.length,
952 server_id_str_buf(rec->src, &idbuf)));
957 p->rec_index = BVAL(rec->buf.data, 0);
959 p->db = db_open_rbt(p);
961 DEBUG(10, ("%s: db_open_rbt failed\n", __func__));
966 status = dbwrap_unmarshall(p->db, rec->buf.data + 8,
967 rec->buf.length - 8);
968 if (!NT_STATUS_IS_OK(status)) {
969 DEBUG(10, ("%s: dbwrap_unmarshall returned %s for db %s\n",
970 __func__, nt_errstr(status),
971 server_id_str_buf(rec->src, &idbuf)));
976 dbwrap_traverse_read(p->db, notifyd_add_proxy_syswatches, state,
979 DEBUG(10, ("%s: Database from %s contained %d records\n", __func__,
980 server_id_str_buf(rec->src, &idbuf), count));
/*
 * Broadcast the accumulated reclog to all peer notifyds via ctdb
 * (srvid CTDB_SRVID_SAMBA_NOTIFY_PROXY, CTDB_BROADCAST_VNNMAP).
 * Payload: a messaging header (MSG_SMB_NOTIFY_REC_CHANGES, from src)
 * followed by the NDR-pushed messaging_reclog. On success (and on
 * push failure paths visible below) the rec_index is advanced and the
 * record list reset for the next interval.
 */
985 static void notifyd_broadcast_reclog(struct ctdbd_connection *ctdbd_conn,
986 struct server_id src,
987 struct messaging_reclog *log)
989 enum ndr_err_code ndr_err;
990 uint8_t msghdr[MESSAGE_HDR_LENGTH];
999 DEBUG(10, ("%s: rec_index=%ju, num_recs=%u\n", __func__,
1000 (uintmax_t)log->rec_index, (unsigned)log->num_recs));
/* Destination id 0: the receivers dispatch on the ctdb srvid, not on
 * the messaging destination. */
1002 message_hdr_put(msghdr, MSG_SMB_NOTIFY_REC_CHANGES, src,
1003 (struct server_id) {0 });
1004 iov[0] = (struct iovec) { .iov_base = msghdr,
1005 .iov_len = sizeof(msghdr) };
1007 ndr_err = ndr_push_struct_blob(
1009 (ndr_push_flags_fn_t)ndr_push_messaging_reclog);
1010 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1011 DEBUG(1, ("%s: ndr_push_messaging_recs failed: %s\n",
1012 __func__, ndr_errstr(ndr_err)));
1015 iov[1] = (struct iovec) { .iov_base = blob.data,
1016 .iov_len = blob.length };
1018 ret = ctdbd_messaging_send_iov(
1019 ctdbd_conn, CTDB_BROADCAST_VNNMAP,
1020 CTDB_SRVID_SAMBA_NOTIFY_PROXY, iov, ARRAY_SIZE(iov));
1021 TALLOC_FREE(blob.data);
1023 DEBUG(1, ("%s: ctdbd_messaging_send failed: %s\n",
1024 __func__, strerror(ret)));
/* Start a fresh log segment for the next broadcast. */
1028 log->rec_index += 1;
1032 TALLOC_FREE(log->recs);
/* State for the once-per-second reclog broadcast loop. */
1035 struct notifyd_broadcast_reclog_state {
1036 struct tevent_context *ev;
1037 struct ctdbd_connection *ctdbd_conn;
1038 struct server_id src;
1039 struct messaging_reclog *log;
1042 static void notifyd_broadcast_reclog_next(struct tevent_req *subreq);
/*
 * Start the endless broadcast loop: wake up every 1000ms and push the
 * reclog to the cluster (see notifyd_broadcast_reclog_next). The
 * request never completes except on error.
 */
1044 static struct tevent_req *notifyd_broadcast_reclog_send(
1045 TALLOC_CTX *mem_ctx, struct tevent_context *ev,
1046 struct ctdbd_connection *ctdbd_conn, struct server_id src,
1047 struct messaging_reclog *log)
1049 struct tevent_req *req, *subreq;
1050 struct notifyd_broadcast_reclog_state *state;
1052 req = tevent_req_create(mem_ctx, &state,
1053 struct notifyd_broadcast_reclog_state);
1058 state->ctdbd_conn = ctdbd_conn;
1062 subreq = tevent_wakeup_send(state, state->ev,
1063 timeval_current_ofs_msec(1000));
1064 if (tevent_req_nomem(subreq, req)) {
1065 return tevent_req_post(req, ev);
1067 tevent_req_set_callback(subreq, notifyd_broadcast_reclog_next, req);
/*
 * Timer fired: broadcast the current reclog and re-arm the 1000ms
 * wakeup, keeping the loop running indefinitely.
 */
1071 static void notifyd_broadcast_reclog_next(struct tevent_req *subreq)
1073 struct tevent_req *req = tevent_req_callback_data(
1074 subreq, struct tevent_req);
1075 struct notifyd_broadcast_reclog_state *state = tevent_req_data(
1076 req, struct notifyd_broadcast_reclog_state);
1079 ok = tevent_wakeup_recv(subreq);
1080 TALLOC_FREE(subreq);
1082 tevent_req_oom(req);
1086 notifyd_broadcast_reclog(state->ctdbd_conn, state->src, state->log);
1088 subreq = tevent_wakeup_send(state, state->ev,
1089 timeval_current_ofs_msec(1000));
1090 if (tevent_req_nomem(subreq, req)) {
1093 tevent_req_set_callback(subreq, notifyd_broadcast_reclog_next, req);
/* Collect the (error-only) result of the broadcast loop. */
1096 static int notifyd_broadcast_reclog_recv(struct tevent_req *req)
1098 return tevent_req_simple_recv_unix(req);
/* State for the periodic dead-peer cleanup loop. */
1101 struct notifyd_clean_peers_state {
1102 struct tevent_context *ev;
1103 struct notifyd_state *notifyd;
1106 static void notifyd_clean_peers_next(struct tevent_req *subreq);
/*
 * Start the endless peer-cleanup loop: wake every 30s and drop peers
 * that have been silent too long (see notifyd_clean_peers_next).
 */
1108 static struct tevent_req *notifyd_clean_peers_send(
1109 TALLOC_CTX *mem_ctx, struct tevent_context *ev,
1110 struct notifyd_state *notifyd)
1112 struct tevent_req *req, *subreq;
1113 struct notifyd_clean_peers_state *state;
1115 req = tevent_req_create(mem_ctx, &state,
1116 struct notifyd_clean_peers_state);
1121 state->notifyd = notifyd;
1123 subreq = tevent_wakeup_send(state, state->ev,
1124 timeval_current_ofs_msec(30000));
1125 if (tevent_req_nomem(subreq, req)) {
1126 return tevent_req_post(req, ev);
1128 tevent_req_set_callback(subreq, notifyd_clean_peers_next, req);
/*
 * Timer fired: discard every peer whose last broadcast is more than
 * 60 seconds old (peers broadcast once per second, so this is a very
 * generous liveness window), then re-arm the 30s wakeup.
 */
1132 static void notifyd_clean_peers_next(struct tevent_req *subreq)
1134 struct tevent_req *req = tevent_req_callback_data(
1135 subreq, struct tevent_req);
1136 struct notifyd_clean_peers_state *state = tevent_req_data(
1137 req, struct notifyd_clean_peers_state);
1138 struct notifyd_state *notifyd = state->notifyd;
1141 time_t now = time(NULL);
1143 ok = tevent_wakeup_recv(subreq);
1144 TALLOC_FREE(subreq);
1146 tevent_req_oom(req);
/* while-loop (not for): freeing a peer swaps the last array element
 * into slot i (see notifyd_peer_destructor), so i must only advance
 * when the current peer survives. */
1151 while (i < notifyd->num_peers) {
1152 struct notifyd_peer *p = notifyd->peers[i];
1154 if ((now - p->last_broadcast) > 60) {
1155 struct server_id_buf idbuf;
1158 * Haven't heard for more than 60 seconds. Call this
1162 DEBUG(10, ("%s: peer %s died\n", __func__,
1163 server_id_str_buf(p->pid, &idbuf)));
1165 * This implicitly decrements notifyd->num_peers
1173 subreq = tevent_wakeup_send(state, state->ev,
1174 timeval_current_ofs_msec(30000));
1175 if (tevent_req_nomem(subreq, req)) {
1178 tevent_req_set_callback(subreq, notifyd_clean_peers_next, req);
/* Collect the (error-only) result of the peer-cleanup loop. */
1181 static int notifyd_clean_peers_recv(struct tevent_req *req)
1183 return tevent_req_simple_recv_unix(req);
/*
 * dbwrap_traverse_read callback run over a freshly received peer
 * database: register a local kernel watch for every instance so that
 * filesystem events on this node are also seen on behalf of the
 * peer's clients. The sys_watch pointers in the copied records came
 * from the remote node and are meaningless here, so they are reset
 * before being reused.
 */
1186 static int notifyd_add_proxy_syswatches(struct db_record *rec,
1189 struct notifyd_state *state = talloc_get_type_abort(
1190 private_data, struct notifyd_state);
1191 struct db_context *db = dbwrap_record_get_db(rec);
1192 TDB_DATA key = dbwrap_record_get_key(rec);
1193 TDB_DATA value = dbwrap_record_get_value(rec);
1194 struct notifyd_instance *instances = NULL;
1195 size_t num_instances = 0;
/* Keys are stored without the trailing 0; build a terminated copy. */
1197 char path[key.dsize+1];
1200 memcpy(path, key.dptr, key.dsize);
1201 path[key.dsize] = '\0';
1203 ok = notifyd_parse_entry(value.dptr, value.dsize, &instances,
1206 DEBUG(1, ("%s: Could not parse notifyd entry for %s\n",
1211 for (i=0; i<num_instances; i++) {
1212 struct notifyd_instance *instance = &instances[i];
1213 uint32_t filter = instance->instance.filter;
1214 uint32_t subdir_filter = instance->instance.subdir_filter;
1218 * This is a remote database. Pointers that we were
1219 * given don't make sense locally. Initialize to NULL
1220 * in case sys_notify_watch fails.
1222 instances[i].sys_watch = NULL;
1224 ret = state->sys_notify_watch(
1225 db, state->sys_notify_ctx, path,
1226 &filter, &subdir_filter,
1227 notifyd_sys_callback, state->msg_ctx,
1228 &instance->sys_watch);
1230 DEBUG(1, ("%s: inotify_watch returned %s\n",
1231 __func__, strerror(errno)));
/*
 * dbwrap_traverse_read callback used when dropping a peer: tear down
 * every local kernel watch that was registered on the peer's behalf
 * by notifyd_add_proxy_syswatches().
 */
1238 static int notifyd_db_del_syswatches(struct db_record *rec, void *private_data)
1240 TDB_DATA key = dbwrap_record_get_key(rec);
1241 TDB_DATA value = dbwrap_record_get_value(rec);
1242 struct notifyd_instance *instances = NULL;
1243 size_t num_instances = 0;
1247 ok = notifyd_parse_entry(value.dptr, value.dsize, &instances,
1250 DEBUG(1, ("%s: Could not parse notifyd entry for %.*s\n",
1251 __func__, (int)key.dsize, (char *)key.dptr));
1254 for (i=0; i<num_instances; i++) {
1255 TALLOC_FREE(instances[i].sys_watch);
/*
 * talloc destructor for a peer: drop its proxy kernel watches and
 * remove it from state->peers via swap-with-last (this is what lets
 * notifyd_clean_peers_next free a peer while iterating).
 */
1260 static int notifyd_peer_destructor(struct notifyd_peer *p)
1262 struct notifyd_state *state = p->state;
1265 if (p->db != NULL) {
1266 dbwrap_traverse_read(p->db, notifyd_db_del_syswatches,
1270 for (i = 0; i<state->num_peers; i++) {
1271 if (p == state->peers[i]) {
1272 state->peers[i] = state->peers[state->num_peers-1];
1273 state->num_peers -= 1;
/*
 * Create and register a new peer for server "pid". The peer starts
 * without a database (db == NULL, i.e. inactive) until notifyd_got_db
 * fills it in. Returns NULL on allocation failure.
 */
1280 static struct notifyd_peer *notifyd_peer_new(
1281 struct notifyd_state *state, struct server_id pid)
1283 struct notifyd_peer *p, **tmp;
1285 tmp = talloc_realloc(state, state->peers, struct notifyd_peer *,
1286 state->num_peers+1);
1292 p = talloc_zero(state->peers, struct notifyd_peer);
1299 state->peers[state->num_peers] = p;
1300 state->num_peers += 1;
/* Destructor unlinks the peer from state->peers on free. */
1302 talloc_set_destructor(p, notifyd_peer_destructor);
/*
 * Apply a broadcast reclog from a peer to our replica of its
 * database. The log is NDR-pulled, its index checked against the
 * expected peer->rec_index (a mismatch means we lost a broadcast and
 * the peer is dropped to force a full re-pull), and each record is
 * replayed through notifyd_apply_rec_change against peer->db.
 */
1307 static void notifyd_apply_reclog(struct notifyd_peer *peer,
1308 const uint8_t *msg, size_t msglen)
1310 struct notifyd_state *state = peer->state;
1311 DATA_BLOB blob = { .data = discard_const_p(uint8_t, msg),
1313 struct server_id_buf idbuf;
1314 struct messaging_reclog *log;
1315 enum ndr_err_code ndr_err;
/* Peer has no database yet; nothing to apply the log to. */
1318 if (peer->db == NULL) {
1325 log = talloc(peer, struct messaging_reclog);
1327 DEBUG(10, ("%s: talloc failed\n", __func__));
1331 ndr_err = ndr_pull_struct_blob_all(
1333 (ndr_pull_flags_fn_t)ndr_pull_messaging_reclog);
1334 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1335 DEBUG(10, ("%s: ndr_pull_messaging_reclog failed: %s\n",
1336 __func__, ndr_errstr(ndr_err)));
1340 DEBUG(10, ("%s: Got %u recs index %ju from %s\n", __func__,
1341 (unsigned)log->num_recs, (uintmax_t)log->rec_index,
1342 server_id_str_buf(peer->pid, &idbuf)));
1344 if (log->rec_index != peer->rec_index) {
1345 DEBUG(3, ("%s: Got rec index %ju from %s, expected %ju\n",
1346 __func__, (uintmax_t)log->rec_index,
1347 server_id_str_buf(peer->pid, &idbuf),
1348 (uintmax_t)peer->rec_index));
1352 for (i=0; i<log->num_recs; i++) {
1353 struct messaging_rec *r = log->recs[i];
1354 struct notify_rec_change_msg *chg;
1358 ok = notifyd_parse_rec_change(r->buf.data, r->buf.length,
1361 DEBUG(3, ("%s: notifyd_parse_rec_change failed\n",
1366 ok = notifyd_apply_rec_change(&r->src, chg->path, pathlen,
1367 &chg->instance, peer->db,
1368 state->sys_notify_watch,
1369 state->sys_notify_ctx,
1372 DEBUG(3, ("%s: notifyd_apply_rec_change failed\n",
/* Log applied: advance the expected index and refresh liveness. */
1378 peer->rec_index += 1;
1379 peer->last_broadcast = time(NULL);
/* Error path (reached via gotos above the sampled-out labels):
 * discard the peer so the next broadcast triggers a fresh pull. */
1385 DEBUG(10, ("%s: Dropping peer %s\n", __func__,
1386 server_id_str_buf(peer->pid, &idbuf)));
1391 * Receive messaging_reclog (log of MSG_SMB_NOTIFY_REC_CHANGE
1392 * messages) broadcasts by other notifyds. Several cases:
1394 * We don't know the source. This creates a new peer. Creating a peer
1395 * involves asking the peer for its full database. We assume ordered
1396 * messages, so the new database will arrive before the next broadcast
1399 * We know the source and the log index matches. We will apply the log
1400 * locally to our peer's db as if we had received it from a local
1403 * We know the source but the log index does not match. This means we
1404 * lost a message. We just drop the whole peer and wait for the next
1405 * broadcast, which will then trigger a fresh database pull.
/*
 * ctdb srvid handler for CTDB_SRVID_SAMBA_NOTIFY_PROXY broadcasts;
 * registered in notifyd_send(). Validates the messaging header,
 * ignores our own broadcasts, then dispatches per the cases above.
 */
1408 static int notifyd_snoop_broadcast(uint32_t src_vnn, uint32_t dst_vnn,
1410 const uint8_t *msg, size_t msglen,
1413 struct notifyd_state *state = talloc_get_type_abort(
1414 private_data, struct notifyd_state);
1415 struct server_id my_id = messaging_server_id(state->msg_ctx);
1416 struct notifyd_peer *p;
1419 struct server_id src, dst;
1420 struct server_id_buf idbuf;
1423 if (msglen < MESSAGE_HDR_LENGTH) {
1424 DEBUG(10, ("%s: Got short broadcast\n", __func__));
1427 message_hdr_get(&msg_type, &src, &dst, msg);
1429 if (msg_type != MSG_SMB_NOTIFY_REC_CHANGES) {
1430 DEBUG(10, ("%s Got message %u, ignoring\n", __func__,
1431 (unsigned)msg_type));
/* The broadcast also reaches the sending node; skip our own. */
1434 if (server_id_equal(&src, &my_id)) {
1435 DEBUG(10, ("%s: Ignoring my own broadcast\n", __func__));
1439 DEBUG(10, ("%s: Got MSG_SMB_NOTIFY_REC_CHANGES from %s\n",
1440 __func__, server_id_str_buf(src, &idbuf)));
/* Known peer: replay the reclog into its replica db. */
1442 for (i=0; i<state->num_peers; i++) {
1443 if (server_id_equal(&state->peers[i]->pid, &src)) {
1445 DEBUG(10, ("%s: Applying changes to peer %u\n",
1446 __func__, (unsigned)i));
1448 notifyd_apply_reclog(state->peers[i],
1449 msg + MESSAGE_HDR_LENGTH,
1450 msglen - MESSAGE_HDR_LENGTH);
/* Unknown peer: create it and request its full database. */
1455 DEBUG(10, ("%s: Creating new peer for %s\n", __func__,
1456 server_id_str_buf(src, &idbuf)));
1458 p = notifyd_peer_new(state, src);
1460 DEBUG(10, ("%s: notifyd_peer_new failed\n", __func__));
1464 status = messaging_send_buf(state->msg_ctx, src, MSG_SMB_NOTIFY_GET_DB,
1466 if (!NT_STATUS_IS_OK(status)) {
1467 DEBUG(10, ("%s: messaging_send_buf failed: %s\n",
1468 __func__, nt_errstr(status)));
/*
 * Context for notifyd_parse_db(): per-entry callback plus opaque
 * caller data, threaded through dbwrap_parse_marshall_buf.
 */
1477 struct notifyd_parse_db_state {
1478 bool (*fn)(const char *path,
1479 struct server_id server,
1480 const struct notify_instance *instance,
1481 void *private_data);
/*
 * Per-record callback for notifyd_parse_db(): 0-terminate the key
 * (db keys lack the trailing 0) and invoke the caller's fn once per
 * instance in the record.
 */
1485 static bool notifyd_parse_db_parser(TDB_DATA key, TDB_DATA value,
1488 struct notifyd_parse_db_state *state = private_data;
1489 char path[key.dsize+1];
1490 struct notifyd_instance *instances = NULL;
1491 size_t num_instances = 0;
1495 memcpy(path, key.dptr, key.dsize);
1496 path[key.dsize] = 0;
1498 ok = notifyd_parse_entry(value.dptr, value.dsize, &instances,
1501 DEBUG(10, ("%s: Could not parse entry for path %s\n",
1506 for (i=0; i<num_instances; i++) {
1507 ok = state->fn(path, instances[i].client,
1508 &instances[i].instance,
1509 state->private_data);
1518 int notifyd_parse_db(const uint8_t *buf, size_t buflen,
1519 uint64_t *log_index,
1520 bool (*fn)(const char *path,
1521 struct server_id server,
1522 const struct notify_instance *instance,
1523 void *private_data),
1526 struct notifyd_parse_db_state state = {
1527 .fn = fn, .private_data = private_data
1534 *log_index = BVAL(buf, 0);
1539 status = dbwrap_parse_marshall_buf(
1540 buf, buflen, notifyd_parse_db_parser, &state);
1541 if (!NT_STATUS_IS_OK(status)) {
1542 return map_errno_from_nt_status(status);