/*
   Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
   This file is part of GlusterFS.

   This file is licensed to you under your choice of the GNU Lesser
   General Public License, version 3 or any later version (LGPLv3 or
   later), or the GNU General Public License, version 2 (GPLv2), in all
   cases as published by the Free Software Foundation.
*/
#include "glusterfs.h"
#include "protocol-common.h"
#include "compat-errno.h"
#include "statedump.h"
#include "glusterd-mem-types.h"
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-server-quorum.h"
#include "glusterd-store.h"
#include "glusterd-locks.h"
#include "glusterd-snapshot-utils.h"
#include "glusterd1-xdr.h"
#include "xdr-generic.h"
#include "glusterd-volgen.h"
#include "glusterd-mountbroker.h"
#include "glusterd-messages.h"

#include <sys/resource.h>

#include "common-utils.h"

#include "glusterd-syncop.h"
extern glusterd_op_info_t opinfo;

int
glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
                            rpc_clnt_event_t event,
                            void *data, rpc_clnt_notify_t notify_fn)
{
        glusterd_conf_t *priv = THIS->private;
        int              ret = -1;

        synclock_lock (&priv->big_lock);
        ret = notify_fn (rpc, mydata, event, data);
        synclock_unlock (&priv->big_lock);

        return ret;
}

int
glusterd_big_locked_handler (rpcsvc_request_t *req, rpcsvc_actor actor_fn)
{
        glusterd_conf_t *priv = THIS->private;
        int              ret = -1;

        synclock_lock (&priv->big_lock);
        ret = actor_fn (req);
        synclock_unlock (&priv->big_lock);

        return ret;
}
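/*
 * Illustrative sketch (not glusterd code): the two wrappers above implement
 * the "big lock" pattern, where every RPC notification and handler runs
 * with one coarse lock held.  A stand-alone analogue of the same idea, with
 * a pthread mutex standing in for glusterd's synclock and a hypothetical
 * handler type:
 */
#if 0
#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

typedef int (*handler_fn) (void *req);

/* Run any handler with the big lock held, mirroring
 * glusterd_big_locked_handler () above. */
static int
run_big_locked (handler_fn fn, void *req)
{
        int ret;

        pthread_mutex_lock (&big_lock);
        ret = fn (req);
        pthread_mutex_unlock (&big_lock);

        return ret;
}
#endif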
static int
glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
                            char *hostname, int port,
                            gd1_mgmt_friend_req *friend_req)
{
        glusterd_peerinfo_t        *peerinfo = NULL;
        glusterd_friend_sm_event_t *event = NULL;
        glusterd_friend_req_ctx_t  *ctx = NULL;
        char                        rhost[UNIX_PATH_MAX + 1] = {0};
        uuid_t                      friend_uuid = {0};

        gf_uuid_parse (uuid_utoa (uuid), friend_uuid);
        if (!port)
                port = GF_DEFAULT_BASE_PORT;

        ret = glusterd_remote_hostname_get (req, rhost, sizeof (rhost));

        peerinfo = glusterd_peerinfo_find (uuid, rhost);

        if (peerinfo == NULL) {
                ret = glusterd_xfer_friend_add_resp (req, hostname, rhost, port,
                                                     -1, GF_PROBE_UNKNOWN_PEER);
                if (friend_req->vols.vols_val) {
                        free (friend_req->vols.vols_val);
                        friend_req->vols.vols_val = NULL;

        ret = glusterd_friend_sm_new_event
                        (GD_FRIEND_EVENT_RCVD_FRIEND_REQ, &event);

                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_NEW_GET_FAIL,
                        "event generation failed: %d", ret);

        event->peername = gf_strdup (peerinfo->hostname);
        gf_uuid_copy (event->peerid, peerinfo->uuid);

        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);

                gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
                        GD_MSG_NO_MEMORY, "Unable to allocate memory");

        gf_uuid_copy (ctx->uuid, uuid);
        ctx->hostname = gf_strdup (hostname);

        ret = dict_unserialize (friend_req->vols.vols_val,
                                friend_req->vols.vols_len,
                                &dict);

        dict->extra_stdfree = friend_req->vols.vols_val;

        ret = glusterd_friend_sm_inject_event (event);

                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_INJECT_FAIL,
                        "Unable to inject event %d, "
                        "ret = %d", event->event, ret);

        if (peerinfo && (0 == peerinfo->connected))
                ret = GLUSTERD_CONNECTION_AWAITED;

        if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
                if (ctx && ctx->hostname)
                        GF_FREE (ctx->hostname);

                if ((!dict->extra_stdfree) &&
                    friend_req->vols.vols_val)
                        free (friend_req->vols.vols_val);

                free (friend_req->vols.vols_val);

                GF_FREE (event->peername);
static int
glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
                              char *hostname, int port)
{
        glusterd_peerinfo_t        *peerinfo = NULL;
        glusterd_friend_sm_event_t *event = NULL;
        glusterd_friend_req_ctx_t  *ctx = NULL;

        if (!port)
                port = GF_DEFAULT_BASE_PORT;

        peerinfo = glusterd_peerinfo_find (uuid, hostname);

        if (peerinfo == NULL) {
                gf_msg ("glusterd", GF_LOG_CRITICAL, 0,
                        GD_MSG_REQ_FROM_UNKNOWN_PEER,
                        "Received remove-friend from unknown peer %s",
                        hostname);
                ret = glusterd_xfer_friend_remove_resp (req, hostname,
                                                        port);

        ret = glusterd_friend_sm_new_event
                        (GD_FRIEND_EVENT_RCVD_REMOVE_FRIEND, &event);

                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_NEW_GET_FAIL,
                        "event generation failed: %d", ret);

        event->peername = gf_strdup (hostname);
        gf_uuid_copy (event->peerid, uuid);

        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);

                gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
                        GD_MSG_NO_MEMORY, "Unable to allocate memory");

        gf_uuid_copy (ctx->uuid, uuid);
        ctx->hostname = gf_strdup (hostname);

        ret = glusterd_friend_sm_inject_event (event);

                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_INJECT_FAIL, "Unable to inject event %d, "
                        "ret = %d", event->event, ret);

        if (ctx && ctx->hostname)
                GF_FREE (ctx->hostname);

        GF_FREE (event->peername);
static int
_build_option_key (dict_t *d, char *k, data_t *v, void *tmp)
{
        char              reconfig_key[256] = {0, };
        struct args_pack *pack = NULL;
        xlator_t         *this = NULL;
        glusterd_conf_t  *priv = NULL;

        priv = this->private;

        if (strcmp (k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
                return 0;

        if (priv->op_version > GD_OP_VERSION_MIN) {
                if ((strcmp (k, "features.limit-usage") == 0) ||
                    (strcmp (k, "features.soft-limit") == 0))
                        return 0;
        }

        snprintf (reconfig_key, 256, "volume%d.option.%s",
                  pack->vol_count, k);
        ret = dict_set_str (pack->dict, reconfig_key, v->data);
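/*
 * Illustrative sketch (not glusterd code): _build_option_key () above is a
 * dict_foreach callback that flattens each volume option into the response
 * dictionary under a "volume<N>.option.<key>" namespace.  The key
 * construction, stand-alone with plain printf in place of dict_set_str
 * (the option names and vol_count value are hypothetical):
 */
#if 0
#include <stdio.h>

int
main (void)
{
        const char *opts[][2] = {
                { "performance.cache-size", "256MB" },
                { "network.ping-timeout",   "42" },
        };
        char        reconfig_key[256];
        int         vol_count = 0;
        int         i;

        for (i = 0; i < 2; i++) {
                snprintf (reconfig_key, sizeof (reconfig_key),
                          "volume%d.option.%s", vol_count, opts[i][0]);
                /* in glusterd: dict_set_str (pack->dict, reconfig_key, ...) */
                printf ("%s = %s\n", reconfig_key, opts[i][1]);
        }
        return 0;
}
#endif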
int
glusterd_add_tier_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
                                         dict_t *dict, int count)
{
        char key[256] = {0,};

        memset (key, 0, sizeof (key));
        snprintf (key, 256, "volume%d.cold_type", count);
        ret = dict_set_int32 (dict, key, volinfo->tier_info.cold_type);

        memset (key, 0, sizeof (key));
        snprintf (key, 256, "volume%d.cold_brick_count", count);
        ret = dict_set_int32 (dict, key, volinfo->tier_info.cold_brick_count);

        memset (key, 0, sizeof (key));
        snprintf (key, 256, "volume%d.cold_dist_count", count);
        ret = dict_set_int32 (dict, key,
                              volinfo->tier_info.cold_dist_leaf_count);

        memset (key, 0, sizeof (key));
        snprintf (key, 256, "volume%d.cold_replica_count", count);
        ret = dict_set_int32 (dict, key,
                              volinfo->tier_info.cold_replica_count);

        memset (key, 0, sizeof (key));
        snprintf (key, 256, "volume%d.cold_disperse_count", count);
        ret = dict_set_int32 (dict, key,
                              volinfo->tier_info.cold_disperse_count);

        memset (key, 0, sizeof (key));
        snprintf (key, 256, "volume%d.cold_redundancy_count", count);
        ret = dict_set_int32 (dict, key,
                              volinfo->tier_info.cold_redundancy_count);

        memset (key, 0, sizeof (key));
        snprintf (key, 256, "volume%d.hot_type", count);
        ret = dict_set_int32 (dict, key, volinfo->tier_info.hot_type);

        memset (key, 0, sizeof (key));
        snprintf (key, 256, "volume%d.hot_brick_count", count);
        ret = dict_set_int32 (dict, key, volinfo->tier_info.hot_brick_count);

        memset (key, 0, sizeof (key));
        snprintf (key, 256, "volume%d.hot_replica_count", count);
        ret = dict_set_int32 (dict, key, volinfo->tier_info.hot_replica_count);
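/*
 * Illustrative sketch (not glusterd code): every field above follows the
 * same memset/snprintf/dict_set_int32 sequence.  A hypothetical helper
 * that captures the repetition could look like this:
 */
#if 0
static int
set_indexed_int32 (dict_t *dict, int count, const char *field, int32_t value)
{
        char key[256] = {0,};

        snprintf (key, sizeof (key), "volume%d.%s", count, field);
        return dict_set_int32 (dict, key, value);
}

/* usage: ret = set_indexed_int32 (dict, count, "cold_type",
 *                                 volinfo->tier_info.cold_type); */
#endif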
int
glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
                                    dict_t *volumes, int count)
{
        char                  key[256] = {0, };
        glusterd_brickinfo_t *brickinfo = NULL;
        glusterd_conf_t      *priv = NULL;
        char                 *volume_id_str = NULL;
        struct args_pack      pack = {0,};
        xlator_t             *this = NULL;
        GF_UNUSED int         caps = 0;

        priv = this->private;

        snprintf (key, 256, "volume%d.name", count);
        ret = dict_set_str (volumes, key, volinfo->volname);

        snprintf (key, 256, "volume%d.type", count);
        ret = dict_set_int32 (volumes, key, volinfo->type);

        snprintf (key, 256, "volume%d.status", count);
        ret = dict_set_int32 (volumes, key, volinfo->status);

        snprintf (key, 256, "volume%d.brick_count", count);
        ret = dict_set_int32 (volumes, key, volinfo->brick_count);

        snprintf (key, 256, "volume%d.hot_brick_count", count);
        ret = dict_set_int32 (volumes, key, volinfo->tier_info.hot_brick_count);

        if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
                ret = glusterd_add_tier_volume_detail_to_dict (volinfo,
                                                               volumes, count);

        snprintf (key, 256, "volume%d.dist_count", count);
        ret = dict_set_int32 (volumes, key, volinfo->dist_leaf_count);

        snprintf (key, 256, "volume%d.stripe_count", count);
        ret = dict_set_int32 (volumes, key, volinfo->stripe_count);

        snprintf (key, 256, "volume%d.replica_count", count);
        ret = dict_set_int32 (volumes, key, volinfo->replica_count);

        snprintf (key, 256, "volume%d.disperse_count", count);
        ret = dict_set_int32 (volumes, key, volinfo->disperse_count);

        snprintf (key, 256, "volume%d.redundancy_count", count);
        ret = dict_set_int32 (volumes, key, volinfo->redundancy_count);

        snprintf (key, 256, "volume%d.transport", count);
        ret = dict_set_int32 (volumes, key, volinfo->transport_type);

        volume_id_str = gf_strdup (uuid_utoa (volinfo->volume_id));

        snprintf (key, sizeof (key), "volume%d.volume_id", count);
        ret = dict_set_dynstr (volumes, key, volume_id_str);

        snprintf (key, 256, "volume%d.rebalance", count);
        ret = dict_set_int32 (volumes, key, volinfo->rebal.defrag_cmd);

#ifdef HAVE_BD_XLATOR
        snprintf (key, 256, "volume%d.xlator0", count);
        buf = GF_MALLOC (256, gf_common_mt_char);

        if (volinfo->caps & CAPS_BD)
                snprintf (buf, 256, "BD");
        ret = dict_set_dynstr (volumes, key, buf);

        if (volinfo->caps & CAPS_THIN) {
                snprintf (key, 256, "volume%d.xlator0.caps%d", count,
                          caps++);
                buf = GF_MALLOC (256, gf_common_mt_char);

                snprintf (buf, 256, "thin");
                ret = dict_set_dynstr (volumes, key, buf);

        if (volinfo->caps & CAPS_OFFLOAD_COPY) {
                snprintf (key, 256, "volume%d.xlator0.caps%d", count,
                          caps++);
                buf = GF_MALLOC (256, gf_common_mt_char);

                snprintf (buf, 256, "offload_copy");
                ret = dict_set_dynstr (volumes, key, buf);

        if (volinfo->caps & CAPS_OFFLOAD_SNAPSHOT) {
                snprintf (key, 256, "volume%d.xlator0.caps%d", count,
                          caps++);
                buf = GF_MALLOC (256, gf_common_mt_char);

                snprintf (buf, 256, "offload_snapshot");
                ret = dict_set_dynstr (volumes, key, buf);

        if (volinfo->caps & CAPS_OFFLOAD_ZERO) {
                snprintf (key, 256, "volume%d.xlator0.caps%d", count,
                          caps++);
                buf = GF_MALLOC (256, gf_common_mt_char);

                snprintf (buf, 256, "offload_zerofill");
                ret = dict_set_dynstr (volumes, key, buf);

        cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                char brick[1024] = {0,};
                char brick_uuid[64] = {0,};

                snprintf (key, 256, "volume%d.brick%d", count, i);
                snprintf (brick, 1024, "%s:%s", brickinfo->hostname,
                          brickinfo->path);
                buf = gf_strdup (brick);
                ret = dict_set_dynstr (volumes, key, buf);

                snprintf (key, 256, "volume%d.brick%d.uuid", count, i);
                snprintf (brick_uuid, 64, "%s", uuid_utoa (brickinfo->uuid));
                buf = gf_strdup (brick_uuid);

                ret = dict_set_dynstr (volumes, key, buf);

#ifdef HAVE_BD_XLATOR
                if (volinfo->caps & CAPS_BD) {
                        snprintf (key, 256, "volume%d.vg%d", count, i);
                        snprintf (brick, 1024, "%s", brickinfo->vg);
                        buf = gf_strdup (brick);
                        ret = dict_set_dynstr (volumes, key, buf);

        dict = volinfo->dict;

        pack.vol_count = count;

        dict_foreach (dict, _build_option_key, (void *) &pack);
        dict_foreach (priv->opts, _build_option_key, &pack);

        snprintf (key, 256, "volume%d.opt_count", pack.vol_count);
        ret = dict_set_int32 (volumes, key, pack.opt_count);
int
glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
                       char *err_str, size_t err_len)
{
        xlator_t                    *this = NULL;
        glusterd_conf_t             *priv = NULL;
        char                        *volname = NULL;
        uuid_t                      *txn_id = NULL;
        glusterd_op_info_t           txn_op_info = {{0},};
        glusterd_op_sm_event_type_t  event_type = GD_OP_EVENT_NONE;
        uint32_t                     op_errno = 0;

        GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
        GF_ASSERT (NULL != ctx);

        priv = this->private;

        /* Generate a transaction-id for this operation and
         * save it in the dict. This transaction id distinguishes
         * each transaction, and helps separate opinfos in the
         * op state machine. */
        ret = glusterd_generate_txn_id (dict, &txn_id);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_TRANS_IDGEN_FAIL,
                        "Failed to generate transaction id");

        /* Save MY_UUID as the originator_uuid. This originator_uuid
         * will be used by is_origin_glusterd() to determine if a node
         * is the originator node for a command. */
        ret = glusterd_set_originator_uuid (dict);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_UUID_SET_FAIL,
                        "Failed to set originator_uuid.");

        /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
        if (priv->op_version < GD_OP_VERSION_3_6_0) {
                ret = glusterd_lock (MY_UUID);

                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_GLUSTERD_LOCK_FAIL,
                                "Unable to acquire lock on localhost, ret: %d",
                                ret);
                        snprintf (err_str, err_len,
                                  "Another transaction is in progress. "
                                  "Please try again after some time.");

        /* If no volname is given as a part of the command, locks will
         * not be held. */
        ret = dict_get_str (dict, "volname", &tmp);

                gf_msg (this->name, GF_LOG_INFO, errno,
                        GD_MSG_DICT_GET_FAILED,
                        "No volume name present. "
                        "Locks not being held.");
                goto local_locking_done;

        /* Use a copy of volname, as the CLI response will be
         * sent before the unlock, and the volname in the
         * dict might be removed. */
        volname = gf_strdup (tmp);

        ret = glusterd_mgmt_v3_lock (volname, MY_UUID, &op_errno,

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_MGMTV3_LOCK_GET_FAIL,
                        "Unable to acquire lock for %s", volname);
                snprintf (err_str, err_len,
                          "Another transaction is in progress for %s. "
                          "Please try again after some time.", volname);

        gf_msg_debug (this->name, 0, "Acquired lock on localhost");

        /* If no volname is given as a part of the command, locks will
         * not be held, hence sending stage event. */
        if (volname || (priv->op_version < GD_OP_VERSION_3_6_0))
                event_type = GD_OP_EVENT_START_LOCK;
        else {
                txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
                event_type = GD_OP_EVENT_ALL_ACC;
        }

        /* Save opinfo for this transaction with the transaction id */
        glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, ctx, req);

        ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_TRANS_OPINFO_SET_FAIL,
                        "Unable to set transaction's opinfo");

        ret = glusterd_op_sm_inject_event (event_type, txn_id, ctx);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_INJECT_FAIL, "Failed to acquire cluster

        /* Based on the op-version, we release the
         * cluster or mgmt_v3 lock */
        if (priv->op_version < GD_OP_VERSION_3_6_0)
                glusterd_unlock (MY_UUID);
        else
                ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,

                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_MGMTV3_UNLOCK_FAIL,
                                "Unable to release lock for %s",
                                volname);

        gf_msg_debug (this->name, 0, "Returning %d", ret);
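/*
 * Illustrative sketch (not glusterd code): glusterd_op_txn_begin () keys
 * each transaction by a freshly generated UUID, and later phases look the
 * opinfo up by that id.  The id-generation step, stand-alone with libuuid:
 */
#if 0
#include <stdio.h>
#include <uuid/uuid.h>

int
main (void)
{
        uuid_t txn_id;
        char   txn_str[37];          /* 36 chars + NUL */

        uuid_generate (txn_id);      /* what a call like
                                      * glusterd_generate_txn_id ()
                                      * boils down to */
        uuid_unparse (txn_id, txn_str);

        /* glusterd stores the id in the op dict, so every subsequent
         * stage/commit request can find the matching opinfo. */
        printf ("transaction ID = %s\n", txn_str);
        return 0;
}
#endif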
int
__glusterd_handle_cluster_lock (rpcsvc_request_t *req)
{
        dict_t                    *op_ctx = NULL;
        gd1_mgmt_cluster_lock_req  lock_req = {{0},};
        glusterd_op_lock_ctx_t    *ctx = NULL;
        glusterd_op_t              op = GD_OP_EVENT_LOCK;
        glusterd_op_info_t         txn_op_info = {{0},};
        glusterd_conf_t           *priv = NULL;
        uuid_t                    *txn_id = NULL;
        xlator_t                  *this = NULL;

        priv = this->private;

        txn_id = &priv->global_txn_id;

        ret = xdr_to_generic (req->msg[0], &lock_req,
                              (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode lock "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;

        gf_msg_debug (this->name, 0, "Received LOCK from uuid: %s",
                      uuid_utoa (lock_req.uuid));

        ret = (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL);

                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_PEER_NOT_FOUND, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (lock_req.uuid));

        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);

        gf_uuid_copy (ctx->uuid, lock_req.uuid);

        op_ctx = dict_new ();

                gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
                        GD_MSG_DICT_CREATE_FAIL,
                        "Unable to set new dict");

        glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, op_ctx, req);

        ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_TRANS_OPINFO_SET_FAIL,
                        "Unable to set transaction's opinfo");
                dict_unref (txn_op_info.op_ctx);

        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, txn_id, ctx);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_INJECT_FAIL,
                        "Failed to inject event GD_OP_EVENT_LOCK");

        gf_msg_debug (this->name, 0, "Returning %d", ret);

        glusterd_friend_sm ();

int
glusterd_handle_cluster_lock (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cluster_lock);
}
int
glusterd_req_ctx_create (rpcsvc_request_t *rpc_req,
                         glusterd_op_t op, uuid_t uuid,
                         char *buf_val, size_t buf_len,
                         gf_gld_mem_types_t mem_type,
                         glusterd_req_ctx_t **req_ctx_out)
{
        glusterd_req_ctx_t *req_ctx = NULL;
        xlator_t           *this = NULL;

        gf_uuid_unparse (uuid, str);
        gf_msg_debug (this->name, 0, "Received op from uuid %s", str);

        req_ctx = GF_CALLOC (1, sizeof (*req_ctx), mem_type);

        gf_uuid_copy (req_ctx->uuid, uuid);

        ret = dict_unserialize (buf_val, buf_len, &dict);

                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_DICT_UNSERIALIZE_FAIL,
                        "failed to unserialize the dictionary");

        req_ctx->dict = dict;
        req_ctx->req = rpc_req;
        *req_ctx_out = req_ctx;
int
__glusterd_handle_stage_op (rpcsvc_request_t *req)
{
        glusterd_req_ctx_t          *req_ctx = NULL;
        gd1_mgmt_stage_op_req        op_req = {{0},};
        xlator_t                    *this = NULL;
        uuid_t                      *txn_id = NULL;
        glusterd_op_info_t           txn_op_info = {{0},};
        glusterd_op_sm_state_info_t  state = {0,};
        glusterd_conf_t             *priv = NULL;

        priv = this->private;

        txn_id = &priv->global_txn_id;

        ret = xdr_to_generic (req->msg[0], &op_req,
                              (xdrproc_t)xdr_gd1_mgmt_stage_op_req);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode stage "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;

        ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid,
                                       op_req.buf.buf_val, op_req.buf.buf_len,
                                       gf_gld_mt_op_stage_ctx_t, &req_ctx);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_CTX_CREATE_FAIL, "Failed to create req_ctx");

        ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
        gf_msg_debug (this->name, 0, "transaction ID = %s",
                      uuid_utoa (*txn_id));

        ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);

                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_PEER_NOT_FOUND, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (op_req.uuid));

        /* In cases where there is no volname, the receivers won't have a
         * transaction opinfo created, because for those operations the
         * locking phase (where the transaction opinfos are created) is not
         * executed. */
        ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);

                gf_msg_debug (this->name, 0,
                              "No transaction's opinfo set");

                state.state = GD_OP_STATE_LOCKED;
                glusterd_txn_opinfo_init (&txn_op_info, &state, &op_req.op,
                                          req_ctx->dict, req);

                ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);

                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_TRANS_OPINFO_SET_FAIL,
                                "Unable to set transaction's opinfo");
                        dict_unref (req_ctx->dict);

        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP,
                                           txn_id, req_ctx);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_INJECT_FAIL,
                        "Failed to inject event GD_OP_EVENT_STAGE_OP");

        free (op_req.buf.buf_val); // malloced by xdr
        glusterd_friend_sm ();

int
glusterd_handle_stage_op (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_stage_op);
}
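/*
 * Illustrative sketch (not glusterd code): the handlers above all decode
 * their payload with xdr_to_generic () and set GARBAGE_ARGS on failure.
 * The bare XDR round trip, stand-alone with SunRPC's xdrmem streams:
 */
#if 0
#include <rpc/xdr.h>
#include <stdio.h>

int
main (void)
{
        char buf[64];
        XDR  xenc;
        XDR  xdec;
        int  in = 42;
        int  out = 0;

        /* peer side: encode a value into a buffer */
        xdrmem_create (&xenc, buf, sizeof (buf), XDR_ENCODE);
        if (!xdr_int (&xenc, &in))
                return 1;

        /* handler side: decode; in glusterd a failure here maps to
         * req->rpc_err = GARBAGE_ARGS */
        xdrmem_create (&xdec, buf, sizeof (buf), XDR_DECODE);
        if (!xdr_int (&xdec, &out))
                return 1;

        printf ("decoded %d\n", out);
        return 0;
}
#endif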
int
__glusterd_handle_commit_op (rpcsvc_request_t *req)
{
        glusterd_req_ctx_t     *req_ctx = NULL;
        gd1_mgmt_commit_op_req  op_req = {{0},};
        xlator_t               *this = NULL;
        uuid_t                 *txn_id = NULL;
        glusterd_conf_t        *priv = NULL;

        priv = this->private;

        txn_id = &priv->global_txn_id;

        ret = xdr_to_generic (req->msg[0], &op_req,
                              (xdrproc_t)xdr_gd1_mgmt_commit_op_req);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode commit "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;

        ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);

                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_PEER_NOT_FOUND, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (op_req.uuid));

        // the structures should always be equal
        GF_ASSERT (sizeof (gd1_mgmt_commit_op_req) == sizeof (gd1_mgmt_stage_op_req));
        ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid,
                                       op_req.buf.buf_val, op_req.buf.buf_len,
                                       gf_gld_mt_op_commit_ctx_t, &req_ctx);

        ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
        gf_msg_debug (this->name, 0, "transaction ID = %s",
                      uuid_utoa (*txn_id));

        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP,
                                           txn_id, req_ctx);

        free (op_req.buf.buf_val); // malloced by xdr
        glusterd_friend_sm ();

int
glusterd_handle_commit_op (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_commit_op);
}
int
__glusterd_handle_cli_probe (rpcsvc_request_t *req)
{
        gf_cli_req           cli_req = {{0,},};
        glusterd_peerinfo_t *peerinfo = NULL;
        gf_boolean_t         run_fsm = _gf_true;
        xlator_t            *this = NULL;
        char                *bind_name = NULL;
        dict_t              *dict = NULL;
        char                *hostname = NULL;

        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);

                // failed to decode msg
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "xdr decoding error");
                req->rpc_err = GARBAGE_ARGS;

        if (cli_req.dict.dict_len) {
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len, &dict);

                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL, "Failed to "
                                "unserialize req-buffer to dictionary");

        ret = dict_get_str (dict, "hostname", &hostname);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
                        "Failed to get hostname");

        ret = dict_get_int32 (dict, "port", &port);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_PORT_NOTFOUND_IN_DICT, "Failed to get port");

        if (glusterd_is_any_volume_in_server_quorum (this) &&
            !does_gd_meet_server_quorum (this)) {
                glusterd_xfer_cli_probe_resp (req, -1, GF_PROBE_QUORUM_NOT_MET,
                                              NULL, hostname, port, dict);
                gf_msg (this->name, GF_LOG_CRITICAL, 0,
                        GD_MSG_SERVER_QUORUM_NOT_MET,
                        "Server quorum not met. Rejecting operation.");

        gf_msg ("glusterd", GF_LOG_INFO, 0,
                GD_MSG_CLI_REQ_RECVD,
                "Received CLI probe req %s %d",
                hostname, port);

        if (dict_get_str (this->options, "transport.socket.bind-address",
                          &bind_name) == 0) {
                gf_msg_debug ("glusterd", 0,
                              "only checking probe address vs. bind address");
                ret = gf_is_same_address (bind_name, hostname);
        } else {
                ret = gf_is_local_addr (hostname);
        }

                glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_LOCALHOST,
                                              NULL, hostname, port, dict);

        peerinfo = glusterd_peerinfo_find_by_hostname (hostname);
        ret = (peerinfo && gd_peer_has_address (peerinfo, hostname));

                gf_msg_debug ("glusterd", 0, "Probe host %s port %d "
                              "already a peer", hostname, port);
                glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL,
                                              hostname, port, dict);

        ret = glusterd_probe_begin (req, hostname, port, dict, &op_errno);

        if (ret == GLUSTERD_CONNECTION_AWAITED) {
                // the FSM should be run after the connection is established
                run_fsm = _gf_false;
        } else if (ret == -1) {
                glusterd_xfer_cli_probe_resp (req, -1, op_errno,
                                              NULL, hostname, port, dict);

        free (cli_req.dict.dict_val);

        glusterd_friend_sm ();

int
glusterd_handle_cli_probe (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_cli_probe);
}
int
__glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
{
        gf_cli_req          cli_req = {{0,},};
        xlator_t           *this = NULL;
        glusterd_conf_t    *priv = NULL;
        dict_t             *dict = NULL;
        char               *hostname = NULL;
        glusterd_volinfo_t *volinfo = NULL;
        glusterd_volinfo_t *tmp = NULL;

        priv = this->private;

        ret = xdr_to_generic (req->msg[0], &cli_req,
                              (xdrproc_t)xdr_gf_cli_req);

                // failed to decode msg
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
                        "request received from cli");
                req->rpc_err = GARBAGE_ARGS;

        if (cli_req.dict.dict_len) {
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len, &dict);

                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL, "Failed to "
                                "unserialize req-buffer to dictionary");

        gf_msg ("glusterd", GF_LOG_INFO, 0,
                GD_MSG_CLI_REQ_RECVD,
                "Received CLI deprobe req");

        ret = dict_get_str (dict, "hostname", &hostname);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
                        "Failed to get hostname");

        ret = dict_get_int32 (dict, "port", &port);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_PORT_NOTFOUND_IN_DICT, "Failed to get port");

        ret = dict_get_int32 (dict, "flags", &flags);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_FLAGS_NOTFOUND_IN_DICT, "Failed to get flags");

        ret = glusterd_hostname_to_uuid (hostname, uuid);

                op_errno = GF_DEPROBE_NOT_FRIEND;

        if (!gf_uuid_compare (uuid, MY_UUID)) {
                op_errno = GF_DEPROBE_LOCALHOST;

        if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
                /* Check if peers are connected, except the peer being
                 * detached */
                if (!glusterd_chk_peers_connected_befriended (uuid)) {
                        op_errno = GF_DEPROBE_FRIEND_DOWN;

        /* Check if any volumes have bricks on the peer being detached.
         * It's not a problem if a volume has either none or all of its
         * bricks on that peer. */
        cds_list_for_each_entry_safe (volinfo, tmp, &priv->volumes,
                                      vol_list) {
                ret = glusterd_friend_contains_vol_bricks (volinfo,
                                                           uuid);
                        op_errno = GF_DEPROBE_BRICK_EXIST;

        if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
                if (glusterd_is_any_volume_in_server_quorum (this) &&
                    !does_gd_meet_server_quorum (this)) {
                        gf_msg (this->name, GF_LOG_CRITICAL, 0,
                                GD_MSG_SERVER_QUORUM_NOT_MET,
                                "Server quorum not met. Rejecting operation.");
                        op_errno = GF_DEPROBE_QUORUM_NOT_MET;

        if (!gf_uuid_is_null (uuid)) {
                ret = glusterd_deprobe_begin (req, hostname, port, uuid, dict,
                                              &op_errno);
        } else {
                ret = glusterd_deprobe_begin (req, hostname, port, NULL, dict,
                                              &op_errno);
        }

        free (cli_req.dict.dict_val);

        ret = glusterd_xfer_cli_deprobe_resp (req, ret, op_errno, NULL,
                                              hostname, dict);

        glusterd_friend_sm ();

int
glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_cli_deprobe);
}
int
__glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
{
        gf1_cli_peer_list_req  cli_req = {0,};
        dict_t                *dict = NULL;

        ret = xdr_to_generic (req->msg[0], &cli_req,
                              (xdrproc_t)xdr_gf1_cli_peer_list_req);

                // failed to decode msg
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
                        "request received from cli");
                req->rpc_err = GARBAGE_ARGS;

        gf_msg ("glusterd", GF_LOG_INFO, 0,
                GD_MSG_CLI_REQ_RECVD,
                "Received cli list req");

        if (cli_req.dict.dict_len) {
                /* Unserialize the dictionary */
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len,
                                        &dict);

                        gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");

                dict->extra_stdfree = cli_req.dict.dict_val;

        ret = glusterd_list_friends (req, dict, cli_req.flags);

        glusterd_friend_sm ();

int
glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_list_friends);
}
int
__glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
{
        gf_cli_req  cli_req = {{0,}};
        dict_t     *dict = NULL;

        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);

                // failed to decode msg
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
                        "request received from cli");
                req->rpc_err = GARBAGE_ARGS;

        gf_msg ("glusterd", GF_LOG_INFO, 0,
                GD_MSG_GET_VOL_REQ_RCVD,
                "Received get vol req");

        if (cli_req.dict.dict_len) {
                /* Unserialize the dictionary */
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len,
                                        &dict);

                        gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");

                dict->extra_stdfree = cli_req.dict.dict_val;

        ret = dict_get_int32 (dict, "flags", &flags);

                gf_msg (THIS->name, GF_LOG_ERROR, 0,
                        GD_MSG_FLAGS_NOTFOUND_IN_DICT, "failed to get flags");

        ret = glusterd_get_volumes (req, dict, flags);

        glusterd_friend_sm ();

int
glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_get_volume);
}
int
__glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
{
        dict_t          *dict = NULL;
        xlator_t        *this = NULL;
        glusterd_conf_t *priv = NULL;
        gf_cli_rsp       rsp = {0,};
        gf_cli_req       cli_req = {{0,}};
        char             msg_str[2048] = {0,};

        priv = this->private;

        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);

                // failed to decode msg
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
                        "request received from cli");
                req->rpc_err = GARBAGE_ARGS;

        gf_msg_debug ("glusterd", 0, "Received uuid reset req");

        if (cli_req.dict.dict_len) {
                /* Unserialize the dictionary */
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len,
                                        &dict);

                        gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");
                        snprintf (msg_str, sizeof (msg_str), "Unable to decode "

                dict->extra_stdfree = cli_req.dict.dict_val;

        /* In the above section if dict_unserialize is successful, ret is set
         * to 0. */

        // Do not allow peer reset if there are any volumes in the cluster
        if (!cds_list_empty (&priv->volumes)) {
                snprintf (msg_str, sizeof (msg_str), "volumes are already "
                          "present in the cluster. Resetting uuid is not "
                          "allowed");
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_VOLS_ALREADY_PRESENT, "%s", msg_str);

        // Do not allow peer reset if a trusted storage pool has been formed
        if (!cds_list_empty (&priv->peers)) {
                snprintf (msg_str, sizeof (msg_str), "trusted storage pool "
                          "has already been formed. Please detach this peer "
                          "from the pool and reset its uuid.");
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_TSP_ALREADY_FORMED, "%s", msg_str);

        gf_uuid_copy (uuid, priv->uuid);
        ret = glusterd_uuid_generate_save ();

        if (!gf_uuid_compare (uuid, MY_UUID)) {
                snprintf (msg_str, sizeof (msg_str), "old uuid and the new uuid"
                          " are the same. Try gluster peer reset again");
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_UUIDS_SAME_RETRY, "%s", msg_str);

        if (msg_str[0] == '\0')
                snprintf (msg_str, sizeof (msg_str), "Operation "
                          "failed");
        rsp.op_errstr = msg_str;

        glusterd_to_cli (req, &rsp, NULL, 0, NULL,
                         (xdrproc_t)xdr_gf_cli_rsp, dict);

int
glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_uuid_reset);
}
int
__glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
{
        dict_t          *dict = NULL;
        dict_t          *rsp_dict = NULL;
        xlator_t        *this = NULL;
        glusterd_conf_t *priv = NULL;
        gf_cli_rsp       rsp = {0,};
        gf_cli_req       cli_req = {{0,}};
        char             msg_str[2048] = {0,};
        char             uuid_str[64] = {0,};

        priv = this->private;

        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
                        "request received from cli");
                req->rpc_err = GARBAGE_ARGS;

        gf_msg_debug ("glusterd", 0, "Received uuid get req");

        if (cli_req.dict.dict_len) {
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len,
                                        &dict);

                        gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");
                        snprintf (msg_str, sizeof (msg_str), "Unable to decode "

                dict->extra_stdfree = cli_req.dict.dict_val;

        rsp_dict = dict_new ();

        uuid_utoa_r (MY_UUID, uuid_str);
        ret = dict_set_str (rsp_dict, "uuid", uuid_str);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_DICT_SET_FAILED, "Failed to set uuid in "

        ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
                                           &rsp.dict.dict_len);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
                        "Failed to serialize "

        if (msg_str[0] == '\0')
                snprintf (msg_str, sizeof (msg_str), "Operation "
                          "failed");
        rsp.op_errstr = msg_str;

        glusterd_to_cli (req, &rsp, NULL, 0, NULL,
                         (xdrproc_t)xdr_gf_cli_rsp, dict);

int
glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_uuid_get);
}
int
__glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
{
        dict_t             *dict = NULL;
        glusterd_conf_t    *priv = NULL;
        glusterd_volinfo_t *volinfo = NULL;
        char                key[1024] = {0,};
        gf_cli_rsp          rsp = {0,};

        priv = THIS->private;

        cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
                memset (key, 0, sizeof (key));
                snprintf (key, sizeof (key), "volume%d", count);
                ret = dict_set_str (dict, key, volinfo->volname);

        ret = dict_set_int32 (dict, "count", count);

        ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
                                           &rsp.dict.dict_len);

        rsp.op_errstr = "Error listing volumes";

        glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                               (xdrproc_t)xdr_gf_cli_rsp);

        glusterd_friend_sm ();

int
glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_list_volume);
}
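/*
 * Illustrative sketch (not glusterd code): the list-volume reply built
 * above is just "volume0" .. "volume<N-1>" name entries plus a "count"
 * key, serialized into rsp.dict.  The same shape with plain printf
 * (the volume names are hypothetical):
 */
#if 0
#include <stdio.h>

int
main (void)
{
        const char *volnames[] = { "gv0", "gv1" };
        char        key[1024];
        int         count = 0;

        for (count = 0; count < 2; count++) {
                snprintf (key, sizeof (key), "volume%d", count);
                printf ("%s = %s\n", key, volnames[count]); /* dict_set_str */
        }
        printf ("count = %d\n", count);                     /* dict_set_int32 */
        return 0;
}
#endif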
int
glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
                   char *err_str, size_t err_len)
{
        int ret = -1;

        ret = glusterd_op_txn_begin (req, op, ctx, err_str, err_len);

        return ret;
}

int
__glusterd_handle_ganesha_cmd (rpcsvc_request_t *req)
{
        gf_cli_req     cli_req = {{0,}};
        dict_t        *dict = NULL;
        glusterd_op_t  cli_op = GD_OP_GANESHA;
        char          *volname = NULL;
        char          *op_errstr = NULL;
        gf_boolean_t   help = _gf_false;
        char           err_str[2048] = {0,};
        xlator_t      *this = NULL;

        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);

                snprintf (err_str, sizeof (err_str), "Failed to decode "
                          "request received from cli");
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
                req->rpc_err = GARBAGE_ARGS;

        if (cli_req.dict.dict_len) {
                /* Unserialize the dictionary */
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len,
                                        &dict);

                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");
                        snprintf (err_str, sizeof (err_str), "Unable to decode "

                dict->extra_stdfree = cli_req.dict.dict_val;

        gf_msg_trace (this->name, 0, "Received global option request");

        ret = glusterd_op_begin_synctask (req, GD_OP_GANESHA, dict);

        if (err_str[0] == '\0')
                snprintf (err_str, sizeof (err_str),
                          "Operation failed");
        ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
                                             dict, err_str);

        GF_FREE (op_errstr);

int
glusterd_handle_ganesha_cmd (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_ganesha_cmd);
}
int
__glusterd_handle_reset_volume (rpcsvc_request_t *req)
{
        gf_cli_req     cli_req = {{0,}};
        dict_t        *dict = NULL;
        glusterd_op_t  cli_op = GD_OP_RESET_VOLUME;
        char          *volname = NULL;
        char           err_str[2048] = {0,};
        xlator_t      *this = NULL;

        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);

                snprintf (err_str, sizeof (err_str), "Failed to decode request "
                          "received from cli");
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
                req->rpc_err = GARBAGE_ARGS;

        if (cli_req.dict.dict_len) {
                /* Unserialize the dictionary */
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len,
                                        &dict);

                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to "
                                "unserialize req-buffer to dictionary");
                        snprintf (err_str, sizeof (err_str), "Unable to decode "

                dict->extra_stdfree = cli_req.dict.dict_val;

        ret = dict_get_str (dict, "volname", &volname);

                snprintf (err_str, sizeof (err_str), "Failed to get volume "

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);

        gf_msg_debug (this->name, 0, "Received volume reset request for "
                      "volume %s", volname);

        ret = glusterd_op_begin_synctask (req, GD_OP_RESET_VOLUME, dict);

        if (err_str[0] == '\0')
                snprintf (err_str, sizeof (err_str),
                          "Operation failed");
        ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
                                             dict, err_str);

int
glusterd_handle_reset_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_reset_volume);
}
int
__glusterd_handle_set_volume (rpcsvc_request_t *req)
{
        gf_cli_req     cli_req = {{0,}};
        dict_t        *dict = NULL;
        glusterd_op_t  cli_op = GD_OP_SET_VOLUME;
        char          *volname = NULL;
        char          *op_errstr = NULL;
        gf_boolean_t   help = _gf_false;
        char           err_str[2048] = {0,};
        xlator_t      *this = NULL;

        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);

                snprintf (err_str, sizeof (err_str), "Failed to decode "
                          "request received from cli");
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
                req->rpc_err = GARBAGE_ARGS;

        if (cli_req.dict.dict_len) {
                /* Unserialize the dictionary */
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len,
                                        &dict);

                        gf_msg (this->name, GF_LOG_ERROR, errno,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");
                        snprintf (err_str, sizeof (err_str), "Unable to decode "

                dict->extra_stdfree = cli_req.dict.dict_val;

        ret = dict_get_str (dict, "volname", &volname);

                snprintf (err_str, sizeof (err_str), "Failed to get volume "
                          "name while handling volume set command");
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_DICT_GET_FAILED, "%s", err_str);

        if (strcmp (volname, "help") == 0 ||
            strcmp (volname, "help-xml") == 0) {
                ret = glusterd_volset_help (dict, &op_errstr);

        ret = dict_get_str (dict, "key1", &key);

                snprintf (err_str, sizeof (err_str), "Failed to get key while"
                          " handling volume set for %s", volname);
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_DICT_GET_FAILED, "%s", err_str);

        ret = dict_get_str (dict, "value1", &value);

                snprintf (err_str, sizeof (err_str), "Failed to get value while"
                          " handling volume set for %s", volname);
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_DICT_GET_FAILED, "%s", err_str);

        gf_msg_debug (this->name, 0, "Received volume set request for "
                      "volume %s", volname);

        ret = glusterd_op_begin_synctask (req, GD_OP_SET_VOLUME, dict);

        ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, dict,
                                             (op_errstr) ? op_errstr : "");

        if (err_str[0] == '\0')
                snprintf (err_str, sizeof (err_str),
                          "Operation failed");
        ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
                                             dict, err_str);

        GF_FREE (op_errstr);

int
glusterd_handle_set_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_set_volume);
}
int
__glusterd_handle_sync_volume (rpcsvc_request_t *req)
{
        gf_cli_req           cli_req = {{0,}};
        dict_t              *dict = NULL;
        gf_cli_rsp           cli_rsp = {0,};
        char                 msg[2048] = {0,};
        char                *volname = NULL;
        gf1_cli_sync_volume  flags = 0;
        char                *hostname = NULL;
        xlator_t            *this = NULL;

        ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);

                // failed to decode msg
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "%s", "Failed to decode "
                        "request received from cli");
                req->rpc_err = GARBAGE_ARGS;

        if (cli_req.dict.dict_len) {
                /* Unserialize the dictionary */
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len,
                                        &dict);

                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");
                        snprintf (msg, sizeof (msg), "Unable to decode the "

                dict->extra_stdfree = cli_req.dict.dict_val;

        ret = dict_get_str (dict, "hostname", &hostname);

                snprintf (msg, sizeof (msg), "Failed to get hostname");
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_HOSTNAME_NOTFOUND_IN_DICT, "%s", msg);

        ret = dict_get_str (dict, "volname", &volname);

                ret = dict_get_int32 (dict, "flags", (int32_t *)&flags);

                        snprintf (msg, sizeof (msg), "Failed to get volume name"

                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_FLAGS_NOTFOUND_IN_DICT, "%s", msg);

        gf_msg (this->name, GF_LOG_INFO, 0,
                GD_MSG_VOL_SYNC_REQ_RCVD, "Received volume sync req "
                "for volume %s", (flags & GF_CLI_SYNC_ALL) ? "all" : volname);

        if (gf_is_local_addr (hostname)) {
                snprintf (msg, sizeof (msg), "sync from localhost"
                          " not allowed");
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_SYNC_FROM_LOCALHOST_UNALLOWED, "%s", msg);

        ret = glusterd_op_begin_synctask (req, GD_OP_SYNC_VOLUME, dict);

        cli_rsp.op_ret = -1;
        cli_rsp.op_errstr = msg;
        if (msg[0] == '\0')
                snprintf (msg, sizeof (msg), "Operation failed");
        glusterd_to_cli (req, &cli_rsp, NULL, 0, NULL,
                         (xdrproc_t)xdr_gf_cli_rsp, dict);

        ret = 0; // error already sent to cli, prevent a second reply

int
glusterd_handle_sync_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_sync_volume);
}
int
glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret,
                            char *op_errstr, dict_t *dict)
{
        gf1_cli_fsm_log_rsp rsp = {0};

        GF_ASSERT (op_errstr);

        rsp.op_ret = op_ret;
        rsp.op_errstr = op_errstr;
        if (rsp.op_ret == 0)
                ret = dict_allocate_and_serialize (dict, &rsp.fsm_log.fsm_log_val,
                                                   &rsp.fsm_log.fsm_log_len);

        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
        GF_FREE (rsp.fsm_log.fsm_log_val);

        gf_msg_debug ("glusterd", 0, "Responded, ret: %d", ret);
int
__glusterd_handle_fsm_log (rpcsvc_request_t *req)
{
        gf1_cli_fsm_log_req   cli_req = {0,};
        dict_t               *dict = NULL;
        glusterd_sm_tr_log_t *log = NULL;
        xlator_t             *this = NULL;
        glusterd_conf_t      *conf = NULL;
        char                  msg[2048] = {0};
        glusterd_peerinfo_t  *peerinfo = NULL;

        ret = xdr_to_generic (req->msg[0], &cli_req,
                              (xdrproc_t)xdr_gf1_cli_fsm_log_req);

                // failed to decode msg
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
                        "request received from client.");
                req->rpc_err = GARBAGE_ARGS;
                snprintf (msg, sizeof (msg), "Garbage request");

        if (strcmp ("", cli_req.name) == 0) {
                conf = this->private;
                ret = glusterd_sm_tr_log_add_to_dict (dict, &conf->op_sm_log);
        } else {
                peerinfo = glusterd_peerinfo_find_by_hostname (cli_req.name);

                        snprintf (msg, sizeof (msg), "%s is not a peer",
                                  cli_req.name);

                ret = glusterd_sm_tr_log_add_to_dict
                        (dict, &peerinfo->sm_log);

        (void) glusterd_fsm_log_send_resp (req, ret, msg, dict);
        free (cli_req.name); // malloced by xdr

        glusterd_friend_sm ();

        return 0; // send 0 to avoid a double reply

int
glusterd_handle_fsm_log (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_fsm_log);
}
int
glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status)
{
        gd1_mgmt_cluster_lock_rsp rsp = {{0},};

        glusterd_get_uuid (&rsp.uuid);
        rsp.op_ret = status;

        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);

        gf_msg_debug (THIS->name, 0, "Responded to lock, ret: %d", ret);

int
glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
{
        gd1_mgmt_cluster_unlock_rsp rsp = {{0},};

        rsp.op_ret = status;
        glusterd_get_uuid (&rsp.uuid);

        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);

        gf_msg_debug (THIS->name, 0, "Responded to unlock, ret: %d", ret);
int
glusterd_op_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
                                    int32_t status)
{
        gd1_mgmt_v3_lock_rsp rsp = {{0},};

        glusterd_get_uuid (&rsp.uuid);
        rsp.op_ret = status;
        if (rsp.op_ret)
                rsp.op_errno = errno;
        gf_uuid_copy (rsp.txn_id, *txn_id);

        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);

        gf_msg_debug (THIS->name, 0, "Responded to mgmt_v3 lock, ret: %d",
                      ret);

int
glusterd_op_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
                                      int32_t status)
{
        gd1_mgmt_v3_unlock_rsp rsp = {{0},};

        rsp.op_ret = status;
        if (rsp.op_ret)
                rsp.op_errno = errno;
        glusterd_get_uuid (&rsp.uuid);
        gf_uuid_copy (rsp.txn_id, *txn_id);

        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);

        gf_msg_debug (THIS->name, 0, "Responded to mgmt_v3 unlock, ret: %d",
                      ret);
int
__glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
{
        gd1_mgmt_cluster_unlock_req  unlock_req = {{0}, };
        glusterd_op_lock_ctx_t      *ctx = NULL;
        xlator_t                    *this = NULL;
        uuid_t                      *txn_id = NULL;
        glusterd_conf_t             *priv = NULL;

        priv = this->private;

        txn_id = &priv->global_txn_id;

        ret = xdr_to_generic (req->msg[0], &unlock_req,
                              (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode unlock "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;

        gf_msg_debug (this->name, 0,
                      "Received UNLOCK from uuid: %s", uuid_utoa (unlock_req.uuid));

        ret = (glusterd_peerinfo_find_by_uuid (unlock_req.uuid) == NULL);

                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_PEER_NOT_FOUND, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (unlock_req.uuid));

        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);

                gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
                        GD_MSG_NO_MEMORY, "No memory.");

        gf_uuid_copy (ctx->uuid, unlock_req.uuid);

        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, txn_id, ctx);

        glusterd_friend_sm ();

int
glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cluster_unlock);
}
int
glusterd_op_stage_send_resp (rpcsvc_request_t *req,
                             int32_t op, int32_t status,
                             char *op_errstr, dict_t *rsp_dict)
{
        gd1_mgmt_stage_op_rsp  rsp = {{0},};
        xlator_t              *this = NULL;

        rsp.op_ret = status;
        glusterd_get_uuid (&rsp.uuid);

        rsp.op_errstr = op_errstr;

        ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
                                           &rsp.dict.dict_len);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
                        "failed to get serialized length of dict");

        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);

        gf_msg_debug (this->name, 0, "Responded to stage, ret: %d", ret);
        GF_FREE (rsp.dict.dict_val);

int
glusterd_op_commit_send_resp (rpcsvc_request_t *req,
                              int32_t op, int32_t status, char *op_errstr,
                              dict_t *rsp_dict)
{
        gd1_mgmt_commit_op_rsp  rsp = {{0}, };
        xlator_t               *this = NULL;

        rsp.op_ret = status;
        glusterd_get_uuid (&rsp.uuid);

        rsp.op_errstr = op_errstr;

        ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
                                           &rsp.dict.dict_len);

                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
                        "failed to get serialized length of dict");

        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);

        gf_msg_debug (this->name, 0, "Responded to commit, ret: %d", ret);

        GF_FREE (rsp.dict.dict_val);
int
__glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
{
        gd1_mgmt_friend_req friend_req = {{0},};
        gf_boolean_t        run_fsm = _gf_true;

        ret = xdr_to_generic (req->msg[0], &friend_req,
                              (xdrproc_t)xdr_gd1_mgmt_friend_req);

                // failed to decode msg
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
                        "request received from friend");
                req->rpc_err = GARBAGE_ARGS;

        gf_msg ("glusterd", GF_LOG_INFO, 0,
                GD_MSG_PROBE_RCVD,
                "Received probe from uuid: %s", uuid_utoa (friend_req.uuid));
        ret = glusterd_handle_friend_req (req, friend_req.uuid,
                                          friend_req.hostname, friend_req.port,
                                          &friend_req);

        if (ret == GLUSTERD_CONNECTION_AWAITED) {
                // the FSM should be run after the connection is established
                run_fsm = _gf_false;
        }

        free (friend_req.hostname); // malloced by xdr

        glusterd_friend_sm ();

int
glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_incoming_friend_req);
}
int
__glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
{
        gd1_mgmt_friend_req friend_req = {{0},};
        char                remote_hostname[UNIX_PATH_MAX + 1] = {0,};

        ret = xdr_to_generic (req->msg[0], &friend_req,
                              (xdrproc_t)xdr_gd1_mgmt_friend_req);

                // failed to decode msg
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
                        "request received.");
                req->rpc_err = GARBAGE_ARGS;

        gf_msg ("glusterd", GF_LOG_INFO, 0,
                GD_MSG_UNFRIEND_REQ_RCVD,
                "Received unfriend from uuid: %s", uuid_utoa (friend_req.uuid));

        ret = glusterd_remote_hostname_get (req, remote_hostname,
                                            sizeof (remote_hostname));

                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_HOSTNAME_RESOLVE_FAIL,
                        "Unable to get the remote hostname");

        ret = glusterd_handle_unfriend_req (req, friend_req.uuid,
                                            remote_hostname, friend_req.port);

        free (friend_req.hostname); // malloced by xdr
        free (friend_req.vols.vols_val); // malloced by xdr

        glusterd_friend_sm ();

int
glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_incoming_unfriend_req);
}
int
glusterd_handle_friend_update_delete (dict_t *dict)
{
        char *hostname = NULL;

        ret = dict_get_str (dict, "hostname", &hostname);

        ret = glusterd_friend_remove (NULL, hostname);

        gf_msg_debug ("glusterd", 0, "Returning %d", ret);

int
glusterd_peer_hostname_update (glusterd_peerinfo_t *peerinfo,
                               const char *hostname, gf_boolean_t store_update)
{
        GF_ASSERT (peerinfo);
        GF_ASSERT (hostname);

        ret = gd_add_address_to_peer (peerinfo, hostname);

                gf_msg (THIS->name, GF_LOG_ERROR, 0,
                        GD_MSG_HOSTNAME_ADD_TO_PEERLIST_FAIL,
                        "Couldn't add address to the peer info");

        ret = glusterd_store_peerinfo (peerinfo);

        gf_msg_debug (THIS->name, 0, "Returning %d", ret);
int
__glusterd_handle_friend_update (rpcsvc_request_t *req)
{
        gd1_mgmt_friend_update      friend_req = {{0},};
        glusterd_peerinfo_t        *peerinfo = NULL;
        glusterd_conf_t            *priv = NULL;
        xlator_t                   *this = NULL;
        gd1_mgmt_friend_update_rsp  rsp = {{0},};
        dict_t                     *dict = NULL;
        char                        key[100] = {0,};
        char                       *uuid_buf = NULL;
        glusterd_peerctx_args_t     args = {0};

        priv = this->private;

        ret = xdr_to_generic (req->msg[0], &friend_req,
                              (xdrproc_t)xdr_gd1_mgmt_friend_update);

                // failed to decode msg
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
                        "request received");
                req->rpc_err = GARBAGE_ARGS;

        if (glusterd_peerinfo_find (friend_req.uuid, NULL) == NULL) {
                gf_msg (this->name, GF_LOG_CRITICAL, 0,
                        GD_MSG_REQ_FROM_UNKNOWN_PEER,
                        "Received friend update request "
                        "from unknown peer %s", uuid_utoa (friend_req.uuid));

        gf_msg ("glusterd", GF_LOG_INFO, 0,
                GD_MSG_FRIEND_UPDATE_RCVD,
                "Received friend update from uuid: %s", uuid_utoa (friend_req.uuid));

        if (friend_req.friends.friends_len) {
                /* Unserialize the dictionary */
                ret = dict_unserialize (friend_req.friends.friends_val,
                                        friend_req.friends.friends_len,
                                        &dict);

                        gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");

                dict->extra_stdfree = friend_req.friends.friends_val;

        ret = dict_get_int32 (dict, "count", &count);

        ret = dict_get_int32 (dict, "op", &op);

        if (GD_FRIEND_UPDATE_DEL == op) {
                ret = glusterd_handle_friend_update_delete (dict);

        args.mode = GD_MODE_ON;
        while (i <= count) {
                memset (key, 0, sizeof (key));
                snprintf (key, sizeof (key), "friend%d.uuid", i);
                ret = dict_get_str (dict, key, &uuid_buf);

                gf_uuid_parse (uuid_buf, uuid);

                if (!gf_uuid_compare (uuid, MY_UUID)) {
                        gf_msg (this->name, GF_LOG_INFO, 0,
                                GD_MSG_UUID_RECEIVED,
                                "Received my uuid as Friend");

                memset (key, 0, sizeof (key));
                snprintf (key, sizeof (key), "friend%d", i);

                peerinfo = glusterd_peerinfo_find (uuid, NULL);
                if (peerinfo == NULL) {
                        /* Create a new peer and add it to the list as there is
                         * no existing peer with the uuid */
                        peerinfo = gd_peerinfo_from_dict (dict, key);
                        if (peerinfo == NULL) {
                                gf_msg (this->name, GF_LOG_ERROR, 0,
                                        GD_MSG_PEERINFO_CREATE_FAIL,
                                        "Could not create peerinfo from dict "
                                        "for prefix %s", key);

                        /* As this is a new peer, it should be added as a
                         * friend. The friend state machine will take care of
                         * correcting the state as required */
                        peerinfo->state.state = GD_FRIEND_STATE_BEFRIENDED;

                        ret = glusterd_friend_add_from_peerinfo (peerinfo, 0,
                                                                 &args);
                } else {
                        /* As an existing peer was found, update it with the new
                         * details */
                        ret = gd_update_peerinfo_from_dict (peerinfo, dict,
                                                            key);

                                gf_msg (this->name, GF_LOG_ERROR, 0,
                                        GD_MSG_PEER_INFO_UPDATE_FAIL,
                                        "Failed to "
                                        "update peer %s", peerinfo->hostname);

                ret = glusterd_store_peerinfo (peerinfo);

                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_PEERINFO_CREATE_FAIL,
                                "Failed to store peerinfo");

        gf_uuid_copy (rsp.uuid, MY_UUID);
        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);

        if (!dict->extra_stdfree && friend_req.friends.friends_val)
                free (friend_req.friends.friends_val); // malloced by xdr

        free (friend_req.friends.friends_val); // malloced by xdr

        glusterd_peerinfo_cleanup (peerinfo);

        glusterd_friend_sm ();

int
glusterd_handle_friend_update (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_friend_update);
}
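/*
 * Illustrative sketch (not glusterd code): the friend-update payload read
 * above is a flattened dictionary of "friend<i>.<field>" keys plus "count"
 * and "op".  Note that the handler's loop is 1-based.  Reading the same
 * shape back, stand-alone (the uuid strings are hypothetical):
 */
#if 0
#include <stdio.h>

int
main (void)
{
        const char *uuids[] = {
                "6ee97e8f-0001-0001-0001-000000000001",
                "6ee97e8f-0002-0002-0002-000000000002",
        };
        char key[100];
        int  count = 2;
        int  i;

        for (i = 1; i <= count; i++) {
                snprintf (key, sizeof (key), "friend%d.uuid", i);
                /* in glusterd: dict_get_str (dict, key, &uuid_buf) */
                printf ("%s -> %s\n", key, uuids[i - 1]);
        }
        return 0;
}
#endif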
2835 __glusterd_handle_probe_query (rpcsvc_request_t *req)
2838 xlator_t *this = NULL;
2839 glusterd_conf_t *conf = NULL;
2840 gd1_mgmt_probe_req probe_req = {{0},};
2841 gd1_mgmt_probe_rsp rsp = {{0},};
2842 glusterd_peerinfo_t *peerinfo = NULL;
2843 glusterd_peerctx_args_t args = {0};
2845 char remote_hostname[UNIX_PATH_MAX + 1] = {0,};
2849 ret = xdr_to_generic (req->msg[0], &probe_req,
2850 (xdrproc_t)xdr_gd1_mgmt_probe_req);
2852 //failed to decode msg;
2853 gf_msg (this->name, GF_LOG_ERROR, 0,
2854 GD_MSG_REQ_DECODE_FAIL, "Failed to decode probe "
2856 req->rpc_err = GARBAGE_ARGS;
2862 conf = this->private;
2864 port = probe_req.port;
2866 port = GF_DEFAULT_BASE_PORT;
2868 gf_msg ("glusterd", GF_LOG_INFO, 0,
2870 "Received probe from uuid: %s", uuid_utoa (probe_req.uuid));
2872 /* Check for uuid collision and handle it in a user friendly way by
2873 * sending the error.
2875 if (!gf_uuid_compare (probe_req.uuid, MY_UUID)) {
2876 gf_msg (THIS->name, GF_LOG_ERROR, 0,
2877 GD_MSG_UUIDS_SAME_RETRY, "Peer uuid %s is same as "
2878 "local uuid. Please check the uuid of both the peers "
2879 "from %s/%s", uuid_utoa (probe_req.uuid),
2880 GLUSTERD_DEFAULT_WORKDIR, GLUSTERD_INFO_FILE);
2882 rsp.op_errno = GF_PROBE_SAME_UUID;
2887 ret = glusterd_remote_hostname_get (req, remote_hostname,
2888 sizeof (remote_hostname));
2890 gf_msg ("glusterd", GF_LOG_ERROR, 0,
2891 GD_MSG_HOSTNAME_RESOLVE_FAIL,
2892 "Unable to get the remote hostname");
2897 peerinfo = glusterd_peerinfo_find (probe_req.uuid, remote_hostname);
2898 if ((peerinfo == NULL) && (!cds_list_empty (&conf->peers))) {
2900 rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER;
2901 } else if (peerinfo == NULL) {
2902 gf_msg ("glusterd", GF_LOG_INFO, 0,
2903 GD_MSG_PEER_NOT_FOUND,
2904 "Unable to find peerinfo"
2905 " for host: %s (%d)", remote_hostname, port);
2906 args.mode = GD_MODE_ON;
2907 ret = glusterd_friend_add (remote_hostname, port,
2908 GD_FRIEND_STATE_PROBE_RCVD,
2909 NULL, &peerinfo, 0, &args);
2911 gf_msg ("glusterd", GF_LOG_ERROR, 0,
2912 GD_MSG_PEER_ADD_FAIL,
2913 "Failed to add peer %s",
2915 rsp.op_errno = GF_PROBE_ADD_FAILED;
2921 gf_uuid_copy (rsp.uuid, MY_UUID);
2923 rsp.hostname = probe_req.hostname;
2926 glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
2927 (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
2930 gf_msg ("glusterd", GF_LOG_INFO, 0,
2931 GD_MSG_RESPONSE_INFO, "Responded to %s, op_ret: %d, "
2932 "op_errno: %d, ret: %d", remote_hostname,
2933 rsp.op_ret, rsp.op_errno, ret);
2936 free (probe_req.hostname);//malloced by xdr
2938 glusterd_friend_sm ();
2944 int glusterd_handle_probe_query (rpcsvc_request_t *req)
2946 return glusterd_big_locked_handler (req, __glusterd_handle_probe_query);
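/* Illustrative sketch (not in the original source): the self-probe guard
 * above hinges on gf_uuid_compare() returning 0 for equal uuids, so
 * "!gf_uuid_compare (a, b)" reads as "a equals b". A minimal version of
 * that guard, reusing only names visible above (probe_reject_self is a
 * hypothetical helper), could look like this:
 */
#if 0
static int
probe_reject_self (uuid_t incoming, gd1_mgmt_probe_rsp *rsp)
{
        if (!gf_uuid_compare (incoming, MY_UUID)) {
                /* probing ourselves: report the collision instead of
                 * creating a bogus peer entry */
                rsp->op_ret = -1;
                rsp->op_errno = GF_PROBE_SAME_UUID;
                return -1;
        }
        return 0;
}
#endif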
2950 __glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
2953 gf_cli_req cli_req = {{0,}};
2954 dict_t *dict = NULL;
2955 glusterd_op_t cli_op = GD_OP_PROFILE_VOLUME;
2956 char *volname = NULL;
2958 char err_str[2048] = {0,};
2959 xlator_t *this = NULL;
2965 ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
2967 //failed to decode msg;
2968 gf_msg (this->name, GF_LOG_ERROR, 0,
2969 GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
2970 "request received from cli");
2971 req->rpc_err = GARBAGE_ARGS;
2975 if (cli_req.dict.dict_len > 0) {
2979 dict_unserialize (cli_req.dict.dict_val,
2980 cli_req.dict.dict_len, &dict);
2983 ret = dict_get_str (dict, "volname", &volname);
2985 snprintf (err_str, sizeof (err_str), "Unable to get volume "
2987 gf_msg (this->name, GF_LOG_ERROR, 0,
2988 GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
2992 gf_msg (this->name, GF_LOG_INFO, 0,
2993 GD_MSG_VOL_PROFILE_REQ_RCVD,
2994 "Received volume profile req "
2995 "for volume %s", volname);
2996 ret = dict_get_int32 (dict, "op", &op);
2998 snprintf (err_str, sizeof (err_str), "Unable to get operation");
2999 gf_msg (this->name, GF_LOG_ERROR, 0,
3000 GD_MSG_DICT_GET_FAILED, "%s", err_str);
3004 ret = glusterd_op_begin (req, cli_op, dict, err_str, sizeof (err_str));
3007 glusterd_friend_sm ();
3010 free (cli_req.dict.dict_val);
3013 if (err_str[0] == '\0')
3014 snprintf (err_str, sizeof (err_str),
3015 "Operation failed");
3016 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
3020 gf_msg_debug (this->name, 0, "Returning %d", ret);
3025 glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
3027 return glusterd_big_locked_handler (req,
3028 __glusterd_handle_cli_profile_volume);
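/* Illustrative sketch (not in the original source): the CLI handlers in
 * this file share one decode sequence -- xdr_to_generic() to unpack the
 * rpc payload, dict_unserialize() to rebuild the dictionary, then
 * dict_get_*() for individual fields. Stripped of logging, with a
 * hypothetical "out" label, the skeleton is:
 */
#if 0
        gf_cli_req  cli_req = {{0,}};
        dict_t     *dict    = NULL;
        char       *volname = NULL;
        int         ret     = -1;

        ret = xdr_to_generic (req->msg[0], &cli_req,
                              (xdrproc_t)xdr_gf_cli_req);
        if (ret < 0)
                goto out;               /* malformed request */

        dict = dict_new ();
        ret = dict_unserialize (cli_req.dict.dict_val,
                                cli_req.dict.dict_len, &dict);
        if (ret)
                goto out;               /* undecodable dictionary */

        ret = dict_get_str (dict, "volname", &volname);
#endif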
3032 __glusterd_handle_getwd (rpcsvc_request_t *req)
3035 gf1_cli_getwd_rsp rsp = {0,};
3036 glusterd_conf_t *priv = NULL;
3040 priv = THIS->private;
3043 gf_msg ("glusterd", GF_LOG_INFO, 0,
3044 GD_MSG_GETWD_REQ_RCVD, "Received getwd req");
3046 rsp.wd = priv->workdir;
3048 glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3049 (xdrproc_t)xdr_gf1_cli_getwd_rsp);
3052 glusterd_friend_sm ();
3059 glusterd_handle_getwd (rpcsvc_request_t *req)
3061 return glusterd_big_locked_handler (req, __glusterd_handle_getwd);
3065 __glusterd_handle_mount (rpcsvc_request_t *req)
3067 gf1_cli_mount_req mnt_req = {0,};
3068 gf1_cli_mount_rsp rsp = {0,};
3069 dict_t *dict = NULL;
3071 glusterd_conf_t *priv = NULL;
3074 priv = THIS->private;
3076 ret = xdr_to_generic (req->msg[0], &mnt_req,
3077 (xdrproc_t)xdr_gf1_cli_mount_req);
3079 //failed to decode msg;
3080 gf_msg ("glusterd", GF_LOG_ERROR, 0,
3081 GD_MSG_REQ_DECODE_FAIL, "Failed to decode mount "
3082 "request received");
3083 req->rpc_err = GARBAGE_ARGS;
3085 rsp.op_errno = EINVAL;
3089 gf_msg ("glusterd", GF_LOG_INFO, 0,
3090 GD_MSG_MOUNT_REQ_RCVD,
3091 "Received mount req");
3093 if (mnt_req.dict.dict_len) {
3094 /* Unserialize the dictionary */
3097 ret = dict_unserialize (mnt_req.dict.dict_val,
3098 mnt_req.dict.dict_len,
3101 gf_msg ("glusterd", GF_LOG_ERROR, 0,
3102 GD_MSG_DICT_UNSERIALIZE_FAIL,
3104 "unserialize req-buffer to dictionary");
3106                         rsp.op_errno = EINVAL;
3109 dict->extra_stdfree = mnt_req.dict.dict_val;
3113 synclock_unlock (&priv->big_lock);
3114 rsp.op_ret = glusterd_do_mount (mnt_req.label, dict,
3115 &rsp.path, &rsp.op_errno);
3116 synclock_lock (&priv->big_lock);
3122 glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3123 (xdrproc_t)xdr_gf1_cli_mount_rsp);
3131 glusterd_friend_sm ();
3138 glusterd_handle_mount (rpcsvc_request_t *req)
3140 return glusterd_big_locked_handler (req, __glusterd_handle_mount);
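/* Illustrative sketch (not in the original source): the mount handler
 * above releases conf->big_lock around glusterd_do_mount() because that
 * call may block, and holding the big lock across it would stall every
 * other handler. The general drop/relock pattern (some_blocking_call is
 * a hypothetical stand-in) is:
 */
#if 0
        synclock_unlock (&priv->big_lock);
        ret = some_blocking_call (args);
        synclock_lock (&priv->big_lock);
        /* shared state may have changed while the lock was dropped, so
         * re-validate anything cached before the unlock */
#endif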
3144 __glusterd_handle_umount (rpcsvc_request_t *req)
3146 gf1_cli_umount_req umnt_req = {0,};
3147 gf1_cli_umount_rsp rsp = {0,};
3148 char *mountbroker_root = NULL;
3149 char mntp[PATH_MAX] = {0,};
3151 runner_t runner = {0,};
3153 xlator_t *this = THIS;
3154 gf_boolean_t dir_ok = _gf_false;
3157 glusterd_conf_t *priv = NULL;
3161 priv = this->private;
3163 ret = xdr_to_generic (req->msg[0], &umnt_req,
3164 (xdrproc_t)xdr_gf1_cli_umount_req);
3166 //failed to decode msg;
3167 gf_msg (this->name, GF_LOG_ERROR, 0,
3168 GD_MSG_REQ_DECODE_FAIL, "Failed to decode umount"
3170 req->rpc_err = GARBAGE_ARGS;
3175 gf_msg ("glusterd", GF_LOG_INFO, 0,
3176 GD_MSG_UMOUNT_REQ_RCVD,
3177 "Received umount req");
3179 if (dict_get_str (this->options, "mountbroker-root",
3180 &mountbroker_root) != 0) {
3181 rsp.op_errno = ENOENT;
3185         /* check whether umounting this path is allowed */
3186 path = gf_strdup (umnt_req.path);
3188 rsp.op_errno = ENOMEM;
3192 pdir = dirname (path);
3193 t = strtail (pdir, mountbroker_root);
3194 if (t && *t == '/') {
3195                 t = strtail (++t, MB_HIVE);
3201 rsp.op_errno = EACCES;
3205 synclock_unlock (&priv->big_lock);
3207 if (umnt_req.lazy) {
3208 rsp.op_ret = gf_umount_lazy (this->name, umnt_req.path, 0);
3211 runner_add_args (&runner, _PATH_UMOUNT, umnt_req.path, NULL);
3212 rsp.op_ret = runner_run (&runner);
3215 synclock_lock (&priv->big_lock);
3216 if (rsp.op_ret == 0) {
3217 if (realpath (umnt_req.path, mntp))
3221 rsp.op_errno = errno;
3223 if (unlink (umnt_req.path) != 0) {
3225 rsp.op_errno = errno;
3233 glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3234 (xdrproc_t)xdr_gf1_cli_umount_rsp);
3237 glusterd_friend_sm ();
3244 glusterd_handle_umount (rpcsvc_request_t *req)
3246 return glusterd_big_locked_handler (req, __glusterd_handle_umount);
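/* Illustrative sketch (not in the original source): the umount handler
 * only permits paths under <mountbroker-root>/MB_HIVE/, peeling prefixes
 * with strtail(). Assuming strtail() returns the remainder of the string
 * after the given prefix, or NULL on mismatch, the whole check reduces to:
 */
#if 0
        pdir = dirname (path);
        t = strtail (pdir, mountbroker_root);   /* strip broker root  */
        if (t && *t == '/')
                t = strtail (++t, MB_HIVE);     /* strip the hive dir */
        dir_ok = (t != NULL && *t == '/');      /* must sit below it  */
        if (!dir_ok)
                rsp.op_errno = EACCES;          /* refuse foreign paths */
#endif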
3250 glusterd_friend_remove (uuid_t uuid, char *hostname)
3253 glusterd_peerinfo_t *peerinfo = NULL;
3257 peerinfo = glusterd_peerinfo_find (uuid, hostname);
3258 if (peerinfo == NULL) {
3263 ret = glusterd_friend_remove_cleanup_vols (peerinfo->uuid);
3265 gf_msg (THIS->name, GF_LOG_WARNING, 0,
3266 GD_MSG_VOL_CLEANUP_FAIL, "Volumes cleanup failed");
3268 /* Giving up the critical section here as glusterd_peerinfo_cleanup must
3269 * be called from outside a critical section
3271 ret = glusterd_peerinfo_cleanup (peerinfo);
3273 gf_msg_debug (THIS->name, 0, "returning %d", ret);
3278 glusterd_rpc_create (struct rpc_clnt **rpc,
3280 rpc_clnt_notify_t notify_fn,
3283 struct rpc_clnt *new_rpc = NULL;
3285 xlator_t *this = NULL;
3290 GF_ASSERT (options);
3292         /* TODO: is a frame count of 16 enough, or do we need more? */
3293 new_rpc = rpc_clnt_new (options, this, this->name, 16);
3297 ret = rpc_clnt_register_notify (new_rpc, notify_fn, notify_data);
3301 ret = rpc_clnt_start (new_rpc);
3305 (void) rpc_clnt_unref (new_rpc);
3309 gf_msg_debug (this->name, 0, "returning %d", ret);
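/* Illustrative sketch (not in the original source): on failure,
 * glusterd_rpc_create() unrefs the half-built client itself, so a caller
 * only checks the return value. A hypothetical caller (my_notify_fn and
 * my_ctx are made-up names) would look like:
 */
#if 0
        struct rpc_clnt *rpc = NULL;

        ret = glusterd_rpc_create (&rpc, options, my_notify_fn, my_ctx);
        if (ret)
                goto out;       /* nothing to clean up here: the helper
                                   already dropped its reference */
#endif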
3314 glusterd_transport_keepalive_options_get (int *interval, int *time,
3318 xlator_t *this = NULL;
3323 ret = dict_get_int32 (this->options,
3324 "transport.socket.keepalive-interval",
3326 ret = dict_get_int32 (this->options,
3327 "transport.socket.keepalive-time",
3329 ret = dict_get_int32 (this->options,
3330 "transport.tcp-user-timeout",
3336 glusterd_transport_inet_options_build (dict_t **options, const char *hostname,
3339 dict_t *dict = NULL;
3340 int32_t interval = -1;
3342 int32_t timeout = -1;
3345 GF_ASSERT (options);
3346 GF_ASSERT (hostname);
3349 port = GLUSTERD_DEFAULT_PORT;
3351 /* Build default transport options */
3352 ret = rpc_transport_inet_options_build (&dict, hostname, port);
3356         /* Set frame-timeout to 10 mins. The default timeout of 30 mins is too
3357          * long when compared to the 2 min cli timeout. This ensures users don't
3358          * wait too long after the cli times out before being able to resume normal
3361 ret = dict_set_int32 (dict, "frame-timeout", 600);
3363 gf_msg ("glusterd", GF_LOG_ERROR, 0,
3364 GD_MSG_DICT_SET_FAILED,
3365 "Failed to set frame-timeout");
3369 /* Set keepalive options */
3370 glusterd_transport_keepalive_options_get (&interval, &time, &timeout);
3372 if ((interval > 0) || (time > 0))
3373 ret = rpc_transport_keepalive_options_set (dict, interval,
3377 gf_msg_debug ("glusterd", 0, "Returning %d", ret);
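/* Illustrative sketch (not in the original source): a typical caller of
 * the builder above, assuming the elided third parameter is the port.
 * The returned dict carries the inet address, the 10-minute
 * frame-timeout and any keepalive values found in this->options:
 */
#if 0
        dict_t *options = NULL;

        ret = glusterd_transport_inet_options_build (&options,
                                                     peerinfo->hostname,
                                                     GLUSTERD_DEFAULT_PORT);
        if (ret)
                goto out;
        /* options is now ready to hand to glusterd_rpc_create() */
#endif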
3382 glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
3383 glusterd_peerctx_args_t *args)
3385 dict_t *options = NULL;
3387 glusterd_peerctx_t *peerctx = NULL;
3388 data_t *data = NULL;
3390 peerctx = GF_CALLOC (1, sizeof (*peerctx), gf_gld_mt_peerctx_t);
3395 peerctx->args = *args;
3397 gf_uuid_copy (peerctx->peerid, peerinfo->uuid);
3398 peerctx->peername = gf_strdup (peerinfo->hostname);
3399         peerctx->peerinfo_gen = peerinfo->generation; /* A peerinfo's generation
3400 number can be used to
3404 ret = glusterd_transport_inet_options_build (&options,
3411 * For simulated multi-node testing, we need to make sure that we
3412 * create our RPC endpoint with the same address that the peer would
3415 if (this->options) {
3416 data = dict_get(this->options,"transport.socket.bind-address");
3418 ret = dict_set(options,
3419 "transport.socket.source-addr",data);
3421 data = dict_get(this->options,"ping-timeout");
3423 ret = dict_set(options,
3424 "ping-timeout",data);
3428 /* Enable encryption for the client connection if management encryption
3431 if (this->ctx->secure_mgmt) {
3432 ret = dict_set_str (options, "transport.socket.ssl-enabled",
3435 gf_msg ("glusterd", GF_LOG_ERROR, 0,
3436 GD_MSG_DICT_SET_FAILED,
3437 "failed to set ssl-enabled in dict");
3442 ret = glusterd_rpc_create (&peerinfo->rpc, options,
3443 glusterd_peer_rpc_notify, peerctx);
3445 gf_msg (this->name, GF_LOG_ERROR, 0,
3446 GD_MSG_RPC_CREATE_FAIL,
3447 "failed to create rpc for"
3448 " peer %s", peerinfo->hostname);
3459 glusterd_friend_add (const char *hoststr, int port,
3460 glusterd_friend_sm_state_t state,
3462 glusterd_peerinfo_t **friend,
3463 gf_boolean_t restore,
3464 glusterd_peerctx_args_t *args)
3467 xlator_t *this = NULL;
3468 glusterd_conf_t *conf = NULL;
3471 conf = this->private;
3473 GF_ASSERT (hoststr);
3476 *friend = glusterd_peerinfo_new (state, uuid, hoststr, port);
3477 if (*friend == NULL) {
3483 * We can't add to the list after calling glusterd_friend_rpc_create,
3484 * even if it succeeds, because by then the callback to take it back
3485 * off and free might have happened already (notably in the case of an
3486 * invalid peer name). That would mean we're adding something that had
3487          * just been freed, and we're likely to crash later.
3489 cds_list_add_tail_rcu (&(*friend)->uuid_list, &conf->peers);
3491         //restore needs to first create the list of peers, then create rpcs
3492         //to keep track of quorum in a race-free manner. During restore,
3493         //rpc-create calls rpc_notify for each peer while the friend-list is
3494         //partially constructed, leading to wrong quorum calculations.
3496 ret = glusterd_store_peerinfo (*friend);
3498 ret = glusterd_friend_rpc_create (this, *friend, args);
3501 gf_msg (this->name, GF_LOG_ERROR, 0,
3502 GD_MSG_PEERINFO_CREATE_FAIL,
3503 "Failed to store peerinfo");
3508 (void) glusterd_peerinfo_cleanup (*friend);
3513 gf_msg (this->name, GF_LOG_INFO, 0,
3514 GD_MSG_CONNECT_RETURNED, "connect returned %d", ret);
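/* Illustrative sketch (not in the original source): the ordering rule in
 * the comment above -- publish to the RCU list strictly before creating
 * the rpc -- exists because the notify callback can fire, and free the
 * peerinfo, before glusterd_friend_rpc_create() even returns.
 * Schematically ("peer" is a hypothetical peerinfo pointer):
 */
#if 0
        /* correct: publish first, connect second */
        cds_list_add_tail_rcu (&peer->uuid_list, &conf->peers);
        ret = glusterd_friend_rpc_create (this, peer, args);

        /* wrong: a fast failure may free "peer" in the callback, and
         * the insertion below would then publish freed memory */
        ret = glusterd_friend_rpc_create (this, peer, args);
        cds_list_add_tail_rcu (&peer->uuid_list, &conf->peers);
#endif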
3518 /* glusterd_friend_add_from_peerinfo() adds a new peer into the local friends
3519  * list from a pre-created @peerinfo object. It otherwise works similarly to
3520 * glusterd_friend_add()
3523 glusterd_friend_add_from_peerinfo (glusterd_peerinfo_t *friend,
3524 gf_boolean_t restore,
3525 glusterd_peerctx_args_t *args)
3528 xlator_t *this = NULL;
3529 glusterd_conf_t *conf = NULL;
3532 conf = this->private;
3535 GF_VALIDATE_OR_GOTO (this->name, (friend != NULL), out);
3538 * We can't add to the list after calling glusterd_friend_rpc_create,
3539 * even if it succeeds, because by then the callback to take it back
3540 * off and free might have happened already (notably in the case of an
3541 * invalid peer name). That would mean we're adding something that had
3542          * just been freed, and we're likely to crash later.
3544 cds_list_add_tail_rcu (&friend->uuid_list, &conf->peers);
3546         //restore needs to first create the list of peers, then create rpcs
3547         //to keep track of quorum in a race-free manner. During restore,
3548         //rpc-create calls rpc_notify for each peer while the friend-list is
3549         //partially constructed, leading to wrong quorum calculations.
3551 ret = glusterd_store_peerinfo (friend);
3553 ret = glusterd_friend_rpc_create (this, friend, args);
3556 gf_msg (this->name, GF_LOG_ERROR, 0,
3557 GD_MSG_PEERINFO_CREATE_FAIL,
3558 "Failed to store peerinfo");
3563 gf_msg (this->name, GF_LOG_INFO, 0,
3564 GD_MSG_CONNECT_RETURNED,
3565 "connect returned %d", ret);
3570 glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
3571 dict_t *dict, int *op_errno)
3574 glusterd_peerinfo_t *peerinfo = NULL;
3575 glusterd_peerctx_args_t args = {0};
3576 glusterd_friend_sm_event_t *event = NULL;
3578 GF_ASSERT (hoststr);
3581 peerinfo = glusterd_peerinfo_find (NULL, hoststr);
3583 if (peerinfo == NULL) {
3584 gf_msg ("glusterd", GF_LOG_INFO, 0,
3585 GD_MSG_PEER_NOT_FOUND, "Unable to find peerinfo"
3586 " for host: %s (%d)", hoststr, port);
3587 args.mode = GD_MODE_ON;
3590 ret = glusterd_friend_add (hoststr, port,
3591 GD_FRIEND_STATE_DEFAULT,
3592 NULL, &peerinfo, 0, &args);
3593 if ((!ret) && (!peerinfo->connected)) {
3594 ret = GLUSTERD_CONNECTION_AWAITED;
3597 } else if (peerinfo->connected &&
3598 (GD_FRIEND_STATE_BEFRIENDED == peerinfo->state.state)) {
3599 if (peerinfo->detaching) {
3602 *op_errno = GF_PROBE_FRIEND_DETACHING;
3605 ret = glusterd_peer_hostname_update (peerinfo, hoststr,
3609                 //this is just a rename, so inject a local-acc event to propagate the cluster update
3610 ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_LOCAL_ACC,
3613 event->peername = gf_strdup (peerinfo->hostname);
3614 gf_uuid_copy (event->peerid, peerinfo->uuid);
3616 ret = glusterd_friend_sm_inject_event (event);
3617 glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_SUCCESS,
3618 NULL, (char*)hoststr,
3622 glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL,
3623 (char*)hoststr, port, dict);
3628 gf_msg_debug ("glusterd", 0, "returning %d", ret);
3633 glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
3634 uuid_t uuid, dict_t *dict, int *op_errno)
3637 glusterd_peerinfo_t *peerinfo = NULL;
3638 glusterd_friend_sm_event_t *event = NULL;
3639 glusterd_probe_ctx_t *ctx = NULL;
3641 GF_ASSERT (hoststr);
3646 peerinfo = glusterd_peerinfo_find (uuid, hoststr);
3647 if (peerinfo == NULL) {
3649 gf_msg ("glusterd", GF_LOG_INFO, 0,
3650 GD_MSG_PEER_NOT_FOUND, "Unable to find peerinfo"
3651 " for host: %s %d", hoststr, port);
3655 if (!peerinfo->rpc) {
3660 if (peerinfo->detaching) {
3663 *op_errno = GF_DEPROBE_FRIEND_DETACHING;
3667 ret = glusterd_friend_sm_new_event
3668 (GD_FRIEND_EVENT_INIT_REMOVE_FRIEND, &event);
3671 gf_msg ("glusterd", GF_LOG_ERROR, 0,
3672 GD_MSG_EVENT_NEW_GET_FAIL,
3673 "Unable to get new event");
3677 ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
3683 ctx->hostname = gf_strdup (hoststr);
3690 event->peername = gf_strdup (hoststr);
3691 gf_uuid_copy (event->peerid, uuid);
3693 ret = glusterd_friend_sm_inject_event (event);
3696 gf_msg ("glusterd", GF_LOG_ERROR, 0,
3697 GD_MSG_EVENT_INJECT_FAIL, "Unable to inject event %d, "
3698 "ret = %d", event->event, ret);
3701 peerinfo->detaching = _gf_true;
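/* Illustrative sketch (not in the original source): probe and deprobe
 * both drive the friend state machine the same way -- allocate an event,
 * attach the peer identity (and, in elided lines, a context), inject it,
 * and let glusterd_friend_sm() consume it later. The skeleton:
 */
#if 0
        glusterd_friend_sm_event_t *event = NULL;

        ret = glusterd_friend_sm_new_event
                        (GD_FRIEND_EVENT_INIT_REMOVE_FRIEND, &event);
        if (ret)
                goto out;
        event->peername = gf_strdup (hoststr);
        gf_uuid_copy (event->peerid, uuid);
        ret = glusterd_friend_sm_inject_event (event);
#endif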
3710 glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int port)
3712 gd1_mgmt_friend_rsp rsp = {{0}, };
3714 xlator_t *this = NULL;
3715 glusterd_conf_t *conf = NULL;
3717 GF_ASSERT (hostname);
3723 conf = this->private;
3725 gf_uuid_copy (rsp.uuid, MY_UUID);
3726 rsp.hostname = hostname;
3728 ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3729 (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
3731 gf_msg ("glusterd", GF_LOG_INFO, 0,
3732 GD_MSG_RESPONSE_INFO,
3733 "Responded to %s (%d), ret: %d", hostname, port, ret);
3739 glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *myhostname,
3740 char *remote_hostname, int port, int32_t op_ret,
3743 gd1_mgmt_friend_rsp rsp = {{0}, };
3745 xlator_t *this = NULL;
3746 glusterd_conf_t *conf = NULL;
3748 GF_ASSERT (myhostname);
3753 conf = this->private;
3755 gf_uuid_copy (rsp.uuid, MY_UUID);
3756 rsp.op_ret = op_ret;
3757 rsp.op_errno = op_errno;
3758 rsp.hostname = gf_strdup (myhostname);
3761 ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3762 (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
3764 gf_msg ("glusterd", GF_LOG_INFO, 0,
3765 GD_MSG_RESPONSE_INFO,
3766 "Responded to %s (%d), ret: %d", remote_hostname, port, ret);
3767 GF_FREE (rsp.hostname);
3772 set_probe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
3773 size_t len, char *hostname, int port)
3775 if ((op_errstr) && (strcmp (op_errstr, ""))) {
3776 snprintf (errstr, len, "%s", op_errstr);
3782 case GF_PROBE_LOCALHOST:
3783 snprintf (errstr, len, "Probe on localhost not "
3787 case GF_PROBE_FRIEND:
3788 snprintf (errstr, len, "Host %s port %d already"
3789 " in peer list", hostname, port);
3792 case GF_PROBE_FRIEND_DETACHING:
3793 snprintf (errstr, len, "Peer is already being "
3794 "detached from cluster.\n"
3795 "Check peer status by running "
3796 "gluster peer status");
3800 snprintf (errstr, len, "Probe returned "
3802 strerror (op_errno));
3807 case GF_PROBE_ANOTHER_CLUSTER:
3808 snprintf (errstr, len, "%s is already part of "
3809 "another cluster", hostname);
3812 case GF_PROBE_VOLUME_CONFLICT:
3813                         snprintf (errstr, len, "At least one volume on "
3814 "%s conflicts with existing volumes "
3815 "in the cluster", hostname);
3818 case GF_PROBE_UNKNOWN_PEER:
3819 snprintf (errstr, len, "%s responded with "
3820 "'unknown peer' error, this could "
3821 "happen if %s doesn't have localhost "
3822 "in its peer database", hostname,
3826 case GF_PROBE_ADD_FAILED:
3827 snprintf (errstr, len, "Failed to add peer "
3828 "information on %s", hostname);
3831 case GF_PROBE_SAME_UUID:
3832 snprintf (errstr, len, "Peer uuid (host %s) is "
3833 "same as local uuid", hostname);
3836 case GF_PROBE_QUORUM_NOT_MET:
3837 snprintf (errstr, len, "Cluster quorum is not "
3838 "met. Changing peers is not allowed "
3842 case GF_PROBE_MISSED_SNAP_CONFLICT:
3843 snprintf (errstr, len, "Failed to update "
3844 "list of missed snapshots from "
3845 "peer %s", hostname);
3848 case GF_PROBE_SNAP_CONFLICT:
3849 snprintf (errstr, len, "Conflict in comparing "
3850 "list of snapshots from "
3851 "peer %s", hostname);
3855 snprintf (errstr, len, "Probe returned with "
3856 "%s", strerror (op_errno));
3863 glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
3864 int32_t op_errno, char *op_errstr, char *hostname,
3865 int port, dict_t *dict)
3867 gf_cli_rsp rsp = {0,};
3869 char errstr[2048] = {0,};
3870 char *cmd_str = NULL;
3871 xlator_t *this = THIS;
3876 (void) set_probe_error_str (op_ret, op_errno, op_errstr, errstr,
3877 sizeof (errstr), hostname, port);
3880 ret = dict_get_str (dict, "cmd-str", &cmd_str);
3882 gf_msg (this->name, GF_LOG_ERROR, 0,
3883 GD_MSG_CMDSTR_NOTFOUND_IN_DICT, "Failed to get "
3887 rsp.op_ret = op_ret;
3888 rsp.op_errno = op_errno;
3889 rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
3891 gf_cmd_log ("", "%s : %s %s %s", cmd_str,
3892 (op_ret) ? "FAILED" : "SUCCESS",
3893 (errstr[0] != '\0') ? ":" : " ",
3894 (errstr[0] != '\0') ? errstr : " ");
3896 ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3897 (xdrproc_t)xdr_gf_cli_rsp);
3901 gf_msg_debug (this->name, 0, "Responded to CLI, ret: %d", ret);
3907 set_deprobe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
3908 size_t len, char *hostname)
3910 if ((op_errstr) && (strcmp (op_errstr, ""))) {
3911 snprintf (errstr, len, "%s", op_errstr);
3917 case GF_DEPROBE_LOCALHOST:
3918 snprintf (errstr, len, "%s is localhost",
3922 case GF_DEPROBE_NOT_FRIEND:
3923 snprintf (errstr, len, "%s is not part of "
3924 "cluster", hostname);
3927 case GF_DEPROBE_BRICK_EXIST:
3928 snprintf (errstr, len, "Brick(s) with the peer "
3929 "%s exist in cluster", hostname);
3932 case GF_DEPROBE_FRIEND_DOWN:
3933 snprintf (errstr, len, "One of the peers is "
3934 "probably down. Check with "
3938 case GF_DEPROBE_QUORUM_NOT_MET:
3939 snprintf (errstr, len, "Cluster quorum is not "
3940 "met. Changing peers is not allowed "
3944 case GF_DEPROBE_FRIEND_DETACHING:
3945 snprintf (errstr, len, "Peer is already being "
3946 "detached from cluster.\n"
3947 "Check peer status by running "
3948 "gluster peer status");
3951 snprintf (errstr, len, "Detach returned with "
3952 "%s", strerror (op_errno));
3961 glusterd_xfer_cli_deprobe_resp (rpcsvc_request_t *req, int32_t op_ret,
3962 int32_t op_errno, char *op_errstr,
3963 char *hostname, dict_t *dict)
3965 gf_cli_rsp rsp = {0,};
3967 char *cmd_str = NULL;
3968 char errstr[2048] = {0,};
3972 (void) set_deprobe_error_str (op_ret, op_errno, op_errstr, errstr,
3973 sizeof (errstr), hostname);
3976 ret = dict_get_str (dict, "cmd-str", &cmd_str);
3978 gf_msg (THIS->name, GF_LOG_ERROR, 0,
3979 GD_MSG_CMDSTR_NOTFOUND_IN_DICT, "Failed to get "
3983 rsp.op_ret = op_ret;
3984 rsp.op_errno = op_errno;
3985 rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
3987 gf_cmd_log ("", "%s : %s %s %s", cmd_str,
3988 (op_ret) ? "FAILED" : "SUCCESS",
3989 (errstr[0] != '\0') ? ":" : " ",
3990 (errstr[0] != '\0') ? errstr : " ");
3992 ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3993 (xdrproc_t)xdr_gf_cli_rsp);
3995 gf_msg_debug (THIS->name, 0, "Responded to CLI, ret: %d", ret);
4001 glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
4004 glusterd_conf_t *priv = NULL;
4005 glusterd_peerinfo_t *entry = NULL;
4007 dict_t *friends = NULL;
4008 gf1_cli_peer_list_rsp rsp = {0,};
4009 char my_uuid_str[64] = {0,};
4010 char key[256] = {0,};
4012 priv = THIS->private;
4015 friends = dict_new ();
4017 gf_msg (THIS->name, GF_LOG_ERROR, ENOMEM,
4018 GD_MSG_NO_MEMORY, "Out of Memory");
4022         /* Reset ret to 0, needed to prevent failure in case no peers exist */
4025 if (!cds_list_empty (&priv->peers)) {
4026 cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
4028 ret = gd_add_peer_detail_to_dict (entry,
4039 if (flags == GF_CLI_LIST_POOL_NODES) {
4041 snprintf (key, 256, "friend%d.uuid", count);
4042 uuid_utoa_r (MY_UUID, my_uuid_str);
4043 ret = dict_set_str (friends, key, my_uuid_str);
4047 snprintf (key, 256, "friend%d.hostname", count);
4048 ret = dict_set_str (friends, key, "localhost");
4052 snprintf (key, 256, "friend%d.connected", count);
4053 ret = dict_set_int32 (friends, key, 1);
4058 ret = dict_set_int32 (friends, "count", count);
4062 ret = dict_allocate_and_serialize (friends, &rsp.friends.friends_val,
4063 &rsp.friends.friends_len);
4072 dict_unref (friends);
4076 glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
4077 (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
4079 GF_FREE (rsp.friends.friends_val);
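/* Illustrative sketch (not in the original source): the reply built
 * above encodes peers as "friend<N>.uuid", "friend<N>.hostname" and
 * "friend<N>.connected" keys plus a "count" entry, as seen in the
 * localhost branch. A consumer of the unserialized dict would read it
 * back the same way (hostname is a hypothetical output variable):
 */
#if 0
        for (i = 1; i <= count; i++) {
                snprintf (key, sizeof (key), "friend%d.hostname", i);
                ret = dict_get_str (friends, key, &hostname);
                if (ret)
                        break;
        }
#endif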
4085 glusterd_get_volumes (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
4088 glusterd_conf_t *priv = NULL;
4089 glusterd_volinfo_t *entry = NULL;
4091 dict_t *volumes = NULL;
4092 gf_cli_rsp rsp = {0,};
4093 char *volname = NULL;
4095 priv = THIS->private;
4098 volumes = dict_new ();
4100 gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
4101 GD_MSG_NO_MEMORY, "Out of Memory");
4105 if (cds_list_empty (&priv->volumes)) {
4110 if (flags == GF_CLI_GET_VOLUME_ALL) {
4111 cds_list_for_each_entry (entry, &priv->volumes, vol_list) {
4112 ret = glusterd_add_volume_detail_to_dict (entry,
4121 } else if (flags == GF_CLI_GET_NEXT_VOLUME) {
4122 ret = dict_get_str (dict, "volname", &volname);
4125 if (priv->volumes.next) {
4126 entry = cds_list_entry (priv->volumes.next,
4131 ret = glusterd_volinfo_find (volname, &entry);
4134 entry = cds_list_entry (entry->vol_list.next,
4139 if (&entry->vol_list == &priv->volumes) {
4142 ret = glusterd_add_volume_detail_to_dict (entry,
4149 } else if (flags == GF_CLI_GET_VOLUME) {
4150 ret = dict_get_str (dict, "volname", &volname);
4154 ret = glusterd_volinfo_find (volname, &entry);
4158 ret = glusterd_add_volume_detail_to_dict (entry,
4167 ret = dict_set_int32 (volumes, "count", count);
4170 ret = dict_allocate_and_serialize (volumes, &rsp.dict.dict_val,
4171 &rsp.dict.dict_len);
4181 glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
4182 (xdrproc_t)xdr_gf_cli_rsp);
4186 dict_unref (volumes);
4188 GF_FREE (rsp.dict.dict_val);
4193 __glusterd_handle_status_volume (rpcsvc_request_t *req)
4197 dict_t *dict = NULL;
4199 gf_cli_req cli_req = {{0,}};
4200 glusterd_op_t cli_op = GD_OP_STATUS_VOLUME;
4201 char err_str[2048] = {0,};
4202 xlator_t *this = NULL;
4203 glusterd_conf_t *conf = NULL;
4208 conf = this->private;
4211 ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4213 //failed to decode msg;
4214 gf_msg (this->name, GF_LOG_ERROR, 0,
4215 GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
4216 "request received from cli");
4217 req->rpc_err = GARBAGE_ARGS;
4221 if (cli_req.dict.dict_len > 0) {
4225 ret = dict_unserialize (cli_req.dict.dict_val,
4226 cli_req.dict.dict_len, &dict);
4228 gf_msg (this->name, GF_LOG_ERROR, 0,
4229 GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to "
4230 "unserialize buffer");
4231 snprintf (err_str, sizeof (err_str), "Unable to decode "
4238 ret = dict_get_uint32 (dict, "cmd", &cmd);
4242 if (!(cmd & GF_CLI_STATUS_ALL)) {
4243 ret = dict_get_str (dict, "volname", &volname);
4245 snprintf (err_str, sizeof (err_str), "Unable to get "
4247 gf_msg (this->name, GF_LOG_ERROR, 0,
4248 GD_MSG_VOL_NOT_FOUND, "%s", err_str);
4251 gf_msg (this->name, GF_LOG_INFO, 0,
4252 GD_MSG_STATUS_VOL_REQ_RCVD,
4253 "Received status volume req for volume %s", volname);
4256 if ((cmd & GF_CLI_STATUS_QUOTAD) &&
4257 (conf->op_version == GD_OP_VERSION_MIN)) {
4258 snprintf (err_str, sizeof (err_str), "The cluster is operating "
4259 "at version 1. Getting the status of quotad is not "
4260 "allowed in this state.");
4265 if ((cmd & GF_CLI_STATUS_SNAPD) &&
4266 (conf->op_version < GD_OP_VERSION_3_6_0)) {
4267 snprintf (err_str, sizeof (err_str), "The cluster is operating "
4268 "at a lesser version than %d. Getting the status of "
4269 "snapd is not allowed in this state",
4270 GD_OP_VERSION_3_6_0);
4275 if ((cmd & GF_CLI_STATUS_BITD) &&
4276 (conf->op_version < GD_OP_VERSION_3_7_0)) {
4277 snprintf (err_str, sizeof (err_str), "The cluster is operating "
4278 "at a lesser version than %d. Getting the status of "
4279 "bitd is not allowed in this state",
4280 GD_OP_VERSION_3_7_0);
4285 if ((cmd & GF_CLI_STATUS_SCRUB) &&
4286 (conf->op_version < GD_OP_VERSION_3_7_0)) {
4287 snprintf (err_str, sizeof (err_str), "The cluster is operating "
4288 "at a lesser version than %d. Getting the status of "
4289 "scrub is not allowed in this state",
4290 GD_OP_VERSION_3_7_0);
4295 ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
4300 if (err_str[0] == '\0')
4301 snprintf (err_str, sizeof (err_str),
4302 "Operation failed");
4303 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
4306 free (cli_req.dict.dict_val);
4312 glusterd_handle_status_volume (rpcsvc_request_t *req)
4314 return glusterd_big_locked_handler (req,
4315 __glusterd_handle_status_volume);
4319 __glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
4322 gf_cli_req cli_req = {{0,}};
4323 glusterd_op_t cli_op = GD_OP_CLEARLOCKS_VOLUME;
4324 char *volname = NULL;
4325 dict_t *dict = NULL;
4326 char err_str[2048] = {0,};
4327 xlator_t *this = NULL;
4334 ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4336 gf_msg (this->name, GF_LOG_ERROR, 0,
4337 GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
4338 "request received from cli");
4339 req->rpc_err = GARBAGE_ARGS;
4343 if (cli_req.dict.dict_len) {
4346 ret = dict_unserialize (cli_req.dict.dict_val,
4347 cli_req.dict.dict_len,
4350 gf_msg (this->name, GF_LOG_ERROR, 0,
4351 GD_MSG_DICT_UNSERIALIZE_FAIL,
4352 "failed to unserialize req-buffer to"
4354 snprintf (err_str, sizeof (err_str), "unable to decode "
4361 gf_msg (this->name, GF_LOG_ERROR, 0,
4362 GD_MSG_CLI_REQ_EMPTY, "Empty cli request.");
4366 ret = dict_get_str (dict, "volname", &volname);
4368 snprintf (err_str, sizeof (err_str), "Unable to get volume "
4370 gf_msg (this->name, GF_LOG_ERROR, 0,
4371 GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
4375 gf_msg (this->name, GF_LOG_INFO, 0,
4376 GD_MSG_CLRCLK_VOL_REQ_RCVD, "Received clear-locks volume req "
4377 "for volume %s", volname);
4379 ret = glusterd_op_begin_synctask (req, GD_OP_CLEARLOCKS_VOLUME, dict);
4383 if (err_str[0] == '\0')
4384 snprintf (err_str, sizeof (err_str),
4385 "Operation failed");
4386 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
4389 free (cli_req.dict.dict_val);
4395 glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
4397 return glusterd_big_locked_handler (req,
4398 __glusterd_handle_cli_clearlocks_volume);
4402 get_volinfo_from_brickid (char *brickid, glusterd_volinfo_t **volinfo)
4405 char *volid_str = NULL;
4407 char *brickid_dup = NULL;
4409 xlator_t *this = NULL;
4413 GF_ASSERT (brickid);
4415 brickid_dup = gf_strdup (brickid);
4419 volid_str = brickid_dup;
4420 brick = strchr (brickid_dup, ':');
4422 gf_msg (this->name, GF_LOG_ERROR, 0,
4423 GD_MSG_BRICK_NOT_FOUND,
4430 gf_uuid_parse (volid_str, volid);
4431 ret = glusterd_volinfo_find_by_volume_id (volid, volinfo);
4433 /* Check if it is a snapshot volume */
4434 ret = glusterd_snap_volinfo_find_by_volume_id (volid, volinfo);
4436 gf_msg (this->name, GF_LOG_WARNING, 0,
4437 GD_MSG_VOLINFO_GET_FAIL,
4438 "Failed to find volinfo");
4445 GF_FREE (brickid_dup);
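/* Illustrative sketch (not in the original source): a brickid is the
 * volume uuid and the brick identifier joined by a colon; the code above
 * splits only at the first ':', so any later colons stay in the brick
 * part. Splitting a made-up brickid by hand:
 */
#if 0
        char  brickid[] = "<vol-uuid>:host1:/bricks/b1";  /* made-up */
        char *brick     = strchr (brickid, ':');

        if (brick)
                *brick++ = '\0';   /* left half: volume id string,
                                      brick: everything after the colon */
#endif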
4450 __glusterd_handle_barrier (rpcsvc_request_t *req)
4453 xlator_t *this = NULL;
4454 gf_cli_req cli_req = {{0,}};
4455 dict_t *dict = NULL;
4456 char *volname = NULL;
4462 ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4464 gf_msg (this->name, GF_LOG_ERROR, 0,
4465 GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
4466 "request received from cli");
4467 req->rpc_err = GARBAGE_ARGS;
4471 if (!cli_req.dict.dict_len) {
4481 ret = dict_unserialize (cli_req.dict.dict_val, cli_req.dict.dict_len,
4484 gf_msg (this->name, GF_LOG_ERROR, 0,
4485 GD_MSG_DICT_UNSERIALIZE_FAIL, "Failed to unserialize "
4486 "request dictionary.");
4490 ret = dict_get_str (dict, "volname", &volname);
4492 gf_msg (this->name, GF_LOG_ERROR, 0,
4493 GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
4494 "Volname not present in "
4498 gf_msg (this->name, GF_LOG_INFO, 0,
4499 GD_MSG_BARRIER_VOL_REQ_RCVD,
4500 "Received barrier volume request for "
4501 "volume %s", volname);
4503 ret = glusterd_op_begin_synctask (req, GD_OP_BARRIER, dict);
4507 ret = glusterd_op_send_cli_response (GD_OP_BARRIER, ret, 0, req,
4508 dict, "Operation failed");
4510 free (cli_req.dict.dict_val);
4515 glusterd_handle_barrier (rpcsvc_request_t *req)
4517 return glusterd_big_locked_handler (req, __glusterd_handle_barrier);
4521 glusterd_get_volume_opts (rpcsvc_request_t *req, dict_t *dict)
4527 char *orig_key = NULL;
4528 char *key_fixed = NULL;
4529 char *volname = NULL;
4530 char err_str[2048] = {0,};
4531 char dict_key[50] = {0,};
4532 xlator_t *this = NULL;
4533 glusterd_conf_t *priv = NULL;
4534 glusterd_volinfo_t *volinfo = NULL;
4535 gf_cli_rsp rsp = {0,};
4536 char op_version_buff[10] = {0,};
4541 priv = this->private;
4547 ret = dict_get_str (dict, "volname", &volname);
4549 snprintf (err_str, sizeof (err_str), "Failed to get volume "
4550 "name while handling get volume option command");
4551 gf_msg (this->name, GF_LOG_ERROR, 0,
4552 GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
4556 ret = dict_get_str (dict, "key", &key);
4558 snprintf (err_str, sizeof (err_str), "Failed to get key "
4559 "while handling get volume option for %s", volname);
4560 gf_msg (this->name, GF_LOG_ERROR, 0,
4561 GD_MSG_DICT_GET_FAILED, "%s", err_str);
4564 gf_msg_debug (this->name, 0, "Received get volume opt request for "
4565 "volume %s", volname);
4567 ret = glusterd_volinfo_find (volname, &volinfo);
4569 snprintf (err_str, sizeof(err_str),
4570 FMTSTR_CHECK_VOL_EXISTS, volname);
4571 gf_msg (this->name, GF_LOG_ERROR, 0,
4572 GD_MSG_VOL_NOT_FOUND, FMTSTR_CHECK_VOL_EXISTS,
4576 if (strcmp(key, "all")) {
4577 exists = glusterd_check_option_exists (key, &key_fixed);
4579 snprintf (err_str, sizeof (err_str), "Option "
4580 "with name: %s does not exist", key);
4581 gf_msg (this->name, GF_LOG_ERROR, EINVAL,
4582 GD_MSG_UNKNOWN_KEY, "%s",
4585 snprintf (err_str + ret,
4586 sizeof (err_str) - ret,
4596 if (strcmp (key, "cluster.op-version") == 0) {
4597 sprintf (dict_key, "key%d", count);
4598 ret = dict_set_str(dict, dict_key, key);
4600 gf_msg (this->name, GF_LOG_ERROR, 0,
4601 GD_MSG_DICT_SET_FAILED, "Failed to "
4602 "set %s in dictionary", key);
4605 sprintf (dict_key, "value%d", count);
4606 sprintf (op_version_buff, "%d", priv->op_version);
4607 ret = dict_set_str (dict, dict_key, op_version_buff);
4609 gf_msg (this->name, GF_LOG_ERROR, 0,
4610 GD_MSG_DICT_SET_FAILED, "Failed to "
4611 "set value for key %s in dictionary",
4616 else if (strcmp (key, "config.memory-accounting") == 0) {
4617 sprintf (dict_key, "key%d", count);
4618 ret = dict_set_str(dict, dict_key, key);
4620 gf_msg (this->name, GF_LOG_ERROR, 0,
4621 GD_MSG_DICT_SET_FAILED, "Failed to "
4622 "set %s in dictionary", key);
4625 sprintf (dict_key, "value%d", count);
4627 if (volinfo->memory_accounting)
4628 ret = dict_set_str(dict, dict_key,"Enabled");
4630 ret = dict_set_str(dict, dict_key,"Disabled");
4632 gf_msg (this->name, GF_LOG_ERROR, 0,
4633 GD_MSG_DICT_SET_FAILED, "Failed to "
4634 "set value for key %s in dictionary",
4639 else if (strcmp (key, "config.transport") == 0) {
4640 sprintf (dict_key, "key%d", count);
4641 ret = dict_set_str(dict, dict_key, key);
4643 gf_msg (this->name, GF_LOG_ERROR, 0,
4644 GD_MSG_DICT_SET_FAILED, "Failed to "
4645 "set %s in dictionary", key);
4648 sprintf (dict_key, "value%d", count);
4650 if (volinfo->transport_type == GF_TRANSPORT_RDMA)
4651 ret = dict_set_str(dict, dict_key,"rdma");
4652 else if (volinfo->transport_type == GF_TRANSPORT_TCP)
4653 ret = dict_set_str(dict, dict_key,"tcp");
4654 else if (volinfo->transport_type ==
4655 GF_TRANSPORT_BOTH_TCP_RDMA)
4656 ret = dict_set_str(dict, dict_key,"tcp,rdma");
4658 ret = dict_set_str(dict, dict_key,"none");
4661 gf_msg (this->name, GF_LOG_ERROR, 0,
4662 GD_MSG_DICT_SET_FAILED, "Failed to "
4663 "set value for key %s in dictionary",
4669 ret = glusterd_get_default_val_for_volopt (dict,
4674 if (ret && !rsp.op_errstr) {
4675 snprintf (err_str, sizeof(err_str),
4676 "Failed to fetch the value of"
4677 " %s, check log file for more"
4682 /* Request is for a single option, explicitly set count to 1
4683 * in the dictionary.
4685 ret = dict_set_int32 (dict, "count", 1);
4687 gf_msg (this->name, GF_LOG_ERROR, errno,
4688 GD_MSG_DICT_SET_FAILED, "Failed to set count "
4689 "value in the dictionary");
4693 /* Handle the "all" volume option request */
4694 ret = glusterd_get_default_val_for_volopt (dict, _gf_true, NULL,
4695 NULL, volinfo->dict,
4697 if (ret && !rsp.op_errstr) {
4698 snprintf (err_str, sizeof(err_str),
4699 "Failed to fetch the value of all volume "
4700 "options, check log file for more details");
4708 rsp.op_errstr = err_str;
4716 ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
4717 &rsp.dict.dict_len);
4719 glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
4720 (xdrproc_t)xdr_gf_cli_rsp);
4725 __glusterd_handle_get_vol_opt (rpcsvc_request_t *req)
4728 gf_cli_req cli_req = {{0,}};
4729 dict_t *dict = NULL;
4730 char err_str[2048] = {0,};
4731 xlator_t *this = NULL;
4738 ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4740 snprintf (err_str, sizeof (err_str), "Failed to decode "
4741 "request received from cli");
4742 gf_msg (this->name, GF_LOG_ERROR, 0,
4743 GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
4744 req->rpc_err = GARBAGE_ARGS;
4748 if (cli_req.dict.dict_len) {
4749 /* Unserialize the dictionary */
4752 ret = dict_unserialize (cli_req.dict.dict_val,
4753 cli_req.dict.dict_len,
4756 gf_msg (this->name, GF_LOG_ERROR, 0,
4757 GD_MSG_DICT_UNSERIALIZE_FAIL,
4759 "unserialize req-buffer to dictionary");
4760 snprintf (err_str, sizeof (err_str), "Unable to decode "
4764 dict->extra_stdfree = cli_req.dict.dict_val;
4767 ret = glusterd_get_volume_opts (req, dict);
4777 glusterd_handle_get_vol_opt (rpcsvc_request_t *req)
4779 return glusterd_big_locked_handler (req, __glusterd_handle_get_vol_opt);
4782 get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
4784 glusterd_volinfo_t *volinfo = NULL;
4785 char *volid_str = NULL;
4787 char *brickid_dup = NULL;
4791 brickid_dup = gf_strdup (brickid);
4795 volid_str = brickid_dup;
4796 brick = strchr (brickid_dup, ':');
4797 if (!volid_str || !brick)
4802 gf_uuid_parse (volid_str, volid);
4803 ret = glusterd_volinfo_find_by_volume_id (volid, &volinfo);
4805                 /* Check if it is a snapshot volume */
4806 ret = glusterd_snap_volinfo_find_by_volume_id (volid, &volinfo);
4811 ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
4818 GF_FREE (brickid_dup);
4823 __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
4824 rpc_clnt_event_t event, void *data)
4826 char *brickid = NULL;
4828 glusterd_conf_t *conf = NULL;
4829 glusterd_brickinfo_t *brickinfo = NULL;
4830 glusterd_volinfo_t *volinfo = NULL;
4831 xlator_t *this = NULL;
4837 ret = get_brickinfo_from_brickid (brickid, &brickinfo);
4843 conf = this->private;
4847 case RPC_CLNT_CONNECT:
4848                 /* If a node, on coming back up, starts a brick
4849                  * before the handshake, and the notification arrives after
4850                  * the handshake is done, then we need to check whether this
4851                  * is a restored brick with a snapshot pending. If so, we
4852                  * need to stop the brick
4854 if (brickinfo->snap_status == -1) {
4855 gf_msg (this->name, GF_LOG_INFO, 0,
4856 GD_MSG_SNAPSHOT_PENDING,
4857 "Snapshot is pending on %s:%s. "
4858 "Hence not starting the brick",
4859 brickinfo->hostname,
4861 ret = get_volinfo_from_brickid (brickid, &volinfo);
4863 gf_msg (this->name, GF_LOG_ERROR, 0,
4864 GD_MSG_VOLINFO_GET_FAIL,
4865 "Failed to get volinfo from "
4866 "brickid(%s)", brickid);
4870 ret = glusterd_brick_stop (volinfo, brickinfo,
4873 gf_msg (THIS->name, GF_LOG_ERROR, 0,
4874 GD_MSG_BRICK_STOP_FAIL,
4875 "Unable to stop %s:%s",
4876 brickinfo->hostname, brickinfo->path);
4882 gf_msg_debug (this->name, 0, "Connected to %s:%s",
4883 brickinfo->hostname, brickinfo->path);
4884 glusterd_set_brick_status (brickinfo, GF_BRICK_STARTED);
4885 ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
4889 case RPC_CLNT_DISCONNECT:
4890 if (glusterd_is_brick_started (brickinfo))
4891 gf_msg (this->name, GF_LOG_INFO, 0,
4892 GD_MSG_BRICK_DISCONNECTED,
4893 "Brick %s:%s has disconnected from glusterd.",
4894 brickinfo->hostname, brickinfo->path);
4896 glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED);
4899 case RPC_CLNT_DESTROY:
4904 gf_msg_trace (this->name, 0,
4905 "got some other RPC event %d", event);
4914 glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
4915 rpc_clnt_event_t event, void *data)
4917 return glusterd_big_locked_notify (rpc, mydata, event, data,
4918 __glusterd_brick_rpc_notify);
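/* Illustrative sketch (not in the original source): once the snapshot
 * special case is out of the way, the notify handler above reduces to a
 * small rpc-event to brick-status mapping:
 */
#if 0
        switch (event) {
        case RPC_CLNT_CONNECT:
                glusterd_set_brick_status (brickinfo, GF_BRICK_STARTED);
                break;
        case RPC_CLNT_DISCONNECT:
                glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED);
                break;
        default:
                break;          /* other events are only logged */
        }
#endif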
4922 glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx, int32_t op_errno)
4925 glusterd_friend_sm_event_t *new_event = NULL;
4926 glusterd_peerinfo_t *peerinfo = NULL;
4927 rpcsvc_request_t *req = NULL;
4928 char *errstr = NULL;
4929 dict_t *dict = NULL;
4931 GF_ASSERT (peerctx);
4934 peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
4936 gf_msg_debug (THIS->name, 0, "Could not find peer %s(%s). "
4937 "Peer could have been deleted.", peerctx->peername,
4938 uuid_utoa (peerctx->peerid));
4943 req = peerctx->args.req;
4944 dict = peerctx->args.dict;
4945 errstr = peerctx->errstr;
4947 ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_REMOVE_FRIEND,
4951 gf_msg (THIS->name, GF_LOG_WARNING, 0,
4952 GD_MSG_EVENT_NEW_GET_FAIL,
4953 "Unable to find the request for responding "
4954 "to User (%s)", peerinfo->hostname);
4958 glusterd_xfer_cli_probe_resp (req, -1, op_errno, errstr,
4960 peerinfo->port, dict);
4962 new_event->peername = gf_strdup (peerinfo->hostname);
4963 gf_uuid_copy (new_event->peerid, peerinfo->uuid);
4964 ret = glusterd_friend_sm_inject_event (new_event);
4967 gf_msg ("glusterd", GF_LOG_ERROR, 0,
4968 GD_MSG_EVENT_INJECT_FAIL,
4969 "Unable to create event for removing peer %s",
4970 peerinfo->hostname);
4979 __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
4980 rpc_clnt_event_t event, void *data)
4982 xlator_t *this = NULL;
4983 glusterd_conf_t *conf = NULL;
4985 int32_t op_errno = ENOTCONN;
4986 glusterd_peerinfo_t *peerinfo = NULL;
4987 glusterd_peerctx_t *peerctx = NULL;
4988 gf_boolean_t quorum_action = _gf_false;
4989 glusterd_volinfo_t *volinfo = NULL;
4997 conf = this->private;
4999 if (RPC_CLNT_DESTROY == event) {
5000 GF_FREE (peerctx->errstr);
5001 GF_FREE (peerctx->peername);
5008 peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
5010                 /* Peerinfo should be available at this point if it's a connect
5011                  * event. Not finding it means that something terrible has
5012                  * happened. For non-connect events we might end up having a null
5013                  * peerinfo, so log at debug level.
5015 gf_msg (THIS->name, (RPC_CLNT_CONNECT == event) ?
5016 GF_LOG_CRITICAL : GF_LOG_DEBUG, ENOENT,
5017 GD_MSG_PEER_NOT_FOUND, "Could not find peer "
5018 "%s(%s)", peerctx->peername,
5019 uuid_utoa (peerctx->peerid));
5026 case RPC_CLNT_CONNECT:
5028 rpc_clnt_set_connected (&rpc->conn);
5029 gf_msg_debug (this->name, 0, "got RPC_CLNT_CONNECT");
5030 peerinfo->connected = 1;
5031 peerinfo->quorum_action = _gf_true;
5032 peerinfo->generation = uatomic_add_return
5033 (&conf->generation, 1);
5034 peerctx->peerinfo_gen = peerinfo->generation;
5036 ret = glusterd_peer_dump_version (this, rpc, peerctx);
5038 gf_msg (this->name, GF_LOG_ERROR, 0,
5039 GD_MSG_HANDSHAKE_FAILED,
5040 "glusterd handshake failed");
5044 case RPC_CLNT_DISCONNECT:
5046 rpc_clnt_unset_connected (&rpc->conn);
5047 gf_msg (this->name, GF_LOG_INFO, 0,
5048 GD_MSG_PEER_DISCONNECTED,
5049 "Peer <%s> (<%s>), in state <%s>, has disconnected "
5051 peerinfo->hostname, uuid_utoa (peerinfo->uuid),
5052 glusterd_friend_sm_state_name_get (peerinfo->state.state));
5054 if (peerinfo->connected) {
5055 if (conf->op_version < GD_OP_VERSION_3_6_0) {
5056 glusterd_get_lock_owner (&uuid);
5057 if (!gf_uuid_is_null (uuid) &&
5058 !gf_uuid_compare (peerinfo->uuid, uuid))
5059 glusterd_unlock (peerinfo->uuid);
5061 cds_list_for_each_entry (volinfo,
5064 ret = glusterd_mgmt_v3_unlock
5071 GD_MSG_MGMTV3_UNLOCK_FAIL,
5072 "Lock not released "
5078 op_errno = GF_PROBE_ANOTHER_CLUSTER;
5082 if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
5083 (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
5084 peerinfo->quorum_contrib = QUORUM_DOWN;
5085 quorum_action = _gf_true;
5086 peerinfo->quorum_action = _gf_false;
5089 /* Remove peer if it is not a friend and connection/handshake
5090 * fails, and notify cli. Happens only during probe.
5092 if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
5093 glusterd_friend_remove_notify (peerctx, op_errno);
5097 peerinfo->connected = 0;
5102 gf_msg_trace (this->name, 0,
5103 "got some other RPC event %d", event);
5111 glusterd_friend_sm ();
5114 glusterd_do_quorum_action ();
5119 glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
5120 rpc_clnt_event_t event, void *data)
5122 return glusterd_big_locked_notify (rpc, mydata, event, data,
5123 __glusterd_peer_rpc_notify);
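/* Illustrative sketch (not in the original source): peerctx stores a
 * generation number rather than a peerinfo pointer because the peerinfo
 * can be freed between rpc events. A lookup by generation either finds
 * the live object or fails cleanly, never dereferencing freed memory:
 */
#if 0
        peerinfo = glusterd_peerinfo_find_by_generation
                                        (peerctx->peerinfo_gen);
        if (!peerinfo)
                goto out;       /* peer already deleted: nothing to do */
#endif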
5127 glusterd_null (rpcsvc_request_t *req)
5133 rpcsvc_actor_t gd_svc_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
5134 [GLUSTERD_MGMT_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA},
5135 [GLUSTERD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, 0, DRC_NA},
5136 [GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, 0, DRC_NA},
5137 [GLUSTERD_MGMT_STAGE_OP] = { "STAGE_OP", GLUSTERD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, 0, DRC_NA},
5138 [GLUSTERD_MGMT_COMMIT_OP] = { "COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, 0, DRC_NA},
5141 struct rpcsvc_program gd_svc_mgmt_prog = {
5142 .progname = "GlusterD svc mgmt",
5143 .prognum = GD_MGMT_PROGRAM,
5144 .progver = GD_MGMT_VERSION,
5145 .numactors = GLUSTERD_MGMT_MAXVALUE,
5146 .actors = gd_svc_mgmt_actors,
5147 .synctask = _gf_true,
5150 rpcsvc_actor_t gd_svc_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
5151 [GLUSTERD_FRIEND_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA},
5152 [GLUSTERD_PROBE_QUERY] = { "PROBE_QUERY", GLUSTERD_PROBE_QUERY, glusterd_handle_probe_query, NULL, 0, DRC_NA},
5153 [GLUSTERD_FRIEND_ADD] = { "FRIEND_ADD", GLUSTERD_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, 0, DRC_NA},
5154 [GLUSTERD_FRIEND_REMOVE] = { "FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, 0, DRC_NA},
5155 [GLUSTERD_FRIEND_UPDATE] = { "FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, 0, DRC_NA},
5158 struct rpcsvc_program gd_svc_peer_prog = {
5159 .progname = "GlusterD svc peer",
5160 .prognum = GD_FRIEND_PROGRAM,
5161 .progver = GD_FRIEND_VERSION,
5162 .numactors = GLUSTERD_FRIEND_MAXVALUE,
5163 .actors = gd_svc_peer_actors,
5164 .synctask = _gf_false,
5169 rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
5170 [GLUSTER_CLI_PROBE] = { "CLI_PROBE", GLUSTER_CLI_PROBE, glusterd_handle_cli_probe, NULL, 0, DRC_NA},
5171 [GLUSTER_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GLUSTER_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL, 0, DRC_NA},
5172 [GLUSTER_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL, 0, DRC_NA},
5173 [GLUSTER_CLI_DEPROBE] = { "FRIEND_REMOVE", GLUSTER_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, 0, DRC_NA},
5174 [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
5175 [GLUSTER_CLI_UUID_RESET] = { "UUID_RESET", GLUSTER_CLI_UUID_RESET, glusterd_handle_cli_uuid_reset, NULL, 0, DRC_NA},
5176 [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
5177 [GLUSTER_CLI_START_VOLUME] = { "START_VOLUME", GLUSTER_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, 0, DRC_NA},
5178 [GLUSTER_CLI_STOP_VOLUME] = { "STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, 0, DRC_NA},
5179 [GLUSTER_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, 0, DRC_NA},
5180 [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
5181 [GLUSTER_CLI_ADD_BRICK] = { "ADD_BRICK", GLUSTER_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, 0, DRC_NA},
5182 [GLUSTER_CLI_ATTACH_TIER] = { "ATTACH_TIER", GLUSTER_CLI_ATTACH_TIER, glusterd_handle_attach_tier, NULL, 0, DRC_NA},
5183 [GLUSTER_CLI_DETACH_TIER] = { "DETACH_TIER", GLUSTER_CLI_DETACH_TIER, glusterd_handle_detach_tier, NULL, 0, DRC_NA},
5184 [GLUSTER_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, 0, DRC_NA},
5185 [GLUSTER_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, 0, DRC_NA},
5186 [GLUSTER_CLI_LOG_ROTATE] = { "LOG FILENAME", GLUSTER_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, 0, DRC_NA},
5187 [GLUSTER_CLI_SET_VOLUME] = { "SET_VOLUME", GLUSTER_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, 0, DRC_NA},
5188 [GLUSTER_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, 0, DRC_NA},
5189 [GLUSTER_CLI_RESET_VOLUME] = { "RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, 0, DRC_NA},
5190 [GLUSTER_CLI_FSM_LOG] = { "FSM_LOG", GLUSTER_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, 0, DRC_NA},
5191 [GLUSTER_CLI_GSYNC_SET] = { "GSYNC_SET", GLUSTER_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, 0, DRC_NA},
5192 [GLUSTER_CLI_PROFILE_VOLUME] = { "STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME, glusterd_handle_cli_profile_volume, NULL, 0, DRC_NA},
5193 [GLUSTER_CLI_QUOTA] = { "QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota, NULL, 0, DRC_NA},
5194 [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
5195 [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
5196 [GLUSTER_CLI_MOUNT] = { "MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount, NULL, 1, DRC_NA},
5197 [GLUSTER_CLI_UMOUNT] = { "UMOUNT", GLUSTER_CLI_UMOUNT, glusterd_handle_umount, NULL, 1, DRC_NA},
5198 [GLUSTER_CLI_HEAL_VOLUME] = { "HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME, glusterd_handle_cli_heal_volume, NULL, 0, DRC_NA},
5199 [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, 0, DRC_NA},
5200 [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
5201 [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, 0, DRC_NA},
5202 [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE, glusterd_handle_copy_file, NULL, 0, DRC_NA},
5203 [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA},
5204 [GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot, NULL, 0, DRC_NA},
5205 [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME", GLUSTER_CLI_BARRIER_VOLUME, glusterd_handle_barrier, NULL, 0, DRC_NA},
5206 [GLUSTER_CLI_GANESHA] = { "GANESHA" , GLUSTER_CLI_GANESHA, glusterd_handle_ganesha_cmd, NULL, 0, DRC_NA},
5207 [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", GLUSTER_CLI_GET_VOL_OPT, glusterd_handle_get_vol_opt, NULL, 0, DRC_NA},
5208 [GLUSTER_CLI_BITROT] = {"BITROT", GLUSTER_CLI_BITROT, glusterd_handle_bitrot, NULL, 0, DRC_NA},
5211 struct rpcsvc_program gd_svc_cli_prog = {
5212 .progname = "GlusterD svc cli",
5213 .prognum = GLUSTER_CLI_PROGRAM,
5214 .progver = GLUSTER_CLI_VERSION,
5215 .numactors = GLUSTER_CLI_MAXVALUE,
5216 .actors = gd_svc_cli_actors,
5217 .synctask = _gf_true,
5221  * This set of RPC progs is deemed to be trusted. Most of the actors support
5222  * read-only queries, the only exceptions being MOUNT/UMOUNT, which are required
5223  * by geo-replication to support unprivileged master -> slave sessions.
5225 rpcsvc_actor_t gd_svc_cli_trusted_actors[GLUSTER_CLI_MAXVALUE] = {
5226 [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
5227 [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
5228 [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
5229 [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
5230 [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
5231 [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
5232 [GLUSTER_CLI_MOUNT] = { "MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount, NULL, 1, DRC_NA},
5233 [GLUSTER_CLI_UMOUNT] = { "UMOUNT", GLUSTER_CLI_UMOUNT, glusterd_handle_umount, NULL, 1, DRC_NA},
5236 struct rpcsvc_program gd_svc_cli_trusted_progs = {
5237 .progname = "GlusterD svc cli read-only",
5238 .prognum = GLUSTER_CLI_PROGRAM,
5239 .progver = GLUSTER_CLI_VERSION,
5240 .numactors = GLUSTER_CLI_MAXVALUE,
5241 .actors = gd_svc_cli_trusted_actors,
5242 .synctask = _gf_true,
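/* Illustrative sketch (not in the original source): each actor table
 * above is exposed through its rpcsvc_program wrapper; registration with
 * the rpc service happens elsewhere in glusterd. Assuming the usual
 * rpcsvc_program_register() entry point, a caller would do:
 */
#if 0
        ret = rpcsvc_program_register (svc, &gd_svc_cli_prog);
        if (ret)
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_RPC_CREATE_FAIL,
                        "failed to register the CLI program");
#endif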