libglusterfs: replace default functions with generated versions
[obnox/glusterfs.git] / xlators / mgmt / glusterd / src / glusterd-handler.c
1 /*
2    Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
3    This file is part of GlusterFS.
4
5    This file is licensed to you under your choice of the GNU Lesser
6    General Public License, version 3 or any later version (LGPLv3 or
7    later), or the GNU General Public License, version 2 (GPLv2), in all
8    cases as published by the Free Software Foundation.
9 */
10 #include <inttypes.h>
11
12 #include "globals.h"
13 #include "glusterfs.h"
14 #include "compat.h"
15 #include "dict.h"
16 #include "protocol-common.h"
17 #include "xlator.h"
18 #include "logging.h"
19 #include "timer.h"
20 #include "defaults.h"
21 #include "compat.h"
22 #include "compat-errno.h"
23 #include "statedump.h"
24 #include "run.h"
25 #include "glusterd-mem-types.h"
26 #include "glusterd.h"
27 #include "glusterd-sm.h"
28 #include "glusterd-op-sm.h"
29 #include "glusterd-utils.h"
30 #include "glusterd-server-quorum.h"
31 #include "glusterd-store.h"
32 #include "glusterd-locks.h"
33 #include "glusterd-snapshot-utils.h"
34
35 #include "glusterd1-xdr.h"
36 #include "cli1-xdr.h"
37 #include "xdr-generic.h"
38 #include "rpc-clnt.h"
39 #include "glusterd-volgen.h"
40 #include "glusterd-mountbroker.h"
41 #include "glusterd-messages.h"
42
43 #include <sys/resource.h>
44 #include <inttypes.h>
45
46 #include "common-utils.h"
47
48 #include "globals.h"
49 #include "glusterd-syncop.h"
50 #include "glusterd-messages.h"
51
52 #ifdef HAVE_BD_XLATOR
53 #include <lvm2app.h>
54 #endif
55
56 extern glusterd_op_info_t opinfo;
57
58 int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
59                                 rpc_clnt_event_t event,
60                                 void *data, rpc_clnt_notify_t notify_fn)
61 {
62         glusterd_conf_t *priv = THIS->private;
63         int              ret   = -1;
64
65         synclock_lock (&priv->big_lock);
66         ret = notify_fn (rpc, mydata, event, data);
67         synclock_unlock (&priv->big_lock);
68
69         return ret;
70 }
71
72 int glusterd_big_locked_handler (rpcsvc_request_t *req, rpcsvc_actor actor_fn)
73 {
74         glusterd_conf_t *priv = THIS->private;
75         int             ret   = -1;
76
77         synclock_lock (&priv->big_lock);
78         ret = actor_fn (req);
79         synclock_unlock (&priv->big_lock);
80
81         return ret;
82 }
83
/* Handle an incoming friend-add (probe) request from a peer.
 *
 * Looks up the sender among known peers, builds an
 * GD_FRIEND_EVENT_RCVD_FRIEND_REQ state-machine event carrying the
 * peer's volume dict, and injects it into the friend state machine.
 *
 * Ownership notes: friend_req->vols.vols_val is XDR-allocated (freed
 * with plain free()); on success it is handed to the dict via
 * extra_stdfree, otherwise it is freed here.  Returns 0 on success,
 * GLUSTERD_CONNECTION_AWAITED if the peer is not yet connected, or a
 * negative error (in which case event/ctx/dict are cleaned up here). */
static int
glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t  uuid,
                            char *hostname, int port,
                            gd1_mgmt_friend_req *friend_req)
{
        int                             ret = -1;
        glusterd_peerinfo_t             *peerinfo = NULL;
        glusterd_friend_sm_event_t      *event = NULL;
        glusterd_friend_req_ctx_t       *ctx = NULL;
        char                            rhost[UNIX_PATH_MAX + 1] = {0};
        uuid_t                          friend_uuid = {0};
        dict_t                          *dict = NULL;

        /* NOTE(review): friend_uuid is parsed here but never read again in
         * this function — looks vestigial; confirm before removing. */
        gf_uuid_parse (uuid_utoa (uuid), friend_uuid);
        if (!port)
                port = GF_DEFAULT_BASE_PORT;

        /* NOTE(review): return value ignored — rhost may stay empty if the
         * remote hostname cannot be resolved; the lookup below still runs. */
        ret = glusterd_remote_hostname_get (req, rhost, sizeof (rhost));

        /* Peer list is RCU-protected; hold the read lock across the lookup
         * and all subsequent uses of peerinfo. */
        rcu_read_lock ();

        peerinfo = glusterd_peerinfo_find (uuid, rhost);

        if (peerinfo == NULL) {
                /* Unknown sender: reply with a probe error and drop the
                 * XDR-allocated volume buffer ourselves. */
                ret = glusterd_xfer_friend_add_resp (req, hostname, rhost, port,
                                                     -1, GF_PROBE_UNKNOWN_PEER);
                if (friend_req->vols.vols_val) {
                        free (friend_req->vols.vols_val);
                        friend_req->vols.vols_val = NULL;
                }
                goto out;
        }

        ret = glusterd_friend_sm_new_event
                        (GD_FRIEND_EVENT_RCVD_FRIEND_REQ, &event);

        if (ret) {
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_NEW_GET_FAIL,
                        "event generation failed: %d", ret);
                goto out;
        }

        event->peername = gf_strdup (peerinfo->hostname);
        gf_uuid_copy (event->peerid, peerinfo->uuid);

        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);

        if (!ctx) {
                gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
                        GD_MSG_NO_MEMORY, "Unable to allocate memory");
                ret = -1;
                goto out;
        }

        gf_uuid_copy (ctx->uuid, uuid);
        if (hostname)
                ctx->hostname = gf_strdup (hostname);
        ctx->req = req;

        dict = dict_new ();
        if (!dict) {
                ret = -1;
                goto out;
        }

        /* Unpack the peer's serialized volume info into dict. */
        ret = dict_unserialize (friend_req->vols.vols_val,
                                friend_req->vols.vols_len,
                                &dict);

        if (ret)
                goto out;
        else
                /* Hand ownership of the XDR buffer to the dict; it will be
                 * released with free() when the dict is destroyed. */
                dict->extra_stdfree = friend_req->vols.vols_val;

        ctx->vols = dict;
        event->ctx = ctx;

        ret = glusterd_friend_sm_inject_event (event);
        if (ret) {
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_INJECT_FAIL,
                        "Unable to inject event %d, "
                        "ret = %d", event->event, ret);
                goto out;
        }

        ret = 0;
        /* peerinfo is non-NULL here (NULL case jumped to out above); if the
         * peer connection is not yet up, tell the caller to defer the
         * response until it connects. */
        if (peerinfo && (0 == peerinfo->connected))
                ret = GLUSTERD_CONNECTION_AWAITED;

out:
        rcu_read_unlock ();

        /* On real failure (not "awaiting connection"), unwind everything we
         * allocated; take care to free the XDR buffer exactly once —
         * either via dict->extra_stdfree (dict_unref) or directly. */
        if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
                if (ctx && ctx->hostname)
                        GF_FREE (ctx->hostname);
                GF_FREE (ctx);
                if (dict) {
                        if ((!dict->extra_stdfree) &&
                            friend_req->vols.vols_val)
                                free (friend_req->vols.vols_val);
                        dict_unref (dict);
                } else {
                    free (friend_req->vols.vols_val);
                }
                if (event)
                        GF_FREE (event->peername);
                GF_FREE (event);
        }


        return ret;
}
198
/* Handle an incoming remove-friend (detach) request from a peer.
 *
 * Builds a GD_FRIEND_EVENT_RCVD_REMOVE_FRIEND event for the friend
 * state machine.  If the sender is unknown, a remove-friend response
 * is sent straight back instead.  Returns 0 on success; on failure
 * the event and its ctx are freed here. */
static int
glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t  uuid,
                              char *hostname, int port)
{
        int                             ret = -1;
        glusterd_peerinfo_t             *peerinfo = NULL;
        glusterd_friend_sm_event_t      *event = NULL;
        glusterd_friend_req_ctx_t       *ctx = NULL;

        if (!port)
                port = GF_DEFAULT_BASE_PORT;

        /* Peer list is RCU-protected. */
        rcu_read_lock ();

        peerinfo = glusterd_peerinfo_find (uuid, hostname);

        if (peerinfo == NULL) {
                gf_msg ("glusterd", GF_LOG_CRITICAL, 0,
                        GD_MSG_REQ_FROM_UNKNOWN_PEER,
                        "Received remove-friend from unknown peer %s",
                        hostname);
                /* Reply anyway so the remote side does not hang waiting. */
                ret = glusterd_xfer_friend_remove_resp (req, hostname,
                                                        port);
                goto out;
        }

        ret = glusterd_friend_sm_new_event
                        (GD_FRIEND_EVENT_RCVD_REMOVE_FRIEND, &event);

        if (ret) {
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_NEW_GET_FAIL,
                        "event generation failed: %d", ret);
                goto out;
        }

        event->peername = gf_strdup (hostname);
        gf_uuid_copy (event->peerid, uuid);

        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);

        if (!ctx) {
                gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
                        GD_MSG_NO_MEMORY, "Unable to allocate memory");
                ret = -1;
                goto out;
        }

        gf_uuid_copy (ctx->uuid, uuid);
        if (hostname)
                ctx->hostname = gf_strdup (hostname);
        ctx->req = req;

        event->ctx = ctx;

        /* On success the state machine owns event and ctx. */
        ret = glusterd_friend_sm_inject_event (event);

        if (ret) {
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_INJECT_FAIL, "Unable to inject event %d, "
                        "ret = %d", event->event, ret);
                goto out;
        }

        ret = 0;

out:
        rcu_read_unlock ();

        /* Failure: unwind allocations made above. */
        if (0 != ret) {
                if (ctx && ctx->hostname)
                        GF_FREE (ctx->hostname);
                GF_FREE (ctx);
                if (event)
                        GF_FREE (event->peername);
                GF_FREE (event);
        }

        return ret;
}
279
/* Accumulator threaded through dict_foreach() by
 * glusterd_add_volume_detail_to_dict() via _build_option_key(). */
struct args_pack {
    dict_t *dict;      /* destination dict receiving "volume<N>.option.*" keys */
    int vol_count;     /* index N of the volume currently being packed */
    int opt_count;     /* running count of options successfully packed */
};
285
286 static int
287 _build_option_key (dict_t *d, char *k, data_t *v, void *tmp)
288 {
289         char                    reconfig_key[256] = {0, };
290         struct args_pack        *pack             = NULL;
291         int                     ret               = -1;
292         xlator_t                *this             = NULL;
293         glusterd_conf_t         *priv             = NULL;
294
295         this = THIS;
296         GF_ASSERT (this);
297         priv = this->private;
298         GF_ASSERT (priv);
299
300         pack = tmp;
301         if (strcmp (k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
302                 return 0;
303
304         if (priv->op_version > GD_OP_VERSION_MIN) {
305                 if ((strcmp (k, "features.limit-usage") == 0) ||
306                     (strcmp (k, "features.soft-limit") == 0))
307                         return 0;
308         }
309         snprintf (reconfig_key, 256, "volume%d.option.%s",
310                   pack->vol_count, k);
311         ret = dict_set_str (pack->dict, reconfig_key, v->data);
312         if (0 == ret)
313                 pack->opt_count++;
314
315         return 0;
316 }
317
318 int
319 glusterd_add_tier_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
320                                     dict_t  *dict, int count)
321 {
322         int            ret            = -1;
323         char           key[256]      = {0,};
324
325         GF_ASSERT (volinfo);
326         GF_ASSERT (dict);
327
328         memset (key, 0, sizeof (key));
329         snprintf (key, 256, "volume%d.cold_type", count);
330         ret = dict_set_int32 (dict, key, volinfo->tier_info.cold_type);
331         if (ret)
332                 goto out;
333
334         memset (key, 0, sizeof (key));
335         snprintf (key, 256, "volume%d.cold_brick_count", count);
336         ret = dict_set_int32 (dict, key, volinfo->tier_info.cold_brick_count);
337         if (ret)
338                 goto out;
339
340         memset (key, 0, sizeof (key));
341         snprintf (key, 256, "volume%d.cold_dist_count", count);
342         ret = dict_set_int32 (dict, key,
343                               volinfo->tier_info.cold_dist_leaf_count);
344         if (ret)
345                 goto out;
346
347         memset (key, 0, sizeof (key));
348         snprintf (key, 256, "volume%d.cold_replica_count", count);
349         ret = dict_set_int32 (dict, key,
350                               volinfo->tier_info.cold_replica_count);
351         if (ret)
352                 goto out;
353
354         memset (key, 0, sizeof (key));
355         snprintf (key, 256, "volume%d.cold_disperse_count", count);
356         ret = dict_set_int32 (dict, key,
357                               volinfo->tier_info.cold_disperse_count);
358         if (ret)
359                 goto out;
360
361         memset (key, 0, sizeof (key));
362         snprintf (key, 256, "volume%d.cold_redundancy_count", count);
363         ret = dict_set_int32 (dict, key,
364                               volinfo->tier_info.cold_redundancy_count);
365         if (ret)
366                 goto out;
367
368         memset (key, 0, sizeof (key));
369         snprintf (key, 256, "volume%d.hot_type", count);
370         ret = dict_set_int32 (dict, key, volinfo->tier_info.hot_type);
371         if (ret)
372                 goto out;
373
374         memset (key, 0, sizeof (key));
375         snprintf (key, 256, "volume%d.hot_brick_count", count);
376         ret = dict_set_int32 (dict, key, volinfo->tier_info.hot_brick_count);
377         if (ret)
378                 goto out;
379
380         memset (key, 0, sizeof (key));
381         snprintf (key, 256, "volume%d.hot_replica_count", count);
382         ret = dict_set_int32 (dict, key, volinfo->tier_info.hot_replica_count);
383         if (ret)
384                 goto out;
385
386 out:
387         return ret;
388
389 }
390
391 int
392 glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
393                                     dict_t  *volumes, int count)
394 {
395
396         int                     ret = -1;
397         char                    key[256] = {0, };
398         glusterd_brickinfo_t    *brickinfo = NULL;
399         char                    *buf = NULL;
400         int                     i = 1;
401         dict_t                  *dict = NULL;
402         glusterd_conf_t         *priv = NULL;
403         char                    *volume_id_str  = NULL;
404         struct args_pack        pack = {0,};
405         xlator_t                *this = NULL;
406         GF_UNUSED int           caps = 0;
407
408         GF_ASSERT (volinfo);
409         GF_ASSERT (volumes);
410
411         this = THIS;
412         priv = this->private;
413
414         GF_ASSERT (priv);
415
416         snprintf (key, 256, "volume%d.name", count);
417         ret = dict_set_str (volumes, key, volinfo->volname);
418         if (ret)
419                 goto out;
420
421         snprintf (key, 256, "volume%d.type", count);
422         ret = dict_set_int32 (volumes, key, volinfo->type);
423         if (ret)
424                 goto out;
425
426         snprintf (key, 256, "volume%d.status", count);
427         ret = dict_set_int32 (volumes, key, volinfo->status);
428         if (ret)
429                 goto out;
430
431         snprintf (key, 256, "volume%d.brick_count", count);
432         ret = dict_set_int32 (volumes, key, volinfo->brick_count);
433         if (ret)
434                 goto out;
435
436         snprintf (key, 256, "volume%d.hot_brick_count", count);
437         ret = dict_set_int32 (volumes, key, volinfo->tier_info.hot_brick_count);
438         if (ret)
439                 goto out;
440
441         if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
442                 ret = glusterd_add_tier_volume_detail_to_dict (volinfo,
443                                                        volumes, count);
444                 if (ret)
445                         goto out;
446         }
447
448         snprintf (key, 256, "volume%d.dist_count", count);
449         ret = dict_set_int32 (volumes, key, volinfo->dist_leaf_count);
450         if (ret)
451                 goto out;
452
453         snprintf (key, 256, "volume%d.stripe_count", count);
454         ret = dict_set_int32 (volumes, key, volinfo->stripe_count);
455         if (ret)
456                 goto out;
457
458         snprintf (key, 256, "volume%d.replica_count", count);
459         ret = dict_set_int32 (volumes, key, volinfo->replica_count);
460         if (ret)
461                 goto out;
462
463         snprintf (key, 256, "volume%d.disperse_count", count);
464         ret = dict_set_int32 (volumes, key, volinfo->disperse_count);
465         if (ret)
466                 goto out;
467
468         snprintf (key, 256, "volume%d.redundancy_count", count);
469         ret = dict_set_int32 (volumes, key, volinfo->redundancy_count);
470         if (ret)
471                 goto out;
472
473         snprintf (key, 256, "volume%d.transport", count);
474         ret = dict_set_int32 (volumes, key, volinfo->transport_type);
475         if (ret)
476                 goto out;
477
478         volume_id_str = gf_strdup (uuid_utoa (volinfo->volume_id));
479         if (!volume_id_str)
480                 goto out;
481
482         snprintf (key, sizeof (key), "volume%d.volume_id", count);
483         ret = dict_set_dynstr (volumes, key, volume_id_str);
484         if (ret)
485                 goto out;
486
487         snprintf (key, 256, "volume%d.rebalance", count);
488         ret = dict_set_int32 (volumes, key, volinfo->rebal.defrag_cmd);
489         if (ret)
490                 goto out;
491
492 #ifdef HAVE_BD_XLATOR
493         if (volinfo->caps) {
494                 caps = 0;
495                 snprintf (key, 256, "volume%d.xlator0", count);
496                 buf = GF_MALLOC (256, gf_common_mt_char);
497                 if (!buf) {
498                         ret = ENOMEM;
499                         goto out;
500                 }
501                 if (volinfo->caps & CAPS_BD)
502                         snprintf (buf, 256, "BD");
503                 ret = dict_set_dynstr (volumes, key, buf);
504                 if (ret) {
505                         GF_FREE (buf);
506                         goto out;
507                 }
508
509                 if (volinfo->caps & CAPS_THIN) {
510                         snprintf (key, 256, "volume%d.xlator0.caps%d", count,
511                                   caps++);
512                         buf = GF_MALLOC (256, gf_common_mt_char);
513                         if (!buf) {
514                                 ret = ENOMEM;
515                                 goto out;
516                         }
517                         snprintf (buf, 256, "thin");
518                         ret = dict_set_dynstr (volumes, key, buf);
519                         if (ret) {
520                                 GF_FREE (buf);
521                                 goto out;
522                         }
523                 }
524
525                 if (volinfo->caps & CAPS_OFFLOAD_COPY) {
526                         snprintf (key, 256, "volume%d.xlator0.caps%d", count,
527                                   caps++);
528                         buf = GF_MALLOC (256, gf_common_mt_char);
529                         if (!buf) {
530                                 ret = ENOMEM;
531                                 goto out;
532                         }
533                         snprintf (buf, 256, "offload_copy");
534                         ret = dict_set_dynstr (volumes, key, buf);
535                         if (ret) {
536                                 GF_FREE (buf);
537                                 goto out;
538                         }
539                 }
540
541                 if (volinfo->caps & CAPS_OFFLOAD_SNAPSHOT) {
542                         snprintf (key, 256, "volume%d.xlator0.caps%d", count,
543                                   caps++);
544                         buf = GF_MALLOC (256, gf_common_mt_char);
545                         if (!buf) {
546                                 ret = ENOMEM;
547                                 goto out;
548                         }
549                         snprintf (buf, 256, "offload_snapshot");
550                         ret = dict_set_dynstr (volumes, key, buf);
551                         if (ret)  {
552                                 GF_FREE (buf);
553                                 goto out;
554                         }
555                 }
556
557                 if (volinfo->caps & CAPS_OFFLOAD_ZERO) {
558                         snprintf (key, 256, "volume%d.xlator0.caps%d", count,
559                                   caps++);
560                         buf = GF_MALLOC (256, gf_common_mt_char);
561                         if (!buf) {
562                                 ret = ENOMEM;
563                                 goto out;
564                         }
565                         snprintf (buf, 256, "offload_zerofill");
566                         ret = dict_set_dynstr (volumes, key, buf);
567                         if (ret)  {
568                                 GF_FREE (buf);
569                                 goto out;
570                         }
571                 }
572
573         }
574 #endif
575
576         cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
577                 char    brick[1024] = {0,};
578                 char    brick_uuid[64] = {0,};
579                 snprintf (key, 256, "volume%d.brick%d", count, i);
580                 snprintf (brick, 1024, "%s:%s", brickinfo->hostname,
581                           brickinfo->path);
582                 buf = gf_strdup (brick);
583                 ret = dict_set_dynstr (volumes, key, buf);
584                 if (ret)
585                         goto out;
586                 snprintf (key, 256, "volume%d.brick%d.uuid", count, i);
587                 snprintf (brick_uuid, 64, "%s", uuid_utoa (brickinfo->uuid));
588                 buf = gf_strdup (brick_uuid);
589                 if (!buf)
590                         goto out;
591                 ret = dict_set_dynstr (volumes, key, buf);
592                 if (ret)
593                         goto out;
594
595 #ifdef HAVE_BD_XLATOR
596                 if (volinfo->caps & CAPS_BD) {
597                         snprintf (key, 256, "volume%d.vg%d", count, i);
598                         snprintf (brick, 1024, "%s", brickinfo->vg);
599                         buf = gf_strdup (brick);
600                         ret = dict_set_dynstr (volumes, key, buf);
601                         if (ret)
602                                 goto out;
603                 }
604 #endif
605                 i++;
606         }
607
608         dict = volinfo->dict;
609         if (!dict) {
610                 ret = 0;
611                 goto out;
612         }
613
614         pack.dict = volumes;
615         pack.vol_count = count;
616         pack.opt_count = 0;
617         dict_foreach (dict, _build_option_key, (void *) &pack);
618         dict_foreach (priv->opts, _build_option_key, &pack);
619
620         snprintf (key, 256, "volume%d.opt_count", pack.vol_count);
621         ret = dict_set_int32 (volumes, key, pack.opt_count);
622 out:
623         return ret;
624 }
625
/* Begin a glusterd operation transaction for request 'req'.
 *
 * Generates a transaction id, records the originator uuid in the op
 * dict, takes either the legacy cluster lock (op-version < 3.6.0) or a
 * per-volume mgmt_v3 lock, saves the transaction's opinfo, and injects
 * the initial event into the op state machine.
 *
 * On failure err_str/err_len receive a user-facing message and any
 * lock taken here is released.  Returns 0 on success, -1 on error. */
int32_t
glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
                       char *err_str, size_t err_len)
{
        int32_t                     ret             = -1;
        int                         npeers          = 0;  /* NOTE(review): unused */
        dict_t                     *dict            = NULL;
        xlator_t                   *this            = NULL;
        glusterd_conf_t            *priv            = NULL;
        int32_t                     locked          = 0;
        char                       *tmp             = NULL;
        char                       *volname         = NULL;
        uuid_t                     *txn_id          = NULL;
        glusterd_op_info_t          txn_op_info     = {{0},};
        glusterd_op_sm_event_type_t event_type      = GD_OP_EVENT_NONE;
        uint32_t                    op_errno        = 0;

        GF_ASSERT (req);
        GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
        GF_ASSERT (NULL != ctx);

        this = THIS;
        GF_ASSERT (this);
        priv = this->private;
        GF_ASSERT (priv);

        dict = ctx;

        /* Generate a transaction-id for this operation and
         * save it in the dict. This transaction id distinguishes
         * each transaction, and helps separate opinfos in the
         * op state machine. */
        ret = glusterd_generate_txn_id (dict, &txn_id);
        if (ret) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_TRANS_IDGEN_FAIL,
                        "Failed to generate transaction id");
                goto out;
        }

        /* Save the MY_UUID as the originator_uuid. This originator_uuid
         * will be used by is_origin_glusterd() to determine if a node
         * is the originator node for a command. */
        ret = glusterd_set_originator_uuid (dict);
        if (ret) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_UUID_SET_FAIL,
                        "Failed to set originator_uuid.");
                goto out;
        }

        /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
        if (priv->op_version < GD_OP_VERSION_3_6_0) {
                ret = glusterd_lock (MY_UUID);
                if (ret) {
                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_GLUSTERD_LOCK_FAIL,
                                "Unable to acquire lock on localhost, ret: %d",
                                ret);
                        snprintf (err_str, err_len,
                                  "Another transaction is in progress. "
                                  "Please try again after sometime.");
                        goto out;
                }
        } else {
                /* If no volname is given as a part of the command, locks will
                 * not be held */
                ret = dict_get_str (dict, "volname", &tmp);
                if (ret) {
                        gf_msg (this->name, GF_LOG_INFO, errno,
                                GD_MSG_DICT_GET_FAILED,
                                "No Volume name present. "
                                "Locks not being held.");
                        goto local_locking_done;
                } else {
                        /* Use a copy of volname, as cli response will be
                         * sent before the unlock, and the volname in the
                         * dict, might be removed */
                        volname = gf_strdup (tmp);
                        if (!volname)
                                goto out;
                }

                ret = glusterd_mgmt_v3_lock (volname, MY_UUID, &op_errno,
                                             "vol");
                if (ret) {
                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_MGMTV3_LOCK_GET_FAIL,
                                "Unable to acquire lock for %s", volname);
                        snprintf (err_str, err_len,
                                  "Another transaction is in progress for %s. "
                                  "Please try again after sometime.", volname);
                        goto out;
                }
        }

        locked = 1;
        gf_msg_debug (this->name, 0, "Acquired lock on localhost");

local_locking_done:
        /* If no volname is given as a part of the command, locks will
         * not be held, hence sending stage event. */
        if (volname || (priv->op_version < GD_OP_VERSION_3_6_0))
                event_type = GD_OP_EVENT_START_LOCK;
        else {
                txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
                event_type = GD_OP_EVENT_ALL_ACC;
        }

        /* Save opinfo for this transaction with the transaction id */
        glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, ctx, req);

        ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
        if (ret) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_TRANS_OPINFO_SET_FAIL,
                        "Unable to set transaction's opinfo");
                if (ctx)
                        dict_unref (ctx);
                goto out;
        }

        /* Kick the op state machine; ctx ownership passes with the event. */
        ret = glusterd_op_sm_inject_event (event_type, txn_id, ctx);
        if (ret) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_INJECT_FAIL, "Failed to acquire cluster"
                        " lock.");
                goto out;
        }

out:
        /* Release whichever lock we took if anything after the lock failed.
         * Note the mgmt_v3 branch deliberately forces ret back to -1 after
         * the unlock attempt, preserving the original failure indication. */
        if (locked && ret) {
                /* Based on the op-version, we release the
                 * cluster or mgmt_v3 lock */
                if (priv->op_version < GD_OP_VERSION_3_6_0)
                        glusterd_unlock (MY_UUID);
                else {
                        ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
                                                       "vol");
                        if (ret)
                                gf_msg (this->name, GF_LOG_ERROR, 0,
                                        GD_MSG_MGMTV3_UNLOCK_FAIL,
                                        "Unable to release lock for %s",
                                        volname);
                        ret = -1;
                }
        }

        if (volname)
                GF_FREE (volname);

        gf_msg_debug (this->name, 0, "Returning %d", ret);
        return ret;
}
780
/* Handle a legacy (pre-mgmt_v3) cluster-lock request from a peer.
 *
 * Decodes the request, verifies the sender belongs to the cluster,
 * records opinfo under the global transaction id, and injects
 * GD_OP_EVENT_LOCK into the op state machine.  Runs the friend and op
 * state machines before returning. */
int
__glusterd_handle_cluster_lock (rpcsvc_request_t *req)
{
        dict_t                         *op_ctx      = NULL;
        int32_t                         ret         = -1;
        gd1_mgmt_cluster_lock_req       lock_req    = {{0},};
        glusterd_op_lock_ctx_t         *ctx         = NULL;
        /* NOTE(review): 'op' is initialized from the *event* enum
         * GD_OP_EVENT_LOCK rather than a glusterd_op_t value — looks like
         * a type mix-up; confirm which op value the opinfo should carry. */
        glusterd_op_t                   op          = GD_OP_EVENT_LOCK;
        glusterd_op_info_t              txn_op_info = {{0},};
        glusterd_conf_t                *priv        = NULL;
        uuid_t                         *txn_id      = NULL;
        xlator_t                       *this        = NULL;

        this = THIS;
        GF_ASSERT (this);
        priv = this->private;
        GF_ASSERT (priv);
        GF_ASSERT (req);

        /* Legacy cluster locks all share the single global txn id. */
        txn_id = &priv->global_txn_id;

        ret = xdr_to_generic (req->msg[0], &lock_req,
                              (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
        if (ret < 0) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode lock "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }

        gf_msg_debug (this->name, 0, "Received LOCK from uuid: %s",
                uuid_utoa (lock_req.uuid));

        /* Reject lock requests from nodes outside the cluster. */
        rcu_read_lock ();
        ret = (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL);
        rcu_read_unlock ();
        if (ret) {
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_PEER_NOT_FOUND, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (lock_req.uuid));
                ret = -1;
                goto out;
        }

        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);

        if (!ctx) {
                /* NOTE(review): early return skips the state-machine runs
                 * done at 'out' and sends no response to the peer. */
                //respond here
                return -1;
        }

        gf_uuid_copy (ctx->uuid, lock_req.uuid);
        ctx->req = req;
        ctx->dict = NULL;

        op_ctx =  dict_new ();
        if (!op_ctx) {
                /* NOTE(review): ctx is leaked on this and later failure
                 * paths; on success it is owned by the injected event. */
                gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
                        GD_MSG_DICT_CREATE_FAIL,
                        "Unable to set new dict");
                goto out;
        }

        glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, op_ctx, req);

        ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
        if (ret) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_TRANS_OPINFO_SET_FAIL,
                        "Unable to set transaction's opinfo");
                dict_unref (txn_op_info.op_ctx);
                goto out;
        }

        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, txn_id, ctx);
        if (ret)
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_INJECT_FAIL,
                        "Failed to inject event GD_OP_EVENT_LOCK");

out:
        gf_msg_debug (this->name, 0, "Returning %d", ret);

        /* Drive both state machines so the injected event is processed. */
        glusterd_friend_sm ();
        glusterd_op_sm ();

        return ret;
}
871
/* RPC entry point for the peer cluster-lock request: runs the real actor,
 * __glusterd_handle_cluster_lock, serialized under glusterd's big lock. */
int
glusterd_handle_cluster_lock (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cluster_lock);
}
878
879 int
880 glusterd_req_ctx_create (rpcsvc_request_t *rpc_req,
881                          glusterd_op_t op, uuid_t uuid,
882                          char *buf_val, size_t buf_len,
883                          gf_gld_mem_types_t mem_type,
884                          glusterd_req_ctx_t **req_ctx_out)
885 {
886         int                 ret     = -1;
887         char                str[50] = {0,};
888         glusterd_req_ctx_t *req_ctx = NULL;
889         dict_t             *dict    = NULL;
890         xlator_t           *this    = NULL;
891
892         this = THIS;
893         GF_ASSERT (this);
894
895         gf_uuid_unparse (uuid, str);
896         gf_msg_debug (this->name, 0, "Received op from uuid %s", str);
897
898         dict = dict_new ();
899         if (!dict)
900                 goto out;
901
902         req_ctx = GF_CALLOC (1, sizeof (*req_ctx), mem_type);
903         if (!req_ctx) {
904                 goto out;
905         }
906
907         gf_uuid_copy (req_ctx->uuid, uuid);
908         req_ctx->op = op;
909         ret = dict_unserialize (buf_val, buf_len, &dict);
910         if (ret) {
911                 gf_msg (this->name, GF_LOG_WARNING, 0,
912                         GD_MSG_DICT_UNSERIALIZE_FAIL,
913                         "failed to unserialize the dictionary");
914                 goto out;
915         }
916
917         req_ctx->dict = dict;
918         req_ctx->req = rpc_req;
919         *req_ctx_out = req_ctx;
920         ret = 0;
921 out:
922         if (ret) {
923                 if (dict)
924                         dict_unref (dict);
925                 GF_FREE (req_ctx);
926         }
927         return ret;
928 }
929
930 int
931 __glusterd_handle_stage_op (rpcsvc_request_t *req)
932 {
933         int32_t                         ret = -1;
934         glusterd_req_ctx_t              *req_ctx = NULL;
935         gd1_mgmt_stage_op_req           op_req = {{0},};
936         xlator_t                        *this = NULL;
937         uuid_t                          *txn_id = NULL;
938         glusterd_op_info_t              txn_op_info = {{0},};
939         glusterd_op_sm_state_info_t     state = {0,};
940         glusterd_conf_t                 *priv = NULL;
941
942         this = THIS;
943         GF_ASSERT (this);
944         priv = this->private;
945         GF_ASSERT (priv);
946         GF_ASSERT (req);
947
948         txn_id = &priv->global_txn_id;
949
950         ret = xdr_to_generic (req->msg[0], &op_req,
951                               (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
952         if (ret < 0) {
953                 gf_msg (this->name, GF_LOG_ERROR, 0,
954                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode stage "
955                         "request received from peer");
956                 req->rpc_err = GARBAGE_ARGS;
957                 goto out;
958         }
959
960         ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid,
961                                        op_req.buf.buf_val, op_req.buf.buf_len,
962                                        gf_gld_mt_op_stage_ctx_t, &req_ctx);
963         if (ret) {
964                 gf_msg (this->name, GF_LOG_ERROR, 0,
965                         GD_MSG_REQ_CTX_CREATE_FAIL, "Failed to create req_ctx");
966                 goto out;
967         }
968
969         ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
970         gf_msg_debug (this->name, 0, "transaction ID = %s",
971                 uuid_utoa (*txn_id));
972
973         rcu_read_lock ();
974         ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);
975         rcu_read_unlock ();
976         if (ret) {
977                 gf_msg (this->name, GF_LOG_WARNING, 0,
978                         GD_MSG_PEER_NOT_FOUND, "%s doesn't "
979                         "belong to the cluster. Ignoring request.",
980                         uuid_utoa (op_req.uuid));
981                 ret = -1;
982                 goto out;
983         }
984
985         /* In cases where there is no volname, the receivers won't have a
986          * transaction opinfo created, as for those operations, the locking
987          * phase where the transaction opinfos are created, won't be called. */
988         ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
989         if (ret) {
990                 gf_msg_debug (this->name, 0,
991                         "No transaction's opinfo set");
992
993                 state.state = GD_OP_STATE_LOCKED;
994                 glusterd_txn_opinfo_init (&txn_op_info, &state, &op_req.op,
995                                           req_ctx->dict, req);
996
997                 ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
998                 if (ret) {
999                         gf_msg (this->name, GF_LOG_ERROR, 0,
1000                                 GD_MSG_TRANS_OPINFO_SET_FAIL,
1001                                 "Unable to set transaction's opinfo");
1002                         dict_unref (req_ctx->dict);
1003                         goto out;
1004                 }
1005         }
1006
1007         ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP,
1008                                            txn_id, req_ctx);
1009         if (ret)
1010                 gf_msg (this->name, GF_LOG_ERROR, 0,
1011                         GD_MSG_EVENT_INJECT_FAIL,
1012                         "Failed to inject event GD_OP_EVENT_STAGE_OP");
1013
1014  out:
1015         free (op_req.buf.buf_val);//malloced by xdr
1016         glusterd_friend_sm ();
1017         glusterd_op_sm ();
1018         return ret;
1019 }
1020
/* RPC entry point for the peer stage-op request: runs the real actor,
 * __glusterd_handle_stage_op, serialized under glusterd's big lock. */
int
glusterd_handle_stage_op (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_stage_op);
}
1026
1027
/* Actor for the commit-op RPC from a peer: decodes the request, checks the
 * sender is a cluster member, builds a req_ctx from the payload and injects
 * GD_OP_EVENT_COMMIT_OP into the op state machine.  The friend and op state
 * machines are always run before returning. */
int
__glusterd_handle_commit_op (rpcsvc_request_t *req)
{
        int32_t                         ret = -1;
        glusterd_req_ctx_t              *req_ctx = NULL;
        gd1_mgmt_commit_op_req          op_req = {{0},};
        xlator_t                        *this = NULL;
        uuid_t                          *txn_id = NULL;
        glusterd_conf_t                 *priv = NULL;

        this = THIS;
        GF_ASSERT (this);
        priv = this->private;
        GF_ASSERT (priv);
        GF_ASSERT (req);

        /* Default to the global transaction id; overridden below if the
         * request payload carries an explicit "transaction_id". */
        txn_id = &priv->global_txn_id;

        ret = xdr_to_generic (req->msg[0], &op_req,
                              (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
        if (ret < 0) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode commit "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }

        /* Ignore commit requests from nodes that are not (any longer)
         * peers. */
        rcu_read_lock ();
        ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);
        rcu_read_unlock ();
        if (ret) {
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_PEER_NOT_FOUND, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (op_req.uuid));
                ret = -1;
                goto out;
        }

        //the structures should always be equal
        GF_ASSERT (sizeof (gd1_mgmt_commit_op_req) == sizeof (gd1_mgmt_stage_op_req));
        ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid,
                                       op_req.buf.buf_val, op_req.buf.buf_len,
                                       gf_gld_mt_op_commit_ctx_t, &req_ctx);
        if (ret)
                goto out;

        /* Return value deliberately ignored: on lookup failure txn_id still
         * points at the global transaction id. */
        ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
        gf_msg_debug (this->name, 0, "transaction ID = %s",
                uuid_utoa (*txn_id));

        /* NOTE(review): if the injection fails, req_ctx appears to be leaked
         * here; whether the state machine takes ownership on failure is not
         * visible from this file — verify before changing. */
        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP,
                                           txn_id, req_ctx);

out:
        free (op_req.buf.buf_val);//malloced by xdr
        glusterd_friend_sm ();
        glusterd_op_sm ();
        return ret;
}
1089
/* RPC entry point for the peer commit-op request: runs the real actor,
 * __glusterd_handle_commit_op, serialized under glusterd's big lock. */
int
glusterd_handle_commit_op (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_commit_op);
}
1095
1096 int
1097 __glusterd_handle_cli_probe (rpcsvc_request_t *req)
1098 {
1099         int32_t                         ret = -1;
1100         gf_cli_req                  cli_req = {{0,},};
1101         glusterd_peerinfo_t       *peerinfo = NULL;
1102         gf_boolean_t                run_fsm = _gf_true;
1103         xlator_t                      *this = NULL;
1104         char                     *bind_name = NULL;
1105         dict_t                        *dict = NULL;
1106         char                      *hostname = NULL;
1107         int                            port = 0;
1108         int                        op_errno = 0;
1109
1110         GF_ASSERT (req);
1111         this = THIS;
1112
1113         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1114         if (ret < 0)  {
1115                 //failed to decode msg;
1116                 gf_msg (this->name, GF_LOG_ERROR, 0,
1117                         GD_MSG_REQ_DECODE_FAIL, "xdr decoding error");
1118                 req->rpc_err = GARBAGE_ARGS;
1119                 goto out;
1120         }
1121
1122         if (cli_req.dict.dict_len) {
1123                 dict = dict_new ();
1124
1125                 ret = dict_unserialize (cli_req.dict.dict_val,
1126                                         cli_req.dict.dict_len, &dict);
1127                 if (ret < 0) {
1128                         gf_msg (this->name, GF_LOG_ERROR, 0,
1129                                 GD_MSG_DICT_UNSERIALIZE_FAIL, "Failed to "
1130                                 "unserialize req-buffer to dictionary");
1131                         goto out;
1132                 }
1133         }
1134
1135         ret = dict_get_str (dict, "hostname", &hostname);
1136         if (ret) {
1137                 gf_msg (this->name, GF_LOG_ERROR, 0,
1138                         GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
1139                         "Failed to get hostname");
1140                 goto out;
1141         }
1142
1143         ret = dict_get_int32 (dict, "port", &port);
1144         if (ret) {
1145                 gf_msg (this->name, GF_LOG_ERROR, 0,
1146                         GD_MSG_PORT_NOTFOUND_IN_DICT, "Failed to get port");
1147                 goto out;
1148         }
1149
1150         if (glusterd_is_any_volume_in_server_quorum (this) &&
1151             !does_gd_meet_server_quorum (this)) {
1152                 glusterd_xfer_cli_probe_resp (req, -1, GF_PROBE_QUORUM_NOT_MET,
1153                                               NULL, hostname, port, dict);
1154                 gf_msg (this->name, GF_LOG_CRITICAL, 0,
1155                         GD_MSG_SERVER_QUORUM_NOT_MET,
1156                         "Server quorum not met. Rejecting operation.");
1157                 ret = 0;
1158                 goto out;
1159         }
1160
1161         gf_msg ("glusterd", GF_LOG_INFO, 0,
1162                 GD_MSG_CLI_REQ_RECVD,
1163                 "Received CLI probe req %s %d",
1164                 hostname, port);
1165
1166         if (dict_get_str(this->options,"transport.socket.bind-address",
1167                          &bind_name) == 0) {
1168                 gf_msg_debug ("glusterd", 0,
1169                         "only checking probe address vs. bind address");
1170                 ret = gf_is_same_address (bind_name, hostname);
1171         }
1172         else {
1173                 ret = gf_is_local_addr (hostname);
1174         }
1175         if (ret) {
1176                 glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_LOCALHOST,
1177                                               NULL, hostname, port, dict);
1178                 ret = 0;
1179                 goto out;
1180         }
1181
1182         rcu_read_lock ();
1183
1184         peerinfo = glusterd_peerinfo_find_by_hostname (hostname);
1185         ret = (peerinfo && gd_peer_has_address (peerinfo, hostname));
1186
1187         rcu_read_unlock ();
1188
1189         if (ret) {
1190                 gf_msg_debug ("glusterd", 0, "Probe host %s port %d "
1191                         "already a peer", hostname, port);
1192                 glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL,
1193                                               hostname, port, dict);
1194                 ret = 0;
1195                 goto out;
1196         }
1197
1198         ret = glusterd_probe_begin (req, hostname, port, dict, &op_errno);
1199
1200         if (ret == GLUSTERD_CONNECTION_AWAITED) {
1201                 //fsm should be run after connection establishes
1202                 run_fsm = _gf_false;
1203                 ret = 0;
1204
1205         } else if (ret == -1) {
1206                 glusterd_xfer_cli_probe_resp (req, -1, op_errno,
1207                                               NULL, hostname, port, dict);
1208                 goto out;
1209         }
1210
1211 out:
1212         free (cli_req.dict.dict_val);
1213
1214         if (run_fsm) {
1215                 glusterd_friend_sm ();
1216                 glusterd_op_sm ();
1217         }
1218
1219         return ret;
1220 }
1221
/* RPC entry point for the CLI probe request: runs the real actor,
 * __glusterd_handle_cli_probe, serialized under glusterd's big lock. */
int
glusterd_handle_cli_probe (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_cli_probe);
}
1227
1228 int
1229 __glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
1230 {
1231         int32_t                         ret = -1;
1232         gf_cli_req                  cli_req = {{0,},};
1233         uuid_t                         uuid = {0};
1234         int                        op_errno = 0;
1235         xlator_t                      *this = NULL;
1236         glusterd_conf_t               *priv = NULL;
1237         dict_t                        *dict = NULL;
1238         char                      *hostname = NULL;
1239         int                            port = 0;
1240         int                           flags = 0;
1241         glusterd_volinfo_t         *volinfo = NULL;
1242         glusterd_volinfo_t             *tmp = NULL;
1243
1244         this = THIS;
1245         GF_ASSERT (this);
1246         priv = this->private;
1247         GF_ASSERT (priv);
1248         GF_ASSERT (req);
1249
1250         ret = xdr_to_generic (req->msg[0], &cli_req,
1251                               (xdrproc_t)xdr_gf_cli_req);
1252         if (ret < 0) {
1253                 //failed to decode msg;
1254                 gf_msg (this->name, GF_LOG_ERROR, 0,
1255                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
1256                         "request received from cli");
1257                 req->rpc_err = GARBAGE_ARGS;
1258                 goto out;
1259         }
1260
1261         if (cli_req.dict.dict_len) {
1262                 dict = dict_new ();
1263
1264                 ret = dict_unserialize (cli_req.dict.dict_val,
1265                                         cli_req.dict.dict_len, &dict);
1266                 if (ret < 0) {
1267                         gf_msg (this->name, GF_LOG_ERROR, 0,
1268                                 GD_MSG_DICT_UNSERIALIZE_FAIL, "Failed to "
1269                                 "unserialize req-buffer to dictionary");
1270                         goto out;
1271                 }
1272         }
1273
1274         gf_msg ("glusterd", GF_LOG_INFO, 0,
1275                 GD_MSG_CLI_REQ_RECVD,
1276                 "Received CLI deprobe req");
1277
1278         ret = dict_get_str (dict, "hostname", &hostname);
1279         if (ret) {
1280                 gf_msg (this->name, GF_LOG_ERROR, 0,
1281                         GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
1282                         "Failed to get hostname");
1283                 goto out;
1284         }
1285
1286         ret = dict_get_int32 (dict, "port", &port);
1287         if (ret) {
1288                 gf_msg (this->name, GF_LOG_ERROR, 0,
1289                         GD_MSG_PORT_NOTFOUND_IN_DICT, "Failed to get port");
1290                 goto out;
1291         }
1292         ret = dict_get_int32 (dict, "flags", &flags);
1293         if (ret) {
1294                 gf_msg (this->name, GF_LOG_ERROR, 0,
1295                         GD_MSG_FLAGS_NOTFOUND_IN_DICT, "Failed to get flags");
1296                 goto out;
1297         }
1298
1299         ret = glusterd_hostname_to_uuid (hostname, uuid);
1300         if (ret) {
1301                 op_errno = GF_DEPROBE_NOT_FRIEND;
1302                 goto out;
1303         }
1304
1305         if (!gf_uuid_compare (uuid, MY_UUID)) {
1306                 op_errno = GF_DEPROBE_LOCALHOST;
1307                 ret = -1;
1308                 goto out;
1309         }
1310
1311         if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
1312                 /* Check if peers are connected, except peer being
1313                 * detached*/
1314                 if (!glusterd_chk_peers_connected_befriended (uuid)) {
1315                         ret = -1;
1316                         op_errno = GF_DEPROBE_FRIEND_DOWN;
1317                         goto out;
1318                 }
1319         }
1320
1321         /* Check for if volumes exist with some bricks on the peer being
1322         * detached. It's not a problem if a volume contains none or all
1323         * of its bricks on the peer being detached
1324         */
1325         cds_list_for_each_entry_safe (volinfo, tmp, &priv->volumes,
1326                                       vol_list) {
1327                 ret = glusterd_friend_contains_vol_bricks (volinfo,
1328                                                            uuid);
1329                 if (ret == 1) {
1330                         op_errno = GF_DEPROBE_BRICK_EXIST;
1331                         goto out;
1332                 }
1333         }
1334
1335         if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
1336                 if (glusterd_is_any_volume_in_server_quorum (this) &&
1337                     !does_gd_meet_server_quorum (this)) {
1338                         gf_msg (this->name, GF_LOG_CRITICAL, 0,
1339                                 GD_MSG_SERVER_QUORUM_NOT_MET,
1340                                 "Server quorum not met. Rejecting operation.");
1341                         ret = -1;
1342                         op_errno = GF_DEPROBE_QUORUM_NOT_MET;
1343                         goto out;
1344                 }
1345         }
1346
1347         if (!gf_uuid_is_null (uuid)) {
1348                 ret = glusterd_deprobe_begin (req, hostname, port, uuid, dict,
1349                                               &op_errno);
1350         } else {
1351                 ret = glusterd_deprobe_begin (req, hostname, port, NULL, dict,
1352                                               &op_errno);
1353         }
1354
1355 out:
1356         free (cli_req.dict.dict_val);
1357
1358         if (ret) {
1359                 ret = glusterd_xfer_cli_deprobe_resp (req, ret, op_errno, NULL,
1360                                                       hostname, dict);
1361         }
1362
1363         glusterd_friend_sm ();
1364         glusterd_op_sm ();
1365
1366         return ret;
1367 }
1368
/* RPC entry point for the CLI detach request: runs the real actor,
 * __glusterd_handle_cli_deprobe, serialized under glusterd's big lock. */
int
glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_cli_deprobe);
}
1374
1375 int
1376 __glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
1377 {
1378         int32_t                         ret = -1;
1379         gf1_cli_peer_list_req           cli_req = {0,};
1380         dict_t                          *dict = NULL;
1381
1382         GF_ASSERT (req);
1383
1384         ret = xdr_to_generic (req->msg[0], &cli_req,
1385                               (xdrproc_t)xdr_gf1_cli_peer_list_req);
1386         if (ret < 0) {
1387                 //failed to decode msg;
1388                 gf_msg ("glusterd", GF_LOG_ERROR, 0,
1389                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
1390                         "request received from cli");
1391                 req->rpc_err = GARBAGE_ARGS;
1392                 goto out;
1393         }
1394
1395         gf_msg ("glusterd", GF_LOG_INFO, 0,
1396                 GD_MSG_CLI_REQ_RECVD,
1397                 "Received cli list req");
1398
1399         if (cli_req.dict.dict_len) {
1400                 /* Unserialize the dictionary */
1401                 dict  = dict_new ();
1402
1403                 ret = dict_unserialize (cli_req.dict.dict_val,
1404                                         cli_req.dict.dict_len,
1405                                         &dict);
1406                 if (ret < 0) {
1407                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
1408                                 GD_MSG_DICT_UNSERIALIZE_FAIL,
1409                                 "failed to "
1410                                 "unserialize req-buffer to dictionary");
1411                         goto out;
1412                 } else {
1413                         dict->extra_stdfree = cli_req.dict.dict_val;
1414                 }
1415         }
1416
1417         ret = glusterd_list_friends (req, dict, cli_req.flags);
1418
1419 out:
1420         if (dict)
1421                 dict_unref (dict);
1422
1423         glusterd_friend_sm ();
1424         glusterd_op_sm ();
1425
1426         return ret;
1427 }
1428
/* RPC entry point for the CLI peer-list request: runs the real actor,
 * __glusterd_handle_cli_list_friends, serialized under glusterd's big lock. */
int
glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_list_friends);
}
1435
1436 int
1437 __glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
1438 {
1439         int32_t                         ret = -1;
1440         gf_cli_req                      cli_req = {{0,}};
1441         dict_t                          *dict = NULL;
1442         int32_t                         flags = 0;
1443
1444         GF_ASSERT (req);
1445
1446         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1447         if (ret < 0) {
1448                 //failed to decode msg;
1449                 gf_msg ("glusterd", GF_LOG_ERROR, 0,
1450                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
1451                         "request received from cli");
1452                 req->rpc_err = GARBAGE_ARGS;
1453                 goto out;
1454         }
1455
1456         gf_msg ("glusterd", GF_LOG_INFO, 0,
1457                 GD_MSG_GET_VOL_REQ_RCVD,
1458                 "Received get vol req");
1459
1460         if (cli_req.dict.dict_len) {
1461                 /* Unserialize the dictionary */
1462                 dict  = dict_new ();
1463
1464                 ret = dict_unserialize (cli_req.dict.dict_val,
1465                                         cli_req.dict.dict_len,
1466                                         &dict);
1467                 if (ret < 0) {
1468                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
1469                                 GD_MSG_DICT_UNSERIALIZE_FAIL,
1470                                 "failed to "
1471                                 "unserialize req-buffer to dictionary");
1472                         goto out;
1473                 } else {
1474                         dict->extra_stdfree = cli_req.dict.dict_val;
1475                 }
1476         }
1477
1478         ret = dict_get_int32 (dict, "flags", &flags);
1479         if (ret) {
1480                 gf_msg (THIS->name, GF_LOG_ERROR, 0,
1481                          GD_MSG_FLAGS_NOTFOUND_IN_DICT, "failed to get flags");
1482                 goto out;
1483         }
1484
1485         ret = glusterd_get_volumes (req, dict, flags);
1486
1487 out:
1488         if (dict)
1489                 dict_unref (dict);
1490
1491         glusterd_friend_sm ();
1492         glusterd_op_sm ();
1493
1494         return ret;
1495 }
1496
/* RPC entry point for the CLI get-volume request: runs the real actor,
 * __glusterd_handle_cli_get_volume, serialized under glusterd's big lock. */
int
glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_get_volume);
}
1503
1504 int
1505 __glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
1506 {
1507         int                     ret     = -1;
1508         dict_t                  *dict   = NULL;
1509         xlator_t                *this   = NULL;
1510         glusterd_conf_t         *priv   = NULL;
1511         uuid_t                  uuid    = {0};
1512         gf_cli_rsp              rsp     = {0,};
1513         gf_cli_req              cli_req = {{0,}};
1514         char                    msg_str[2048] = {0,};
1515
1516         GF_ASSERT (req);
1517
1518         this = THIS;
1519         priv = this->private;
1520         GF_ASSERT (priv);
1521
1522         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1523         if (ret < 0) {
1524                 //failed to decode msg;
1525                 gf_msg (this->name, GF_LOG_ERROR, 0,
1526                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
1527                         "request received from cli");
1528                 req->rpc_err = GARBAGE_ARGS;
1529                 goto out;
1530         }
1531
1532         gf_msg_debug ("glusterd", 0, "Received uuid reset req");
1533
1534         if (cli_req.dict.dict_len) {
1535                 /* Unserialize the dictionary */
1536                 dict  = dict_new ();
1537
1538                 ret = dict_unserialize (cli_req.dict.dict_val,
1539                                         cli_req.dict.dict_len,
1540                                         &dict);
1541                 if (ret < 0) {
1542                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
1543                                 GD_MSG_DICT_UNSERIALIZE_FAIL,
1544                                 "failed to "
1545                                 "unserialize req-buffer to dictionary");
1546                         snprintf (msg_str, sizeof (msg_str), "Unable to decode "
1547                                   "the buffer");
1548                         goto out;
1549                 } else {
1550                         dict->extra_stdfree = cli_req.dict.dict_val;
1551                 }
1552         }
1553
1554         /* In the above section if dict_unserialize is successful, ret is set
1555          * to zero.
1556          */
1557         ret = -1;
1558         // Do not allow peer reset if there are any volumes in the cluster
1559         if (!cds_list_empty (&priv->volumes)) {
1560                 snprintf (msg_str, sizeof (msg_str), "volumes are already "
1561                           "present in the cluster. Resetting uuid is not "
1562                           "allowed");
1563                 gf_msg (this->name, GF_LOG_WARNING, 0,
1564                         GD_MSG_VOLS_ALREADY_PRESENT, "%s", msg_str);
1565                 goto out;
1566         }
1567
1568         // Do not allow peer reset if trusted storage pool is already formed
1569         if (!cds_list_empty (&priv->peers)) {
1570                 snprintf (msg_str, sizeof (msg_str),"trusted storage pool "
1571                           "has been already formed. Please detach this peer "
1572                           "from the pool and reset its uuid.");
1573                 gf_msg (this->name, GF_LOG_WARNING, 0,
1574                         GD_MSG_TSP_ALREADY_FORMED, "%s", msg_str);
1575                 goto out;
1576         }
1577
1578         gf_uuid_copy (uuid, priv->uuid);
1579         ret = glusterd_uuid_generate_save ();
1580
1581         if (!gf_uuid_compare (uuid, MY_UUID)) {
1582                 snprintf (msg_str, sizeof (msg_str), "old uuid and the new uuid"
1583                           " are same. Try gluster peer reset again");
1584                 gf_msg (this->name, GF_LOG_ERROR, 0,
1585                         GD_MSG_UUIDS_SAME_RETRY, "%s", msg_str);
1586                 ret = -1;
1587                 goto out;
1588         }
1589
1590 out:
1591         if (ret) {
1592                 rsp.op_ret = -1;
1593                 if (msg_str[0] == '\0')
1594                         snprintf (msg_str, sizeof (msg_str), "Operation "
1595                                   "failed");
1596                 rsp.op_errstr = msg_str;
1597                 ret = 0;
1598         } else {
1599                 rsp.op_errstr = "";
1600         }
1601
1602         glusterd_to_cli (req, &rsp, NULL, 0, NULL,
1603                          (xdrproc_t)xdr_gf_cli_rsp, dict);
1604
1605         return ret;
1606 }
1607
/* RPC entry point for the CLI uuid-reset request: runs the real actor,
 * __glusterd_handle_cli_uuid_reset, serialized under glusterd's big lock. */
int
glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_uuid_reset);
}
1614
1615 int
1616 __glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
1617 {
1618         int                     ret         = -1;
1619         dict_t                  *dict       = NULL;
1620         dict_t                  *rsp_dict   = NULL;
1621         xlator_t                *this       = NULL;
1622         glusterd_conf_t         *priv       = NULL;
1623         gf_cli_rsp              rsp         = {0,};
1624         gf_cli_req              cli_req     = {{0,}};
1625         char                    msg_str[2048] = {0,};
1626         char                    uuid_str[64] = {0,};
1627
1628         GF_ASSERT (req);
1629
1630         this = THIS;
1631         priv = this->private;
1632         GF_ASSERT (priv);
1633
1634         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1635         if (ret < 0) {
1636                 gf_msg (this->name, GF_LOG_ERROR, 0,
1637                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
1638                         "request received from cli");
1639                 req->rpc_err = GARBAGE_ARGS;
1640                 goto out;
1641         }
1642
1643         gf_msg_debug ("glusterd", 0, "Received uuid get req");
1644
1645         if (cli_req.dict.dict_len) {
1646                 dict  = dict_new ();
1647                 if (!dict) {
1648                         ret = -1;
1649                         goto out;
1650                 }
1651
1652                 ret = dict_unserialize (cli_req.dict.dict_val,
1653                                         cli_req.dict.dict_len,
1654                                         &dict);
1655                 if (ret < 0) {
1656                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
1657                                 GD_MSG_DICT_UNSERIALIZE_FAIL,
1658                                 "failed to "
1659                                 "unserialize req-buffer to dictionary");
1660                         snprintf (msg_str, sizeof (msg_str), "Unable to decode "
1661                                   "the buffer");
1662                         goto out;
1663
1664                 } else {
1665                         dict->extra_stdfree = cli_req.dict.dict_val;
1666
1667                 }
1668         }
1669
1670         rsp_dict = dict_new ();
1671         if (!rsp_dict) {
1672                 ret = -1;
1673                 goto out;
1674         }
1675
1676         uuid_utoa_r (MY_UUID, uuid_str);
1677         ret = dict_set_str (rsp_dict, "uuid", uuid_str);
1678         if (ret) {
1679                 gf_msg (this->name, GF_LOG_ERROR, 0,
1680                         GD_MSG_DICT_SET_FAILED, "Failed to set uuid in "
1681                         "dictionary.");
1682                 goto out;
1683         }
1684
1685         ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
1686                                            &rsp.dict.dict_len);
1687         if (ret) {
1688                 gf_msg (this->name, GF_LOG_ERROR, 0,
1689                         GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
1690                         "Failed to serialize "
1691                         "dictionary.");
1692                 goto out;
1693         }
1694         ret = 0;
1695 out:
1696         if (ret) {
1697                 rsp.op_ret = -1;
1698                 if (msg_str[0] == '\0')
1699                         snprintf (msg_str, sizeof (msg_str), "Operation "
1700                                   "failed");
1701                 rsp.op_errstr = msg_str;
1702
1703         } else {
1704                 rsp.op_errstr = "";
1705
1706         }
1707
1708         glusterd_to_cli (req, &rsp, NULL, 0, NULL,
1709                          (xdrproc_t)xdr_gf_cli_rsp, dict);
1710
1711         return 0;
1712 }
/* RPC entry point for the cli "uuid get" request; dispatches to
 * __glusterd_handle_cli_uuid_get via glusterd_big_locked_handler. */
int
glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_uuid_get);
}
1719
1720 int
1721 __glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
1722 {
1723         int                     ret = -1;
1724         dict_t                  *dict = NULL;
1725         glusterd_conf_t         *priv = NULL;
1726         glusterd_volinfo_t      *volinfo = NULL;
1727         int                     count = 0;
1728         char                    key[1024] = {0,};
1729         gf_cli_rsp              rsp = {0,};
1730
1731         GF_ASSERT (req);
1732
1733         priv = THIS->private;
1734         GF_ASSERT (priv);
1735
1736         dict = dict_new ();
1737         if (!dict)
1738                 goto out;
1739
1740         cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
1741                 memset (key, 0, sizeof (key));
1742                 snprintf (key, sizeof (key), "volume%d", count);
1743                 ret = dict_set_str (dict, key, volinfo->volname);
1744                 if (ret)
1745                         goto out;
1746                 count++;
1747         }
1748
1749         ret = dict_set_int32 (dict, "count", count);
1750         if (ret)
1751                 goto out;
1752
1753         ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
1754                                            &rsp.dict.dict_len);
1755         if (ret)
1756                 goto out;
1757
1758         ret = 0;
1759
1760 out:
1761         rsp.op_ret = ret;
1762         if (ret)
1763                 rsp.op_errstr = "Error listing volumes";
1764         else
1765                 rsp.op_errstr = "";
1766
1767         glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
1768                                      (xdrproc_t)xdr_gf_cli_rsp);
1769         ret = 0;
1770
1771         if (dict)
1772                 dict_unref (dict);
1773
1774         glusterd_friend_sm ();
1775         glusterd_op_sm ();
1776
1777         return ret;
1778 }
1779
/* RPC entry point for "gluster volume list"; dispatches to
 * __glusterd_handle_cli_list_volume via glusterd_big_locked_handler. */
int
glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_list_volume);
}
1786
1787 int32_t
1788 glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
1789                    char *err_str, size_t err_len)
1790 {
1791         int             ret = -1;
1792
1793         ret = glusterd_op_txn_begin (req, op, ctx, err_str, err_len);
1794
1795         return ret;
1796 }
1797
1798 int
1799 __glusterd_handle_ganesha_cmd (rpcsvc_request_t *req)
1800 {
1801         int32_t                         ret = -1;
1802         gf_cli_req                      cli_req = { {0,} } ;
1803         dict_t                          *dict = NULL;
1804         glusterd_op_t                   cli_op = GD_OP_GANESHA;
1805         char                            *volname = NULL;
1806         char                            *op_errstr = NULL;
1807         gf_boolean_t                    help = _gf_false;
1808         char                            err_str[2048] = {0,};
1809         xlator_t                        *this = NULL;
1810
1811         this = THIS;
1812         GF_ASSERT (this);
1813
1814         GF_ASSERT (req);
1815
1816         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1817         if (ret < 0) {
1818                 snprintf (err_str, sizeof (err_str), "Failed to decode "
1819                           "request received from cli");
1820                 gf_msg (this->name, GF_LOG_ERROR, 0,
1821                         GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
1822                 req->rpc_err = GARBAGE_ARGS;
1823                 goto out;
1824         }
1825
1826         if (cli_req.dict.dict_len) {
1827                 /* Unserialize the dictionary */
1828                 dict  = dict_new ();
1829                 if (!dict) {
1830                         ret = -1;
1831                         goto out;
1832                 }
1833
1834                 ret = dict_unserialize (cli_req.dict.dict_val,
1835                                         cli_req.dict.dict_len,
1836                                         &dict);
1837                 if (ret < 0) {
1838                         gf_msg (this->name, GF_LOG_ERROR, 0,
1839                                 GD_MSG_DICT_UNSERIALIZE_FAIL,
1840                                 "failed to "
1841                                 "unserialize req-buffer to dictionary");
1842                         snprintf (err_str, sizeof (err_str), "Unable to decode "
1843                                   "the command");
1844                         goto out;
1845                 } else {
1846                         dict->extra_stdfree = cli_req.dict.dict_val;
1847                 }
1848         }
1849
1850         gf_msg_trace (this->name, 0, "Received global option request");
1851
1852         ret = glusterd_op_begin_synctask (req, GD_OP_GANESHA, dict);
1853 out:
1854         if (ret) {
1855                 if (err_str[0] == '\0')
1856                         snprintf (err_str, sizeof (err_str),
1857                                   "Operation failed");
1858                 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
1859                                                      dict, err_str);
1860         }
1861         if (op_errstr)
1862                 GF_FREE (op_errstr);
1863         if (dict)
1864                 dict_unref(dict);
1865
1866         return ret;
1867 }
1868
1869
/* RPC entry point for the cli ganesha command; dispatches to
 * __glusterd_handle_ganesha_cmd via glusterd_big_locked_handler. */
int
glusterd_handle_ganesha_cmd (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_ganesha_cmd);
}
1875
1876 int
1877 __glusterd_handle_reset_volume (rpcsvc_request_t *req)
1878 {
1879         int32_t                         ret = -1;
1880         gf_cli_req                      cli_req = {{0,}};
1881         dict_t                          *dict = NULL;
1882         glusterd_op_t                   cli_op = GD_OP_RESET_VOLUME;
1883         char                            *volname = NULL;
1884         char                            err_str[2048] = {0,};
1885         xlator_t                        *this = NULL;
1886
1887         GF_ASSERT (req);
1888         this = THIS;
1889         GF_ASSERT (this);
1890
1891         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1892         if (ret < 0) {
1893                 snprintf (err_str, sizeof (err_str), "Failed to decode request "
1894                           "received from cli");
1895                 gf_msg (this->name, GF_LOG_ERROR, 0,
1896                         GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
1897                 req->rpc_err = GARBAGE_ARGS;
1898                 goto out;
1899         }
1900
1901         if (cli_req.dict.dict_len) {
1902                 /* Unserialize the dictionary */
1903                 dict  = dict_new ();
1904
1905                 ret = dict_unserialize (cli_req.dict.dict_val,
1906                                         cli_req.dict.dict_len,
1907                                         &dict);
1908                 if (ret < 0) {
1909                         gf_msg (this->name, GF_LOG_ERROR, 0,
1910                                 GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to "
1911                                 "unserialize req-buffer to dictionary");
1912                         snprintf (err_str, sizeof (err_str), "Unable to decode "
1913                                   "the command");
1914                         goto out;
1915                 } else {
1916                         dict->extra_stdfree = cli_req.dict.dict_val;
1917                 }
1918         }
1919
1920         ret = dict_get_str (dict, "volname", &volname);
1921         if (ret) {
1922                 snprintf (err_str, sizeof (err_str), "Failed to get volume "
1923                           "name");
1924                 gf_msg (this->name, GF_LOG_ERROR, 0,
1925                         GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
1926                 goto out;
1927         }
1928         gf_msg_debug (this->name, 0, "Received volume reset request for "
1929                 "volume %s", volname);
1930
1931         ret = glusterd_op_begin_synctask (req, GD_OP_RESET_VOLUME, dict);
1932
1933 out:
1934         if (ret) {
1935                 if (err_str[0] == '\0')
1936                         snprintf (err_str, sizeof (err_str),
1937                                   "Operation failed");
1938                 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
1939                                                      dict, err_str);
1940         }
1941
1942         return ret;
1943 }
1944
/* RPC entry point for "gluster volume reset"; dispatches to
 * __glusterd_handle_reset_volume via glusterd_big_locked_handler. */
int
glusterd_handle_reset_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_reset_volume);
}
1951
1952 int
1953 __glusterd_handle_set_volume (rpcsvc_request_t *req)
1954 {
1955         int32_t                         ret = -1;
1956         gf_cli_req                      cli_req = {{0,}};
1957         dict_t                          *dict = NULL;
1958         glusterd_op_t                   cli_op = GD_OP_SET_VOLUME;
1959         char                            *key = NULL;
1960         char                            *value = NULL;
1961         char                            *volname = NULL;
1962         char                            *op_errstr = NULL;
1963         gf_boolean_t                    help = _gf_false;
1964         char                            err_str[2048] = {0,};
1965         xlator_t                        *this = NULL;
1966
1967         this = THIS;
1968         GF_ASSERT (this);
1969
1970         GF_ASSERT (req);
1971
1972         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1973         if (ret < 0) {
1974                 snprintf (err_str, sizeof (err_str), "Failed to decode "
1975                           "request received from cli");
1976                 gf_msg (this->name, GF_LOG_ERROR, 0,
1977                         GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
1978                 req->rpc_err = GARBAGE_ARGS;
1979                 goto out;
1980         }
1981
1982         if (cli_req.dict.dict_len) {
1983                 /* Unserialize the dictionary */
1984                 dict  = dict_new ();
1985
1986                 ret = dict_unserialize (cli_req.dict.dict_val,
1987                                         cli_req.dict.dict_len,
1988                                         &dict);
1989                 if (ret < 0) {
1990                         gf_msg (this->name, GF_LOG_ERROR, errno,
1991                                 GD_MSG_DICT_UNSERIALIZE_FAIL,
1992                                 "failed to "
1993                                 "unserialize req-buffer to dictionary");
1994                         snprintf (err_str, sizeof (err_str), "Unable to decode "
1995                                   "the command");
1996                         goto out;
1997                 } else {
1998                         dict->extra_stdfree = cli_req.dict.dict_val;
1999                 }
2000         }
2001
2002         ret = dict_get_str (dict, "volname", &volname);
2003         if (ret) {
2004                 snprintf (err_str, sizeof (err_str), "Failed to get volume "
2005                           "name while handling volume set command");
2006                 gf_msg (this->name, GF_LOG_ERROR, 0,
2007                         GD_MSG_DICT_GET_FAILED, "%s", err_str);
2008                 goto out;
2009         }
2010
2011         if (strcmp (volname, "help") == 0 ||
2012             strcmp (volname, "help-xml") == 0) {
2013                 ret = glusterd_volset_help (dict, &op_errstr);
2014                 help = _gf_true;
2015                 goto out;
2016         }
2017
2018         ret = dict_get_str (dict, "key1", &key);
2019         if (ret) {
2020                 snprintf (err_str, sizeof (err_str), "Failed to get key while"
2021                           " handling volume set for %s", volname);
2022                 gf_msg (this->name, GF_LOG_ERROR, 0,
2023                         GD_MSG_DICT_GET_FAILED, "%s", err_str);
2024                 goto out;
2025         }
2026
2027         ret = dict_get_str (dict, "value1", &value);
2028         if (ret) {
2029                 snprintf (err_str, sizeof (err_str), "Failed to get value while"
2030                           " handling volume set for %s", volname);
2031                 gf_msg (this->name, GF_LOG_ERROR, 0,
2032                         GD_MSG_DICT_GET_FAILED, "%s", err_str);
2033                 goto out;
2034         }
2035         gf_msg_debug (this->name, 0, "Received volume set request for "
2036                 "volume %s", volname);
2037
2038         ret = glusterd_op_begin_synctask (req, GD_OP_SET_VOLUME, dict);
2039
2040 out:
2041         if (help)
2042                 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, dict,
2043                                                      (op_errstr)? op_errstr:"");
2044         else if (ret) {
2045                 if (err_str[0] == '\0')
2046                         snprintf (err_str, sizeof (err_str),
2047                                   "Operation failed");
2048                 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
2049                                                      dict, err_str);
2050         }
2051         if (op_errstr)
2052                 GF_FREE (op_errstr);
2053
2054         return ret;
2055 }
2056
/* RPC entry point for "gluster volume set"; dispatches to
 * __glusterd_handle_set_volume via glusterd_big_locked_handler. */
int
glusterd_handle_set_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_set_volume);
}
2062
2063 int
2064 __glusterd_handle_sync_volume (rpcsvc_request_t *req)
2065 {
2066         int32_t                          ret     = -1;
2067         gf_cli_req                       cli_req = {{0,}};
2068         dict_t                           *dict = NULL;
2069         gf_cli_rsp                       cli_rsp = {0.};
2070         char                             msg[2048] = {0,};
2071         char                             *volname = NULL;
2072         gf1_cli_sync_volume              flags = 0;
2073         char                             *hostname = NULL;
2074         xlator_t                         *this = NULL;
2075
2076         GF_ASSERT (req);
2077         this = THIS;
2078         GF_ASSERT (this);
2079
2080         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
2081         if (ret < 0) {
2082                 //failed to decode msg;
2083                 gf_msg (this->name, GF_LOG_ERROR, 0,
2084                         GD_MSG_REQ_DECODE_FAIL, "%s", "Failed to decode "
2085                         "request received from cli");
2086                 req->rpc_err = GARBAGE_ARGS;
2087                 goto out;
2088         }
2089
2090         if (cli_req.dict.dict_len) {
2091                 /* Unserialize the dictionary */
2092                 dict  = dict_new ();
2093
2094                 ret = dict_unserialize (cli_req.dict.dict_val,
2095                                         cli_req.dict.dict_len,
2096                                         &dict);
2097                 if (ret < 0) {
2098                         gf_msg (this->name, GF_LOG_ERROR, 0,
2099                                 GD_MSG_DICT_UNSERIALIZE_FAIL,
2100                                 "failed to "
2101                                 "unserialize req-buffer to dictionary");
2102                         snprintf (msg, sizeof (msg), "Unable to decode the "
2103                                   "command");
2104                         goto out;
2105                 } else {
2106                         dict->extra_stdfree = cli_req.dict.dict_val;
2107                 }
2108         }
2109
2110         ret = dict_get_str (dict, "hostname", &hostname);
2111         if (ret) {
2112                 snprintf (msg, sizeof (msg), "Failed to get hostname");
2113                 gf_msg (this->name, GF_LOG_ERROR, 0,
2114                         GD_MSG_HOSTNAME_NOTFOUND_IN_DICT, "%s", msg);
2115                 goto out;
2116         }
2117
2118         ret = dict_get_str (dict, "volname", &volname);
2119         if (ret) {
2120                 ret = dict_get_int32 (dict, "flags", (int32_t*)&flags);
2121                 if (ret) {
2122                         snprintf (msg, sizeof (msg), "Failed to get volume name"
2123                                   " or flags");
2124                         gf_msg (this->name, GF_LOG_ERROR, 0,
2125                                 GD_MSG_FLAGS_NOTFOUND_IN_DICT, "%s", msg);
2126                         goto out;
2127                 }
2128         }
2129
2130         gf_msg (this->name, GF_LOG_INFO, 0,
2131                 GD_MSG_VOL_SYNC_REQ_RCVD, "Received volume sync req "
2132                 "for volume %s", (flags & GF_CLI_SYNC_ALL) ? "all" : volname);
2133
2134         if (gf_is_local_addr (hostname)) {
2135                 ret = -1;
2136                 snprintf (msg, sizeof (msg), "sync from localhost"
2137                           " not allowed");
2138                 gf_msg (this->name, GF_LOG_ERROR, 0,
2139                         GD_MSG_SYNC_FROM_LOCALHOST_UNALLOWED, "%s", msg);
2140                 goto out;
2141         }
2142
2143         ret = glusterd_op_begin_synctask (req, GD_OP_SYNC_VOLUME, dict);
2144
2145 out:
2146         if (ret) {
2147                 cli_rsp.op_ret = -1;
2148                 cli_rsp.op_errstr = msg;
2149                 if (msg[0] == '\0')
2150                         snprintf (msg, sizeof (msg), "Operation failed");
2151                 glusterd_to_cli (req, &cli_rsp, NULL, 0, NULL,
2152                                  (xdrproc_t)xdr_gf_cli_rsp, dict);
2153
2154                 ret = 0; //sent error to cli, prevent second reply
2155         }
2156
2157         return ret;
2158 }
2159
/* RPC entry point for "gluster volume sync"; dispatches to
 * __glusterd_handle_sync_volume via glusterd_big_locked_handler. */
int
glusterd_handle_sync_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_sync_volume);
}
2165
/* Send the fsm-log response to the cli: on success (op_ret == 0) the log
 * entries in 'dict' are serialized into the response payload.
 * Always returns 0; the submit result is only logged. */
int
glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret,
                            char *op_errstr, dict_t *dict)
{

        int                             ret = -1;
        gf1_cli_fsm_log_rsp             rsp = {0};

        GF_ASSERT (req);
        GF_ASSERT (op_errstr);

        rsp.op_ret = op_ret;
        rsp.op_errstr = op_errstr;
        /* NOTE(review): the serialize return value is overwritten by the
         * submit below, so a serialization failure silently produces a reply
         * with an empty fsm_log payload — confirm this is intended. */
        if (rsp.op_ret == 0)
                ret = dict_allocate_and_serialize (dict, &rsp.fsm_log.fsm_log_val,
                                                &rsp.fsm_log.fsm_log_len);

        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
        GF_FREE (rsp.fsm_log.fsm_log_val);

        gf_msg_debug ("glusterd", 0, "Responded, ret: %d", ret);

        return 0;
}
2191
2192 int
2193 __glusterd_handle_fsm_log (rpcsvc_request_t *req)
2194 {
2195         int32_t                         ret = -1;
2196         gf1_cli_fsm_log_req             cli_req = {0,};
2197         dict_t                          *dict = NULL;
2198         glusterd_sm_tr_log_t            *log = NULL;
2199         xlator_t                        *this = NULL;
2200         glusterd_conf_t                 *conf = NULL;
2201         char                            msg[2048] = {0};
2202         glusterd_peerinfo_t             *peerinfo = NULL;
2203
2204         GF_ASSERT (req);
2205
2206         ret = xdr_to_generic (req->msg[0], &cli_req,
2207                               (xdrproc_t)xdr_gf1_cli_fsm_log_req);
2208         if (ret < 0) {
2209                 //failed to decode msg;
2210                 gf_msg (this->name, GF_LOG_ERROR, 0,
2211                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
2212                         "request received from client.");
2213                 req->rpc_err = GARBAGE_ARGS;
2214                 snprintf (msg, sizeof (msg), "Garbage request");
2215                 goto out;
2216         }
2217
2218         dict = dict_new ();
2219         if (!dict) {
2220                 ret = -1;
2221                 goto out;
2222         }
2223
2224         if (strcmp ("", cli_req.name) == 0) {
2225                 this = THIS;
2226                 conf = this->private;
2227                 ret = glusterd_sm_tr_log_add_to_dict (dict, &conf->op_sm_log);
2228         } else {
2229                 rcu_read_lock ();
2230
2231                 peerinfo = glusterd_peerinfo_find_by_hostname (cli_req.name);
2232                 if (!peerinfo) {
2233                         ret = -1;
2234                         snprintf (msg, sizeof (msg), "%s is not a peer",
2235                                   cli_req.name);
2236                 } else {
2237                         ret = glusterd_sm_tr_log_add_to_dict
2238                                 (dict, &peerinfo->sm_log);
2239                 }
2240
2241                 rcu_read_unlock ();
2242         }
2243
2244 out:
2245         (void)glusterd_fsm_log_send_resp (req, ret, msg, dict);
2246         free (cli_req.name);//malloced by xdr
2247         if (dict)
2248                 dict_unref (dict);
2249
2250         glusterd_friend_sm ();
2251         glusterd_op_sm ();
2252
2253         return 0;//send 0 to avoid double reply
2254 }
2255
/* RPC entry point for the cli fsm-log request; dispatches to
 * __glusterd_handle_fsm_log via glusterd_big_locked_handler. */
int
glusterd_handle_fsm_log (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_fsm_log);
}
2261
2262 int
2263 glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status)
2264 {
2265
2266         gd1_mgmt_cluster_lock_rsp       rsp = {{0},};
2267         int                             ret = -1;
2268
2269         GF_ASSERT (req);
2270         glusterd_get_uuid (&rsp.uuid);
2271         rsp.op_ret = status;
2272
2273         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
2274                                      (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
2275
2276         gf_msg_debug (THIS->name, 0, "Responded to lock, ret: %d", ret);
2277
2278         return 0;
2279 }
2280
2281 int
2282 glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
2283 {
2284
2285         gd1_mgmt_cluster_unlock_rsp     rsp = {{0},};
2286         int                             ret = -1;
2287
2288         GF_ASSERT (req);
2289         rsp.op_ret = status;
2290         glusterd_get_uuid (&rsp.uuid);
2291
2292         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
2293                                      (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
2294
2295         gf_msg_debug (THIS->name, 0, "Responded to unlock, ret: %d", ret);
2296
2297         return ret;
2298 }
2299
2300 int
2301 glusterd_op_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
2302                                    int32_t status)
2303 {
2304
2305         gd1_mgmt_v3_lock_rsp    rsp = {{0},};
2306         int                     ret = -1;
2307
2308         GF_ASSERT (req);
2309         GF_ASSERT (txn_id);
2310         glusterd_get_uuid (&rsp.uuid);
2311         rsp.op_ret = status;
2312         if (rsp.op_ret)
2313                rsp.op_errno = errno;
2314         gf_uuid_copy (rsp.txn_id, *txn_id);
2315
2316         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
2317                                      (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
2318
2319         gf_msg_debug (THIS->name, 0, "Responded to mgmt_v3 lock, ret: %d",
2320                 ret);
2321
2322         return ret;
2323 }
2324
2325 int
2326 glusterd_op_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
2327                                      int32_t status)
2328 {
2329
2330         gd1_mgmt_v3_unlock_rsp      rsp = {{0},};
2331         int                             ret = -1;
2332
2333         GF_ASSERT (req);
2334         GF_ASSERT (txn_id);
2335         rsp.op_ret = status;
2336         if (rsp.op_ret)
2337                rsp.op_errno = errno;
2338         glusterd_get_uuid (&rsp.uuid);
2339         gf_uuid_copy (rsp.txn_id, *txn_id);
2340
2341         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
2342                                      (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
2343
2344         gf_msg_debug (THIS->name, 0, "Responded to mgmt_v3 unlock, ret: %d",
2345                 ret);
2346
2347         return ret;
2348 }
2349
/* Handler for a cluster UNLOCK request from a peer: validates that the
 * sender belongs to the cluster, then injects GD_OP_EVENT_UNLOCK (with the
 * global transaction id) into the op state machine; the actual unlock
 * response is sent later by the state machine, not here. */
int
__glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
{
        gd1_mgmt_cluster_unlock_req     unlock_req = {{0}, };
        int32_t                         ret = -1;
        glusterd_op_lock_ctx_t          *ctx = NULL;
        xlator_t                        *this = NULL;
        uuid_t                          *txn_id = NULL;
        glusterd_conf_t                 *priv = NULL;

        this = THIS;
        GF_ASSERT (this);
        priv = this->private;
        GF_ASSERT (priv);
        GF_ASSERT (req);

        /* Cluster-wide (v2) lock requests all use the global txn id. */
        txn_id = &priv->global_txn_id;

        ret = xdr_to_generic (req->msg[0], &unlock_req,
                              (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
        if (ret < 0) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode unlock "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }


        gf_msg_debug (this->name, 0,
                "Received UNLOCK from uuid: %s", uuid_utoa (unlock_req.uuid));

        /* Only peers already in the cluster may unlock us. */
        rcu_read_lock ();
        ret = (glusterd_peerinfo_find_by_uuid (unlock_req.uuid) == NULL);
        rcu_read_unlock ();
        if (ret) {
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_PEER_NOT_FOUND, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (unlock_req.uuid));
                ret = -1;
                goto out;
        }

        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);

        if (!ctx) {
                /* NOTE(review): this early return skips the friend/op state
                 * machine runs below and never answers the peer — the
                 * "respond here" comment marks unfinished handling. */
                //respond here
                gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
                        GD_MSG_NO_MEMORY, "No memory.");
                return -1;
        }
        gf_uuid_copy (ctx->uuid, unlock_req.uuid);
        ctx->req = req;
        ctx->dict = NULL;

        ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, txn_id, ctx);

out:
        glusterd_friend_sm ();
        glusterd_op_sm ();

        return ret;
}
2414
/* RPC entry point for peer cluster UNLOCK requests; dispatches to
 * __glusterd_handle_cluster_unlock via glusterd_big_locked_handler. */
int
glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cluster_unlock);
}
2421
2422 int
2423 glusterd_op_stage_send_resp (rpcsvc_request_t   *req,
2424                              int32_t op, int32_t status,
2425                              char *op_errstr, dict_t *rsp_dict)
2426 {
2427         gd1_mgmt_stage_op_rsp           rsp      = {{0},};
2428         int                             ret      = -1;
2429         xlator_t                       *this     = NULL;
2430
2431         this = THIS;
2432         GF_ASSERT (this);
2433         GF_ASSERT (req);
2434
2435         rsp.op_ret = status;
2436         glusterd_get_uuid (&rsp.uuid);
2437         rsp.op = op;
2438         if (op_errstr)
2439                 rsp.op_errstr = op_errstr;
2440         else
2441                 rsp.op_errstr = "";
2442
2443         ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
2444                                            &rsp.dict.dict_len);
2445         if (ret < 0) {
2446                 gf_msg (this->name, GF_LOG_ERROR, 0,
2447                         GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
2448                         "failed to get serialized length of dict");
2449                 return ret;
2450         }
2451
2452         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
2453                                      (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
2454
2455         gf_msg_debug (this->name, 0, "Responded to stage, ret: %d", ret);
2456         GF_FREE (rsp.dict.dict_val);
2457
2458         return ret;
2459 }
2460
2461 int
2462 glusterd_op_commit_send_resp (rpcsvc_request_t *req,
2463                                int32_t op, int32_t status, char *op_errstr,
2464                                dict_t *rsp_dict)
2465 {
2466         gd1_mgmt_commit_op_rsp          rsp      = {{0}, };
2467         int                             ret      = -1;
2468         xlator_t                        *this = NULL;
2469
2470         this = THIS;
2471         GF_ASSERT (this);
2472         GF_ASSERT (req);
2473         rsp.op_ret = status;
2474         glusterd_get_uuid (&rsp.uuid);
2475         rsp.op = op;
2476
2477         if (op_errstr)
2478                 rsp.op_errstr = op_errstr;
2479         else
2480                 rsp.op_errstr = "";
2481
2482         if (rsp_dict) {
2483                 ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
2484                                                    &rsp.dict.dict_len);
2485                 if (ret < 0) {
2486                         gf_msg (this->name, GF_LOG_ERROR, 0,
2487                                 GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
2488                                 "failed to get serialized length of dict");
2489                         goto out;
2490                 }
2491         }
2492
2493
2494         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
2495                                      (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
2496
2497         gf_msg_debug (this->name, 0, "Responded to commit, ret: %d", ret);
2498
2499 out:
2500         GF_FREE (rsp.dict.dict_val);
2501         return ret;
2502 }
2503
2504 int
2505 __glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
2506 {
2507         int32_t                 ret = -1;
2508         gd1_mgmt_friend_req     friend_req = {{0},};
2509         gf_boolean_t            run_fsm = _gf_true;
2510
2511         GF_ASSERT (req);
2512         ret = xdr_to_generic (req->msg[0], &friend_req,
2513                               (xdrproc_t)xdr_gd1_mgmt_friend_req);
2514         if (ret < 0) {
2515                 //failed to decode msg;
2516                 gf_msg ("glusterd", GF_LOG_ERROR, 0,
2517                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
2518                         "request received from friend");
2519                 req->rpc_err = GARBAGE_ARGS;
2520                 goto out;
2521         }
2522
2523         gf_msg ("glusterd", GF_LOG_INFO, 0,
2524                 GD_MSG_PROBE_RCVD,
2525                 "Received probe from uuid: %s", uuid_utoa (friend_req.uuid));
2526         ret = glusterd_handle_friend_req (req, friend_req.uuid,
2527                                           friend_req.hostname, friend_req.port,
2528                                           &friend_req);
2529
2530         if (ret == GLUSTERD_CONNECTION_AWAITED) {
2531                 //fsm should be run after connection establishes
2532                 run_fsm = _gf_false;
2533                 ret = 0;
2534         }
2535
2536 out:
2537         free (friend_req.hostname);//malloced by xdr
2538
2539         if (run_fsm) {
2540                 glusterd_friend_sm ();
2541                 glusterd_op_sm ();
2542         }
2543
2544         return ret;
2545 }
2546
/* Public entry point: runs __glusterd_handle_incoming_friend_req under
 * glusterd's big lock via the common locked-handler wrapper. */
int
glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_incoming_friend_req);
}
2553
2554 int
2555 __glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
2556 {
2557         int32_t                 ret = -1;
2558         gd1_mgmt_friend_req     friend_req = {{0},};
2559         char               remote_hostname[UNIX_PATH_MAX + 1] = {0,};
2560
2561         GF_ASSERT (req);
2562         ret = xdr_to_generic (req->msg[0], &friend_req,
2563                               (xdrproc_t)xdr_gd1_mgmt_friend_req);
2564         if (ret < 0) {
2565                 //failed to decode msg;
2566                 gf_msg ("glusterd", GF_LOG_ERROR, 0,
2567                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
2568                         "request received.");
2569                 req->rpc_err = GARBAGE_ARGS;
2570                 goto out;
2571         }
2572
2573         gf_msg ("glusterd", GF_LOG_INFO, 0,
2574                 GD_MSG_UNFRIEND_REQ_RCVD,
2575                 "Received unfriend from uuid: %s", uuid_utoa (friend_req.uuid));
2576
2577         ret = glusterd_remote_hostname_get (req, remote_hostname,
2578                                             sizeof (remote_hostname));
2579         if (ret) {
2580                 gf_msg ("glusterd", GF_LOG_ERROR, 0,
2581                         GD_MSG_HOSTNAME_RESOLVE_FAIL,
2582                         "Unable to get the remote hostname");
2583                 goto out;
2584         }
2585         ret = glusterd_handle_unfriend_req (req, friend_req.uuid,
2586                                             remote_hostname, friend_req.port);
2587
2588 out:
2589         free (friend_req.hostname);//malloced by xdr
2590         free (friend_req.vols.vols_val);//malloced by xdr
2591
2592         glusterd_friend_sm ();
2593         glusterd_op_sm ();
2594
2595         return ret;
2596 }
2597
/* Public entry point: runs __glusterd_handle_incoming_unfriend_req under
 * glusterd's big lock via the common locked-handler wrapper. */
int
glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_incoming_unfriend_req);

}
2605
2606 int
2607 glusterd_handle_friend_update_delete (dict_t *dict)
2608 {
2609         char                    *hostname = NULL;
2610         int32_t                 ret = -1;
2611
2612         GF_ASSERT (dict);
2613
2614         ret = dict_get_str (dict, "hostname", &hostname);
2615         if (ret)
2616                 goto out;
2617
2618         ret = glusterd_friend_remove (NULL, hostname);
2619
2620 out:
2621         gf_msg_debug ("glusterd", 0, "Returning %d", ret);
2622         return ret;
2623 }
2624
2625 int
2626 glusterd_peer_hostname_update (glusterd_peerinfo_t *peerinfo,
2627                                const char *hostname, gf_boolean_t store_update)
2628 {
2629         int                     ret = 0;
2630
2631         GF_ASSERT (peerinfo);
2632         GF_ASSERT (hostname);
2633
2634         ret = gd_add_address_to_peer (peerinfo, hostname);
2635         if (ret) {
2636                 gf_msg (THIS->name, GF_LOG_ERROR, 0,
2637                         GD_MSG_HOSTNAME_ADD_TO_PEERLIST_FAIL,
2638                         "Couldn't add address to the peer info");
2639                 goto out;
2640         }
2641
2642         if (store_update)
2643                 ret = glusterd_store_peerinfo (peerinfo);
2644 out:
2645         gf_msg_debug (THIS->name, 0, "Returning %d", ret);
2646         return ret;
2647 }
2648
/* Handle a friend-update RPC from a known peer: either delete a peer
 * (GD_FRIEND_UPDATE_DEL) or walk the serialized "friendN.*" entries,
 * creating new peerinfos or updating existing ones, and reply with our
 * own uuid. */
int
__glusterd_handle_friend_update (rpcsvc_request_t *req)
{
        int32_t                 ret = -1;
        gd1_mgmt_friend_update     friend_req = {{0},};
        glusterd_peerinfo_t     *peerinfo = NULL;
        glusterd_conf_t         *priv = NULL;
        xlator_t                *this = NULL;
        gd1_mgmt_friend_update_rsp rsp = {{0},};
        dict_t                  *dict = NULL;
        char                    key[100] = {0,};
        char                    *uuid_buf = NULL;
        int                     i = 1;
        int                     count = 0;
        uuid_t                  uuid = {0,};
        glusterd_peerctx_args_t args = {0};
        int32_t                 op = 0;

        GF_ASSERT (req);

        this = THIS;
        GF_ASSERT (this);
        priv = this->private;
        GF_ASSERT (priv);

        /* Decode the friend-update request payload. */
        ret = xdr_to_generic (req->msg[0], &friend_req,
                              (xdrproc_t)xdr_gd1_mgmt_friend_update);
        if (ret < 0) {
                //failed to decode msg;
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
                        "request received");
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }

        /* Only accept updates from peers we already know; the lookup must
         * happen inside an RCU read-side critical section. */
        ret = 0;
        rcu_read_lock ();
        if (glusterd_peerinfo_find (friend_req.uuid, NULL) == NULL) {
                ret = -1;
        }
        rcu_read_unlock ();
        if (ret) {
                gf_msg (this->name, GF_LOG_CRITICAL, 0,
                        GD_MSG_REQ_FROM_UNKNOWN_PEER,
                        "Received friend update request "
                        "from unknown peer %s", uuid_utoa (friend_req.uuid));
                goto out;
        }

        gf_msg ("glusterd", GF_LOG_INFO, 0,
                GD_MSG_FRIEND_UPDATE_RCVD,
                "Received friend update from uuid: %s", uuid_utoa (friend_req.uuid));

        if (friend_req.friends.friends_len) {
                /* Unserialize the dictionary */
                dict  = dict_new ();

                ret = dict_unserialize (friend_req.friends.friends_val,
                                        friend_req.friends.friends_len,
                                        &dict);
                if (ret < 0) {
                        gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");
                        goto out;
                } else {
                        /* Let dict_unref free the XDR-allocated buffer. */
                        dict->extra_stdfree = friend_req.friends.friends_val;
                }
        }

        ret = dict_get_int32 (dict, "count", &count);
        if (ret)
                goto out;

        ret = dict_get_int32 (dict, "op", &op);
        if (ret)
                goto out;

        /* Delete requests are handled wholesale and short-circuit the
         * per-friend loop below. */
        if (GD_FRIEND_UPDATE_DEL == op) {
                ret = glusterd_handle_friend_update_delete (dict);
                goto out;
        }

        args.mode = GD_MODE_ON;
        /* Walk friend1..friendN entries from the dict (1-based). */
        while ( i <= count) {
                memset (key, 0, sizeof (key));
                snprintf (key, sizeof (key), "friend%d.uuid", i);
                ret = dict_get_str (dict, key, &uuid_buf);
                if (ret)
                        goto out;
                gf_uuid_parse (uuid_buf, uuid);

                /* Skip an entry describing ourselves. */
                if (!gf_uuid_compare (uuid, MY_UUID)) {
                        gf_msg (this->name, GF_LOG_INFO, 0,
                                GD_MSG_UUID_RECEIVED,
                                "Received my uuid as Friend");
                        i++;
                        continue;
                }

                memset (key, 0, sizeof (key));
                snprintf (key, sizeof (key), "friend%d", i);

                rcu_read_lock ();
                peerinfo = glusterd_peerinfo_find (uuid, NULL);
                if (peerinfo == NULL) {
                        /* Create a new peer and add it to the list as there is
                         * no existing peer with the uuid
                         */
                        peerinfo = gd_peerinfo_from_dict (dict, key);
                        if (peerinfo == NULL) {
                                ret = -1;
                                gf_msg (this->name, GF_LOG_ERROR, 0,
                                        GD_MSG_PEERINFO_CREATE_FAIL,
                                        "Could not create peerinfo from dict "
                                        "for prefix %s", key);
                                goto unlock;
                        }

                        /* As this is a new peer, it should be added as a
                         * friend.  The friend state machine will take care of
                         * correcting the state as required
                         */
                        peerinfo->state.state = GD_FRIEND_STATE_BEFRIENDED;

                        ret = glusterd_friend_add_from_peerinfo (peerinfo, 0,
                                                                 &args);
                } else {
                        /* As an existing peer was found, update it with the new
                         * information
                         */
                        ret = gd_update_peerinfo_from_dict (peerinfo, dict,
                                                            key);
                        if (ret) {
                                gf_msg (this->name, GF_LOG_ERROR, 0,
                                        GD_MSG_PEER_INFO_UPDATE_FAIL,
                                        "Failed to "
                                        "update peer %s", peerinfo->hostname);
                                goto unlock;
                        }
                        ret = glusterd_store_peerinfo (peerinfo);
                        if (ret)
                                gf_msg (this->name, GF_LOG_ERROR, 0,
                                        GD_MSG_PEERINFO_CREATE_FAIL,
                                        "Failed to store peerinfo");
                }
unlock:
                rcu_read_unlock ();
                if (ret)
                        break;

                /* Reset so the out-path cleanup only fires for a peer that
                 * failed partway through handling. */
                peerinfo = NULL;
                i++;
        }

out:
        /* Always respond with our own uuid, regardless of outcome. */
        gf_uuid_copy (rsp.uuid, MY_UUID);
        ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
        /* Free the XDR buffer exactly once: via dict->extra_stdfree when the
         * dict owns it, directly otherwise. */
        if (dict) {
                if (!dict->extra_stdfree && friend_req.friends.friends_val)
                        free (friend_req.friends.friends_val);//malloced by xdr
                dict_unref (dict);
        } else {
                free (friend_req.friends.friends_val);//malloced by xdr
        }

        if (peerinfo)
                glusterd_peerinfo_cleanup (peerinfo);

        glusterd_friend_sm ();
        glusterd_op_sm ();

        return ret;
}
2826
/* Public entry point: runs __glusterd_handle_friend_update under glusterd's
 * big lock via the common locked-handler wrapper. */
int
glusterd_handle_friend_update (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_friend_update);
}
2833
2834 int
2835 __glusterd_handle_probe_query (rpcsvc_request_t *req)
2836 {
2837         int32_t                         ret = -1;
2838         xlator_t                        *this = NULL;
2839         glusterd_conf_t                 *conf = NULL;
2840         gd1_mgmt_probe_req              probe_req = {{0},};
2841         gd1_mgmt_probe_rsp              rsp = {{0},};
2842         glusterd_peerinfo_t             *peerinfo = NULL;
2843         glusterd_peerctx_args_t         args = {0};
2844         int                             port = 0;
2845         char               remote_hostname[UNIX_PATH_MAX + 1] = {0,};
2846
2847         GF_ASSERT (req);
2848
2849         ret = xdr_to_generic (req->msg[0], &probe_req,
2850                               (xdrproc_t)xdr_gd1_mgmt_probe_req);
2851         if (ret < 0) {
2852                 //failed to decode msg;
2853                 gf_msg (this->name, GF_LOG_ERROR, 0,
2854                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode probe "
2855                         "request");
2856                 req->rpc_err = GARBAGE_ARGS;
2857                 goto out;
2858         }
2859
2860         this = THIS;
2861
2862         conf = this->private;
2863         if (probe_req.port)
2864                 port = probe_req.port;
2865         else
2866                 port = GF_DEFAULT_BASE_PORT;
2867
2868         gf_msg ("glusterd", GF_LOG_INFO, 0,
2869                 GD_MSG_PROBE_RCVD,
2870                 "Received probe from uuid: %s", uuid_utoa (probe_req.uuid));
2871
2872         /* Check for uuid collision and handle it in a user friendly way by
2873          * sending the error.
2874          */
2875         if (!gf_uuid_compare (probe_req.uuid, MY_UUID)) {
2876                 gf_msg (THIS->name, GF_LOG_ERROR, 0,
2877                         GD_MSG_UUIDS_SAME_RETRY, "Peer uuid %s is same as "
2878                         "local uuid. Please check the uuid of both the peers "
2879                         "from %s/%s", uuid_utoa (probe_req.uuid),
2880                         GLUSTERD_DEFAULT_WORKDIR, GLUSTERD_INFO_FILE);
2881                 rsp.op_ret = -1;
2882                 rsp.op_errno = GF_PROBE_SAME_UUID;
2883                 rsp.port = port;
2884                 goto respond;
2885         }
2886
2887         ret = glusterd_remote_hostname_get (req, remote_hostname,
2888                                             sizeof (remote_hostname));
2889         if (ret) {
2890                 gf_msg ("glusterd", GF_LOG_ERROR, 0,
2891                         GD_MSG_HOSTNAME_RESOLVE_FAIL,
2892                         "Unable to get the remote hostname");
2893                 goto out;
2894         }
2895
2896         rcu_read_lock ();
2897         peerinfo = glusterd_peerinfo_find (probe_req.uuid, remote_hostname);
2898         if ((peerinfo == NULL) && (!cds_list_empty (&conf->peers))) {
2899                 rsp.op_ret = -1;
2900                 rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER;
2901         } else if (peerinfo == NULL) {
2902                 gf_msg ("glusterd", GF_LOG_INFO, 0,
2903                         GD_MSG_PEER_NOT_FOUND,
2904                         "Unable to find peerinfo"
2905                         " for host: %s (%d)", remote_hostname, port);
2906                 args.mode = GD_MODE_ON;
2907                 ret = glusterd_friend_add (remote_hostname, port,
2908                                            GD_FRIEND_STATE_PROBE_RCVD,
2909                                            NULL, &peerinfo, 0, &args);
2910                 if (ret) {
2911                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
2912                                 GD_MSG_PEER_ADD_FAIL,
2913                                 "Failed to add peer %s",
2914                                 remote_hostname);
2915                         rsp.op_errno = GF_PROBE_ADD_FAILED;
2916                 }
2917         }
2918         rcu_read_unlock ();
2919
2920 respond:
2921         gf_uuid_copy (rsp.uuid, MY_UUID);
2922
2923         rsp.hostname = probe_req.hostname;
2924         rsp.op_errstr = "";
2925
2926         glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
2927                                (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
2928         ret = 0;
2929
2930         gf_msg ("glusterd", GF_LOG_INFO, 0,
2931                 GD_MSG_RESPONSE_INFO, "Responded to %s, op_ret: %d, "
2932                 "op_errno: %d, ret: %d", remote_hostname,
2933                 rsp.op_ret, rsp.op_errno, ret);
2934
2935 out:
2936         free (probe_req.hostname);//malloced by xdr
2937
2938         glusterd_friend_sm ();
2939         glusterd_op_sm ();
2940
2941         return ret;
2942 }
2943
/* Public entry point: runs __glusterd_handle_probe_query under glusterd's
 * big lock via the common locked-handler wrapper. */
int glusterd_handle_probe_query (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_probe_query);
}
2948
2949 int
2950 __glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
2951 {
2952         int32_t                         ret     = -1;
2953         gf_cli_req                      cli_req = {{0,}};
2954         dict_t                          *dict = NULL;
2955         glusterd_op_t                   cli_op = GD_OP_PROFILE_VOLUME;
2956         char                            *volname = NULL;
2957         int32_t                         op = 0;
2958         char                            err_str[2048] = {0,};
2959         xlator_t                        *this = NULL;
2960
2961         GF_ASSERT (req);
2962         this = THIS;
2963         GF_ASSERT (this);
2964
2965         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
2966         if (ret < 0) {
2967                 //failed to decode msg;
2968                 gf_msg (this->name, GF_LOG_ERROR, 0,
2969                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
2970                         "request received from cli");
2971                 req->rpc_err = GARBAGE_ARGS;
2972                 goto out;
2973         }
2974
2975         if (cli_req.dict.dict_len > 0) {
2976                 dict = dict_new();
2977                 if (!dict)
2978                         goto out;
2979                 dict_unserialize (cli_req.dict.dict_val,
2980                                   cli_req.dict.dict_len, &dict);
2981         }
2982
2983         ret = dict_get_str (dict, "volname", &volname);
2984         if (ret) {
2985                 snprintf (err_str, sizeof (err_str), "Unable to get volume "
2986                           "name");
2987                 gf_msg (this->name, GF_LOG_ERROR, 0,
2988                         GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
2989                 goto out;
2990         }
2991
2992         gf_msg (this->name, GF_LOG_INFO, 0,
2993                 GD_MSG_VOL_PROFILE_REQ_RCVD,
2994                 "Received volume profile req "
2995                 "for volume %s", volname);
2996         ret = dict_get_int32 (dict, "op", &op);
2997         if (ret) {
2998                 snprintf (err_str, sizeof (err_str), "Unable to get operation");
2999                 gf_msg (this->name, GF_LOG_ERROR, 0,
3000                         GD_MSG_DICT_GET_FAILED, "%s", err_str);
3001                 goto out;
3002         }
3003
3004         ret = glusterd_op_begin (req, cli_op, dict, err_str, sizeof (err_str));
3005
3006 out:
3007         glusterd_friend_sm ();
3008         glusterd_op_sm ();
3009
3010         free (cli_req.dict.dict_val);
3011
3012         if (ret) {
3013                 if (err_str[0] == '\0')
3014                         snprintf (err_str, sizeof (err_str),
3015                                   "Operation failed");
3016                 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
3017                                                      dict, err_str);
3018         }
3019
3020         gf_msg_debug (this->name, 0, "Returning %d", ret);
3021         return ret;
3022 }
3023
/* Public entry point: runs __glusterd_handle_cli_profile_volume under
 * glusterd's big lock via the common locked-handler wrapper. */
int
glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req,
                                            __glusterd_handle_cli_profile_volume);
}
3030
3031 int
3032 __glusterd_handle_getwd (rpcsvc_request_t *req)
3033 {
3034         int32_t                 ret = -1;
3035         gf1_cli_getwd_rsp     rsp = {0,};
3036         glusterd_conf_t         *priv = NULL;
3037
3038         GF_ASSERT (req);
3039
3040         priv = THIS->private;
3041         GF_ASSERT (priv);
3042
3043         gf_msg ("glusterd", GF_LOG_INFO, 0,
3044                 GD_MSG_GETWD_REQ_RCVD, "Received getwd req");
3045
3046         rsp.wd = priv->workdir;
3047
3048         glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3049                                (xdrproc_t)xdr_gf1_cli_getwd_rsp);
3050         ret = 0;
3051
3052         glusterd_friend_sm ();
3053         glusterd_op_sm ();
3054
3055         return ret;
3056 }
3057
/* Public entry point: runs __glusterd_handle_getwd under glusterd's big
 * lock via the common locked-handler wrapper. */
int
glusterd_handle_getwd (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_getwd);
}
3063
/* Handle a mountbroker mount request from the CLI: decode the request,
 * unserialize its dict, and perform the mount via glusterd_do_mount with
 * the big lock temporarily released.  Always sends a reply. */
int
__glusterd_handle_mount (rpcsvc_request_t *req)
{
        gf1_cli_mount_req mnt_req = {0,};
        gf1_cli_mount_rsp rsp     = {0,};
        dict_t *dict              = NULL;
        int ret                   = 0;
        glusterd_conf_t     *priv   = NULL;

        GF_ASSERT (req);
        priv = THIS->private;

        ret = xdr_to_generic (req->msg[0], &mnt_req,
                              (xdrproc_t)xdr_gf1_cli_mount_req);
        if (ret < 0) {
                //failed to decode msg;
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode mount "
                        "request received");
                req->rpc_err = GARBAGE_ARGS;
                rsp.op_ret = -1;
                rsp.op_errno = EINVAL;
                goto out;
        }

        gf_msg ("glusterd", GF_LOG_INFO, 0,
                GD_MSG_MOUNT_REQ_RCVD,
                "Received mount req");

        if (mnt_req.dict.dict_len) {
                /* Unserialize the dictionary */
                dict  = dict_new ();

                ret = dict_unserialize (mnt_req.dict.dict_val,
                                        mnt_req.dict.dict_len,
                                        &dict);
                if (ret < 0) {
                        gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");
                        rsp.op_ret = -1;
                        rsp.op_errno = -EINVAL;
                        goto out;
                } else {
                        /* dict now owns the XDR-allocated buffer. */
                        dict->extra_stdfree = mnt_req.dict.dict_val;
                }
        }

        /* Drop the big lock while the mount runs, as it may take a while;
         * reacquire it before touching glusterd state again. */
        synclock_unlock (&priv->big_lock);
        rsp.op_ret = glusterd_do_mount (mnt_req.label, dict,
                                        &rsp.path, &rsp.op_errno);
        synclock_lock (&priv->big_lock);

 out:
        /* XDR cannot encode a NULL string; substitute a static "". */
        if (!rsp.path)
                rsp.path = "";

        glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                               (xdrproc_t)xdr_gf1_cli_mount_rsp);
        ret = 0;

        if (dict)
                dict_unref (dict);
        /* Free rsp.path only when it is a non-empty (i.e. allocated) string;
         * the static "" fallback above must not be freed. */
        if (*rsp.path)
                GF_FREE (rsp.path);

        glusterd_friend_sm ();
        glusterd_op_sm ();

        return ret;
}
3136
/* Public entry point: runs __glusterd_handle_mount under glusterd's big
 * lock via the common locked-handler wrapper. */
int
glusterd_handle_mount (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_mount);
}
3142
3143 int
3144 __glusterd_handle_umount (rpcsvc_request_t *req)
3145 {
3146         gf1_cli_umount_req umnt_req = {0,};
3147         gf1_cli_umount_rsp rsp      = {0,};
3148         char *mountbroker_root      = NULL;
3149         char mntp[PATH_MAX]         = {0,};
3150         char *path                  = NULL;
3151         runner_t runner             = {0,};
3152         int ret                     = 0;
3153         xlator_t *this              = THIS;
3154         gf_boolean_t dir_ok         = _gf_false;
3155         char *pdir                  = NULL;
3156         char *t                     = NULL;
3157         glusterd_conf_t     *priv   = NULL;
3158
3159         GF_ASSERT (req);
3160         GF_ASSERT (this);
3161         priv = this->private;
3162
3163         ret = xdr_to_generic (req->msg[0], &umnt_req,
3164                               (xdrproc_t)xdr_gf1_cli_umount_req);
3165         if (ret < 0) {
3166                 //failed to decode msg;
3167                 gf_msg (this->name, GF_LOG_ERROR, 0,
3168                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode umount"
3169                         "request");
3170                 req->rpc_err = GARBAGE_ARGS;
3171                 rsp.op_ret = -1;
3172                 goto out;
3173         }
3174
3175         gf_msg ("glusterd", GF_LOG_INFO, 0,
3176                 GD_MSG_UMOUNT_REQ_RCVD,
3177                 "Received umount req");
3178
3179         if (dict_get_str (this->options, "mountbroker-root",
3180                           &mountbroker_root) != 0) {
3181                 rsp.op_errno = ENOENT;
3182                 goto out;
3183         }
3184
3185         /* check if it is allowed to umount path */
3186         path = gf_strdup (umnt_req.path);
3187         if (!path) {
3188                 rsp.op_errno = ENOMEM;
3189                 goto out;
3190         }
3191         dir_ok = _gf_false;
3192         pdir = dirname (path);
3193         t = strtail (pdir, mountbroker_root);
3194         if (t && *t == '/') {
3195                 t = strtail(++t, MB_HIVE);
3196                 if (t && !*t)
3197                         dir_ok = _gf_true;
3198         }
3199         GF_FREE (path);
3200         if (!dir_ok) {
3201                 rsp.op_errno = EACCES;
3202                 goto out;
3203         }
3204
3205         synclock_unlock (&priv->big_lock);
3206
3207         if (umnt_req.lazy) {
3208                 rsp.op_ret = gf_umount_lazy (this->name, umnt_req.path, 0);
3209         } else {
3210                 runinit (&runner);
3211                 runner_add_args (&runner, _PATH_UMOUNT, umnt_req.path, NULL);
3212                 rsp.op_ret = runner_run (&runner);
3213         }
3214
3215         synclock_lock (&priv->big_lock);
3216         if (rsp.op_ret == 0) {
3217                 if (realpath (umnt_req.path, mntp))
3218                         rmdir (mntp);
3219                 else {
3220                         rsp.op_ret = -1;
3221                         rsp.op_errno = errno;
3222                 }
3223                 if (unlink (umnt_req.path) != 0) {
3224                         rsp.op_ret = -1;
3225                         rsp.op_errno = errno;
3226                 }
3227         }
3228
3229  out:
3230         if (rsp.op_errno)
3231                 rsp.op_ret = -1;
3232
3233         glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3234                                (xdrproc_t)xdr_gf1_cli_umount_rsp);
3235         ret = 0;
3236
3237         glusterd_friend_sm ();
3238         glusterd_op_sm ();
3239
3240         return ret;
3241 }
3242
/* Public entry point: runs __glusterd_handle_umount under glusterd's big
 * lock via the common locked-handler wrapper. */
int
glusterd_handle_umount (rpcsvc_request_t *req)
{
        return glusterd_big_locked_handler (req, __glusterd_handle_umount);
}
3248
/* Remove a peer identified by uuid and/or hostname: clean up volumes that
 * belonged to it, then destroy its peerinfo.  Returns -1 when the peer is
 * not found.  NOTE(review): peerinfo is dereferenced after the RCU read
 * section ends; the comment below documents why this is intentional. */
int
glusterd_friend_remove (uuid_t uuid, char *hostname)
{
        int                           ret = -1;
        glusterd_peerinfo_t           *peerinfo = NULL;

        rcu_read_lock ();

        peerinfo = glusterd_peerinfo_find (uuid, hostname);
        if (peerinfo == NULL) {
                rcu_read_unlock ();
                goto out;
        }

        /* Best-effort volume cleanup; a failure is logged but does not stop
         * the peer removal. */
        ret = glusterd_friend_remove_cleanup_vols (peerinfo->uuid);
        if (ret)
                gf_msg (THIS->name, GF_LOG_WARNING, 0,
                        GD_MSG_VOL_CLEANUP_FAIL, "Volumes cleanup failed");
        rcu_read_unlock ();
        /* Giving up the critical section here as glusterd_peerinfo_cleanup must
         * be called from outside a critical section
         */
        ret = glusterd_peerinfo_cleanup (peerinfo);
out:
        gf_msg_debug (THIS->name, 0, "returning %d", ret);
        return ret;
}
3276
3277 int
3278 glusterd_rpc_create (struct rpc_clnt **rpc,
3279                      dict_t *options,
3280                      rpc_clnt_notify_t notify_fn,
3281                      void *notify_data)
3282 {
3283         struct rpc_clnt         *new_rpc = NULL;
3284         int                     ret = -1;
3285         xlator_t                *this = NULL;
3286
3287         this = THIS;
3288         GF_ASSERT (this);
3289
3290         GF_ASSERT (options);
3291
3292         /* TODO: is 32 enough? or more ? */
3293         new_rpc = rpc_clnt_new (options, this, this->name, 16);
3294         if (!new_rpc)
3295                 goto out;
3296
3297         ret = rpc_clnt_register_notify (new_rpc, notify_fn, notify_data);
3298         *rpc = new_rpc;
3299         if (ret)
3300                 goto out;
3301         ret = rpc_clnt_start (new_rpc);
3302 out:
3303         if (ret) {
3304                 if (new_rpc) {
3305                         (void) rpc_clnt_unref (new_rpc);
3306                 }
3307         }
3308
3309         gf_msg_debug (this->name, 0, "returning %d", ret);
3310         return ret;
3311 }
3312
3313 int
3314 glusterd_transport_keepalive_options_get (int *interval, int *time,
3315                                           int *timeout)
3316 {
3317         int     ret = 0;
3318         xlator_t *this = NULL;
3319
3320         this = THIS;
3321         GF_ASSERT (this);
3322
3323         ret = dict_get_int32 (this->options,
3324                               "transport.socket.keepalive-interval",
3325                               interval);
3326         ret = dict_get_int32 (this->options,
3327                               "transport.socket.keepalive-time",
3328                               time);
3329         ret = dict_get_int32 (this->options,
3330                               "transport.tcp-user-timeout",
3331                               timeout);
3332         return 0;
3333 }
3334
3335 int
3336 glusterd_transport_inet_options_build (dict_t **options, const char *hostname,
3337                                        int port)
3338 {
3339         dict_t  *dict = NULL;
3340         int32_t interval = -1;
3341         int32_t time     = -1;
3342         int32_t timeout  = -1;
3343         int     ret = 0;
3344
3345         GF_ASSERT (options);
3346         GF_ASSERT (hostname);
3347
3348         if (!port)
3349                 port = GLUSTERD_DEFAULT_PORT;
3350
3351         /* Build default transport options */
3352         ret = rpc_transport_inet_options_build (&dict, hostname, port);
3353         if (ret)
3354                 goto out;
3355
3356         /* Set frame-timeout to 10mins. Default timeout of 30 mins is too long
3357          * when compared to 2 mins for cli timeout. This ensures users don't
3358          * wait too long after cli timesout before being able to resume normal
3359          * operations
3360          */
3361         ret = dict_set_int32 (dict, "frame-timeout", 600);
3362         if (ret) {
3363                 gf_msg ("glusterd", GF_LOG_ERROR, 0,
3364                         GD_MSG_DICT_SET_FAILED,
3365                         "Failed to set frame-timeout");
3366                 goto out;
3367         }
3368
3369         /* Set keepalive options */
3370         glusterd_transport_keepalive_options_get (&interval, &time, &timeout);
3371
3372         if ((interval > 0) || (time > 0))
3373                 ret = rpc_transport_keepalive_options_set (dict, interval,
3374                                                            time, timeout);
3375         *options = dict;
3376 out:
3377         gf_msg_debug ("glusterd", 0, "Returning %d", ret);
3378         return ret;
3379 }
3380
3381 int
3382 glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
3383                             glusterd_peerctx_args_t *args)
3384 {
3385         dict_t                   *options = NULL;
3386         int                       ret     = -1;
3387         glusterd_peerctx_t       *peerctx = NULL;
3388         data_t                   *data    = NULL;
3389
3390         peerctx = GF_CALLOC (1, sizeof (*peerctx), gf_gld_mt_peerctx_t);
3391         if (!peerctx)
3392                 goto out;
3393
3394         if (args)
3395                 peerctx->args = *args;
3396
3397         gf_uuid_copy (peerctx->peerid, peerinfo->uuid);
3398         peerctx->peername = gf_strdup (peerinfo->hostname);
3399         peerctx->peerinfo_gen = peerinfo->generation; /* A peerinfos generation
3400                                                          number can be used to
3401                                                          uniquely identify a
3402                                                          peerinfo */
3403
3404         ret = glusterd_transport_inet_options_build (&options,
3405                                                      peerinfo->hostname,
3406                                                      peerinfo->port);
3407         if (ret)
3408                 goto out;
3409
3410         /*
3411          * For simulated multi-node testing, we need to make sure that we
3412          * create our RPC endpoint with the same address that the peer would
3413          * use to reach us.
3414          */
3415         if (this->options) {
3416                 data = dict_get(this->options,"transport.socket.bind-address");
3417                 if (data) {
3418                         ret = dict_set(options,
3419                                        "transport.socket.source-addr",data);
3420                 }
3421                 data = dict_get(this->options,"ping-timeout");
3422                 if (data) {
3423                         ret = dict_set(options,
3424                                        "ping-timeout",data);
3425                 }
3426         }
3427
3428         /* Enable encryption for the client connection if management encryption
3429          * is enabled
3430          */
3431         if (this->ctx->secure_mgmt) {
3432                 ret = dict_set_str (options, "transport.socket.ssl-enabled",
3433                                     "on");
3434                 if (ret) {
3435                         gf_msg ("glusterd", GF_LOG_ERROR, 0,
3436                                 GD_MSG_DICT_SET_FAILED,
3437                                 "failed to set ssl-enabled in dict");
3438                         goto out;
3439                 }
3440         }
3441
3442         ret = glusterd_rpc_create (&peerinfo->rpc, options,
3443                                    glusterd_peer_rpc_notify, peerctx);
3444         if (ret) {
3445                 gf_msg (this->name, GF_LOG_ERROR, 0,
3446                         GD_MSG_RPC_CREATE_FAIL,
3447                         "failed to create rpc for"
3448                         " peer %s", peerinfo->hostname);
3449                 goto out;
3450         }
3451         peerctx = NULL;
3452         ret = 0;
3453 out:
3454         GF_FREE (peerctx);
3455         return ret;
3456 }
3457
/* Create a new peerinfo for @hoststr:@port in friend-sm state @state,
 * link it onto the cluster peer list and — unless @restore is set —
 * persist it and bring up its rpc connection.  On failure the peerinfo
 * is destroyed and *friend is reset to NULL.  @uuid may be NULL for a
 * not-yet-known peer.
 */
int
glusterd_friend_add (const char *hoststr, int port,
                     glusterd_friend_sm_state_t state,
                     uuid_t *uuid,
                     glusterd_peerinfo_t **friend,
                     gf_boolean_t restore,
                     glusterd_peerctx_args_t *args)
{
        int                     ret = 0;
        xlator_t               *this = NULL;
        glusterd_conf_t        *conf = NULL;

        this = THIS;
        conf = this->private;
        GF_ASSERT (conf);
        GF_ASSERT (hoststr);
        GF_ASSERT (friend);

        *friend = glusterd_peerinfo_new (state, uuid, hoststr, port);
        if (*friend == NULL) {
                ret = -1;
                goto out;
        }

        /*
         * We can't add to the list after calling glusterd_friend_rpc_create,
         * even if it succeeds, because by then the callback to take it back
         * off and free might have happened already (notably in the case of an
         * invalid peer name).  That would mean we're adding something that had
         * just been free, and we're likely to crash later.
         */
        cds_list_add_tail_rcu (&(*friend)->uuid_list, &conf->peers);

        //restore needs to first create the list of peers, then create rpcs
        //to keep track of quorum in race-free manner. In restore for each peer
        //rpc-create calls rpc_notify when the friend-list is partially
        //constructed, leading to wrong quorum calculations.
        if (!restore) {
                ret = glusterd_store_peerinfo (*friend);
                if (ret == 0) {
                        ret = glusterd_friend_rpc_create (this, *friend, args);
                }
                else {
                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_PEERINFO_CREATE_FAIL,
                                "Failed to store peerinfo");
                }
        }

        /* Any failure above undoes the whole add: the cleanup also takes
         * the peerinfo back off the RCU list. */
        if (ret) {
                (void) glusterd_peerinfo_cleanup (*friend);
                *friend = NULL;
        }

out:
        gf_msg (this->name, GF_LOG_INFO, 0,
                GD_MSG_CONNECT_RETURNED, "connect returned %d", ret);
        return ret;
}
3517
3518 /* glusterd_friend_add_from_peerinfo() adds a new peer into the local friends
3519  * list from a pre created @peerinfo object. It otherwise works similarly to
3520  * glusterd_friend_add()
3521  */
/* Link the pre-built @friend peerinfo onto the cluster peer list and —
 * unless @restore is set — persist it and bring up its rpc connection.
 * Unlike glusterd_friend_add(), failures here do NOT clean up the
 * peerinfo; the caller retains ownership.
 */
int
glusterd_friend_add_from_peerinfo (glusterd_peerinfo_t *friend,
                                   gf_boolean_t restore,
                                   glusterd_peerctx_args_t *args)
{
        int                     ret = 0;
        xlator_t               *this = NULL;
        glusterd_conf_t        *conf = NULL;

        this = THIS;
        conf = this->private;
        GF_ASSERT (conf);

        GF_VALIDATE_OR_GOTO (this->name, (friend != NULL), out);

        /*
         * We can't add to the list after calling glusterd_friend_rpc_create,
         * even if it succeeds, because by then the callback to take it back
         * off and free might have happened already (notably in the case of an
         * invalid peer name).  That would mean we're adding something that had
         * just been free, and we're likely to crash later.
         */
        cds_list_add_tail_rcu (&friend->uuid_list, &conf->peers);

        //restore needs to first create the list of peers, then create rpcs
        //to keep track of quorum in race-free manner. In restore for each peer
        //rpc-create calls rpc_notify when the friend-list is partially
        //constructed, leading to wrong quorum calculations.
        if (!restore) {
                ret = glusterd_store_peerinfo (friend);
                if (ret == 0) {
                        ret = glusterd_friend_rpc_create (this, friend, args);
                }
                else {
                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_PEERINFO_CREATE_FAIL,
                                "Failed to store peerinfo");
                }
        }

out:
        gf_msg (this->name, GF_LOG_INFO, 0,
                GD_MSG_CONNECT_RETURNED,
                "connect returned %d", ret);
        return ret;
}
3568
/* CLI "peer probe" entry point.  Three cases:
 *  - unknown host: add it as a new friend and start connecting; returns
 *    GLUSTERD_CONNECTION_AWAITED while the connection is pending.
 *  - already befriended and connected: treat the probe as a hostname
 *    update and inject a LOCAL_ACC event to propagate it.
 *  - otherwise: respond GF_PROBE_FRIEND (already in peer list).
 * The RCU read lock is held across the whole body because @peerinfo is
 * dereferenced throughout.
 */
int
glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
                      dict_t *dict, int *op_errno)
{
        int                             ret = -1;
        glusterd_peerinfo_t             *peerinfo = NULL;
        glusterd_peerctx_args_t         args = {0};
        glusterd_friend_sm_event_t      *event = NULL;

        GF_ASSERT (hoststr);

        rcu_read_lock ();
        peerinfo = glusterd_peerinfo_find (NULL, hoststr);

        if (peerinfo == NULL) {
                gf_msg ("glusterd", GF_LOG_INFO, 0,
                        GD_MSG_PEER_NOT_FOUND, "Unable to find peerinfo"
                        " for host: %s (%d)", hoststr, port);
                args.mode = GD_MODE_ON;
                args.req  = req;
                args.dict = dict;
                /* NOTE(review): glusterd_friend_add (store + rpc create)
                 * runs while the RCU read lock is held — presumably safe
                 * for this code base; confirm against liburcu usage rules. */
                ret = glusterd_friend_add (hoststr, port,
                                           GD_FRIEND_STATE_DEFAULT,
                                           NULL, &peerinfo, 0, &args);
                if ((!ret) && (!peerinfo->connected)) {
                        ret = GLUSTERD_CONNECTION_AWAITED;
                }

        } else if (peerinfo->connected &&
                   (GD_FRIEND_STATE_BEFRIENDED == peerinfo->state.state)) {
                /* A peer that is already on its way out cannot be re-probed. */
                if (peerinfo->detaching) {
                        ret = -1;
                        if (op_errno)
                                *op_errno = GF_PROBE_FRIEND_DETACHING;
                        goto out;
                }
                ret = glusterd_peer_hostname_update (peerinfo, hoststr,
                                                     _gf_false);
                if (ret)
                        goto out;
                //this is just to rename so inject local acc for cluster update
                ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_LOCAL_ACC,
                                                    &event);
                if (!ret) {
                        event->peername = gf_strdup (peerinfo->hostname);
                        gf_uuid_copy (event->peerid, peerinfo->uuid);

                        ret = glusterd_friend_sm_inject_event (event);
                        glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_SUCCESS,
                                                      NULL, (char*)hoststr,
                                                      port, dict);
                }
        } else {
                glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL,
                                              (char*)hoststr, port, dict);
        }

out:
        rcu_read_unlock ();
        gf_msg_debug ("glusterd", 0, "returning %d", ret);
        return ret;
}
3631
/* CLI "peer detach" entry point.  Looks up the peer, builds an
 * INIT_REMOVE_FRIEND event carrying a probe-ctx (hostname/port/req/dict)
 * and injects it into the friend state machine, then marks the peer as
 * detaching so concurrent probe/deprobe requests are rejected.
 * Returns 0 on success, -1 on failure (*op_errno set where applicable).
 */
int
glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
                        uuid_t uuid, dict_t *dict, int *op_errno)
{
        int                             ret = -1;
        glusterd_peerinfo_t             *peerinfo = NULL;
        glusterd_friend_sm_event_t      *event = NULL;
        glusterd_probe_ctx_t            *ctx = NULL;

        GF_ASSERT (hoststr);
        GF_ASSERT (req);

        rcu_read_lock ();

        peerinfo = glusterd_peerinfo_find (uuid, hoststr);
        if (peerinfo == NULL) {
                ret = -1;
                gf_msg ("glusterd", GF_LOG_INFO, 0,
                        GD_MSG_PEER_NOT_FOUND, "Unable to find peerinfo"
                        " for host: %s %d", hoststr, port);
                goto out;
        }

        /* No rpc object means the connection was never established;
         * bail out with the initial ret = -1. */
        if (!peerinfo->rpc) {
                //handle this case
                goto out;
        }

        if (peerinfo->detaching) {
                ret = -1;
                if (op_errno)
                        *op_errno = GF_DEPROBE_FRIEND_DETACHING;
                goto out;
        }

        ret = glusterd_friend_sm_new_event
                (GD_FRIEND_EVENT_INIT_REMOVE_FRIEND, &event);

        if (ret) {
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_NEW_GET_FAIL,
                        "Unable to get new event");
                goto out;
        }

        ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);

        /* NOTE(review): on this failure path (and on inject failure
         * below) 'event' appears to be leaked — confirm whether the
         * friend-sm owns events only after successful injection. */
        if (!ctx) {
                goto out;
        }

        ctx->hostname = gf_strdup (hoststr);
        ctx->port = port;
        ctx->req = req;
        ctx->dict = dict;

        event->ctx = ctx;

        event->peername = gf_strdup (hoststr);
        gf_uuid_copy (event->peerid, uuid);

        ret = glusterd_friend_sm_inject_event (event);

        if (ret) {
                gf_msg ("glusterd", GF_LOG_ERROR, 0,
                        GD_MSG_EVENT_INJECT_FAIL, "Unable to inject event %d, "
                        "ret = %d", event->event, ret);
                goto out;
        }
        /* From here on, concurrent probe/deprobe of this peer is refused. */
        peerinfo->detaching = _gf_true;

out:
        rcu_read_unlock ();
        return ret;
}
3707
3708
3709 int
3710 glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int port)
3711 {
3712         gd1_mgmt_friend_rsp  rsp = {{0}, };
3713         int32_t              ret = -1;
3714         xlator_t             *this = NULL;
3715         glusterd_conf_t      *conf = NULL;
3716
3717         GF_ASSERT (hostname);
3718
3719         rsp.op_ret = 0;
3720         this = THIS;
3721         GF_ASSERT (this);
3722
3723         conf = this->private;
3724
3725         gf_uuid_copy (rsp.uuid, MY_UUID);
3726         rsp.hostname = hostname;
3727         rsp.port = port;
3728         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3729                                      (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
3730
3731         gf_msg ("glusterd", GF_LOG_INFO, 0,
3732                 GD_MSG_RESPONSE_INFO,
3733                 "Responded to %s (%d), ret: %d", hostname, port, ret);
3734         return ret;
3735 }
3736
3737
3738 int
3739 glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *myhostname,
3740                                char *remote_hostname, int port, int32_t op_ret,
3741                                int32_t op_errno)
3742 {
3743         gd1_mgmt_friend_rsp  rsp = {{0}, };
3744         int32_t              ret = -1;
3745         xlator_t             *this = NULL;
3746         glusterd_conf_t      *conf = NULL;
3747
3748         GF_ASSERT (myhostname);
3749
3750         this = THIS;
3751         GF_ASSERT (this);
3752
3753         conf = this->private;
3754
3755         gf_uuid_copy (rsp.uuid, MY_UUID);
3756         rsp.op_ret = op_ret;
3757         rsp.op_errno = op_errno;
3758         rsp.hostname = gf_strdup (myhostname);
3759         rsp.port = port;
3760
3761         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3762                                      (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
3763
3764         gf_msg ("glusterd", GF_LOG_INFO, 0,
3765                 GD_MSG_RESPONSE_INFO,
3766                 "Responded to %s (%d), ret: %d", remote_hostname, port, ret);
3767         GF_FREE (rsp.hostname);
3768         return ret;
3769 }
3770
3771 static void
3772 set_probe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
3773                      size_t len, char *hostname, int port)
3774 {
3775         if ((op_errstr) && (strcmp (op_errstr, ""))) {
3776                 snprintf (errstr, len, "%s", op_errstr);
3777                 return;
3778         }
3779
3780         if (!op_ret) {
3781                 switch (op_errno) {
3782                         case GF_PROBE_LOCALHOST:
3783                                 snprintf (errstr, len, "Probe on localhost not "
3784                                           "needed");
3785                                 break;
3786
3787                         case GF_PROBE_FRIEND:
3788                                 snprintf (errstr, len, "Host %s port %d already"
3789                                           " in peer list", hostname, port);
3790                                 break;
3791
3792                         case GF_PROBE_FRIEND_DETACHING:
3793                                 snprintf (errstr, len, "Peer is already being "
3794                                           "detached from cluster.\n"
3795                                           "Check peer status by running "
3796                                           "gluster peer status");
3797                                 break;
3798                         default:
3799                                 if (op_errno != 0)
3800                                         snprintf (errstr, len, "Probe returned "
3801                                                   "with %s",
3802                                                   strerror (op_errno));
3803                                 break;
3804                 }
3805         } else {
3806                 switch (op_errno) {
3807                         case GF_PROBE_ANOTHER_CLUSTER:
3808                                 snprintf (errstr, len, "%s is already part of "
3809                                           "another cluster", hostname);
3810                                 break;
3811
3812                         case GF_PROBE_VOLUME_CONFLICT:
3813                                 snprintf (errstr, len, "Atleast one volume on "
3814                                           "%s conflicts with existing volumes "
3815                                           "in the cluster", hostname);
3816                                 break;
3817
3818                         case GF_PROBE_UNKNOWN_PEER:
3819                                 snprintf (errstr, len, "%s responded with "
3820                                           "'unknown peer' error, this could "
3821                                           "happen if %s doesn't have localhost "
3822                                           "in its peer database", hostname,
3823                                           hostname);
3824                                 break;
3825
3826                         case GF_PROBE_ADD_FAILED:
3827                                 snprintf (errstr, len, "Failed to add peer "
3828                                           "information on %s", hostname);
3829                                 break;
3830
3831                         case GF_PROBE_SAME_UUID:
3832                                 snprintf (errstr, len, "Peer uuid (host %s) is "
3833                                           "same as local uuid", hostname);
3834                                 break;
3835
3836                         case GF_PROBE_QUORUM_NOT_MET:
3837                                 snprintf (errstr, len, "Cluster quorum is not "
3838                                           "met. Changing peers is not allowed "
3839                                           "in this state");
3840                                 break;
3841
3842                         case GF_PROBE_MISSED_SNAP_CONFLICT:
3843                                 snprintf (errstr, len, "Failed to update "
3844                                           "list of missed snapshots from "
3845                                           "peer %s", hostname);
3846                                 break;
3847
3848                         case GF_PROBE_SNAP_CONFLICT:
3849                                 snprintf (errstr, len, "Conflict in comparing "
3850                                           "list of snapshots from "
3851                                           "peer %s", hostname);
3852                                 break;
3853
3854                         default:
3855                                 snprintf (errstr, len, "Probe returned with "
3856                                           "%s", strerror (op_errno));
3857                                 break;
3858                 }
3859         }
3860 }
3861
3862 int
3863 glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
3864                               int32_t op_errno, char *op_errstr, char *hostname,
3865                               int port, dict_t *dict)
3866 {
3867         gf_cli_rsp           rsp = {0,};
3868         int32_t              ret = -1;
3869         char        errstr[2048] = {0,};
3870         char            *cmd_str = NULL;
3871         xlator_t           *this = THIS;
3872
3873         GF_ASSERT (req);
3874         GF_ASSERT (this);
3875
3876         (void) set_probe_error_str (op_ret, op_errno, op_errstr, errstr,
3877                                     sizeof (errstr), hostname, port);
3878
3879         if (dict) {
3880                 ret = dict_get_str (dict, "cmd-str", &cmd_str);
3881                 if (ret)
3882                         gf_msg (this->name, GF_LOG_ERROR, 0,
3883                                 GD_MSG_CMDSTR_NOTFOUND_IN_DICT, "Failed to get "
3884                                 "command string");
3885         }
3886
3887         rsp.op_ret = op_ret;
3888         rsp.op_errno = op_errno;
3889         rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
3890
3891         gf_cmd_log ("", "%s : %s %s %s", cmd_str,
3892                     (op_ret) ? "FAILED" : "SUCCESS",
3893                     (errstr[0] != '\0') ? ":" : " ",
3894                     (errstr[0] != '\0') ? errstr : " ");
3895
3896         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3897                                      (xdrproc_t)xdr_gf_cli_rsp);
3898
3899         if (dict)
3900                 dict_unref (dict);
3901         gf_msg_debug (this->name, 0, "Responded to CLI, ret: %d", ret);
3902
3903         return ret;
3904 }
3905
3906 static void
3907 set_deprobe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
3908                        size_t len, char *hostname)
3909 {
3910         if ((op_errstr) && (strcmp (op_errstr, ""))) {
3911                 snprintf (errstr, len, "%s", op_errstr);
3912                 return;
3913         }
3914
3915         if (op_ret) {
3916                 switch (op_errno) {
3917                         case GF_DEPROBE_LOCALHOST:
3918                                 snprintf (errstr, len, "%s is localhost",
3919                                 hostname);
3920                                 break;
3921
3922                         case GF_DEPROBE_NOT_FRIEND:
3923                                 snprintf (errstr, len, "%s is not part of "
3924                                           "cluster", hostname);
3925                                 break;
3926
3927                         case GF_DEPROBE_BRICK_EXIST:
3928                                 snprintf (errstr, len, "Brick(s) with the peer "
3929                                           "%s exist in cluster", hostname);
3930                                 break;
3931
3932                         case GF_DEPROBE_FRIEND_DOWN:
3933                                 snprintf (errstr, len, "One of the peers is "
3934                                           "probably down. Check with "
3935                                           "'peer status'");
3936                                 break;
3937
3938                         case GF_DEPROBE_QUORUM_NOT_MET:
3939                                 snprintf (errstr, len, "Cluster quorum is not "
3940                                           "met. Changing peers is not allowed "
3941                                           "in this state");
3942                                 break;
3943
3944                         case GF_DEPROBE_FRIEND_DETACHING:
3945                                 snprintf (errstr, len, "Peer is already being "
3946                                           "detached from cluster.\n"
3947                                           "Check peer status by running "
3948                                           "gluster peer status");
3949                                 break;
3950                         default:
3951                                 snprintf (errstr, len, "Detach returned with "
3952                                           "%s", strerror (op_errno));
3953                                 break;
3954
3955                 }
3956         }
3957 }
3958
3959
3960 int
3961 glusterd_xfer_cli_deprobe_resp (rpcsvc_request_t *req, int32_t op_ret,
3962                                 int32_t op_errno, char *op_errstr,
3963                                 char *hostname, dict_t *dict)
3964 {
3965         gf_cli_rsp             rsp = {0,};
3966         int32_t                ret = -1;
3967         char              *cmd_str = NULL;
3968         char          errstr[2048] = {0,};
3969
3970         GF_ASSERT (req);
3971
3972         (void) set_deprobe_error_str (op_ret, op_errno, op_errstr, errstr,
3973                                       sizeof (errstr), hostname);
3974
3975         if (dict) {
3976                 ret = dict_get_str (dict, "cmd-str", &cmd_str);
3977                 if (ret)
3978                         gf_msg (THIS->name, GF_LOG_ERROR, 0,
3979                                 GD_MSG_CMDSTR_NOTFOUND_IN_DICT, "Failed to get "
3980                                 "command string");
3981         }
3982
3983         rsp.op_ret = op_ret;
3984         rsp.op_errno = op_errno;
3985         rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
3986
3987         gf_cmd_log ("", "%s : %s %s %s", cmd_str,
3988                     (op_ret) ? "FAILED" : "SUCCESS",
3989                     (errstr[0] != '\0') ? ":" : " ",
3990                     (errstr[0] != '\0') ? errstr : " ");
3991
3992         ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
3993                                      (xdrproc_t)xdr_gf_cli_rsp);
3994
3995         gf_msg_debug (THIS->name, 0, "Responded to CLI, ret: %d", ret);
3996
3997         return ret;
3998 }
3999
4000 int32_t
4001 glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
4002 {
4003         int32_t                 ret = -1;
4004         glusterd_conf_t         *priv = NULL;
4005         glusterd_peerinfo_t     *entry = NULL;
4006         int32_t                 count = 0;
4007         dict_t                  *friends = NULL;
4008         gf1_cli_peer_list_rsp   rsp = {0,};
4009         char                    my_uuid_str[64] = {0,};
4010         char                    key[256] = {0,};
4011
4012         priv = THIS->private;
4013         GF_ASSERT (priv);
4014
4015         friends = dict_new ();
4016         if (!friends) {
4017                 gf_msg (THIS->name, GF_LOG_ERROR, ENOMEM,
4018                         GD_MSG_NO_MEMORY, "Out of Memory");
4019                 goto out;
4020         }
4021
4022         /* Reset ret to 0, needed to prevent failure incase no peers exist */
4023         ret = 0;
4024         rcu_read_lock ();
4025         if (!cds_list_empty (&priv->peers)) {
4026                 cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
4027                         count++;
4028                         ret = gd_add_peer_detail_to_dict (entry,
4029                                                                 friends, count);
4030                         if (ret)
4031                                 goto unlock;
4032                 }
4033         }
4034 unlock:
4035         rcu_read_unlock ();
4036         if (ret)
4037                 goto out;
4038
4039         if (flags == GF_CLI_LIST_POOL_NODES) {
4040                 count++;
4041                 snprintf (key, 256, "friend%d.uuid", count);
4042                 uuid_utoa_r (MY_UUID, my_uuid_str);
4043                 ret = dict_set_str (friends, key, my_uuid_str);
4044                 if (ret)
4045                         goto out;
4046
4047                 snprintf (key, 256, "friend%d.hostname", count);
4048                 ret = dict_set_str (friends, key, "localhost");
4049                 if (ret)
4050                         goto out;
4051
4052                 snprintf (key, 256, "friend%d.connected", count);
4053                 ret = dict_set_int32 (friends, key, 1);
4054                 if (ret)
4055                         goto out;
4056         }
4057
4058         ret = dict_set_int32 (friends, "count", count);
4059         if (ret)
4060                 goto out;
4061
4062         ret = dict_allocate_and_serialize (friends, &rsp.friends.friends_val,
4063                                            &rsp.friends.friends_len);
4064
4065         if (ret)
4066                 goto out;
4067
4068         ret = 0;
4069 out:
4070
4071         if (friends)
4072                 dict_unref (friends);
4073
4074         rsp.op_ret = ret;
4075
4076         glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
4077                                (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
4078         ret = 0;
4079         GF_FREE (rsp.friends.friends_val);
4080
4081         return ret;
4082 }
4083
/* Build and send the CLI response for "gluster volume info".
 *
 * @req:   incoming RPC request to answer (a reply is always submitted,
 *         even on failure).
 * @dict:  request dictionary; may carry "volname" for the single/next
 *         volume queries.
 * @flags: one of GF_CLI_GET_VOLUME_ALL (list every volume),
 *         GF_CLI_GET_NEXT_VOLUME (iterate: return the volume after
 *         "volname", or the first one if no volname was sent), or
 *         GF_CLI_GET_VOLUME (return exactly the named volume).
 *
 * Returns 0 once a reply has been submitted; rsp.op_ret carries the
 * actual operation status back to the CLI.
 */
int32_t
glusterd_get_volumes (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
{
        int32_t                 ret = -1;
        glusterd_conf_t         *priv = NULL;
        glusterd_volinfo_t      *entry = NULL;
        int32_t                 count = 0;
        dict_t                  *volumes = NULL;
        gf_cli_rsp              rsp = {0,};
        char                    *volname = NULL;

        priv = THIS->private;
        GF_ASSERT (priv);

        volumes = dict_new ();
        if (!volumes) {
                gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
                        GD_MSG_NO_MEMORY, "Out of Memory");
                goto out;
        }

        /* No volumes configured: reply with count 0. */
        if (cds_list_empty (&priv->volumes)) {
                ret = 0;
                goto respond;
        }

        if (flags == GF_CLI_GET_VOLUME_ALL) {
                /* Serialize details of every volume into the reply dict,
                 * keyed by a 0-based running index. */
                cds_list_for_each_entry (entry, &priv->volumes, vol_list) {
                        ret = glusterd_add_volume_detail_to_dict (entry,
                                                        volumes, count);
                        if (ret)
                                goto respond;

                        count++;

                }

        } else if (flags == GF_CLI_GET_NEXT_VOLUME) {
                ret = dict_get_str (dict, "volname", &volname);

                if (ret) {
                        /* No volname in the request: start the iteration
                         * at the first volume on the list. */
                        if (priv->volumes.next) {
                                entry = cds_list_entry (priv->volumes.next,
                                                        typeof (*entry),
                                                        vol_list);
                        }
                } else {
                        /* Advance to the volume following the named one. */
                        ret = glusterd_volinfo_find (volname, &entry);
                        if (ret)
                                goto respond;
                        entry = cds_list_entry (entry->vol_list.next,
                                                typeof (*entry),
                                                vol_list);
                }

                /* Wrapped back to the list head sentinel: iteration is
                 * done, reply with count 0. */
                if (&entry->vol_list == &priv->volumes) {
                       goto respond;
                } else {
                        ret = glusterd_add_volume_detail_to_dict (entry,
                                                         volumes, count);
                        if (ret)
                                goto respond;

                        count++;
                }
        } else if (flags == GF_CLI_GET_VOLUME) {
                ret = dict_get_str (dict, "volname", &volname);
                if (ret)
                        goto respond;

                ret = glusterd_volinfo_find (volname, &entry);
                if (ret)
                        goto respond;

                ret = glusterd_add_volume_detail_to_dict (entry,
                                                 volumes, count);
                if (ret)
                        goto respond;

                count++;
        }

respond:
        /* Always tell the CLI how many volumes were packed; a partial
         * failure above still produces a well-formed reply. */
        ret = dict_set_int32 (volumes, "count", count);
        if (ret)
                goto out;
        ret = dict_allocate_and_serialize (volumes, &rsp.dict.dict_val,
                                           &rsp.dict.dict_len);

        if (ret)
                goto out;

        ret = 0;
out:
        rsp.op_ret = ret;

        rsp.op_errstr = "";
        glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
                               (xdrproc_t)xdr_gf_cli_rsp);
        ret = 0;

        if (volumes)
                dict_unref (volumes);

        GF_FREE (rsp.dict.dict_val);
        return ret;
}
4191
4192 int
4193 __glusterd_handle_status_volume (rpcsvc_request_t *req)
4194 {
4195         int32_t                         ret     = -1;
4196         uint32_t                        cmd     = 0;
4197         dict_t                         *dict    = NULL;
4198         char                           *volname = 0;
4199         gf_cli_req                      cli_req = {{0,}};
4200         glusterd_op_t                   cli_op  = GD_OP_STATUS_VOLUME;
4201         char                            err_str[2048] = {0,};
4202         xlator_t                       *this = NULL;
4203         glusterd_conf_t                *conf = NULL;
4204
4205         GF_ASSERT (req);
4206         this = THIS;
4207         GF_ASSERT (this);
4208         conf = this->private;
4209         GF_ASSERT (conf);
4210
4211         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4212         if (ret < 0) {
4213                 //failed to decode msg;
4214                 gf_msg (this->name, GF_LOG_ERROR, 0,
4215                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
4216                         "request received from cli");
4217                 req->rpc_err = GARBAGE_ARGS;
4218                 goto out;
4219         }
4220
4221         if (cli_req.dict.dict_len > 0) {
4222                 dict = dict_new();
4223                 if (!dict)
4224                         goto out;
4225                 ret = dict_unserialize (cli_req.dict.dict_val,
4226                                         cli_req.dict.dict_len, &dict);
4227                 if (ret < 0) {
4228                         gf_msg (this->name, GF_LOG_ERROR, 0,
4229                                 GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to "
4230                                 "unserialize buffer");
4231                         snprintf (err_str, sizeof (err_str), "Unable to decode "
4232                                   "the command");
4233                         goto out;
4234                 }
4235
4236         }
4237
4238         ret = dict_get_uint32 (dict, "cmd", &cmd);
4239         if (ret)
4240                 goto out;
4241
4242         if (!(cmd & GF_CLI_STATUS_ALL)) {
4243                 ret = dict_get_str (dict, "volname", &volname);
4244                 if (ret) {
4245                         snprintf (err_str, sizeof (err_str), "Unable to get "
4246                                   "volume name");
4247                         gf_msg (this->name, GF_LOG_ERROR, 0,
4248                                 GD_MSG_VOL_NOT_FOUND, "%s", err_str);
4249                         goto out;
4250                 }
4251                 gf_msg (this->name, GF_LOG_INFO, 0,
4252                         GD_MSG_STATUS_VOL_REQ_RCVD,
4253                         "Received status volume req for volume %s", volname);
4254
4255         }
4256         if ((cmd & GF_CLI_STATUS_QUOTAD) &&
4257             (conf->op_version == GD_OP_VERSION_MIN)) {
4258                 snprintf (err_str, sizeof (err_str), "The cluster is operating "
4259                           "at version 1. Getting the status of quotad is not "
4260                           "allowed in this state.");
4261                 ret = -1;
4262                 goto out;
4263         }
4264
4265         if ((cmd & GF_CLI_STATUS_SNAPD) &&
4266             (conf->op_version < GD_OP_VERSION_3_6_0)) {
4267                 snprintf (err_str, sizeof (err_str), "The cluster is operating "
4268                           "at a lesser version than %d. Getting the status of "
4269                           "snapd is not allowed in this state",
4270                           GD_OP_VERSION_3_6_0);
4271                 ret = -1;
4272                 goto out;
4273         }
4274
4275         if ((cmd & GF_CLI_STATUS_BITD) &&
4276             (conf->op_version < GD_OP_VERSION_3_7_0)) {
4277                 snprintf (err_str, sizeof (err_str), "The cluster is operating "
4278                           "at a lesser version than %d. Getting the status of "
4279                           "bitd is not allowed in this state",
4280                           GD_OP_VERSION_3_7_0);
4281                 ret = -1;
4282                 goto out;
4283         }
4284
4285         if ((cmd & GF_CLI_STATUS_SCRUB) &&
4286             (conf->op_version < GD_OP_VERSION_3_7_0)) {
4287                 snprintf (err_str, sizeof (err_str), "The cluster is operating "
4288                           "at a lesser version than %d. Getting the status of "
4289                           "scrub is not allowed in this state",
4290                           GD_OP_VERSION_3_7_0);
4291                 ret = -1;
4292                 goto out;
4293         }
4294
4295         ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
4296
4297 out:
4298
4299         if (ret) {
4300                 if (err_str[0] == '\0')
4301                         snprintf (err_str, sizeof (err_str),
4302                                   "Operation failed");
4303                 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
4304                                                      dict, err_str);
4305         }
4306         free (cli_req.dict.dict_val);
4307
4308         return ret;
4309 }
4310
4311 int
4312 glusterd_handle_status_volume (rpcsvc_request_t *req)
4313 {
4314         return glusterd_big_locked_handler (req,
4315                                             __glusterd_handle_status_volume);
4316 }
4317
4318 int
4319 __glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
4320 {
4321         int32_t                         ret = -1;
4322         gf_cli_req                      cli_req = {{0,}};
4323         glusterd_op_t                   cli_op = GD_OP_CLEARLOCKS_VOLUME;
4324         char                            *volname = NULL;
4325         dict_t                          *dict = NULL;
4326         char                            err_str[2048] = {0,};
4327         xlator_t                        *this = NULL;
4328
4329         GF_ASSERT (req);
4330         this = THIS;
4331         GF_ASSERT (this);
4332
4333         ret = -1;
4334         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4335         if (ret < 0) {
4336                 gf_msg (this->name, GF_LOG_ERROR, 0,
4337                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
4338                         "request received from cli");
4339                 req->rpc_err = GARBAGE_ARGS;
4340                 goto out;
4341         }
4342
4343         if (cli_req.dict.dict_len) {
4344                 dict  = dict_new ();
4345
4346                 ret = dict_unserialize (cli_req.dict.dict_val,
4347                                         cli_req.dict.dict_len,
4348                                         &dict);
4349                 if (ret < 0) {
4350                         gf_msg (this->name, GF_LOG_ERROR, 0,
4351                                 GD_MSG_DICT_UNSERIALIZE_FAIL,
4352                                 "failed to unserialize req-buffer to"
4353                                 " dictionary");
4354                         snprintf (err_str, sizeof (err_str), "unable to decode "
4355                                   "the command");
4356                         goto out;
4357                 }
4358
4359         } else {
4360                 ret = -1;
4361                 gf_msg (this->name, GF_LOG_ERROR, 0,
4362                         GD_MSG_CLI_REQ_EMPTY, "Empty cli request.");
4363                 goto out;
4364         }
4365
4366         ret = dict_get_str (dict, "volname", &volname);
4367         if (ret) {
4368                 snprintf (err_str, sizeof (err_str), "Unable to get volume "
4369                           "name");
4370                 gf_msg (this->name, GF_LOG_ERROR, 0,
4371                         GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
4372                 goto out;
4373         }
4374
4375         gf_msg (this->name, GF_LOG_INFO, 0,
4376                 GD_MSG_CLRCLK_VOL_REQ_RCVD, "Received clear-locks volume req "
4377                 "for volume %s", volname);
4378
4379         ret = glusterd_op_begin_synctask (req, GD_OP_CLEARLOCKS_VOLUME, dict);
4380
4381 out:
4382         if (ret) {
4383                 if (err_str[0] == '\0')
4384                         snprintf (err_str, sizeof (err_str),
4385                                   "Operation failed");
4386                 ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
4387                                                      dict, err_str);
4388         }
4389         free (cli_req.dict.dict_val);
4390
4391         return ret;
4392 }
4393
4394 int
4395 glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
4396 {
4397         return glusterd_big_locked_handler (req,
4398                                             __glusterd_handle_cli_clearlocks_volume);
4399 }
4400
4401 static int
4402 get_volinfo_from_brickid (char *brickid, glusterd_volinfo_t **volinfo)
4403 {
4404         int             ret         = -1;
4405         char           *volid_str  = NULL;
4406         char           *brick      = NULL;
4407         char           *brickid_dup = NULL;
4408         uuid_t          volid       = {0};
4409         xlator_t       *this        = NULL;
4410
4411         this = THIS;
4412         GF_ASSERT (this);
4413         GF_ASSERT (brickid);
4414
4415         brickid_dup = gf_strdup (brickid);
4416         if (!brickid_dup)
4417                 goto out;
4418
4419         volid_str = brickid_dup;
4420         brick = strchr (brickid_dup, ':');
4421         if (!brick) {
4422                 gf_msg (this->name, GF_LOG_ERROR, 0,
4423                         GD_MSG_BRICK_NOT_FOUND,
4424                         "Invalid brickid");
4425                 goto out;
4426         }
4427
4428         *brick = '\0';
4429         brick++;
4430         gf_uuid_parse (volid_str, volid);
4431         ret = glusterd_volinfo_find_by_volume_id (volid, volinfo);
4432         if (ret) {
4433                 /* Check if it is a snapshot volume */
4434                 ret = glusterd_snap_volinfo_find_by_volume_id (volid, volinfo);
4435                 if (ret) {
4436                         gf_msg (this->name, GF_LOG_WARNING, 0,
4437                                 GD_MSG_VOLINFO_GET_FAIL,
4438                                 "Failed to find volinfo");
4439                         goto out;
4440                 }
4441         }
4442
4443         ret = 0;
4444 out:
4445         GF_FREE (brickid_dup);
4446         return ret;
4447 }
4448
4449 static int
4450 __glusterd_handle_barrier (rpcsvc_request_t *req)
4451 {
4452         int          ret     = -1;
4453         xlator_t     *this   = NULL;
4454         gf_cli_req   cli_req = {{0,}};
4455         dict_t       *dict   = NULL;
4456         char *volname = NULL;
4457
4458         GF_ASSERT (req);
4459         this = THIS;
4460         GF_ASSERT(this);
4461
4462         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4463         if (ret < 0) {
4464                 gf_msg (this->name, GF_LOG_ERROR, 0,
4465                         GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
4466                         "request received from cli");
4467                 req->rpc_err = GARBAGE_ARGS;
4468                 goto out;
4469         }
4470
4471         if (!cli_req.dict.dict_len) {
4472                 ret = -1;
4473                 goto out;
4474         }
4475
4476         dict = dict_new();
4477         if (!dict) {
4478                 ret = -1;
4479                 goto out;
4480         }
4481         ret = dict_unserialize (cli_req.dict.dict_val, cli_req.dict.dict_len,
4482                                 &dict);
4483         if (ret < 0) {
4484                 gf_msg (this->name, GF_LOG_ERROR, 0,
4485                         GD_MSG_DICT_UNSERIALIZE_FAIL, "Failed to unserialize "
4486                         "request dictionary.");
4487                 goto out;
4488         }
4489
4490         ret = dict_get_str (dict, "volname", &volname);
4491         if (ret) {
4492                 gf_msg (this->name, GF_LOG_ERROR, 0,
4493                         GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
4494                         "Volname not present in "
4495                         "dict");
4496                 goto out;
4497         }
4498         gf_msg (this->name, GF_LOG_INFO, 0,
4499                 GD_MSG_BARRIER_VOL_REQ_RCVD,
4500                 "Received barrier volume request for "
4501                 "volume %s", volname);
4502
4503         ret = glusterd_op_begin_synctask (req, GD_OP_BARRIER, dict);
4504
4505 out:
4506         if (ret) {
4507                 ret = glusterd_op_send_cli_response (GD_OP_BARRIER, ret, 0, req,
4508                                                      dict, "Operation failed");
4509         }
4510         free (cli_req.dict.dict_val);
4511         return ret;
4512 }
4513
4514 int
4515 glusterd_handle_barrier (rpcsvc_request_t *req)
4516 {
4517         return glusterd_big_locked_handler (req, __glusterd_handle_barrier);
4518 }
4519
4520 int32_t
4521 glusterd_get_volume_opts (rpcsvc_request_t *req, dict_t *dict)
4522 {
4523         int32_t                   ret = -1;
4524         int32_t                   count = 1;
4525         int                       exists = 0;
4526         char                      *key = NULL;
4527         char                      *orig_key = NULL;
4528         char                      *key_fixed = NULL;
4529         char                      *volname = NULL;
4530         char                      err_str[2048] = {0,};
4531         char                      dict_key[50] = {0,};
4532         xlator_t                  *this = NULL;
4533         glusterd_conf_t           *priv = NULL;
4534         glusterd_volinfo_t        *volinfo = NULL;
4535         gf_cli_rsp                rsp = {0,};
4536         char                      op_version_buff[10] = {0,};
4537
4538         this = THIS;
4539         GF_ASSERT (this);
4540
4541         priv = this->private;
4542         GF_ASSERT (priv);
4543
4544         GF_ASSERT (req);
4545         GF_ASSERT (dict);
4546
4547         ret = dict_get_str (dict, "volname", &volname);
4548         if (ret) {
4549                 snprintf (err_str, sizeof (err_str), "Failed to get volume "
4550                           "name while handling get volume option command");
4551                 gf_msg (this->name, GF_LOG_ERROR, 0,
4552                         GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
4553                 goto out;
4554         }
4555
4556         ret = dict_get_str (dict, "key", &key);
4557         if (ret) {
4558                 snprintf (err_str, sizeof (err_str), "Failed to get key "
4559                           "while handling get volume option for %s", volname);
4560                 gf_msg (this->name, GF_LOG_ERROR, 0,
4561                         GD_MSG_DICT_GET_FAILED, "%s", err_str);
4562                 goto out;
4563         }
4564         gf_msg_debug (this->name, 0, "Received get volume opt request for "
4565                 "volume %s", volname);
4566
4567         ret = glusterd_volinfo_find (volname, &volinfo);
4568         if (ret) {
4569                 snprintf (err_str, sizeof(err_str),
4570                           FMTSTR_CHECK_VOL_EXISTS, volname);
4571                 gf_msg (this->name, GF_LOG_ERROR, 0,
4572                         GD_MSG_VOL_NOT_FOUND, FMTSTR_CHECK_VOL_EXISTS,
4573                         volname);
4574                 goto out;
4575         }
4576         if (strcmp(key, "all")) {
4577                 exists = glusterd_check_option_exists (key, &key_fixed);
4578                 if (!exists) {
4579                         snprintf (err_str, sizeof (err_str), "Option "
4580                                   "with name: %s does not exist", key);
4581                         gf_msg (this->name, GF_LOG_ERROR, EINVAL,
4582                                 GD_MSG_UNKNOWN_KEY, "%s",
4583                                 err_str);
4584                         if (key_fixed)
4585                                 snprintf (err_str + ret,
4586                                           sizeof (err_str) - ret,
4587                                           "Did you mean %s?",
4588                                           key_fixed);
4589                         ret = -1;
4590                         goto out;
4591                 }
4592                 if (key_fixed) {
4593                         orig_key = key;
4594                         key = key_fixed;
4595                 }
4596                 if (strcmp (key, "cluster.op-version") == 0) {
4597                         sprintf (dict_key, "key%d", count);
4598                         ret = dict_set_str(dict, dict_key, key);
4599                         if (ret) {
4600                                 gf_msg (this->name, GF_LOG_ERROR, 0,
4601                                         GD_MSG_DICT_SET_FAILED, "Failed to "
4602                                        "set %s in dictionary", key);
4603                                 goto out;
4604                         }
4605                         sprintf (dict_key, "value%d", count);
4606                         sprintf (op_version_buff, "%d", priv->op_version);
4607                         ret = dict_set_str (dict, dict_key, op_version_buff);
4608                         if (ret) {
4609                                 gf_msg (this->name, GF_LOG_ERROR, 0,
4610                                         GD_MSG_DICT_SET_FAILED, "Failed to "
4611                                         "set value for key %s in dictionary",
4612                                         key);
4613                                 goto out;
4614                         }
4615                 }
4616                 else if (strcmp (key, "config.memory-accounting") == 0) {
4617                         sprintf (dict_key, "key%d", count);
4618                         ret = dict_set_str(dict, dict_key, key);
4619                         if (ret) {
4620                                 gf_msg (this->name, GF_LOG_ERROR, 0,
4621                                         GD_MSG_DICT_SET_FAILED, "Failed to "
4622                                        "set %s in dictionary", key);
4623                                 goto out;
4624                         }
4625                         sprintf (dict_key, "value%d", count);
4626
4627                         if (volinfo->memory_accounting)
4628                                 ret = dict_set_str(dict, dict_key,"Enabled");
4629                         else
4630                                 ret = dict_set_str(dict, dict_key,"Disabled");
4631                         if (ret) {
4632                                 gf_msg (this->name, GF_LOG_ERROR, 0,
4633                                         GD_MSG_DICT_SET_FAILED, "Failed to "
4634                                         "set value for key %s in dictionary",
4635                                         key);
4636                                 goto out;
4637                         }
4638                 }
4639                 else if (strcmp (key, "config.transport") == 0) {
4640                         sprintf (dict_key, "key%d", count);
4641                         ret = dict_set_str(dict, dict_key, key);
4642                         if (ret) {
4643                                 gf_msg (this->name, GF_LOG_ERROR, 0,
4644                                         GD_MSG_DICT_SET_FAILED, "Failed to "
4645                                         "set %s in dictionary", key);
4646                                 goto out;
4647                         }
4648                         sprintf (dict_key, "value%d", count);
4649
4650                         if (volinfo->transport_type == GF_TRANSPORT_RDMA)
4651                                 ret = dict_set_str(dict, dict_key,"rdma");
4652                         else if (volinfo->transport_type == GF_TRANSPORT_TCP)
4653                                 ret = dict_set_str(dict, dict_key,"tcp");
4654                         else if (volinfo->transport_type ==
4655                                  GF_TRANSPORT_BOTH_TCP_RDMA)
4656                                 ret = dict_set_str(dict, dict_key,"tcp,rdma");
4657                         else
4658                                 ret = dict_set_str(dict, dict_key,"none");
4659
4660                         if (ret) {
4661                                 gf_msg (this->name, GF_LOG_ERROR, 0,
4662                                         GD_MSG_DICT_SET_FAILED, "Failed to "
4663                                         "set value for key %s in dictionary",
4664                                         key);
4665                                 goto out;
4666                         }
4667                 }
4668                 else {
4669                         ret = glusterd_get_default_val_for_volopt (dict,
4670                                                                   _gf_false,
4671                                                                   key, orig_key,
4672                                                                   volinfo->dict,
4673                                                                   &rsp.op_errstr);
4674                         if (ret && !rsp.op_errstr) {
4675                                 snprintf (err_str, sizeof(err_str),
4676                                           "Failed to fetch the value of"
4677                                           " %s, check log file for more"
4678                                           " details", key);
4679                         }
4680                 }
4681
4682                 /* Request is for a single option, explicitly set count to 1
4683                  * in the dictionary.
4684                  */
4685                 ret = dict_set_int32 (dict, "count", 1);
4686                 if (ret) {
4687                         gf_msg (this->name, GF_LOG_ERROR, errno,
4688                                 GD_MSG_DICT_SET_FAILED, "Failed to set count "
4689                                 "value in the dictionary");
4690                         goto out;
4691                 }
4692         } else {
4693                 /* Handle the "all" volume option request */
4694                 ret = glusterd_get_default_val_for_volopt (dict, _gf_true, NULL,
4695                                                            NULL, volinfo->dict,
4696                                                            &rsp.op_errstr);
4697                 if (ret && !rsp.op_errstr) {
4698                         snprintf (err_str, sizeof(err_str),
4699                                   "Failed to fetch the value of all volume "
4700                                   "options, check log file for more details");
4701                 }
4702
4703         }
4704
4705 out:
4706         if (ret) {
4707                 if (!rsp.op_errstr)
4708                         rsp.op_errstr = err_str;
4709                 rsp.op_ret =  ret;
4710         }
4711         else {
4712                 rsp.op_errstr = "";
4713                 rsp.op_ret = 0;
4714         }
4715
4716         ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
4717                                            &rsp.dict.dict_len);
4718
4719         glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
4720                                (xdrproc_t)xdr_gf_cli_rsp);
4721         return ret;
4722 }
4723
4724 int
4725 __glusterd_handle_get_vol_opt (rpcsvc_request_t *req)
4726 {
4727         int32_t                         ret = -1;
4728         gf_cli_req                      cli_req = {{0,}};
4729         dict_t                          *dict = NULL;
4730         char                            err_str[2048] = {0,};
4731         xlator_t                        *this = NULL;
4732
4733         this = THIS;
4734         GF_ASSERT (this);
4735
4736         GF_ASSERT (req);
4737
4738         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4739         if (ret < 0) {
4740                 snprintf (err_str, sizeof (err_str), "Failed to decode "
4741                           "request received from cli");
4742                 gf_msg (this->name, GF_LOG_ERROR, 0,
4743                         GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
4744                 req->rpc_err = GARBAGE_ARGS;
4745                 goto out;
4746         }
4747
4748         if (cli_req.dict.dict_len) {
4749                 /* Unserialize the dictionary */
4750                 dict  = dict_new ();
4751
4752                 ret = dict_unserialize (cli_req.dict.dict_val,
4753                                         cli_req.dict.dict_len,
4754                                         &dict);
4755                 if (ret < 0) {
4756                         gf_msg (this->name, GF_LOG_ERROR, 0,
4757                                 GD_MSG_DICT_UNSERIALIZE_FAIL,
4758                                 "failed to "
4759                                 "unserialize req-buffer to dictionary");
4760                         snprintf (err_str, sizeof (err_str), "Unable to decode "
4761                                   "the command");
4762                         goto out;
4763                 } else {
4764                         dict->extra_stdfree = cli_req.dict.dict_val;
4765                 }
4766         }
4767         ret = glusterd_get_volume_opts (req, dict);
4768
4769 out:
4770         if (dict)
4771                 dict_unref (dict);
4772
4773         return ret;
4774 }
4775
4776 int
4777 glusterd_handle_get_vol_opt (rpcsvc_request_t *req)
4778 {
4779         return glusterd_big_locked_handler (req, __glusterd_handle_get_vol_opt);
4780 }
4781 static int
4782 get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
4783 {
4784         glusterd_volinfo_t      *volinfo    = NULL;
4785         char                    *volid_str  = NULL;
4786         char                    *brick      = NULL;
4787         char                    *brickid_dup = NULL;
4788         uuid_t                  volid       = {0};
4789         int                     ret         = -1;
4790
4791         brickid_dup = gf_strdup (brickid);
4792         if (!brickid_dup)
4793                 goto out;
4794
4795         volid_str = brickid_dup;
4796         brick = strchr (brickid_dup, ':');
4797         if (!volid_str || !brick)
4798                 goto out;
4799
4800         *brick = '\0';
4801         brick++;
4802         gf_uuid_parse (volid_str, volid);
4803         ret = glusterd_volinfo_find_by_volume_id (volid, &volinfo);
4804         if (ret) {
4805                 /* Check if it a snapshot volume */
4806                 ret = glusterd_snap_volinfo_find_by_volume_id (volid, &volinfo);
4807                 if (ret)
4808                         goto out;
4809         }
4810
4811         ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
4812                                                       brickinfo);
4813         if (ret)
4814                 goto out;
4815
4816         ret = 0;
4817 out:
4818         GF_FREE (brickid_dup);
4819         return ret;
4820 }
4821
/* RPC notification callback for a brick's management connection, invoked
 * (via glusterd_brick_rpc_notify) with the glusterd big lock held.
 *
 * mydata is a heap-allocated brickid string ("<volume-uuid>:<brick-path>")
 * owned by this callback; it is released on RPC_CLNT_DESTROY.
 *
 * Returns 0 on the early-exit paths; otherwise the result of the handled
 * event (notably default_notify on CONNECT).
 */
int
__glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                             rpc_clnt_event_t event, void *data)
{
        char                    *brickid           = NULL;
        int                      ret               = 0;
        glusterd_conf_t         *conf              = NULL;
        glusterd_brickinfo_t    *brickinfo         = NULL;
        glusterd_volinfo_t      *volinfo           = NULL;
        xlator_t                *this              = NULL;

        brickid = mydata;
        if (!brickid)
                return 0;

        /* If the brick cannot be resolved (e.g. volume deleted meanwhile)
         * the notification is silently dropped. */
        ret = get_brickinfo_from_brickid (brickid, &brickinfo);
        if (ret)
                return 0;

        this = THIS;
        GF_ASSERT (this);
        conf = this->private;
        GF_ASSERT (conf);

        switch (event) {
        case RPC_CLNT_CONNECT:
                /* If a node on coming back up, already starts a brick
                 * before the handshake, and the notification comes after
                 * the handshake is done, then we need to check if this
                 * is a restored brick with a snapshot pending. If so, we
                 * need to stop the brick
                 */
                if (brickinfo->snap_status == -1) {
                        gf_msg (this->name, GF_LOG_INFO, 0,
                                GD_MSG_SNAPSHOT_PENDING,
                                "Snapshot is pending on %s:%s. "
                                "Hence not starting the brick",
                                brickinfo->hostname,
                                brickinfo->path);
                        ret = get_volinfo_from_brickid (brickid, &volinfo);
                        if (ret) {
                                gf_msg (this->name, GF_LOG_ERROR, 0,
                                        GD_MSG_VOLINFO_GET_FAIL,
                                        "Failed to get volinfo from "
                                        "brickid(%s)", brickid);
                                goto out;
                        }

                        /* _gf_false: do not delete the brick dir/store */
                        ret = glusterd_brick_stop (volinfo, brickinfo,
                                                   _gf_false);
                        if (ret) {
                                gf_msg (THIS->name, GF_LOG_ERROR, 0,
                                        GD_MSG_BRICK_STOP_FAIL,
                                        "Unable to stop %s:%s",
                                        brickinfo->hostname, brickinfo->path);
                                goto out;
                        }

                        break;
                }
                gf_msg_debug (this->name, 0, "Connected to %s:%s",
                        brickinfo->hostname, brickinfo->path);
                glusterd_set_brick_status (brickinfo, GF_BRICK_STARTED);
                /* Propagate CHILD_UP so upper layers learn a brick came up */
                ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);

                break;

        case RPC_CLNT_DISCONNECT:
                /* Only log the disconnect if the brick was considered
                 * started; either way mark it stopped. */
                if (glusterd_is_brick_started (brickinfo))
                        gf_msg (this->name, GF_LOG_INFO, 0,
                                GD_MSG_BRICK_DISCONNECTED,
                                "Brick %s:%s has disconnected from glusterd.",
                                brickinfo->hostname, brickinfo->path);

                glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED);
                break;

        case RPC_CLNT_DESTROY:
                /* Connection is going away for good: release the brickid
                 * string that was handed to us as mydata. */
                GF_FREE (mydata);
                mydata = NULL;
                break;
        default:
                gf_msg_trace (this->name, 0,
                        "got some other RPC event %d", event);
                break;
        }

out:
        return ret;
}
4912
4913 int
4914 glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
4915                            rpc_clnt_event_t event, void *data)
4916 {
4917         return glusterd_big_locked_notify (rpc, mydata, event, data,
4918                                            __glusterd_brick_rpc_notify);
4919 }
4920
4921 int
4922 glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx, int32_t op_errno)
4923 {
4924         int                             ret = -1;
4925         glusterd_friend_sm_event_t      *new_event = NULL;
4926         glusterd_peerinfo_t             *peerinfo = NULL;
4927         rpcsvc_request_t                *req = NULL;
4928         char                            *errstr = NULL;
4929         dict_t                          *dict = NULL;
4930
4931         GF_ASSERT (peerctx);
4932
4933         rcu_read_lock ();
4934         peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
4935         if (!peerinfo) {
4936                 gf_msg_debug (THIS->name, 0, "Could not find peer %s(%s). "
4937                         "Peer could have been deleted.", peerctx->peername,
4938                         uuid_utoa (peerctx->peerid));
4939                 ret = 0;
4940                 goto out;
4941         }
4942
4943         req = peerctx->args.req;
4944         dict = peerctx->args.dict;
4945         errstr = peerctx->errstr;
4946
4947         ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_REMOVE_FRIEND,
4948                                             &new_event);
4949         if (!ret) {
4950                 if (!req) {
4951                         gf_msg (THIS->name, GF_LOG_WARNING, 0,
4952                                 GD_MSG_EVENT_NEW_GET_FAIL,
4953                                 "Unable to find the request for responding "
4954                                 "to User (%s)", peerinfo->hostname);
4955                         goto out;
4956                 }
4957
4958                 glusterd_xfer_cli_probe_resp (req, -1, op_errno, errstr,
4959                                               peerinfo->hostname,
4960                                               peerinfo->port, dict);
4961
4962                 new_event->peername = gf_strdup (peerinfo->hostname);
4963                 gf_uuid_copy (new_event->peerid, peerinfo->uuid);
4964                 ret = glusterd_friend_sm_inject_event (new_event);
4965
4966         } else {
4967                 gf_msg ("glusterd", GF_LOG_ERROR, 0,
4968                         GD_MSG_EVENT_INJECT_FAIL,
4969                         "Unable to create event for removing peer %s",
4970                         peerinfo->hostname);
4971         }
4972
4973 out:
4974         rcu_read_unlock ();
4975         return ret;
4976 }
4977
/* RPC notification callback for a peer's management connection, invoked
 * (via glusterd_peer_rpc_notify) with the glusterd big lock held.
 *
 * mydata is a glusterd_peerctx_t owned by this connection; it is freed
 * on RPC_CLNT_DESTROY.  On CONNECT the peer's generation is bumped and a
 * handshake (dump-version) is initiated; on DISCONNECT any locks held by
 * the peer are released and quorum contribution is recomputed.
 */
int
__glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                            rpc_clnt_event_t event, void *data)
{
        xlator_t             *this        = NULL;
        glusterd_conf_t      *conf        = NULL;
        int                   ret         = 0;
        int32_t               op_errno    = ENOTCONN;
        glusterd_peerinfo_t  *peerinfo    = NULL;
        glusterd_peerctx_t   *peerctx     = NULL;
        gf_boolean_t         quorum_action = _gf_false;
        glusterd_volinfo_t   *volinfo     = NULL;
        uuid_t               uuid;

        peerctx = mydata;
        if (!peerctx)
                return 0;

        this = THIS;
        conf = this->private;

        /* DESTROY is handled before taking the RCU read lock: the peerctx
         * itself (not the peerinfo) is being torn down here. */
        if (RPC_CLNT_DESTROY == event) {
                GF_FREE (peerctx->errstr);
                GF_FREE (peerctx->peername);
                GF_FREE (peerctx);
                return 0;
        }

        rcu_read_lock ();

        peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
        if (!peerinfo) {
                /* Peerinfo should be available at this point if its a connect
                 * event. Not finding it means that something terrible has
                 * happened. For non-connect event we might end up having a null
                 * peerinfo, so log at debug level.
                 */
                gf_msg (THIS->name, (RPC_CLNT_CONNECT == event) ?
                        GF_LOG_CRITICAL : GF_LOG_DEBUG, ENOENT,
                        GD_MSG_PEER_NOT_FOUND, "Could not find peer "
                        "%s(%s)", peerctx->peername,
                        uuid_utoa (peerctx->peerid));

                ret = -1;
                goto out;
        }

        switch (event) {
        case RPC_CLNT_CONNECT:
        {
                rpc_clnt_set_connected (&rpc->conn);
                gf_msg_debug (this->name, 0, "got RPC_CLNT_CONNECT");
                peerinfo->connected = 1;
                peerinfo->quorum_action = _gf_true;
                /* New generation marks this (re)connection; peerctx
                 * remembers it so later events find the same peerinfo. */
                peerinfo->generation = uatomic_add_return
                                                   (&conf->generation, 1);
                peerctx->peerinfo_gen = peerinfo->generation;

                ret = glusterd_peer_dump_version (this, rpc, peerctx);
                if (ret)
                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_HANDSHAKE_FAILED,
                                "glusterd handshake failed");
                break;
        }

        case RPC_CLNT_DISCONNECT:
        {
                rpc_clnt_unset_connected (&rpc->conn);
                gf_msg (this->name, GF_LOG_INFO, 0,
                        GD_MSG_PEER_DISCONNECTED,
                        "Peer <%s> (<%s>), in state <%s>, has disconnected "
                        "from glusterd.",
                        peerinfo->hostname, uuid_utoa (peerinfo->uuid),
                        glusterd_friend_sm_state_name_get (peerinfo->state.state));

                if (peerinfo->connected) {
                        /* Release any cluster-wide/volume locks still held
                         * by the departed peer.  Pre-3.6 clusters use the
                         * single global lock; newer ones use per-volume
                         * mgmt_v3 locks. */
                        if (conf->op_version < GD_OP_VERSION_3_6_0) {
                                glusterd_get_lock_owner (&uuid);
                                if (!gf_uuid_is_null (uuid) &&
                                    !gf_uuid_compare (peerinfo->uuid, uuid))
                                        glusterd_unlock (peerinfo->uuid);
                        } else {
                                cds_list_for_each_entry (volinfo,
                                                         &conf->volumes,
                                                         vol_list) {
                                        ret = glusterd_mgmt_v3_unlock
                                                    (volinfo->volname,
                                                     peerinfo->uuid,
                                                     "vol");
                                        if (ret)
                                                gf_msg (this->name,
                                                        GF_LOG_WARNING, 0,
                                                        GD_MSG_MGMTV3_UNLOCK_FAIL,
                                                        "Lock not released "
                                                        "for %s",
                                                         volinfo->volname);
                                }
                        }

                        op_errno = GF_PROBE_ANOTHER_CLUSTER;
                        ret = 0;
                }

                /* A befriended peer going down may change quorum. */
                if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
                    (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
                        peerinfo->quorum_contrib = QUORUM_DOWN;
                        quorum_action = _gf_true;
                        peerinfo->quorum_action = _gf_false;
                }

                /* Remove peer if it is not a friend and connection/handshake
                *  fails, and notify cli. Happens only during probe.
                */
                if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
                        glusterd_friend_remove_notify (peerctx, op_errno);
                        goto out;
                }

                peerinfo->connected = 0;
                break;
        }

        default:
                gf_msg_trace (this->name, 0,
                        "got some other RPC event %d", event);
                ret = 0;
                break;
        }

out:
        rcu_read_unlock ();

        /* Drive the friend and op state machines outside the RCU section;
         * run quorum actions if a contributing peer changed state. */
        glusterd_friend_sm ();
        glusterd_op_sm ();
        if (quorum_action)
                glusterd_do_quorum_action ();
        return ret;
}
5117
5118 int
5119 glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
5120                           rpc_clnt_event_t event, void *data)
5121 {
5122         return glusterd_big_locked_notify (rpc, mydata, event, data,
5123                                            __glusterd_peer_rpc_notify);
5124 }
5125
5126 int
5127 glusterd_null (rpcsvc_request_t *req)
5128 {
5129
5130         return 0;
5131 }
5132
/* Actor table for the glusterd management RPC program (peer <-> peer
 * cluster lock / stage / commit operations).  Indexed by procedure
 * number; fields are {name, procnum, handler, vector-handler, min-auth,
 * DRC type}. */
rpcsvc_actor_t gd_svc_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
        [GLUSTERD_MGMT_NULL]           = { "NULL",           GLUSTERD_MGMT_NULL,           glusterd_null,                  NULL, 0, DRC_NA},
        [GLUSTERD_MGMT_CLUSTER_LOCK]   = { "CLUSTER_LOCK",   GLUSTERD_MGMT_CLUSTER_LOCK,   glusterd_handle_cluster_lock,   NULL, 0, DRC_NA},
        [GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, 0, DRC_NA},
        [GLUSTERD_MGMT_STAGE_OP]       = { "STAGE_OP",       GLUSTERD_MGMT_STAGE_OP,       glusterd_handle_stage_op,       NULL, 0, DRC_NA},
        [GLUSTERD_MGMT_COMMIT_OP]      = { "COMMIT_OP",      GLUSTERD_MGMT_COMMIT_OP,      glusterd_handle_commit_op,      NULL, 0, DRC_NA},
};
5140
/* RPC program descriptor for the mgmt actors above; handlers run in a
 * synctask (synctask = _gf_true). */
struct rpcsvc_program gd_svc_mgmt_prog = {
        .progname  = "GlusterD svc mgmt",
        .prognum   = GD_MGMT_PROGRAM,
        .progver   = GD_MGMT_VERSION,
        .numactors = GLUSTERD_MGMT_MAXVALUE,
        .actors    = gd_svc_mgmt_actors,
        .synctask  = _gf_true,
};
5149
5150 rpcsvc_actor_t gd_svc_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
5151         [GLUSTERD_FRIEND_NULL]    = { "NULL",          GLUSTERD_MGMT_NULL,     glusterd_null,                         NULL, 0, DRC_NA},
5152         [GLUSTERD_PROBE_QUERY]    = { "PROBE_QUERY",   GLUSTERD_PROBE_QUERY,   glusterd_handle_probe_query,           NULL, 0, DRC_NA},
5153         [GLUSTERD_FRIEND_ADD]     = { "FRIEND_ADD",    GLUSTERD_FRIEND_ADD,    glusterd_handle_incoming_friend_req,   NULL, 0, DRC_NA},
5154         [GLUSTERD_FRIEND_REMOVE]  = { "FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, 0, DRC_NA},
5155         [GLUSTERD_FRIEND_UPDATE]  = { "FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE, glusterd_handle_friend_update,         NULL, 0, DRC_NA},
5156 };
5157
/* RPC program descriptor for the peer actors above; handlers run
 * directly, not in a synctask (synctask = _gf_false). */
struct rpcsvc_program gd_svc_peer_prog = {
        .progname  = "GlusterD svc peer",
        .prognum   = GD_FRIEND_PROGRAM,
        .progver   = GD_FRIEND_VERSION,
        .numactors = GLUSTERD_FRIEND_MAXVALUE,
        .actors    = gd_svc_peer_actors,
        .synctask  = _gf_false,
};
5166
5167
5168
/* Actor table for the (privileged) CLI RPC program.  Indexed by
 * procedure number.  The min-auth field (second-to-last) is 1 for
 * GETWD/MOUNT/UMOUNT, which are reachable by unprivileged clients.
 * NOTE(review): DEPROBE's display name is "FRIEND_REMOVE" rather than
 * "CLI_DEPROBE" — the string is runtime-visible in logs, so it is left
 * as-is here. */
rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
        [GLUSTER_CLI_PROBE]              = { "CLI_PROBE",         GLUSTER_CLI_PROBE,            glusterd_handle_cli_probe,             NULL, 0, DRC_NA},
        [GLUSTER_CLI_CREATE_VOLUME]      = { "CLI_CREATE_VOLUME", GLUSTER_CLI_CREATE_VOLUME,    glusterd_handle_create_volume,         NULL, 0, DRC_NA},
        [GLUSTER_CLI_DEFRAG_VOLUME]      = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME,    glusterd_handle_defrag_volume,         NULL, 0, DRC_NA},
        [GLUSTER_CLI_DEPROBE]            = { "FRIEND_REMOVE",     GLUSTER_CLI_DEPROBE,          glusterd_handle_cli_deprobe,           NULL, 0, DRC_NA},
        [GLUSTER_CLI_LIST_FRIENDS]       = { "LIST_FRIENDS",      GLUSTER_CLI_LIST_FRIENDS,     glusterd_handle_cli_list_friends,      NULL, 0, DRC_NA},
        [GLUSTER_CLI_UUID_RESET]         = { "UUID_RESET",        GLUSTER_CLI_UUID_RESET,       glusterd_handle_cli_uuid_reset,        NULL, 0, DRC_NA},
        [GLUSTER_CLI_UUID_GET]           = { "UUID_GET",          GLUSTER_CLI_UUID_GET,         glusterd_handle_cli_uuid_get,          NULL, 0, DRC_NA},
        [GLUSTER_CLI_START_VOLUME]       = { "START_VOLUME",      GLUSTER_CLI_START_VOLUME,     glusterd_handle_cli_start_volume,      NULL, 0, DRC_NA},
        [GLUSTER_CLI_STOP_VOLUME]        = { "STOP_VOLUME",       GLUSTER_CLI_STOP_VOLUME,      glusterd_handle_cli_stop_volume,       NULL, 0, DRC_NA},
        [GLUSTER_CLI_DELETE_VOLUME]      = { "DELETE_VOLUME",     GLUSTER_CLI_DELETE_VOLUME,    glusterd_handle_cli_delete_volume,     NULL, 0, DRC_NA},
        [GLUSTER_CLI_GET_VOLUME]         = { "GET_VOLUME",        GLUSTER_CLI_GET_VOLUME,       glusterd_handle_cli_get_volume,        NULL, 0, DRC_NA},
        [GLUSTER_CLI_ADD_BRICK]          = { "ADD_BRICK",         GLUSTER_CLI_ADD_BRICK,        glusterd_handle_add_brick,             NULL, 0, DRC_NA},
        [GLUSTER_CLI_ATTACH_TIER]        = { "ATTACH_TIER",       GLUSTER_CLI_ATTACH_TIER,      glusterd_handle_attach_tier,           NULL, 0, DRC_NA},
        [GLUSTER_CLI_DETACH_TIER]        = { "DETACH_TIER",       GLUSTER_CLI_DETACH_TIER,      glusterd_handle_detach_tier,           NULL, 0, DRC_NA},
        [GLUSTER_CLI_REPLACE_BRICK]      = { "REPLACE_BRICK",     GLUSTER_CLI_REPLACE_BRICK,    glusterd_handle_replace_brick,         NULL, 0, DRC_NA},
        [GLUSTER_CLI_REMOVE_BRICK]       = { "REMOVE_BRICK",      GLUSTER_CLI_REMOVE_BRICK,     glusterd_handle_remove_brick,          NULL, 0, DRC_NA},
        [GLUSTER_CLI_LOG_ROTATE]         = { "LOG FILENAME",      GLUSTER_CLI_LOG_ROTATE,       glusterd_handle_log_rotate,            NULL, 0, DRC_NA},
        [GLUSTER_CLI_SET_VOLUME]         = { "SET_VOLUME",        GLUSTER_CLI_SET_VOLUME,       glusterd_handle_set_volume,            NULL, 0, DRC_NA},
        [GLUSTER_CLI_SYNC_VOLUME]        = { "SYNC_VOLUME",       GLUSTER_CLI_SYNC_VOLUME,      glusterd_handle_sync_volume,           NULL, 0, DRC_NA},
        [GLUSTER_CLI_RESET_VOLUME]       = { "RESET_VOLUME",      GLUSTER_CLI_RESET_VOLUME,     glusterd_handle_reset_volume,          NULL, 0, DRC_NA},
        [GLUSTER_CLI_FSM_LOG]            = { "FSM_LOG",           GLUSTER_CLI_FSM_LOG,          glusterd_handle_fsm_log,               NULL, 0, DRC_NA},
        [GLUSTER_CLI_GSYNC_SET]          = { "GSYNC_SET",         GLUSTER_CLI_GSYNC_SET,        glusterd_handle_gsync_set,             NULL, 0, DRC_NA},
        [GLUSTER_CLI_PROFILE_VOLUME]     = { "STATS_VOLUME",      GLUSTER_CLI_PROFILE_VOLUME,   glusterd_handle_cli_profile_volume,    NULL, 0, DRC_NA},
        [GLUSTER_CLI_QUOTA]              = { "QUOTA",             GLUSTER_CLI_QUOTA,            glusterd_handle_quota,                 NULL, 0, DRC_NA},
        [GLUSTER_CLI_GETWD]              = { "GETWD",             GLUSTER_CLI_GETWD,            glusterd_handle_getwd,                 NULL, 1, DRC_NA},
        [GLUSTER_CLI_STATUS_VOLUME]      = {"STATUS_VOLUME",      GLUSTER_CLI_STATUS_VOLUME,    glusterd_handle_status_volume,         NULL, 0, DRC_NA},
        [GLUSTER_CLI_MOUNT]              = { "MOUNT",             GLUSTER_CLI_MOUNT,            glusterd_handle_mount,                 NULL, 1, DRC_NA},
        [GLUSTER_CLI_UMOUNT]             = { "UMOUNT",            GLUSTER_CLI_UMOUNT,           glusterd_handle_umount,                NULL, 1, DRC_NA},
        [GLUSTER_CLI_HEAL_VOLUME]        = { "HEAL_VOLUME",       GLUSTER_CLI_HEAL_VOLUME,      glusterd_handle_cli_heal_volume,       NULL, 0, DRC_NA},
        [GLUSTER_CLI_STATEDUMP_VOLUME]   = {"STATEDUMP_VOLUME",   GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume,  NULL, 0, DRC_NA},
        [GLUSTER_CLI_LIST_VOLUME]        = {"LIST_VOLUME",        GLUSTER_CLI_LIST_VOLUME,      glusterd_handle_cli_list_volume,       NULL, 0, DRC_NA},
        [GLUSTER_CLI_CLRLOCKS_VOLUME]    = {"CLEARLOCKS_VOLUME",  GLUSTER_CLI_CLRLOCKS_VOLUME,  glusterd_handle_cli_clearlocks_volume, NULL, 0, DRC_NA},
        [GLUSTER_CLI_COPY_FILE]          = {"COPY_FILE",          GLUSTER_CLI_COPY_FILE,        glusterd_handle_copy_file,             NULL, 0, DRC_NA},
        [GLUSTER_CLI_SYS_EXEC]           = {"SYS_EXEC",           GLUSTER_CLI_SYS_EXEC,         glusterd_handle_sys_exec,              NULL, 0, DRC_NA},
        [GLUSTER_CLI_SNAP]               = {"SNAP",               GLUSTER_CLI_SNAP,             glusterd_handle_snapshot,              NULL, 0, DRC_NA},
        [GLUSTER_CLI_BARRIER_VOLUME]     = {"BARRIER_VOLUME",     GLUSTER_CLI_BARRIER_VOLUME,   glusterd_handle_barrier,               NULL, 0, DRC_NA},
        [GLUSTER_CLI_GANESHA]            = { "GANESHA"  ,         GLUSTER_CLI_GANESHA,          glusterd_handle_ganesha_cmd,           NULL, 0, DRC_NA},
        [GLUSTER_CLI_GET_VOL_OPT]        = {"GET_VOL_OPT",        GLUSTER_CLI_GET_VOL_OPT,      glusterd_handle_get_vol_opt,           NULL, 0, DRC_NA},
        [GLUSTER_CLI_BITROT]             = {"BITROT",             GLUSTER_CLI_BITROT,           glusterd_handle_bitrot,                NULL, 0, DRC_NA},
};
5210
/* RPC program descriptor for the full CLI actor table above; handlers
 * run in a synctask. */
struct rpcsvc_program gd_svc_cli_prog = {
        .progname  = "GlusterD svc cli",
        .prognum   = GLUSTER_CLI_PROGRAM,
        .progver   = GLUSTER_CLI_VERSION,
        .numactors = GLUSTER_CLI_MAXVALUE,
        .actors    = gd_svc_cli_actors,
        .synctask  = _gf_true,
};
5219
5220 /**
5221  * This set of RPC progs are deemed to be trusted. Most of the actors support
5222  * read only queries, the only exception being MOUNT/UMOUNT which is required
5223  * by geo-replication to supprt unprivileged master -> slave sessions.
5224  */
/* Reduced actor table exposed on the trusted (read-only) CLI program:
 * a subset of gd_svc_cli_actors, limited to queries plus MOUNT/UMOUNT. */
rpcsvc_actor_t gd_svc_cli_trusted_actors[GLUSTER_CLI_MAXVALUE] = {
        [GLUSTER_CLI_LIST_FRIENDS]       = { "LIST_FRIENDS",      GLUSTER_CLI_LIST_FRIENDS,     glusterd_handle_cli_list_friends,      NULL, 0, DRC_NA},
        [GLUSTER_CLI_UUID_GET]           = { "UUID_GET",          GLUSTER_CLI_UUID_GET,         glusterd_handle_cli_uuid_get,          NULL, 0, DRC_NA},
        [GLUSTER_CLI_GET_VOLUME]         = { "GET_VOLUME",        GLUSTER_CLI_GET_VOLUME,       glusterd_handle_cli_get_volume,        NULL, 0, DRC_NA},
        [GLUSTER_CLI_GETWD]              = { "GETWD",             GLUSTER_CLI_GETWD,            glusterd_handle_getwd,                 NULL, 1, DRC_NA},
        [GLUSTER_CLI_STATUS_VOLUME]      = {"STATUS_VOLUME",      GLUSTER_CLI_STATUS_VOLUME,    glusterd_handle_status_volume,         NULL, 0, DRC_NA},
        [GLUSTER_CLI_LIST_VOLUME]        = {"LIST_VOLUME",        GLUSTER_CLI_LIST_VOLUME,      glusterd_handle_cli_list_volume,       NULL, 0, DRC_NA},
        [GLUSTER_CLI_MOUNT]              = { "MOUNT",             GLUSTER_CLI_MOUNT,            glusterd_handle_mount,                 NULL, 1, DRC_NA},
        [GLUSTER_CLI_UMOUNT]             = { "UMOUNT",            GLUSTER_CLI_UMOUNT,           glusterd_handle_umount,                NULL, 1, DRC_NA},
};
5235
/* RPC program descriptor for the trusted/read-only CLI actor table;
 * same prognum/version as the full CLI program. */
struct rpcsvc_program gd_svc_cli_trusted_progs = {
        .progname  = "GlusterD svc cli read-only",
        .prognum   = GLUSTER_CLI_PROGRAM,
        .progver   = GLUSTER_CLI_VERSION,
        .numactors = GLUSTER_CLI_MAXVALUE,
        .actors    = gd_svc_cli_trusted_actors,
        .synctask  = _gf_true,
};