speed startup: alter recovery loop
author     Rusty Russell <rusty@rustcorp.com.au>
           Tue, 22 Jun 2010 13:20:23 +0000 (22:50 +0930)
committer  Rusty Russell <rusty@rustcorp.com.au>
           Tue, 22 Jun 2010 13:20:23 +0000 (22:50 +0930)
We do a recovery check on startup, but the loop currently does:
   Sleep for ctdb->tunable.recover_interval.
   Check for recovery.

We want to do it in the other order: check first, then sleep.  This is
best done by extracting the body of the loop into a separate
"main_loop" function.

Seconds between ctdbd first log message and node healthy:
BEFORE: 24.09
AFTER: 23.58

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
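
For illustration only (not part of the patch): a minimal standalone C
sketch of the before/after loop ordering.  check_for_recovery() and the
hard-coded one-second sleep are stand-ins for the real main_loop(),
monitor_cluster() and ctdb->tunable.recover_interval.

    #include <stdio.h>
    #include <unistd.h>

    /* stand-in for the extracted main_loop() body */
    static void check_for_recovery(void)
    {
            printf("checking whether a recovery is needed\n");
    }

    int main(void)
    {
            /*
             * BEFORE: the loop slept for recover_interval and only then
             * checked, so the first check after startup waited out a
             * full interval.
             *
             * AFTER: check first, then sleep, so the check runs
             * immediately on startup.
             */
            for (;;) {
                    check_for_recovery();
                    /* stands in for ctdb_wait_timeout(ctdb, recover_interval) */
                    sleep(1);
            }
            return 0;
    }

Keeping the sleep at the bottom of the loop, rather than special-casing
the first iteration, gives the same effect with no extra state.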
server/ctdb_recoverd.c

index 3a2242e5c9f17343576719532caa282225e8f955..2b8c47fb5e254b01fc90b1b9b79c57ab4ca66767 100644
@@ -2877,14 +2877,11 @@ static int update_recovery_lock_file(struct ctdb_context *ctdb)
        talloc_free(tmp_ctx);
        return 0;
 }
-               
-/*
-  the main monitoring loop
- */
-static void monitor_cluster(struct ctdb_context *ctdb)
+
+static void main_loop(struct ctdb_context *ctdb, struct ctdb_recoverd *rec,
+                     TALLOC_CTX *mem_ctx)
 {
        uint32_t pnn;
-       TALLOC_CTX *mem_ctx=NULL;
        struct ctdb_node_map *nodemap=NULL;
        struct ctdb_node_map *recmaster_nodemap=NULL;
        struct ctdb_node_map **remote_nodemaps=NULL;
@@ -2892,57 +2889,8 @@ static void monitor_cluster(struct ctdb_context *ctdb)
        struct ctdb_vnn_map *remote_vnnmap=NULL;
        int32_t debug_level;
        int i, j, ret;
-       struct ctdb_recoverd *rec;
-
-       DEBUG(DEBUG_NOTICE,("monitor_cluster starting\n"));
-
-       rec = talloc_zero(ctdb, struct ctdb_recoverd);
-       CTDB_NO_MEMORY_FATAL(ctdb, rec);
-
-       rec->ctdb = ctdb;
-
-       rec->priority_time = timeval_current();
-
-       /* register a message port for sending memory dumps */
-       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_MEM_DUMP, mem_dump_handler, rec);
 
-       /* register a message port for recovery elections */
-       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_RECOVERY, election_handler, rec);
 
-       /* when nodes are disabled/enabled */
-       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_SET_NODE_FLAGS, monitor_handler, rec);
-
-       /* when we are asked to puch out a flag change */
-       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_PUSH_NODE_FLAGS, push_flags_handler, rec);
-
-       /* register a message port for vacuum fetch */
-       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_VACUUM_FETCH, vacuum_fetch_handler, rec);
-
-       /* register a message port for reloadnodes  */
-       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_RELOAD_NODES, reload_nodes_handler, rec);
-
-       /* register a message port for performing a takeover run */
-       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_TAKEOVER_RUN, ip_reallocate_handler, rec);
-
-       /* register a message port for disabling the ip check for a short while */
-       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_DISABLE_IP_CHECK, disable_ip_check_handler, rec);
-
-       /* register a message port for updating the recovery daemons node assignment for an ip */
-       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_RECD_UPDATE_IP, recd_update_ip_handler, rec);
-
-again:
-       if (mem_ctx) {
-               talloc_free(mem_ctx);
-               mem_ctx = NULL;
-       }
-       mem_ctx = talloc_new(ctdb);
-       if (!mem_ctx) {
-               DEBUG(DEBUG_CRIT,(__location__ " Failed to create temporary context\n"));
-               exit(-1);
-       }
-
-       /* we only check for recovery once every second */
-       ctdb_wait_timeout(ctdb, ctdb->tunable.recover_interval);
 
        /* verify that the main daemon is still running */
        if (kill(ctdb->ctdbd_pid, 0) != 0) {
@@ -2955,14 +2903,14 @@ again:
 
        if (rec->election_timeout) {
                /* an election is in progress */
-               goto again;
+               return;
        }
 
        /* read the debug level from the parent and update locally */
        ret = ctdb_ctrl_get_debuglevel(ctdb, CTDB_CURRENT_NODE, &debug_level);
        if (ret !=0) {
                DEBUG(DEBUG_ERR, (__location__ " Failed to read debuglevel from parent\n"));
-               goto again;
+               return;
        }
        LogLevel = debug_level;
 
@@ -2992,13 +2940,13 @@ again:
        ret = ctdb_ctrl_get_all_tunables(ctdb, CONTROL_TIMEOUT(), CTDB_CURRENT_NODE, &ctdb->tunable);
        if (ret != 0) {
                DEBUG(DEBUG_ERR,("Failed to get tunables - retrying\n"));
-               goto again;
+               return;
        }
 
        /* get the current recovery lock file from the server */
        if (update_recovery_lock_file(ctdb) != 0) {
                DEBUG(DEBUG_ERR,("Failed to update the recovery lock file\n"));
-               goto again;
+               return;
        }
 
        /* Make sure that if recovery lock verification becomes disabled when
@@ -3014,14 +2962,14 @@ again:
        pnn = ctdb_ctrl_getpnn(ctdb, CONTROL_TIMEOUT(), CTDB_CURRENT_NODE);
        if (pnn == (uint32_t)-1) {
                DEBUG(DEBUG_ERR,("Failed to get local pnn - retrying\n"));
-               goto again;
+               return;
        }
 
        /* get the vnnmap */
        ret = ctdb_ctrl_getvnnmap(ctdb, CONTROL_TIMEOUT(), pnn, mem_ctx, &vnnmap);
        if (ret != 0) {
                DEBUG(DEBUG_ERR, (__location__ " Unable to get vnnmap from node %u\n", pnn));
-               goto again;
+               return;
        }
 
 
@@ -3034,7 +2982,7 @@ again:
        ret = ctdb_ctrl_getnodemap(ctdb, CONTROL_TIMEOUT(), pnn, rec, &rec->nodemap);
        if (ret != 0) {
                DEBUG(DEBUG_ERR, (__location__ " Unable to get nodemap from node %u\n", pnn));
-               goto again;
+               return;
        }
        nodemap = rec->nodemap;
 
@@ -3042,7 +2990,7 @@ again:
        ret = ctdb_ctrl_getrecmaster(ctdb, mem_ctx, CONTROL_TIMEOUT(), pnn, &rec->recmaster);
        if (ret != 0) {
                DEBUG(DEBUG_ERR, (__location__ " Unable to get recmaster from node %u\n", pnn));
-               goto again;
+               return;
        }
 
        /* if we are not the recmaster we can safely ignore any ip reallocate requests */
@@ -3061,7 +3009,7 @@ again:
        if (rec->recmaster == (uint32_t)-1) {
                DEBUG(DEBUG_NOTICE,(__location__ " Initial recovery master set - forcing election\n"));
                force_election(rec, pnn, nodemap);
-               goto again;
+               return;
        }
 
 
@@ -3079,15 +3027,15 @@ again:
                        ret = ctdb_ctrl_freeze_priority(ctdb, CONTROL_TIMEOUT(), CTDB_CURRENT_NODE, 1);
                        if (ret != 0) {
                                DEBUG(DEBUG_ERR,(__location__ " Failed to freeze node due to node being STOPPED\n"));
-                               goto again;
+                               return;
                        }
                        ret = ctdb_ctrl_setrecmode(ctdb, CONTROL_TIMEOUT(), CTDB_CURRENT_NODE, CTDB_RECOVERY_ACTIVE);
                        if (ret != 0) {
                                DEBUG(DEBUG_ERR,(__location__ " Failed to activate recovery mode due to node being stopped\n"));
 
-                               goto again;
+                               return;
                        }
-                       goto again;
+                       return;
                }
        }
        /* If the local node is stopped, verify we are not the recmaster 
@@ -3096,7 +3044,7 @@ again:
        if ((nodemap->nodes[pnn].flags & NODE_FLAGS_STOPPED) && (rec->recmaster == pnn)) {
                DEBUG(DEBUG_ERR,("Local node is STOPPED. Yielding recmaster role\n"));
                force_election(rec, pnn, nodemap);
-               goto again;
+               return;
        }
        
        /* check that we (recovery daemon) and the local ctdb daemon
@@ -3130,14 +3078,14 @@ again:
        if (j == nodemap->num) {
                DEBUG(DEBUG_ERR, ("Recmaster node %u not in list. Force reelection\n", rec->recmaster));
                force_election(rec, pnn, nodemap);
-               goto again;
+               return;
        }
 
        /* if recovery master is disconnected we must elect a new recmaster */
        if (nodemap->nodes[j].flags & NODE_FLAGS_DISCONNECTED) {
                DEBUG(DEBUG_NOTICE, ("Recmaster node %u is disconnected. Force reelection\n", nodemap->nodes[j].pnn));
                force_election(rec, pnn, nodemap);
-               goto again;
+               return;
        }
 
        /* grap the nodemap from the recovery master to check if it is banned */
@@ -3146,14 +3094,14 @@ again:
        if (ret != 0) {
                DEBUG(DEBUG_ERR, (__location__ " Unable to get nodemap from recovery master %u\n", 
                          nodemap->nodes[j].pnn));
-               goto again;
+               return;
        }
 
 
        if (recmaster_nodemap->nodes[j].flags & NODE_FLAGS_INACTIVE) {
                DEBUG(DEBUG_NOTICE, ("Recmaster node %u no longer available. Force reelection\n", nodemap->nodes[j].pnn));
                force_election(rec, pnn, nodemap);
-               goto again;
+               return;
        }
 
 
@@ -3173,7 +3121,7 @@ again:
           if recovery is needed
         */
        if (pnn != rec->recmaster) {
-               goto again;
+               return;
        }
 
 
@@ -3182,38 +3130,38 @@ again:
        if (ret == MONITOR_ELECTION_NEEDED) {
                DEBUG(DEBUG_NOTICE,("update_local_flags() called for a re-election.\n"));
                force_election(rec, pnn, nodemap);
-               goto again;
+               return;
        }
        if (ret != MONITOR_OK) {
                DEBUG(DEBUG_ERR,("Unable to update local flags\n"));
-               goto again;
+               return;
        }
 
        if (ctdb->num_nodes != nodemap->num) {
                DEBUG(DEBUG_ERR, (__location__ " ctdb->num_nodes (%d) != nodemap->num (%d) reloading nodes file\n", ctdb->num_nodes, nodemap->num));
                reload_nodes_file(ctdb);
-               goto again;
+               return;
        }
 
        /* verify that all active nodes agree that we are the recmaster */
        switch (verify_recmaster(rec, nodemap, pnn)) {
        case MONITOR_RECOVERY_NEEDED:
                /* can not happen */
-               goto again;
+               return;
        case MONITOR_ELECTION_NEEDED:
                force_election(rec, pnn, nodemap);
-               goto again;
+               return;
        case MONITOR_OK:
                break;
        case MONITOR_FAILED:
-               goto again;
+               return;
        }
 
 
        if (rec->need_recovery) {
                /* a previous recovery didn't finish */
                do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-               goto again;             
+               return;
        }
 
        /* verify that all active nodes are in normal mode 
@@ -3222,9 +3170,9 @@ again:
        switch (verify_recmode(ctdb, nodemap)) {
        case MONITOR_RECOVERY_NEEDED:
                do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-               goto again;
+               return;
        case MONITOR_FAILED:
-               goto again;
+               return;
        case MONITOR_ELECTION_NEEDED:
                /* can not happen */
        case MONITOR_OK:
@@ -3239,7 +3187,7 @@ again:
                        DEBUG(DEBUG_ERR,("Failed check_recovery_lock. Force a recovery\n"));
                        ctdb_set_culprit(rec, ctdb->pnn);
                        do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-                       goto again;
+                       return;
                }
        }
 
@@ -3248,14 +3196,14 @@ again:
        remote_nodemaps = talloc_array(mem_ctx, struct ctdb_node_map *, nodemap->num);
        if (remote_nodemaps == NULL) {
                DEBUG(DEBUG_ERR, (__location__ " failed to allocate remote nodemap array\n"));
-               goto again;
+               return;
        }
        for(i=0; i<nodemap->num; i++) {
                remote_nodemaps[i] = NULL;
        }
        if (get_remote_nodemaps(ctdb, mem_ctx, nodemap, remote_nodemaps) != 0) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to read remote nodemaps\n"));
-               goto again;
+               return;
        } 
 
        /* verify that all other nodes have the same nodemap as we have
@@ -3269,7 +3217,7 @@ again:
                        DEBUG(DEBUG_ERR,(__location__ " Did not get a remote nodemap for node %d, restarting monitoring\n", j));
                        ctdb_set_culprit(rec, j);
 
-                       goto again;
+                       return;
                }
 
                /* if the nodes disagree on how many nodes there are
@@ -3280,7 +3228,7 @@ again:
                                  nodemap->nodes[j].pnn, remote_nodemaps[j]->num, nodemap->num));
                        ctdb_set_culprit(rec, nodemap->nodes[j].pnn);
                        do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-                       goto again;
+                       return;
                }
 
                /* if the nodes disagree on which nodes exist and are
@@ -3294,7 +3242,7 @@ again:
                                ctdb_set_culprit(rec, nodemap->nodes[j].pnn);
                                do_recovery(rec, mem_ctx, pnn, nodemap, 
                                            vnnmap);
-                               goto again;
+                               return;
                        }
                }
 
@@ -3317,14 +3265,14 @@ again:
                                        ctdb_set_culprit(rec, nodemap->nodes[j].pnn);
                                        do_recovery(rec, mem_ctx, pnn, nodemap, 
                                                    vnnmap);
-                                       goto again;
+                                       return;
                                } else {
                                        DEBUG(DEBUG_ERR,("Use flags 0x%02x from local recmaster node for cluster update of node %d flags\n", nodemap->nodes[i].flags, i));
                                        update_flags_on_all_nodes(ctdb, nodemap, nodemap->nodes[i].pnn, nodemap->nodes[i].flags);
                                        ctdb_set_culprit(rec, nodemap->nodes[j].pnn);
                                        do_recovery(rec, mem_ctx, pnn, nodemap, 
                                                    vnnmap);
-                                       goto again;
+                                       return;
                                }
                        }
                }
@@ -3339,7 +3287,7 @@ again:
                          vnnmap->size, rec->num_active));
                ctdb_set_culprit(rec, ctdb->pnn);
                do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-               goto again;
+               return;
        }
 
        /* verify that all active nodes in the nodemap also exist in 
@@ -3363,7 +3311,7 @@ again:
                                  nodemap->nodes[j].pnn));
                        ctdb_set_culprit(rec, nodemap->nodes[j].pnn);
                        do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-                       goto again;
+                       return;
                }
        }
 
@@ -3384,7 +3332,7 @@ again:
                if (ret != 0) {
                        DEBUG(DEBUG_ERR, (__location__ " Unable to get vnnmap from remote node %u\n", 
                                  nodemap->nodes[j].pnn));
-                       goto again;
+                       return;
                }
 
                /* verify the vnnmap generation is the same */
@@ -3393,7 +3341,7 @@ again:
                                  nodemap->nodes[j].pnn, remote_vnnmap->generation, vnnmap->generation));
                        ctdb_set_culprit(rec, nodemap->nodes[j].pnn);
                        do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-                       goto again;
+                       return;
                }
 
                /* verify the vnnmap size is the same */
@@ -3402,7 +3350,7 @@ again:
                                  nodemap->nodes[j].pnn, remote_vnnmap->size, vnnmap->size));
                        ctdb_set_culprit(rec, nodemap->nodes[j].pnn);
                        do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-                       goto again;
+                       return;
                }
 
                /* verify the vnnmap is the same */
@@ -3413,7 +3361,7 @@ again:
                                ctdb_set_culprit(rec, nodemap->nodes[j].pnn);
                                do_recovery(rec, mem_ctx, pnn, nodemap, 
                                            vnnmap);
-                               goto again;
+                               return;
                        }
                }
        }
@@ -3433,7 +3381,7 @@ again:
                                         culprit));
                        ctdb_set_culprit(rec, culprit);
                        do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-                       goto again;
+                       return;
                }
 
                /* execute the "startrecovery" event script on all nodes */
@@ -3442,7 +3390,7 @@ again:
                        DEBUG(DEBUG_ERR, (__location__ " Unable to run the 'startrecovery' event on cluster\n"));
                        ctdb_set_culprit(rec, ctdb->pnn);
                        do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-                       goto again;
+                       return;
                }
 
                ret = ctdb_takeover_run(ctdb, nodemap);
@@ -3450,7 +3398,7 @@ again:
                        DEBUG(DEBUG_ERR, (__location__ " Unable to setup public takeover addresses - starting recovery\n"));
                        ctdb_set_culprit(rec, ctdb->pnn);
                        do_recovery(rec, mem_ctx, pnn, nodemap, vnnmap);
-                       goto again;
+                       return;
                }
 
                /* execute the "recovered" event script on all nodes */
@@ -3467,10 +3415,65 @@ again:
                }
 #endif
        }
+}
+
+/*
+  the main monitoring loop
+ */
+static void monitor_cluster(struct ctdb_context *ctdb)
+{
+       struct ctdb_recoverd *rec;
+
+       DEBUG(DEBUG_NOTICE,("monitor_cluster starting\n"));
+
+       rec = talloc_zero(ctdb, struct ctdb_recoverd);
+       CTDB_NO_MEMORY_FATAL(ctdb, rec);
+
+       rec->ctdb = ctdb;
 
+       rec->priority_time = timeval_current();
 
-       goto again;
+       /* register a message port for sending memory dumps */
+       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_MEM_DUMP, mem_dump_handler, rec);
 
+       /* register a message port for recovery elections */
+       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_RECOVERY, election_handler, rec);
+
+       /* when nodes are disabled/enabled */
+       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_SET_NODE_FLAGS, monitor_handler, rec);
+
+       /* when we are asked to puch out a flag change */
+       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_PUSH_NODE_FLAGS, push_flags_handler, rec);
+
+       /* register a message port for vacuum fetch */
+       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_VACUUM_FETCH, vacuum_fetch_handler, rec);
+
+       /* register a message port for reloadnodes  */
+       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_RELOAD_NODES, reload_nodes_handler, rec);
+
+       /* register a message port for performing a takeover run */
+       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_TAKEOVER_RUN, ip_reallocate_handler, rec);
+
+       /* register a message port for disabling the ip check for a short while */
+       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_DISABLE_IP_CHECK, disable_ip_check_handler, rec);
+
+       /* register a message port for updating the recovery daemons node assignment for an ip */
+       ctdb_client_set_message_handler(ctdb, CTDB_SRVID_RECD_UPDATE_IP, recd_update_ip_handler, rec);
+
+       for (;;) {
+               TALLOC_CTX *mem_ctx = talloc_new(ctdb);
+               if (!mem_ctx) {
+                       DEBUG(DEBUG_CRIT,(__location__
+                                         " Failed to create temp context\n"));
+                       exit(-1);
+               }
+
+               main_loop(ctdb, rec, mem_ctx);
+               talloc_free(mem_ctx);
+
+               /* we only check for recovery once every second */
+               ctdb_wait_timeout(ctdb, ctdb->tunable.recover_interval);
+       }
 }
 
 /*