#define CTDB_ARP_INTERVAL 1
#define CTDB_ARP_REPEAT 3
-/* These flags are ONLY valid within IP allocation code and must be
- * cleared to avoid confusing other recovery daemon functions
- */
-#define NODE_FLAGS_NOIPTAKEOVER 0x01000000 /* can not takeover additional IPs */
-#define NODE_FLAGS_NOIPHOST 0x02000000 /* can not host IPs */
+/* Flags used in IP allocation algorithms.  These are ONLY valid
+ * within the IP allocation code.  Unlike the old NODE_FLAGS_NOIPTAKEOVER
+ * and NODE_FLAGS_NOIPHOST node-flag bits, they live in a separate
+ * structure and so can not leak into (or confuse) other recovery
+ * daemon functions.
+ */
+struct ctdb_ipflags {
+ bool noiptakeover;
+ bool noiphost;
+};
struct ctdb_iface {
struct ctdb_iface *prev, *next;
* node and is NOIPHOST unset?
*/
static bool can_node_host_ip(struct ctdb_context *ctdb, int32_t pnn,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags ipflags,
struct ctdb_public_ip_list *ip)
{
struct ctdb_all_public_ips *public_ips;
int i;
- if (nodemap->nodes[pnn].flags & NODE_FLAGS_NOIPHOST) {
+ if (ipflags.noiphost) {
return false;
}
}
static bool can_node_takeover_ip(struct ctdb_context *ctdb, int32_t pnn,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags ipflags,
struct ctdb_public_ip_list *ip)
{
- if (nodemap->nodes[pnn].flags & NODE_FLAGS_NOIPTAKEOVER) {
+ if (ipflags.noiptakeover) {
return false;
}
- return can_node_host_ip(ctdb, pnn, nodemap, ip);
+ return can_node_host_ip(ctdb, pnn, ipflags, ip);
}
/* search the node lists list for a node to takeover this ip.
so that the ips get spread out evenly.
*/
static int find_takeover_node(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list *ip,
struct ctdb_public_ip_list *all_ips)
{
int pnn, min=0, num;
- int i;
+ int i, numnodes;
+ numnodes = talloc_get_size(ipflags) / sizeof(struct ctdb_ipflags);
pnn = -1;
- for (i=0;i<nodemap->num;i++) {
+ for (i=0;i<numnodes;i++) {
/* verify that this node can serve this ip */
- if (!can_node_takeover_ip(ctdb, i, nodemap, ip)) {
+ if (!can_node_takeover_ip(ctdb, i, ipflags[i], ip)) {
/* no it couldnt so skip to the next node */
continue;
}
* finding the best node for each.
*/
static void basic_allocate_unassigned(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list *all_ips)
{
struct ctdb_public_ip_list *tmp_ip;
*/
for (tmp_ip=all_ips;tmp_ip;tmp_ip=tmp_ip->next) {
if (tmp_ip->pnn == -1) {
- if (find_takeover_node(ctdb, nodemap, tmp_ip, all_ips)) {
+ if (find_takeover_node(ctdb, ipflags, tmp_ip, all_ips)) {
DEBUG(DEBUG_WARNING,("Failed to find node to cover ip %s\n",
ctdb_addr_to_str(&tmp_ip->addr)));
}
/* Basic non-deterministic rebalancing algorithm.
*/
static void basic_failback(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list *all_ips,
int num_ips)
{
- int i;
+ int i, numnodes;
int maxnode, maxnum, minnode, minnum, num, retries;
struct ctdb_public_ip_list *tmp_ip;
+ numnodes = talloc_get_size(ipflags) / sizeof(struct ctdb_ipflags);
retries = 0;
try_again:
*/
maxnode = -1;
minnode = -1;
- for (i=0;i<nodemap->num;i++) {
+ for (i=0;i<numnodes;i++) {
/* only check nodes that can actually serve this ip */
- if (!can_node_takeover_ip(ctdb, i, nodemap, tmp_ip)) {
+ if (!can_node_takeover_ip(ctdb, i, ipflags[i], tmp_ip)) {
/* no it couldnt so skip to the next node */
continue;
}
/* Reassign one of maxnode's VNNs */
for (tmp=all_ips;tmp;tmp=tmp->next) {
if (tmp->pnn == maxnode) {
- (void)find_takeover_node(ctdb, nodemap, tmp, all_ips);
+ (void)find_takeover_node(ctdb, ipflags, tmp, all_ips);
retries++;
goto try_again;;
}
/* Do necessary LCP2 initialisation. Bury it in a function here so
* that we can unit test it.
*/
-static void lcp2_init(struct ctdb_context * tmp_ctx,
- struct ctdb_node_map * nodemap,
- struct ctdb_public_ip_list *all_ips,
- uint32_t **lcp2_imbalances,
- bool **rebalance_candidates)
+static void lcp2_init(struct ctdb_context *tmp_ctx,
+ struct ctdb_ipflags *ipflags,
+ struct ctdb_public_ip_list *all_ips,
+ uint32_t **lcp2_imbalances,
+ bool **rebalance_candidates)
{
- int i;
+ int i, numnodes;
struct ctdb_public_ip_list *tmp_ip;
- *rebalance_candidates = talloc_array(tmp_ctx, bool, nodemap->num);
+ numnodes = talloc_get_size(ipflags) / sizeof(struct ctdb_ipflags);
+
+ *rebalance_candidates = talloc_array(tmp_ctx, bool, numnodes);
CTDB_NO_MEMORY_FATAL(tmp_ctx, *rebalance_candidates);
- *lcp2_imbalances = talloc_array(tmp_ctx, uint32_t, nodemap->num);
+ *lcp2_imbalances = talloc_array(tmp_ctx, uint32_t, numnodes);
CTDB_NO_MEMORY_FATAL(tmp_ctx, *lcp2_imbalances);
- for (i=0;i<nodemap->num;i++) {
+ for (i=0;i<numnodes;i++) {
(*lcp2_imbalances)[i] = lcp2_imbalance(all_ips, i);
/* First step: assume all nodes are candidates */
(*rebalance_candidates)[i] = true;
while (force_rebalance_list != NULL) {
struct ctdb_rebalancenodes *next = force_rebalance_list->next;
- if (force_rebalance_list->pnn <= nodemap->num) {
+		if (force_rebalance_list->pnn < numnodes) {
(*rebalance_candidates)[force_rebalance_list->pnn] = true;
}
* the IP/node combination that will cost the least.
*/
static void lcp2_allocate_unassigned(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list *all_ips,
uint32_t *lcp2_imbalances)
{
struct ctdb_public_ip_list *tmp_ip;
- int dstnode;
+ int dstnode, numnodes;
int minnode;
uint32_t mindsum, dstdsum, dstimbl, minimbl;
bool should_loop = true;
bool have_unassigned = true;
+ numnodes = talloc_get_size(ipflags) / sizeof(struct ctdb_ipflags);
+
while (have_unassigned && should_loop) {
should_loop = false;
continue;
}
- for (dstnode=0; dstnode < nodemap->num; dstnode++) {
+ for (dstnode=0; dstnode < numnodes; dstnode++) {
/* only check nodes that can actually takeover this ip */
if (!can_node_takeover_ip(ctdb, dstnode,
- nodemap, tmp_ip)) {
+ ipflags[dstnode],
+ tmp_ip)) {
/* no it couldnt so skip to the next node */
continue;
}
* combination to move from the source node.
*/
static bool lcp2_failback_candidate(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list *all_ips,
int srcnode,
uint32_t candimbl,
uint32_t *lcp2_imbalances,
bool *rebalance_candidates)
{
- int dstnode, mindstnode;
+ int dstnode, mindstnode, numnodes;
uint32_t srcimbl, srcdsum, dstimbl, dstdsum;
uint32_t minsrcimbl, mindstimbl;
struct ctdb_public_ip_list *minip;
mindstnode = -1;
mindstimbl = 0;
+ numnodes = talloc_get_size(ipflags) / sizeof(struct ctdb_ipflags);
+
DEBUG(DEBUG_DEBUG,(" ----------------------------------------\n"));
DEBUG(DEBUG_DEBUG,(" CONSIDERING MOVES FROM %d [%d]\n", srcnode, candimbl));
* to do gratuitous failover of IPs just to make minor
* balance improvements.
*/
- for (dstnode=0; dstnode < nodemap->num; dstnode++) {
+ for (dstnode=0; dstnode < numnodes; dstnode++) {
if (!rebalance_candidates[dstnode]) {
continue;
}
/* only check nodes that can actually takeover this ip */
if (!can_node_takeover_ip(ctdb, dstnode,
- nodemap, tmp_ip)) {
+ ipflags[dstnode], tmp_ip)) {
/* no it couldnt so skip to the next node */
continue;
}
* IP/destination node combination to move from the source node.
*/
static void lcp2_failback(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list *all_ips,
uint32_t *lcp2_imbalances,
bool *rebalance_candidates)
{
- int i, num_rebalance_candidates;
+ int i, num_rebalance_candidates, numnodes;
struct lcp2_imbalance_pnn * lips;
bool again;
+ numnodes = talloc_get_size(ipflags) / sizeof(struct ctdb_ipflags);
+
try_again:
/* It is only worth continuing if we have suitable target
* continuing on...
*/
num_rebalance_candidates = 0;
- for (i = 0; i < nodemap->num; i++) {
+ for (i = 0; i < numnodes; i++) {
if (rebalance_candidates[i]) {
num_rebalance_candidates++;
}
* iterate through candidates. Usually the 1st one will be
* used, so this doesn't cost much...
*/
- lips = talloc_array(ctdb, struct lcp2_imbalance_pnn, nodemap->num);
- for (i = 0; i < nodemap->num; i++) {
+ lips = talloc_array(ctdb, struct lcp2_imbalance_pnn, numnodes);
+ for (i = 0; i < numnodes; i++) {
lips[i].imbalance = lcp2_imbalances[i];
lips[i].pnn = i;
}
- qsort(lips, nodemap->num, sizeof(struct lcp2_imbalance_pnn),
+ qsort(lips, numnodes, sizeof(struct lcp2_imbalance_pnn),
lcp2_cmp_imbalance_pnn);
again = false;
- for (i = 0; i < nodemap->num; i++) {
+ for (i = 0; i < numnodes; i++) {
/* This means that all nodes had 0 or 1 addresses, so
* can't be imbalanced.
*/
}
if (lcp2_failback_candidate(ctdb,
- nodemap,
+ ipflags,
all_ips,
lips[i].pnn,
lips[i].imbalance,
}
static void unassign_unsuitable_ips(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list *all_ips)
{
struct ctdb_public_ip_list *tmp_ip;
continue;
}
if (!can_node_host_ip(ctdb, tmp_ip->pnn,
- nodemap, tmp_ip) != 0) {
+				      ipflags[tmp_ip->pnn], tmp_ip)) {
/* this node can not serve this ip. */
DEBUG(DEBUG_DEBUG,("Unassign IP: %s from %d\n",
ctdb_addr_to_str(&(tmp_ip->addr)),
}
static void ip_alloc_deterministic_ips(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list *all_ips)
{
struct ctdb_public_ip_list *tmp_ip;
- int i;
+ int i, numnodes;
+
+ numnodes = talloc_get_size(ipflags) / sizeof(struct ctdb_ipflags);
DEBUG(DEBUG_NOTICE,("Deterministic IPs enabled. Resetting all ip allocations\n"));
/* Allocate IPs to nodes in a modulo fashion so that IPs will
*/
for (i=0,tmp_ip=all_ips;tmp_ip;tmp_ip=tmp_ip->next,i++) {
- tmp_ip->pnn = i%nodemap->num;
+ tmp_ip->pnn = i%numnodes;
}
/* IP failback doesn't make sense with deterministic
DEBUG(DEBUG_WARNING, ("WARNING: 'NoIPFailback' set but ignored - incompatible with 'DeterministicIPs\n"));
}
- unassign_unsuitable_ips(ctdb, nodemap, all_ips);
+ unassign_unsuitable_ips(ctdb, ipflags, all_ips);
- basic_allocate_unassigned(ctdb, nodemap, all_ips);
+ basic_allocate_unassigned(ctdb, ipflags, all_ips);
/* No failback here! */
}
static void ip_alloc_nondeterministic_ips(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list *all_ips)
{
/* This should be pushed down into basic_failback. */
num_ips++;
}
- unassign_unsuitable_ips(ctdb, nodemap, all_ips);
+ unassign_unsuitable_ips(ctdb, ipflags, all_ips);
- basic_allocate_unassigned(ctdb, nodemap, all_ips);
+ basic_allocate_unassigned(ctdb, ipflags, all_ips);
/* If we don't want IPs to fail back then don't rebalance IPs. */
if (1 == ctdb->tunable.no_ip_failback) {
/* Now, try to make sure the ip adresses are evenly distributed
across the nodes.
*/
- basic_failback(ctdb, nodemap, all_ips, num_ips);
+ basic_failback(ctdb, ipflags, all_ips, num_ips);
}
static void ip_alloc_lcp2(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list *all_ips)
{
uint32_t *lcp2_imbalances;
TALLOC_CTX *tmp_ctx = talloc_new(ctdb);
- unassign_unsuitable_ips(ctdb, nodemap, all_ips);
+ unassign_unsuitable_ips(ctdb, ipflags, all_ips);
- lcp2_init(tmp_ctx, nodemap, all_ips,
+ lcp2_init(tmp_ctx, ipflags, all_ips,
&lcp2_imbalances, &rebalance_candidates);
- lcp2_allocate_unassigned(ctdb, nodemap, all_ips, lcp2_imbalances);
+ lcp2_allocate_unassigned(ctdb, ipflags, all_ips, lcp2_imbalances);
/* If we don't want IPs to fail back then don't rebalance IPs. */
if (1 == ctdb->tunable.no_ip_failback) {
/* Now, try to make sure the ip adresses are evenly distributed
across the nodes.
*/
- lcp2_failback(ctdb, nodemap, all_ips,
+ lcp2_failback(ctdb, ipflags, all_ips,
lcp2_imbalances, rebalance_candidates);
finished:
/* The calculation part of the IP allocation algorithm. */
static void ctdb_takeover_run_core(struct ctdb_context *ctdb,
- struct ctdb_node_map *nodemap,
+ struct ctdb_ipflags *ipflags,
struct ctdb_public_ip_list **all_ips_p)
{
/* since nodes only know about those public addresses that
*all_ips_p = create_merged_ip_list(ctdb);
if (1 == ctdb->tunable.lcp2_public_ip_assignment) {
- ip_alloc_lcp2(ctdb, nodemap, *all_ips_p);
+ ip_alloc_lcp2(ctdb, ipflags, *all_ips_p);
} else if (1 == ctdb->tunable.deterministic_public_ips) {
- ip_alloc_deterministic_ips(ctdb, nodemap, *all_ips_p);
+ ip_alloc_deterministic_ips(ctdb, ipflags, *all_ips_p);
} else {
- ip_alloc_nondeterministic_ips(ctdb, nodemap, *all_ips_p);
+ ip_alloc_nondeterministic_ips(ctdb, ipflags, *all_ips_p);
}
/* at this point ->pnn is the node which will own each IP
return tvals;
}
-static void clear_ipflags(struct ctdb_node_map *nodemap)
-{
- int i;
-
- for (i=0;i<nodemap->num;i++) {
- nodemap->nodes[i].flags &=
- ~(NODE_FLAGS_NOIPTAKEOVER|NODE_FLAGS_NOIPHOST);
- }
-}
-
-
/* Set internal flags for IP allocation:
* Clear ip flags
- *	Set NOIPTAKOVER ip flags from per-node NoIPTakeover tunable
+ *	Set NOIPTAKEOVER ip flags from per-node NoIPTakeover tunable
* else
* Set NOIPHOST ip flags for disabled nodes
*/
-static void set_ipflags_internal(struct ctdb_node_map *nodemap,
- uint32_t *tval_noiptakeover,
- uint32_t *tval_noiphostonalldisabled)
+static struct ctdb_ipflags *
+set_ipflags_internal(struct ctdb_context *ctdb,
+ TALLOC_CTX *tmp_ctx,
+ struct ctdb_node_map *nodemap,
+ uint32_t *tval_noiptakeover,
+ uint32_t *tval_noiphostonalldisabled)
{
int i;
+ struct ctdb_ipflags *ipflags;
- clear_ipflags(nodemap);
+	/* IP flags are implicitly cleared: talloc_zero_array() zero-fills */
+ ipflags = talloc_zero_array(tmp_ctx, struct ctdb_ipflags, nodemap->num);
+ CTDB_NO_MEMORY_NULL(ctdb, ipflags);
for (i=0;i<nodemap->num;i++) {
/* Can not take IPs on node with NoIPTakeover set */
if (tval_noiptakeover[i] != 0) {
- nodemap->nodes[i].flags |= NODE_FLAGS_NOIPTAKEOVER;
+ ipflags[i].noiptakeover = true;
}
/* Can not host IPs on INACTIVE node */
if (nodemap->nodes[i].flags & NODE_FLAGS_INACTIVE) {
- nodemap->nodes[i].flags |= NODE_FLAGS_NOIPHOST;
+ ipflags[i].noiphost = true;
}
}
*/
for (i=0;i<nodemap->num;i++) {
if (tval_noiphostonalldisabled[i] != 0) {
- nodemap->nodes[i].flags |= NODE_FLAGS_NOIPHOST;
+ ipflags[i].noiphost = true;
}
}
} else {
*/
for (i=0;i<nodemap->num;i++) {
if (nodemap->nodes[i].flags & NODE_FLAGS_DISABLED) {
- nodemap->nodes[i].flags |= NODE_FLAGS_NOIPHOST;
+ ipflags[i].noiphost = true;
}
}
}
+
+ return ipflags;
}
-static bool set_ipflags(struct ctdb_context *ctdb,
- TALLOC_CTX *tmp_ctx,
- struct ctdb_node_map *nodemap)
+static struct ctdb_ipflags *set_ipflags(struct ctdb_context *ctdb,
+ TALLOC_CTX *tmp_ctx,
+ struct ctdb_node_map *nodemap)
{
uint32_t *tval_noiptakeover;
uint32_t *tval_noiphostonalldisabled;
+ struct ctdb_ipflags *ipflags;
tval_noiptakeover = get_tunable_from_nodes(ctdb, tmp_ctx, nodemap,
"NoIPTakeover");
if (tval_noiptakeover == NULL) {
- return false;
+ return NULL;
}
tval_noiphostonalldisabled =
get_tunable_from_nodes(ctdb, tmp_ctx, nodemap,
"NoIPHostOnAllDisabled");
if (tval_noiphostonalldisabled == NULL) {
- return false;
+ return NULL;
}
- set_ipflags_internal(nodemap,
- tval_noiptakeover, tval_noiphostonalldisabled);
+ ipflags = set_ipflags_internal(ctdb, tmp_ctx, nodemap,
+ tval_noiptakeover,
+ tval_noiphostonalldisabled);
talloc_free(tval_noiptakeover);
talloc_free(tval_noiphostonalldisabled);
- return true;
+ return ipflags;
}
/*
struct ctdb_client_control_state *state;
TALLOC_CTX *tmp_ctx = talloc_new(ctdb);
uint32_t disable_timeout;
+ struct ctdb_ipflags *ipflags;
/*
* ip failover is completely disabled, just send out the
goto ipreallocated;
}
- if (!set_ipflags(ctdb, tmp_ctx, nodemap)) {
- DEBUG(DEBUG_ERR,("Failed to set IP flags from tunables\n"));
+ ipflags = set_ipflags(ctdb, tmp_ctx, nodemap);
+ if (ipflags == NULL) {
+ DEBUG(DEBUG_ERR,("Failed to set IP flags - aborting takeover run\n"));
+ talloc_free(tmp_ctx);
return -1;
}
ZERO_STRUCT(ip);
/* Do the IP reassignment calculations */
- ctdb_takeover_run_core(ctdb, nodemap, &all_ips);
+ ctdb_takeover_run_core(ctdb, ipflags, &all_ips);
-	/* The IP flags need to be cleared because they should never
-	 * be seen outside the IP allocation code.
-	 */
-	clear_ipflags(nodemap);
/* The recovery daemon does regular sanity checks of the IPs.
* However, sometimes it is overzealous and thinks changes are
void ctdb_test_init(const char nodestates[],
struct ctdb_context **ctdb,
struct ctdb_public_ip_list **all_ips,
- struct ctdb_node_map **nodemap)
+ struct ctdb_ipflags **ipflags)
{
struct ctdb_all_public_ips **avail;
int i, numnodes;
uint32_t nodeflags[CTDB_TEST_MAX_NODES];
char *tok, *ns, *t;
+ struct ctdb_node_map *nodemap;
uint32_t *tval_noiptakeover;
uint32_t *tval_noiptakeoverondisabled;
get_tunable_values(*ctdb, numnodes,
"CTDB_SET_NoIPHostOnAllDisabled");
- *nodemap = talloc_array(*ctdb, struct ctdb_node_map, numnodes);
- (*nodemap)->num = numnodes;
+ nodemap = talloc_array(*ctdb, struct ctdb_node_map, numnodes);
+ nodemap->num = numnodes;
read_ctdb_public_ip_info(*ctdb, numnodes, all_ips, &avail);
(*ctdb)->nodes = talloc_array(*ctdb, struct ctdb_node *, numnodes); // FIXME: bogus size, overkill
for (i=0; i < numnodes; i++) {
- (*nodemap)->nodes[i].pnn = i;
- (*nodemap)->nodes[i].flags = nodeflags[i];
+ nodemap->nodes[i].pnn = i;
+ nodemap->nodes[i].flags = nodeflags[i];
/* nodemap->nodes[i].sockaddr is uninitialised */
(*ctdb)->nodes[i] = talloc(*ctdb, struct ctdb_node);
(*ctdb)->nodes[i]->known_public_ips = avail[i];
}
- set_ipflags_internal(*nodemap, tval_noiptakeover, tval_noiptakeoverondisabled);
+ *ipflags = set_ipflags_internal(*ctdb, *ctdb, nodemap,
+ tval_noiptakeover,
+ tval_noiptakeoverondisabled);
}
/* IP layout is read from stdin. */
{
struct ctdb_context *ctdb;
struct ctdb_public_ip_list *all_ips;
- struct ctdb_node_map *nodemap;
+ struct ctdb_ipflags *ipflags;
uint32_t *lcp2_imbalances;
bool *newly_healthy;
- ctdb_test_init(nodestates, &ctdb, &all_ips, &nodemap);
+ ctdb_test_init(nodestates, &ctdb, &all_ips, &ipflags);
- lcp2_init(ctdb, nodemap, all_ips, &lcp2_imbalances, &newly_healthy);
+ lcp2_init(ctdb, ipflags, all_ips, &lcp2_imbalances, &newly_healthy);
- lcp2_allocate_unassigned(ctdb, nodemap,
+ lcp2_allocate_unassigned(ctdb, ipflags,
all_ips, lcp2_imbalances);
print_ctdb_public_ip_list(all_ips);
{
struct ctdb_context *ctdb;
struct ctdb_public_ip_list *all_ips;
- struct ctdb_node_map *nodemap;
+ struct ctdb_ipflags *ipflags;
uint32_t *lcp2_imbalances;
bool *newly_healthy;
- ctdb_test_init(nodestates, &ctdb, &all_ips, &nodemap);
+ ctdb_test_init(nodestates, &ctdb, &all_ips, &ipflags);
- lcp2_init(ctdb, nodemap, all_ips, &lcp2_imbalances, &newly_healthy);
+ lcp2_init(ctdb, ipflags, all_ips, &lcp2_imbalances, &newly_healthy);
- lcp2_failback(ctdb, nodemap,
+ lcp2_failback(ctdb, ipflags,
all_ips, lcp2_imbalances, newly_healthy);
print_ctdb_public_ip_list(all_ips);
{
struct ctdb_context *ctdb;
struct ctdb_public_ip_list *all_ips;
- struct ctdb_node_map *nodemap;
+ struct ctdb_ipflags *ipflags;
uint32_t *lcp2_imbalances;
bool *newly_healthy;
- ctdb_test_init(nodestates, &ctdb, &all_ips, &nodemap);
+ ctdb_test_init(nodestates, &ctdb, &all_ips, &ipflags);
- lcp2_init(ctdb, nodemap, all_ips, &lcp2_imbalances, &newly_healthy);
+ lcp2_init(ctdb, ipflags, all_ips, &lcp2_imbalances, &newly_healthy);
- lcp2_failback(ctdb, nodemap,
+ lcp2_failback(ctdb, ipflags,
all_ips, lcp2_imbalances, newly_healthy);
print_ctdb_public_ip_list(all_ips);
{
struct ctdb_context *ctdb;
struct ctdb_public_ip_list *all_ips;
- struct ctdb_node_map *nodemap;
+ struct ctdb_ipflags *ipflags;
- ctdb_test_init(nodestates, &ctdb, &all_ips, &nodemap);
+ ctdb_test_init(nodestates, &ctdb, &all_ips, &ipflags);
- ctdb_takeover_run_core(ctdb, nodemap, &all_ips);
+ ctdb_takeover_run_core(ctdb, ipflags, &all_ips);
print_ctdb_public_ip_list(all_ips);