Example usage of org.apache.cassandra.distributed.api.IInvokableInstance in the Apache Cassandra project.
Class: HostReplacementTest — method: replaceDownedHost.
/**
 * Attempt to do a host replacement on a down host
 */
@Test
public void replaceDownedHost() throws IOException {
    // Two-node cluster: node 2 gets stopped, then a fresh instance replaces it.
    // The replacement joins as node id 3, so map id 3 onto node 2's token.
    TokenSupplier tokens = TokenSupplier.evenlyDistributedTokens(2);
    try (Cluster cluster = Cluster.build(2)
                                  .withConfig(cfg -> cfg.with(Feature.GOSSIP, Feature.NETWORK))
                                  .withTokenSupplier(node -> tokens.token(node == 3 ? 2 : node))
                                  .start())
    {
        IInvokableInstance seedNode = cluster.get(1);
        IInvokableInstance downedNode = cluster.get(2);
        setupCluster(cluster);

        // Snapshot the table contents up front so any post-replacement divergence is detectable.
        SimpleQueryResult originalRows = downedNode.coordinator()
                                                   .executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);

        stopUnchecked(downedNode);

        // Bring up a brand-new instance that takes over the stopped node's place in the ring.
        IInvokableInstance replacement = replaceHostAndStart(cluster, downedNode, props -> {
            // With the host down, an old schema version may still be advertised but can
            // never be fetched from it, so skip the bootstrap schema check.
            props.set(BOOTSTRAP_SKIP_SCHEMA_CHECK, true);
        });

        // Both sides must see each other in the ring before we assert anything else.
        awaitRingJoin(seedNode, replacement);
        awaitRingJoin(replacement, seedNode);

        // Ring must be healthy and contain exactly the seed plus the replacement.
        awaitRingHealthy(seedNode);
        assertRingIs(seedNode, seedNode, replacement);
        logger.info("Current ring is {}", assertRingIs(replacement, seedNode, replacement));

        // Data visible before the replacement must still be served by both nodes.
        validateRows(seedNode.coordinator(), originalRows);
        validateRows(replacement.coordinator(), originalRows);
    }
}
Example usage of org.apache.cassandra.distributed.api.IInvokableInstance in the Apache Cassandra project.
Class: HostReplacementTest — method: replaceAliveHost.
/**
 * Attempt to do a host replacement on an alive host; the attempt must be rejected.
 */
@Test
public void replaceAliveHost() throws IOException {
    // Two-node cluster; we deliberately do NOT stop node 2 before replacing it.
    // The would-be replacement joins as node id 3, so map id 3 onto node 2's token.
    TokenSupplier tokens = TokenSupplier.evenlyDistributedTokens(2);
    try (Cluster cluster = Cluster.build(2)
                                  .withConfig(cfg -> cfg.with(Feature.GOSSIP, Feature.NETWORK)
                                                       .set(Constants.KEY_DTEST_API_STARTUP_FAILURE_AS_SHUTDOWN, false))
                                  .withTokenSupplier(node -> tokens.token(node == 3 ? 2 : node))
                                  .start())
    {
        IInvokableInstance seedNode = cluster.get(1);
        IInvokableInstance liveNode = cluster.get(2);
        setupCluster(cluster);

        // Snapshot the table contents so cluster state can be verified after the failed attempt.
        SimpleQueryResult originalRows = liveNode.coordinator()
                                                 .executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);

        // Replacing a node that is still alive must fail fast with a clear error.
        Assertions.assertThatThrownBy(() -> replaceHostAndStart(cluster, liveNode))
                  .as("Startup of instance should have failed as you can not replace a alive node")
                  .hasMessageContaining("Cannot replace a live node")
                  .isInstanceOf(UnsupportedOperationException.class);

        // The original two-node ring must be untouched and healthy.
        awaitRingHealthy(seedNode);
        assertRingIs(seedNode, seedNode, liveNode);
        logger.info("Current ring is {}", assertRingIs(liveNode, seedNode, liveNode));

        // Both original nodes must still serve the original data.
        validateRows(seedNode.coordinator(), originalRows);
        validateRows(liveNode.coordinator(), originalRows);
    }
}
Example usage of org.apache.cassandra.distributed.api.IInvokableInstance in the Apache Cassandra project.
Class: RepairOperationalTest — method: mainDC.
/**
 * Run a ranged full repair from a node in the datacenter that owns all the replicas.
 */
@Test
public void mainDC() throws IOException {
    try (Cluster cluster = Cluster.build().withRacks(2, 1, 2).start()) {
        // nodes 1-2 : datacenter1 (RF 2 — holds every replica)
        // nodes 3-4 : datacenter2 (RF 0 — holds none)
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':0}");
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (id int PRIMARY KEY, i int)");
        for (int row = 0; row < 10; row++)
            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (id, i) VALUES (?, ?)", ConsistencyLevel.ALL, row, row);
        cluster.forEach(instance -> instance.flush(KEYSPACE));

        // Coordinate the repair from a node in the DC that actually holds replicas.
        IInvokableInstance repairCoordinator = cluster.get(1);
        Assertions.assertThat(repairCoordinator.config().localDatacenter()).isEqualTo("datacenter1");
        repairCoordinator.nodetoolResult("repair", "-full", "--ignore-unreplicated-keyspaces", "-st", "0", "-et", "1000", KEYSPACE, "tbl")
                         .asserts().success();
    }
}
Example usage of org.apache.cassandra.distributed.api.IInvokableInstance in the Apache Cassandra project.
Class: RepairOperationalTest — method: hostFilterDifferentDC.
/**
 * Run a host-filtered repair coordinated from a node in a DC that holds no replicas.
 */
@Test
public void hostFilterDifferentDC() throws IOException {
    try (Cluster cluster = Cluster.build().withRacks(2, 1, 2).start()) {
        // nodes 1-2 : datacenter1 (RF 2 — holds every replica)
        // nodes 3-4 : datacenter2 (RF 0 — holds none)
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':0}");
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (id int PRIMARY KEY, i int)");
        for (int row = 0; row < 10; row++)
            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (id, i) VALUES (?, ?)", ConsistencyLevel.ALL, row, row);
        cluster.forEach(instance -> instance.flush(KEYSPACE));

        // Coordinate from a node in the DC that doesn't have any replicas.
        IInvokableInstance repairCoordinator = cluster.get(3);
        Assertions.assertThat(repairCoordinator.config().localDatacenter()).isEqualTo("datacenter2");
        // NOTE(review): without --ignore-unreplicated-keyspaces this scenario was observed to fail with
        // "Specified hosts [127.0.0.3, 127.0.0.1] do not share range (0,1000] needed for repair. Either restrict repair ranges with -st/-et options, or specify one of the neighbors that share this range with this node: [].. Check the logs on the repair participants for further details";
        // with the flag it must succeed.
        repairCoordinator.nodetoolResult("repair", "-full",
                                         "-hosts", cluster.get(1).broadcastAddress().getAddress().getHostAddress(),
                                         "-hosts", repairCoordinator.broadcastAddress().getAddress().getHostAddress(),
                                         "--ignore-unreplicated-keyspaces", "-st", "0", "-et", "1000", KEYSPACE, "tbl")
                         .asserts().success();
    }
}
Example usage of org.apache.cassandra.distributed.api.IInvokableInstance in the Apache Cassandra project.
Class: RepairOperationalTest — method: dcFilterOnEmptyDC.
/**
 * Run a DC-filtered repair coordinated from a node in a DC that holds no replicas.
 */
@Test
public void dcFilterOnEmptyDC() throws IOException {
    try (Cluster cluster = Cluster.build().withRacks(2, 1, 2).start()) {
        // nodes 1-2 : datacenter1 (RF 2 — holds every replica)
        // nodes 3-4 : datacenter2 (RF 0 — holds none)
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':0}");
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (id int PRIMARY KEY, i int)");
        for (int row = 0; row < 10; row++)
            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (id, i) VALUES (?, ?)", ConsistencyLevel.ALL, row, row);
        cluster.forEach(instance -> instance.flush(KEYSPACE));

        // Coordinate from a node in the DC that doesn't have any replicas.
        IInvokableInstance repairCoordinator = cluster.get(3);
        Assertions.assertThat(repairCoordinator.config().localDatacenter()).isEqualTo("datacenter2");
        // NOTE(review): without --ignore-unreplicated-keyspaces this scenario was observed to fail with
        // "the local data center must be part of the repair"; with the flag it must succeed.
        repairCoordinator.nodetoolResult("repair", "-full",
                                         "-dc", "datacenter1", "-dc", "datacenter2",
                                         "--ignore-unreplicated-keyspaces", "-st", "0", "-et", "1000", KEYSPACE, "tbl")
                         .asserts().success();
    }
}
Aggregations