use of org.apache.cassandra.distributed.api.SimpleQueryResult in project cassandra by apache.
the class HostReplacementTest method validateRows.
static void validateRows(ICoordinator coordinator, SimpleQueryResult expected) {
    expected.reset();
    SimpleQueryResult rows = coordinator.executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
    AssertUtils.assertRows(rows, expected);
}
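For reference, the expected result does not have to come from a live query; a minimal sketch of building one by hand, assuming the QueryResults builder from the same org.apache.cassandra.distributed.api package and a running Cluster named cluster (column names and values are illustrative):
import org.apache.cassandra.distributed.api.QueryResults;
import org.apache.cassandra.distributed.api.SimpleQueryResult;

// hand-built expected rows; validateRows rewinds them and compares against a fresh query
SimpleQueryResult expected = QueryResults.builder()
                                         .columns("pk", "ck", "v")
                                         .row(1, 1, 100)
                                         .row(1, 2, 200)
                                         .build();
validateRows(cluster.get(1).coordinator(), expected);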
use of org.apache.cassandra.distributed.api.SimpleQueryResult in project cassandra by apache.
the class HostReplacementTest method seedGoesDownBeforeDownHost.
/**
 * If the seed goes down and then another node goes down, make sure host replacement of the
 * second node still works once the seed comes back.
 */
@Test
public void seedGoesDownBeforeDownHost() throws IOException {
    // start with 3 nodes, stop both the seed and the node to remove, start the seed, then host-replace the down node
    TokenSupplier even = TokenSupplier.evenlyDistributedTokens(3);
    try (Cluster cluster = Cluster.build(3)
                                  .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK))
                                  .withTokenSupplier(node -> even.token(node == 4 ? 2 : node))
                                  .start()) {
        // look these up early, as they can't be fetched once the node is down
        IInvokableInstance seed = cluster.get(1);
        IInvokableInstance nodeToRemove = cluster.get(2);
        IInvokableInstance nodeToStayAlive = cluster.get(3);
        setupCluster(cluster);
        // collect rows/tokens to detect issues later on if the state doesn't match
        SimpleQueryResult expectedState = nodeToRemove.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
        List<String> beforeCrashTokens = getTokenMetadataTokens(seed);
        // shut down the seed, then the node to remove
        stopUnchecked(seed);
        stopUnchecked(nodeToRemove);
        // restart the seed
        seed.startup();
        // make sure the node to remove is still in the ring
        assertInRing(seed, nodeToRemove);
        // make sure node1 still has node2's tokens
        List<String> currentTokens = getTokenMetadataTokens(seed);
        Assertions.assertThat(currentTokens).as("Tokens no longer match after restarting").isEqualTo(beforeCrashTokens);
        // now create a new node to replace the down node
        IInvokableInstance replacingNode = replaceHostAndStart(cluster, nodeToRemove);
        List<IInvokableInstance> expectedRing = Arrays.asList(seed, replacingNode, nodeToStayAlive);
        // wait until the replacing node is in the ring
        awaitRingJoin(seed, replacingNode);
        awaitRingJoin(replacingNode, seed);
        awaitRingJoin(nodeToStayAlive, replacingNode);
        // make sure all nodes are healthy
        logger.info("Current ring is {}", awaitRingHealthy(seed));
        expectedRing.forEach(i -> assertRingIs(i, expectedRing));
        validateRows(seed.coordinator(), expectedState);
        validateRows(replacingNode.coordinator(), expectedState);
    }
}
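The expectedState captured before the outage works as a snapshot because SimpleQueryResult is a detached, replayable result set rather than a live cursor. A minimal sketch of inspecting and reusing one, assuming the toObjectArrays() and reset() accessors of the dtest API behave as described:
// capture once, materialize, then rewind for later assertions
SimpleQueryResult state = seed.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
Object[][] rows = state.toObjectArrays(); // copies every remaining row
logger.info("Snapshot contains {} rows", rows.length);
state.reset();                            // rewind so assertRows can iterate it again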
use of org.apache.cassandra.distributed.api.SimpleQueryResult in project cassandra by apache.
the class AbstractClientSizeWarning method noWarnings.
public void noWarnings(String cql) {
    CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, ?)", ConsistencyLevel.ALL, bytes(128));
    CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 2, ?)", ConsistencyLevel.ALL, bytes(128));
    if (shouldFlush())
        CLUSTER.stream().forEach(i -> i.flush(KEYSPACE));
    // the payloads are small, so no client-size warning should be produced in either run
    Consumer<List<String>> test = warnings -> Assert.assertEquals(Collections.emptyList(), warnings);
    // run once with the size check enabled and once disabled
    for (boolean b : Arrays.asList(true, false)) {
        enable(b);
        checkpointHistogram();
        SimpleQueryResult result = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
        test.accept(result.warnings());
        if (b) {
            assertHistogramUpdated();
        } else {
            assertHistogramNotUpdated();
        }
        test.accept(driverQueryAll(cql).getExecutionInfo().getWarnings());
        if (b) {
            assertHistogramUpdated();
        } else {
            assertHistogramNotUpdated();
        }
        assertWarnAborts(0, 0, 0);
    }
}
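The two accept calls exercise the same check over two transports: result.warnings() reads the warnings the in-JVM dtest API captured on the coordinator, while the driver path reads them from the native protocol's ExecutionInfo. A minimal sketch of the in-JVM side on its own (the SELECT is illustrative):
SimpleQueryResult result = CLUSTER.coordinator(1).executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl WHERE pk = 1", ConsistencyLevel.ALL);
// warnings() returns the server-generated warnings attached to this result; empty when nothing fired
Assert.assertEquals(Collections.emptyList(), result.warnings());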
use of org.apache.cassandra.distributed.api.SimpleQueryResult in project cassandra by apache.
the class TombstoneCountWarningTest method noWarnings.
public void noWarnings(String cql) {
    Consumer<List<String>> test = warnings -> Assert.assertEquals(Collections.emptyList(), warnings);
    // inserting NULL values writes tombstones; stop at the warn threshold so reads stay warning-free
    for (int i = 0; i < TOMBSTONE_WARN; i++)
        CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, null)", ConsistencyLevel.ALL, i);
    for (boolean b : Arrays.asList(true, false)) {
        enable(b);
        SimpleQueryResult result = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
        test.accept(result.warnings());
        test.accept(driverQueryAll(cql).getExecutionInfo().getWarnings());
        assertWarnAborts(0, 0, 0);
    }
}
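For contrast, a hedged sketch of the warn path: write one tombstone past the threshold and expect at least one warning on the result (the exact message text varies, so only non-emptiness is asserted):
// one more NULL insert than the warn threshold (illustrative)
for (int i = 0; i < TOMBSTONE_WARN + 1; i++)
    CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, null)", ConsistencyLevel.ALL, i);
SimpleQueryResult result = CLUSTER.coordinator(1).executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
Assert.assertFalse("expected a tombstone warning", result.warnings().isEmpty());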
use of org.apache.cassandra.distributed.api.SimpleQueryResult in project cassandra by apache.
the class HostReplacementAbruptDownedInstanceTest method hostReplaceAbruptShutdown.
/**
 * Test host replacement after an abrupt shutdown, i.e. when the shutdown state is never
 * broadcast and the node to be replaced is still in the NORMAL state on its peers.
 */
@Test
public void hostReplaceAbruptShutdown() throws IOException {
    int numStartNodes = 3;
    TokenSupplier even = TokenSupplier.evenlyDistributedTokens(numStartNodes);
    try (Cluster cluster = Cluster.build(numStartNodes)
                                  .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK))
                                  .withTokenSupplier(node -> even.token(node == (numStartNodes + 1) ? 2 : node))
                                  .start()) {
        IInvokableInstance seed = cluster.get(1);
        IInvokableInstance nodeToRemove = cluster.get(2);
        IInvokableInstance peer = cluster.get(3);
        List<IInvokableInstance> peers = Arrays.asList(seed, peer);
        setupCluster(cluster);
        // collect rows/tokens to detect issues later on if the state doesn't match
        SimpleQueryResult expectedState = nodeToRemove.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
        stopAbrupt(cluster, nodeToRemove);
        // at this point node 2 should still be NORMAL on all other nodes
        peers.forEach(p -> assertRingState(p, nodeToRemove, "Normal"));
        // node is down, but queries should still work
        // TODO failing, but shouldn't!
        // peers.forEach(p -> validateRows(p.coordinator(), expectedState));
        // now create a new node to replace the down node
        long startNanos = nanoTime();
        IInvokableInstance replacingNode = replaceHostAndStart(cluster, nodeToRemove, properties -> {
            // since node2 was killed abruptly, it's possible that node2's gossip state has an old schema version;
            // if that happens, bootstrap will fail waiting for a schema version it will never see. To avoid
            // this, set this property so the mismatch is logged as a warning rather than failing bootstrap.
            properties.set(BOOTSTRAP_SKIP_SCHEMA_CHECK, true);
        });
        logger.info("Host replacement of {} with {} took {}", nodeToRemove, replacingNode, Duration.ofNanos(nanoTime() - startNanos));
        peers.forEach(p -> awaitRingJoin(p, replacingNode));
        // make sure all nodes are healthy
        awaitRingHealthy(seed);
        List<IInvokableInstance> expectedRing = Arrays.asList(seed, peer, replacingNode);
        expectedRing.forEach(p -> assertRingIs(p, expectedRing));
        expectedRing.forEach(p -> validateRows(p.coordinator(), expectedState));
    }
}
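Because validateRows rewinds the snapshot before each comparison, the single expectedState can safely be checked from every node in the expected ring, which is what the last forEach does. The same pattern in isolation (names as in the snippet above):
// one snapshot, asserted from each node's point of view
SimpleQueryResult expectedState = nodeToRemove.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
for (IInvokableInstance node : Arrays.asList(seed, peer)) {
    validateRows(node.coordinator(), expectedState); // reset() inside validateRows makes reuse safe
}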