Example usage of org.apache.cassandra.distributed.api.SimpleQueryResult in the Apache Cassandra project:
class HostReplacementOfDownedClusterTest, method hostReplacementOfDeadNodeAndOtherNodeStartsAfter.
/**
 * Cluster stops completely, then start seed, then host replace node2; after all complete start node3 to make sure
 * it comes up correctly with the new host in the ring.
 */
@Test
public void hostReplacementOfDeadNodeAndOtherNodeStartsAfter() throws IOException {
    // Scenario: 3 nodes, crash everything, bring the seed back, host-replace the
    // dead node, and only then restart the remaining node.
    int clusterSize = 3;
    TokenSupplier tokenSupplier = TokenSupplier.evenlyDistributedTokens(clusterSize);
    // The replacement instance is assigned id (clusterSize + 1) but must reuse node2's token.
    try (Cluster cluster = Cluster.build(clusterSize).withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK)).withTokenSupplier(node -> tokenSupplier.token(node == (clusterSize + 1) ? 2 : node)).start()) {
        IInvokableInstance seed = cluster.get(1);
        IInvokableInstance deadNode = cluster.get(2);
        IInvokableInstance lateNode = cluster.get(3);
        InetSocketAddress deadAddress = deadNode.broadcastAddress();
        setupCluster(cluster);
        // Snapshot rows and tokens so post-replacement state can be validated against them.
        SimpleQueryResult expectedRows = deadNode.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
        List<String> tokensBeforeCrash = getTokenMetadataTokens(seed);
        // Simulate a full-cluster outage.
        stopAll(cluster);
        // With everything down, restart the seed first (it should be the first node up).
        seed.startup();
        // node2 should still be known to gossip, but with generation/version of 0.
        assertGossipInfo(seed, deadAddress, 0, -1);
        // The seed must still remember node2's tokens after its restart.
        List<String> tokensAfterRestart = getTokenMetadataTokens(seed);
        Assertions.assertThat(tokensAfterRestart).as("Tokens no longer match after restarting").isEqualTo(tokensBeforeCrash);
        // Host-replace the dead node with a fresh instance.
        IInvokableInstance replacement = replaceHostAndStart(cluster, deadNode);
        // Wait until the replacement is visible in the ring from both sides.
        awaitRingJoin(seed, replacement);
        awaitRingJoin(replacement, seed);
        // Replacement is properly in the ring; now bring the remaining node back.
        lateNode.startup();
        awaitRingJoin(seed, lateNode);
        awaitRingJoin(replacement, lateNode);
        // Every node should agree on a healthy ring containing all three members.
        awaitRingHealthy(seed);
        assertRingIs(seed, seed, replacement, lateNode);
        assertRingIs(replacement, seed, replacement, lateNode);
        logger.info("Current ring is {}", assertRingIs(lateNode, seed, replacement, lateNode));
        // Data written before the crash must still be readable from both coordinators.
        validateRows(seed.coordinator(), expectedRows);
        validateRows(replacement.coordinator(), expectedRows);
    }
}
Example usage of org.apache.cassandra.distributed.api.SimpleQueryResult in the Apache Cassandra project:
class HostReplacementOfDownedClusterTest, method hostReplacementOfDeadNode.
/**
 * When the full cluster crashes, make sure that we can replace a dead node after recovery. This can happen
 * with DC outages (assuming single DC setup) where the recovery isn't able to recover a specific node.
 */
@Test
public void hostReplacementOfDeadNode() throws IOException {
    // Scenario: 2 nodes, crash everything, bring the seed back, host-replace the dead node.
    TokenSupplier tokenSupplier = TokenSupplier.evenlyDistributedTokens(2);
    // The replacement instance is assigned id 3 but must reuse node2's token.
    try (Cluster cluster = Cluster.build(2).withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK)).withTokenSupplier(node -> tokenSupplier.token(node == 3 ? 2 : node)).start()) {
        IInvokableInstance seed = cluster.get(1);
        IInvokableInstance deadNode = cluster.get(2);
        InetSocketAddress deadAddress = deadNode.broadcastAddress();
        setupCluster(cluster);
        // Snapshot rows and tokens so post-replacement state can be validated against them.
        SimpleQueryResult expectedRows = deadNode.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
        List<String> tokensBeforeCrash = getTokenMetadataTokens(seed);
        // Simulate a full-cluster outage.
        stopAll(cluster);
        // With everything down, restart the seed first (it should be the first node up).
        seed.startup();
        // node2 should still be known to gossip, but with generation/version of 0.
        assertGossipInfo(seed, deadAddress, 0, -1);
        // The seed must still remember node2's tokens after its restart.
        List<String> tokensAfterRestart = getTokenMetadataTokens(seed);
        Assertions.assertThat(tokensAfterRestart).as("Tokens no longer match after restarting").isEqualTo(tokensBeforeCrash);
        // Host-replace the dead node with a fresh instance and wait for it to join.
        IInvokableInstance replacement = replaceHostAndStart(cluster, deadNode);
        awaitRingJoin(seed, replacement);
        awaitRingJoin(replacement, seed);
        // The replaced node must no longer appear in the ring from either perspective.
        assertNotInRing(seed, deadNode);
        logger.info("Current ring is {}", assertNotInRing(replacement, deadNode));
        // Data written before the crash must still be readable from both coordinators.
        validateRows(seed.coordinator(), expectedRows);
        validateRows(replacement.coordinator(), expectedRows);
    }
}
Example usage of org.apache.cassandra.distributed.api.SimpleQueryResult in the Apache Cassandra project:
class AbstractClientSizeWarning, method failThreshold.
/**
 * Verifies behaviour of the client read-size fail threshold for the given query.
 * <p>
 * Loads enough rows to exceed {@link #failThresholdRowCount()}, then checks that:
 * (1) an in-process execution aborts with {@code ReadSizeAbortException} and surfaces
 * abort client warnings, (2) a driver execution fails with {@code ReadFailureException}
 * whose failure map reports {@code READ_SIZE} from the expected replicas, and
 * (3) once the threshold is disabled the query succeeds with no warnings.
 *
 * @param cql the SELECT statement to run against the oversized partition
 * @throws UnknownHostException if the expected replica addresses cannot be built
 */
public void failThreshold(String cql) throws UnknownHostException {
    ICoordinator node = CLUSTER.coordinator(1);
    // Write enough 512-byte rows to push the read result size past the fail threshold.
    for (int i = 0; i < failThresholdRowCount(); i++)
        node.execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, ?)", ConsistencyLevel.ALL, i + 1, bytes(512));
    if (shouldFlush())
        CLUSTER.stream().forEach(i -> i.flush(KEYSPACE));
    enable(true);
    checkpointHistogram();
    // Run the query inside the instance: it should abort and attach the abort
    // message as a client warning.
    List<String> warnings = CLUSTER.get(1).callsOnInstance(() -> {
        ClientWarn.instance.captureWarnings();
        CoordinatorWarnings.init();
        try {
            QueryProcessor.execute(cql, org.apache.cassandra.db.ConsistencyLevel.ALL, QueryState.forInternalCalls());
            Assert.fail("Expected query failure");
        } catch (ReadSizeAbortException e) {
            // expected, client transport returns an error message and includes client warnings
        } finally {
            // Flush and clear coordinator warning state even if an unexpected
            // exception escapes, so static state cannot leak into other tests.
            CoordinatorWarnings.done();
            CoordinatorWarnings.reset();
        }
        return ClientWarn.instance.getWarnings();
    }).call();
    assertAbortWarnings(warnings);
    assertHistogramUpdated();
    assertWarnAborts(0, 1, 1);
    // Same query through the driver: the client only sees a generic read failure.
    try {
        driverQueryAll(cql);
        Assert.fail("Query should have thrown ReadFailureException");
    } catch (com.datastax.driver.core.exceptions.ReadFailureException e) {
        // without changing the client can't produce a better message...
        // client does NOT include the message sent from the server in the exception; so the message doesn't work
        // well in this case
        assertThat(e.getMessage()).contains("responses were required but only 0 replica responded");
        // Failures may come from any subset of the three replicas, but every
        // reported key must be one of them and the reason must be READ_SIZE.
        ImmutableSet<InetAddress> expectedKeys = ImmutableSet.of(
            InetAddress.getByAddress(new byte[] { 127, 0, 0, 1 }),
            InetAddress.getByAddress(new byte[] { 127, 0, 0, 2 }),
            InetAddress.getByAddress(new byte[] { 127, 0, 0, 3 }));
        assertThat(e.getFailuresMap()).hasSizeBetween(1, 3).containsValue(RequestFailureReason.READ_SIZE.code).hasKeySatisfying(new Condition<InetAddress>() {
            public boolean matches(InetAddress value) {
                return expectedKeys.contains(value);
            }
        });
    }
    assertHistogramUpdated();
    assertWarnAborts(0, 2, 1);
    // query should no longer fail once the threshold is disabled
    enable(false);
    SimpleQueryResult result = node.executeWithResult(cql, ConsistencyLevel.ALL);
    assertThat(result.warnings()).isEmpty();
    assertHistogramNotUpdated();
    assertThat(driverQueryAll(cql).getExecutionInfo().getWarnings()).isEmpty();
    assertHistogramNotUpdated();
    assertWarnAborts(0, 2, 0);
}
Example usage of org.apache.cassandra.distributed.api.SimpleQueryResult in the Apache Cassandra project:
class AbstractClientSizeWarning, method warnThreshold.
/**
 * Verifies behaviour of the client read-size warn threshold for the given query:
 * with the threshold enabled the query succeeds but carries client warnings
 * (through both the dtest API and the driver); with it disabled no warnings appear.
 *
 * @param cql               the SELECT statement to run
 * @param triggerReadRepair when true, write each row to every replica independently
 *                          so mismatched cell timestamps force a read-repair on read
 */
public void warnThreshold(String cql, boolean triggerReadRepair) {
    // Load enough 512-byte rows to cross the warn (but not the fail) threshold.
    for (int row = 0; row < warnThresholdRowCount(); row++) {
        if (triggerReadRepair) {
            int ck = row + 1;
            // cell timestamps will not match (even though the values match) which will trigger a read-repair
            CLUSTER.stream().forEach(node -> node.executeInternal("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, ?)", ck, bytes(512)));
        } else {
            CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, ?)", ConsistencyLevel.ALL, row + 1, bytes(512));
        }
    }
    if (shouldFlush())
        CLUSTER.stream().forEach(node -> node.flush(KEYSPACE));
    enable(true);
    checkpointHistogram();
    // Threshold enabled: both the dtest API and the driver should surface warnings.
    SimpleQueryResult enabledResult = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
    assertWarnings(enabledResult.warnings());
    assertHistogramUpdated();
    assertWarnAborts(1, 0, 0);
    assertWarnings(driverQueryAll(cql).getExecutionInfo().getWarnings());
    assertHistogramUpdated();
    assertWarnAborts(2, 0, 0);
    // Threshold disabled: the same query should be silent and leave counters unchanged.
    enable(false);
    SimpleQueryResult disabledResult = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
    assertThat(disabledResult.warnings()).isEmpty();
    assertHistogramNotUpdated();
    assertThat(driverQueryAll(cql).getExecutionInfo().getWarnings()).isEmpty();
    assertHistogramNotUpdated();
    assertWarnAborts(2, 0, 0);
}
Example usage of org.apache.cassandra.distributed.api.SimpleQueryResult in the Apache Cassandra project:
class TombstoneCountWarningTest, method warnThreshold.
// Verifies tombstone warn-threshold behaviour for the given query: when enabled,
// a read that scans more than TOMBSTONE_WARN tombstones surfaces a client warning
// (via both the dtest API and the driver); when disabled, scans surface no warning
// while single-partition reads surface only the coordinator-local tombstone message.
private void warnThreshold(String cql, boolean isScan) {
// Insert TOMBSTONE_WARN + 1 null values so the subsequent read crosses the warn threshold.
for (int i = 0; i < TOMBSTONE_WARN + 1; i++) CLUSTER.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, ?, null)", ConsistencyLevel.ALL, i);
enable(true);
// Enabled path: expect exactly one aggregated tombstone warning mentioning the query.
Consumer<List<String>> testEnabled = warnings -> Assertions.assertThat(Iterables.getOnlyElement(warnings)).contains("nodes scanned up to " + (TOMBSTONE_WARN + 1) + " tombstones and issued tombstone warnings for query " + cql);
SimpleQueryResult result = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
testEnabled.accept(result.warnings());
assertWarnAborts(1, 0, 0);
// The driver must observe the same warning; counters advance accordingly.
testEnabled.accept(driverQueryAll(cql).getExecutionInfo().getWarnings());
assertWarnAborts(2, 0, 0);
enable(false);
Consumer<List<String>> testDisabled = warnings -> {
// client warnings are currently coordinator only, so if present only 1 is expected
if (isScan) {
// Scans perform multiple ReadCommands, which will not propagate the warnings to the top-level coordinator; so no warnings are expected
Assertions.assertThat(warnings).isEmpty();
} else {
Assertions.assertThat(Iterables.getOnlyElement(warnings)).startsWith("Read " + (TOMBSTONE_WARN + 1) + " live rows and " + (TOMBSTONE_WARN + 1) + " tombstone cells for query " + cql);
}
};
// Disabled path: warn/abort counters must not advance beyond the earlier values.
result = CLUSTER.coordinator(1).executeWithResult(cql, ConsistencyLevel.ALL);
testDisabled.accept(result.warnings());
assertWarnAborts(2, 0, 0);
testDisabled.accept(driverQueryAll(cql).getExecutionInfo().getWarnings());
assertWarnAborts(2, 0, 0);
}
Aggregations