use of org.apache.cassandra.distributed.api.SimpleQueryResult in project cassandra by apache.
the class BootstrapBinaryDisabledTest method assertBootstrapState.
private static void assertBootstrapState(IInvokableInstance node, String expected) {
    SimpleQueryResult qr = node.executeInternalWithResult("SELECT bootstrapped FROM system.local WHERE key='local'");
    Assert.assertTrue("No rows found", qr.hasNext());
    Assert.assertEquals(expected, qr.next().getString("bootstrapped"));
}
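The same cursor-style access generalizes beyond a single row. A minimal sketch (the helper below is illustrative and not part of BootstrapBinaryDisabledTest) that drains a SimpleQueryResult with the hasNext()/next()/getString() calls shown above:
// Illustrative helper, not from the Cassandra tests: collect one text column from every row
// of a SimpleQueryResult using only hasNext()/next()/getString().
// Requires java.util.ArrayList and java.util.List.
private static List<String> columnValues(SimpleQueryResult qr, String column) {
    List<String> values = new ArrayList<>();
    while (qr.hasNext())
        values.add(qr.next().getString(column));
    return values;
}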
use of org.apache.cassandra.distributed.api.SimpleQueryResult in project cassandra by apache.
the class ForceRepairTest method force.
private void force(boolean includeDifference) throws IOException {
    long nowInMicro = System.currentTimeMillis() * 1000;
    try (Cluster cluster = Cluster.build(3)
                                  .withConfig(c -> c.set("hinted_handoff_enabled", false).with(Feature.values()))
                                  .start()) {
        init(cluster);
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (k INT PRIMARY KEY, v INT)"));
        for (int i = 0; i < 10; i++)
            cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.tbl (k,v) VALUES (?, ?) USING TIMESTAMP ?"), ConsistencyLevel.ALL, i, i, nowInMicro++);

        ClusterUtils.stopUnchecked(cluster.get(2));

        // repair should fail because node2 is down
        IInvokableInstance node1 = cluster.get(1);
        for (String[] args : Arrays.asList(new String[] { "--full" },
                                           new String[] { "--full", "--preview" }, // nothing should be in the repaired set, so shouldn't stream
                                           new String[] { "--full", "--validate" }, // IR Preview
                                           new String[] { "--preview" }, // nothing should be in the repaired set, so shouldn't stream
                                           new String[] { "--validate" }, // IR
                                           new String[0])) {
            if (includeDifference)
                // each loop should have a different timestamp, causing a new difference
                node1.executeInternal(withKeyspace("INSERT INTO %s.tbl (k,v) VALUES (?, ?) USING TIMESTAMP ?"), -1, -1, nowInMicro++);

            try {
                node1.nodetoolResult(ArrayUtils.addAll(new String[] { "repair", KEYSPACE }, args)).asserts().failure();
                node1.nodetoolResult(ArrayUtils.addAll(new String[] { "repair", KEYSPACE, "--force" }, args)).asserts().success();
                assertNoRepairedAt(cluster);
            } catch (Exception | Error e) {
                // tag the error to include which args broke
                e.addSuppressed(new AssertionError("Failure for args: " + Arrays.toString(args)));
                throw e;
            }
        }

        if (includeDifference) {
            SimpleQueryResult expected = QueryResults.builder().row(-1, -1).build();
            for (IInvokableInstance node : Arrays.asList(node1, cluster.get(3))) {
                SimpleQueryResult results = node.executeInternalWithResult(withKeyspace("SELECT * FROM %s.tbl WHERE k=?"), -1);
                expected.reset();
                AssertUtils.assertRows(results, expected);
            }
        }
    }
}
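The end of force() shows the core SimpleQueryResult comparison idiom: build the expected result with QueryResults.builder(), then reset() it before each AssertUtils.assertRows call, because assertRows consumes both results as it walks them. A minimal sketch of that idiom pulled out into a reusable helper (the helper name and signature are assumptions, not part of ForceRepairTest):
// Assumed helper, sketched from the pattern above: compare the same expected rows against
// several instances, reset()-ing the expected SimpleQueryResult before each comparison
// because AssertUtils.assertRows consumes the results it compares.
private static void assertSameRowsOnAll(String cql, Object[] boundValues, SimpleQueryResult expected, IInvokableInstance... nodes) {
    for (IInvokableInstance node : nodes) {
        expected.reset();
        AssertUtils.assertRows(node.executeInternalWithResult(cql, boundValues), expected);
    }
}
With such a helper, the includeDifference block above would reduce to a single call, e.g. assertSameRowsOnAll(withKeyspace("SELECT * FROM %s.tbl WHERE k=?"), new Object[] { -1 }, expected, node1, cluster.get(3)).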
use of org.apache.cassandra.distributed.api.SimpleQueryResult in project cassandra by apache.
the class HostReplacementTest method replaceDownedHost.
/**
 * Attempt to do a host replacement on a downed host.
 */
@Test
public void replaceDownedHost() throws IOException {
    // start with 2 nodes, stop node2, then host-replace it
    TokenSupplier even = TokenSupplier.evenlyDistributedTokens(2);
    try (Cluster cluster = Cluster.build(2)
                                  .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK))
                                  .withTokenSupplier(node -> even.token(node == 3 ? 2 : node))
                                  .start()) {
        IInvokableInstance seed = cluster.get(1);
        IInvokableInstance nodeToRemove = cluster.get(2);

        setupCluster(cluster);

        // collect rows to detect issues later on if the state doesn't match
        SimpleQueryResult expectedState = nodeToRemove.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);

        stopUnchecked(nodeToRemove);

        // now create a new node to replace the other node
        IInvokableInstance replacingNode = replaceHostAndStart(cluster, nodeToRemove, props -> {
            // since the host is down, an old schema version may show up that can't be fetched from it...
            props.set(BOOTSTRAP_SKIP_SCHEMA_CHECK, true);
        });

        // wait till the replacing node is in the ring
        awaitRingJoin(seed, replacingNode);
        awaitRingJoin(replacingNode, seed);

        // make sure all nodes are healthy
        awaitRingHealthy(seed);

        assertRingIs(seed, seed, replacingNode);
        logger.info("Current ring is {}", assertRingIs(replacingNode, seed, replacingNode));

        validateRows(seed.coordinator(), expectedState);
        validateRows(replacingNode.coordinator(), expectedState);
    }
}
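validateRows is defined elsewhere in HostReplacementTest. A plausible minimal sketch of such a check, assuming it simply re-runs the query through the given coordinator and compares against the captured SimpleQueryResult:
// Plausible sketch only (the real validateRows lives elsewhere in the Cassandra test code):
// re-run the query on the given coordinator and compare it to the previously captured state,
// reset()-ing the expected result so it can be reused across nodes.
static void validateRows(ICoordinator coordinator, SimpleQueryResult expected) {
    expected.reset();
    SimpleQueryResult actual = coordinator.executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
    AssertUtils.assertRows(actual, expected);
}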
use of org.apache.cassandra.distributed.api.SimpleQueryResult in project cassandra by apache.
the class HostReplacementTest method replaceAliveHost.
/**
 * Attempt to do a host replacement on an alive host.
 */
@Test
public void replaceAliveHost() throws IOException {
    // start with 2 nodes, then attempt to host-replace node2 while it is still alive
    TokenSupplier even = TokenSupplier.evenlyDistributedTokens(2);
    try (Cluster cluster = Cluster.build(2)
                                  .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK)
                                                    .set(Constants.KEY_DTEST_API_STARTUP_FAILURE_AS_SHUTDOWN, false))
                                  .withTokenSupplier(node -> even.token(node == 3 ? 2 : node))
                                  .start()) {
        IInvokableInstance seed = cluster.get(1);
        IInvokableInstance nodeToRemove = cluster.get(2);

        setupCluster(cluster);

        // collect rows to detect issues later on if the state doesn't match
        SimpleQueryResult expectedState = nodeToRemove.coordinator().executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);

        // now attempt to replace the other node, which should fail because it is still alive
        Assertions.assertThatThrownBy(() -> replaceHostAndStart(cluster, nodeToRemove))
                  .as("Startup of instance should have failed as you can not replace an alive node")
                  .hasMessageContaining("Cannot replace a live node")
                  .isInstanceOf(UnsupportedOperationException.class);

        // make sure all nodes are healthy
        awaitRingHealthy(seed);

        assertRingIs(seed, seed, nodeToRemove);
        logger.info("Current ring is {}", assertRingIs(nodeToRemove, seed, nodeToRemove));

        validateRows(seed.coordinator(), expectedState);
        validateRows(nodeToRemove.coordinator(), expectedState);
    }
}
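Because a SimpleQueryResult is consumed as it is compared, an alternative to reset() is to materialize the captured state once with toObjectArrays() (the same accessor used in the ReplicaFilteringProtectionTest example below) and compare plain row arrays. A sketch, assuming AssertUtils.assertRows also accepts Object[][] rows as used elsewhere in these tests:
// Sketch under assumptions: toObjectArrays() snapshots the captured rows so they can be
// compared repeatedly without reset(); AssertUtils.assertRows is assumed to take Object[][] rows.
Object[][] expectedRows = expectedState.toObjectArrays();
for (ICoordinator coordinator : Arrays.asList(seed.coordinator(), nodeToRemove.coordinator())) {
    Object[][] actual = coordinator.executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL).toObjectArrays();
    AssertUtils.assertRows(actual, expectedRows);
}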
use of org.apache.cassandra.distributed.api.SimpleQueryResult in project cassandra by apache.
the class ReplicaFilteringProtectionTest method testMissedUpdates.
private void testMissedUpdates(String tableName, int warnThreshold, int failThreshold, boolean shouldWarn) {
    cluster.get(1).runOnInstance(() -> StorageService.instance.setCachedReplicaRowsWarnThreshold(warnThreshold));
    cluster.get(1).runOnInstance(() -> StorageService.instance.setCachedReplicaRowsFailThreshold(failThreshold));

    String fullTableName = KEYSPACE + '.' + tableName;

    // Case 1: Insert and query rows at ALL to verify the baseline.
    for (int i = 0; i < ROWS; i++) {
        cluster.coordinator(1).execute("INSERT INTO " + fullTableName + "(k, v) VALUES (?, 'old')", ALL, i);
    }

    long histogramSampleCount = rowsCachedPerQueryCount(cluster.get(1), tableName);

    String query = "SELECT * FROM " + fullTableName + " WHERE v = ? LIMIT ? ALLOW FILTERING";

    Object[][] initialRows = cluster.coordinator(1).execute(query, ALL, "old", ROWS);
    assertRows(initialRows, row(1, "old"), row(0, "old"), row(2, "old"));

    // Make sure only one sample was recorded for the query.
    assertEquals(histogramSampleCount + 1, rowsCachedPerQueryCount(cluster.get(1), tableName));

    // Case 2: Update all rows on only one replica, leaving the entire dataset of the remaining replica out-of-date.
    updateAllRowsOn(1, fullTableName, "new");

    // The replica that missed the updates creates a mismatch at every row, and we therefore cache a version
    // of that row for all replicas.
    SimpleQueryResult oldResult = cluster.coordinator(1).executeWithResult(query, ALL, "old", ROWS);
    assertRows(oldResult.toObjectArrays());
    verifyWarningState(shouldWarn, oldResult);

    // We should have made 3 row "completion" requests.
    assertEquals(ROWS, protectionQueryCount(cluster.get(1), tableName));

    // In all cases above, the queries should be caching 1 row per partition per replica, but
    // 6 for the whole query, given every row is potentially stale.
    assertEquals(ROWS * REPLICAS, maxRowsCachedPerQuery(cluster.get(1), tableName));

    // Make sure only one more sample was recorded for the query.
    assertEquals(histogramSampleCount + 2, rowsCachedPerQueryCount(cluster.get(1), tableName));

    // Case 3: Observe the effects of blocking read-repair.
    // The previous query performs a blocking read-repair, which removes replica divergence. This
    // will only warn, therefore, if the warning threshold is actually below the number of replicas.
    // (i.e. The row cache counter is decremented/reset as each partition is consumed.)
    SimpleQueryResult newResult = cluster.coordinator(1).executeWithResult(query, ALL, "new", ROWS);
    Object[][] newRows = newResult.toObjectArrays();
    assertRows(newRows, row(1, "new"), row(0, "new"), row(2, "new"));
    verifyWarningState(warnThreshold < REPLICAS, newResult);

    // We should still only have made 3 row "completion" requests, given there was no replica divergence in the last query.
    assertEquals(ROWS, protectionQueryCount(cluster.get(1), tableName));

    // With no replica divergence, we only cache a single partition at a time across 2 replicas.
    assertEquals(REPLICAS, minRowsCachedPerQuery(cluster.get(1), tableName));

    // Make sure only one more sample was recorded for the query.
    assertEquals(histogramSampleCount + 3, rowsCachedPerQueryCount(cluster.get(1), tableName));

    // Case 4: Introduce another mismatch by updating all rows on only one replica.
    updateAllRowsOn(1, fullTableName, "future");

    // Another mismatch is introduced, and we once again cache a version of each row during resolution.
    SimpleQueryResult futureResult = cluster.coordinator(1).executeWithResult(query, ALL, "future", ROWS);
    Object[][] futureRows = futureResult.toObjectArrays();
    assertRows(futureRows, row(1, "future"), row(0, "future"), row(2, "future"));
    verifyWarningState(shouldWarn, futureResult);

    // We should have made 3 more row "completion" requests.
    assertEquals(ROWS * 2, protectionQueryCount(cluster.get(1), tableName));

    // In all cases above, the queries should be caching 1 row per partition, but 6 for the
    // whole query, given every row is potentially stale.
    assertEquals(ROWS * REPLICAS, maxRowsCachedPerQuery(cluster.get(1), tableName));

    // Make sure only one more sample was recorded for the query.
    assertEquals(histogramSampleCount + 4, rowsCachedPerQueryCount(cluster.get(1), tableName));
}
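verifyWarningState is a helper defined elsewhere in ReplicaFilteringProtectionTest. A minimal sketch of what such a check could look like, assuming the dtest API exposes the coordinator's client warnings on the result via a warnings() accessor:
// Sketch only, with an assumed warnings() accessor on SimpleQueryResult: when the cached-rows
// warn threshold is crossed the coordinator attaches a client warning to the response,
// so the warning list should be non-empty exactly when shouldWarn is true.
private static void verifyWarningState(boolean shouldWarn, SimpleQueryResult result) {
    assertEquals(shouldWarn, !result.warnings().isEmpty());
}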