Use of org.apache.cassandra.distributed.api.NodeToolResult in project cassandra by apache.
The class SnapshotsTest, method testSameTimestampOnEachTableOfSnaphot.
@Test
public void testSameTimestampOnEachTableOfSnaphot() {
    cluster.get(1).nodetoolResult("snapshot", "-t", "sametimestamp").asserts().success();
    NodeToolResult result = cluster.get(1).nodetoolResult("listsnapshots");
    long distinctTimestamps = Arrays.stream(result.getStdout().split("\n"))
                                    .filter(line -> line.startsWith("sametimestamp"))
                                    .map(line -> line.replaceAll(" +", " ").split(" ")[7])
                                    .distinct()
                                    .count();
    // assert all creation dates are the same, so there is just one value across all individual tables
    Assert.assertEquals(1, distinctTimestamps);
}
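For reference, the column parsing above could be pulled into a small reusable helper. The following is only a sketch, not part of the project; snapshotColumn is a hypothetical name, the usual java.util.Arrays and java.util.stream.Stream imports are assumed, and the column index 7 used above is tied to the current nodetool listsnapshots output layout.

// Hypothetical helper (sketch only): extract one whitespace-separated column from
// nodetool listsnapshots output for every line starting with the given snapshot tag.
private static Stream<String> snapshotColumn(NodeToolResult result, String tag, int column)
{
    return Arrays.stream(result.getStdout().split("\n"))
                 .filter(line -> line.startsWith(tag))
                 .map(line -> line.replaceAll(" +", " ").split(" ")[column]);
}

// Usage mirroring the assertion above:
// long distinct = snapshotColumn(result, "sametimestamp", 7).distinct().count();
// Assert.assertEquals(1, distinct);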
Use of org.apache.cassandra.distributed.api.NodeToolResult in project cassandra by apache.
The class PreviewRepairTest, method testStartNonIntersectingPreviewRepair.
/**
* Makes sure we can start a non-intersecting preview repair while there are other pending sstables on disk
*/
@Test
public void testStartNonIntersectingPreviewRepair() throws IOException, InterruptedException, ExecutionException {
    ExecutorService es = Executors.newSingleThreadExecutor();
    try (Cluster cluster = init(Cluster.build(2).withConfig(config -> config.with(GOSSIP).with(NETWORK)).start())) {
        cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, t int)");
        insert(cluster.coordinator(1), 0, 100);
        cluster.forEach((node) -> node.flush(KEYSPACE));
        cluster.get(1).nodetoolResult("repair", KEYSPACE, "tbl").asserts().success();
        insert(cluster.coordinator(1), 100, 100);
        cluster.forEach((node) -> node.flush(KEYSPACE));
        // pause inc repair validation messages on node2 until node1 has finished
        Condition incRepairStarted = newOneTimeCondition();
        Condition continueIncRepair = newOneTimeCondition();
        DelayFirstRepairTypeMessageFilter filter = DelayFirstRepairTypeMessageFilter.validationRequest(incRepairStarted, continueIncRepair);
        cluster.filters().outbound().verbs(Verb.VALIDATION_REQ.id).from(1).to(2).messagesMatching(filter).drop();
        // get local ranges to repair two separate ranges:
        List<String> localRanges = cluster.get(1).callOnInstance(() -> {
            List<String> res = new ArrayList<>();
            for (Range<Token> r : StorageService.instance.getLocalReplicas(KEYSPACE).ranges())
                res.add(r.left.getTokenValue() + ":" + r.right.getTokenValue());
            return res;
        });
        assertEquals(2, localRanges.size());
        String[] previewedRange = localRanges.get(0).split(":");
        String[] repairedRange = localRanges.get(1).split(":");
        Future<NodeToolResult> repairStatusFuture = es.submit(() -> cluster.get(1).nodetoolResult("repair", "-st", repairedRange[0], "-et", repairedRange[1], KEYSPACE, "tbl"));
        // wait for node1 to start validation compaction
        incRepairStarted.await();
        // now we have pending sstables in range "repairedRange", make sure we can preview "previewedRange"
        cluster.get(1).nodetoolResult("repair", "-vd", "-st", previewedRange[0], "-et", previewedRange[1], KEYSPACE, "tbl")
               .asserts()
               .success()
               .notificationContains("Repaired data is in sync");
        continueIncRepair.signalAll();
        repairStatusFuture.get().asserts().success();
    } finally {
        es.shutdown();
    }
}
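DelayFirstRepairTypeMessageFilter is a helper defined elsewhere in PreviewRepairTest and its body is not shown in this excerpt. The following is a minimal sketch of the gating idea only, not the project's implementation: it assumes the in-jvm dtest IMessageFilters.Matcher interface and Cassandra's Condition API (imports for IMessage, Condition and AtomicBoolean omitted). The first matching message signals that validation has started and then blocks until the test releases it; the matcher returns false so the message is delayed rather than dropped.

// Sketch only (assumed signatures): delay the first matching message, never drop it.
class DelayFirstMessageSketch implements IMessageFilters.Matcher
{
    private final Condition started;   // signalled when the first message is seen
    private final Condition unblock;   // the test signals this to let the message through
    private final AtomicBoolean seen = new AtomicBoolean(false);

    DelayFirstMessageSketch(Condition started, Condition unblock)
    {
        this.started = started;
        this.unblock = unblock;
    }

    public boolean matches(int from, int to, IMessage message)
    {
        if (seen.compareAndSet(false, true))
        {
            started.signalAll();            // corresponds to incRepairStarted in the test
            unblock.awaitUninterruptibly(); // held until continueIncRepair.signalAll()
        }
        return false; // returning false means the filter does not drop the message
    }
}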
Use of org.apache.cassandra.distributed.api.NodeToolResult in project cassandra by apache.
The class RepairCoordinatorFailingMessageTest, method prepareIrFailure.
@Test(timeout = 1 * 60 * 1000)
public void prepareIrFailure() {
    Assume.assumeTrue("The Verb.PREPARE_CONSISTENT_REQ is only for incremental, so disable in non-incremental", repairType == RepairType.INCREMENTAL);
    // Wait, isn't this copy paste of RepairCoordinatorTest::prepareFailure? NO!
    // Incremental repair sends the PREPARE message the same way full does, but then after it does it sends
    // a consistent prepare message... and that one doesn't handle errors...
    CLUSTER.schemaChange("CREATE TABLE " + KEYSPACE + ".prepareirfailure (key text, value text, PRIMARY KEY (key))");
    IMessageFilters.Filter filter = CLUSTER.verbs(Verb.PREPARE_CONSISTENT_REQ).messagesMatching(of(m -> {
        throw new RuntimeException("prepare fail");
    })).drop();
    try {
        NodeToolResult result = repair(1, KEYSPACE, "prepareirfailure");
        result.asserts()
              .failure()
              .errorContains("error prepare fail")
              .notificationContains(NodeToolResult.ProgressEventType.ERROR, "error prepare fail")
              .notificationContains(NodeToolResult.ProgressEventType.COMPLETE, "finished with error");
    } finally {
        filter.off();
    }
}
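The of(...) call above is a statically imported matcher factory whose definition is not part of this excerpt. As a rough sketch (the name, parameter type, and signature here are assumptions, not the project's code), it can be thought of as adapting a per-message predicate to IMessageFilters.Matcher; because the lambda in the test always throws, every outgoing PREPARE_CONSISTENT_REQ fails on the sending node, which is the error path the assertions then check.

// Hypothetical adapter, sketched only to show what of(...) is assumed to do here.
static IMessageFilters.Matcher of(java.util.function.Predicate<IMessage> predicate)
{
    return (from, to, message) -> predicate.test(message);
}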
Use of org.apache.cassandra.distributed.api.NodeToolResult in project cassandra by apache.
The class RepairCoordinatorFast, method intersectingRange.
@Test
public void intersectingRange() {
    // this test exists to show that this case will cause repair to finish; success or failure isn't important.
    // If repair is enhanced to allow intersecting ranges with local ranges, this test will fail saying that we
    // expected repair to fail but it didn't; that would be fine, and this test should be updated to reflect
    // the new semantics.
    String table = tableName("intersectingrange");
    assertTimeoutPreemptively(Duration.ofMinutes(1), () -> {
        CLUSTER.schemaChange(format("CREATE TABLE %s.%s (key text, value text, PRIMARY KEY (key))", KEYSPACE, table));
        // TODO dtest api for this?
        LongTokenRange tokenRange = CLUSTER.get(2).callOnInstance(() -> {
            Set<Range<Token>> ranges = StorageService.instance.getLocalReplicas(KEYSPACE).ranges();
            Range<Token> range = Iterables.getFirst(ranges, null);
            long left = (long) range.left.getTokenValue();
            long right = (long) range.right.getTokenValue();
            return new LongTokenRange(left, right);
        });
        LongTokenRange intersectingRange = new LongTokenRange(tokenRange.maxInclusive - 7, tokenRange.maxInclusive + 7);
        long repairExceptions = getRepairExceptions(CLUSTER, 2);
        NodeToolResult result = repair(2, KEYSPACE, table, "--start-token", Long.toString(intersectingRange.minExclusive), "--end-token", Long.toString(intersectingRange.maxInclusive));
        result.asserts()
              .failure()
              .errorContains("Requested range " + intersectingRange + " intersects a local range (" + tokenRange + ") but is not fully contained in one");
        if (withNotifications) {
            result.asserts()
                  .notificationContains(ProgressEventType.START, "Starting repair command")
                  .notificationContains(ProgressEventType.START, "repairing keyspace " + KEYSPACE + " with repair options")
                  .notificationContains(ProgressEventType.ERROR, "Requested range " + intersectingRange + " intersects a local range (" + tokenRange + ") but is not fully contained in one")
                  .notificationContains(ProgressEventType.COMPLETE, "finished with error");
        }
        assertParentRepairNotExist(CLUSTER, KEYSPACE, table);
        Assert.assertEquals(repairExceptions + 1, getRepairExceptions(CLUSTER, 2));
    });
}
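LongTokenRange is a small test helper whose definition is not included in this excerpt. A minimal sketch of what it is assumed to look like follows, using only the field names visible above (minExclusive, maxInclusive); the toString format is an assumption chosen to mirror Cassandra's Range "(left,right]" rendering, which the error-message assertions above depend on.

// Sketch only: a value holder for a (start, end] token range built from long token values.
public class LongTokenRange
{
    public final long minExclusive;
    public final long maxInclusive;

    public LongTokenRange(long minExclusive, long maxInclusive)
    {
        this.minExclusive = minExclusive;
        this.maxInclusive = maxInclusive;
    }

    @Override
    public String toString()
    {
        // assumed to match the "(left,right]" style used by Range<Token>
        return "(" + minExclusive + "," + maxInclusive + "]";
    }
}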
Use of org.apache.cassandra.distributed.api.NodeToolResult in project cassandra by apache.
The class RepairCoordinatorFast, method replicationFactorOne.
@Test
public void replicationFactorOne() {
    // In the case of rf=1, repair fails to create a cmd handle, so nodetool exits early
    String table = tableName("one");
    assertTimeoutPreemptively(Duration.ofMinutes(1), () -> {
        // since the cluster is shared and this test gets called multiple times, "IF NOT EXISTS" is needed
        // so the second+ attempt does not fail
        CLUSTER.schemaChange("CREATE KEYSPACE IF NOT EXISTS replicationfactor WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};");
        CLUSTER.schemaChange(format("CREATE TABLE replicationfactor.%s (key text, value text, PRIMARY KEY (key))", table));
        long repairExceptions = getRepairExceptions(CLUSTER, 1);
        NodeToolResult result = repair(1, "replicationfactor", table);
        result.asserts().success();
        assertParentRepairNotExist(CLUSTER, KEYSPACE, table);
        Assert.assertEquals(repairExceptions, getRepairExceptions(CLUSTER, 1));
    });
}
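The repair(...) helper used throughout the RepairCoordinator tests is defined in the shared test base and is not shown in this excerpt; the real helper also folds in flags for the parameterized repair type (full, incremental, preview), which the sketch below omits. This is a hypothetical sketch of the NodeToolResult flow only, with the extra flags placed before the keyspace and table as in the nodetoolResult calls above.

// Hypothetical sketch: run nodetool repair on one node and return its NodeToolResult.
static NodeToolResult repairSketch(int node, String keyspace, String table, String... extraArgs)
{
    List<String> args = new ArrayList<>();
    args.add("repair");
    args.addAll(Arrays.asList(extraArgs)); // e.g. "--start-token", "--end-token"
    args.add(keyspace);
    args.add(table);
    return CLUSTER.get(node).nodetoolResult(args.toArray(new String[0]));
}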