Example use of org.apache.cassandra.distributed.api.NodeToolResult in the Apache Cassandra project: class RepairCoordinatorFast, method simple().
/**
 * Happy-path repair: create a table, write one row, run repair against node 2,
 * and verify success via exit status, progress notifications, the parent-repair
 * table, and an unchanged repair-exception counter.
 */
@Test
public void simple() {
    String table = tableName("simple");
    assertTimeoutPreemptively(Duration.ofMinutes(1), () -> {
        // Set up a minimal table and one row so the repair has data to cover.
        CLUSTER.schemaChange(format("CREATE TABLE %s.%s (key text, PRIMARY KEY (key))", KEYSPACE, table));
        CLUSTER.coordinator(1).execute(format("INSERT INTO %s.%s (key) VALUES (?)", KEYSPACE, table), ConsistencyLevel.ANY, "some text");

        // Capture the exception count up front; a clean repair must not increase it.
        long exceptionsBefore = getRepairExceptions(CLUSTER, 2);
        NodeToolResult result = repair(2, KEYSPACE, table);
        result.asserts().success();
        if (withNotifications) {
            // Verify the JMX progress notifications emitted over the repair lifecycle.
            result.asserts().notificationContains(ProgressEventType.START, "Starting repair command");
            result.asserts().notificationContains(ProgressEventType.START, "repairing keyspace " + KEYSPACE + " with repair options");
            result.asserts().notificationContains(ProgressEventType.SUCCESS, repairType != RepairType.PREVIEW ? "Repair completed successfully" : "Repair preview completed successfully");
            result.asserts().notificationContains(ProgressEventType.COMPLETE, "finished");
        }
        // Preview repairs never record a parent repair session; real repairs must.
        if (repairType != RepairType.PREVIEW) {
            assertParentRepairSuccess(CLUSTER, KEYSPACE, table);
        } else {
            assertParentRepairNotExist(CLUSTER, KEYSPACE, table);
        }
        Assert.assertEquals(exceptionsBefore, getRepairExceptions(CLUSTER, 2));
    });
}
Example use of org.apache.cassandra.distributed.api.NodeToolResult in the Apache Cassandra project: class OptimiseStreamsRepairTest, method testBasic().
/**
 * End-to-end check of optimised-streams repair ("-os"): make node 2 miss a batch
 * of overwrites while it is down (hints disabled), repair with -os, then confirm
 * via "-vd" and a full preview repair that all replicas are back in sync.
 */
@Test
public void testBasic() throws Exception {
    try (Cluster cluster = init(Cluster.build(3)
                                       .withInstanceInitializer(BBHelper::install)
                                       .withConfig(config -> config.set("hinted_handoff_enabled", false).with(GOSSIP).with(NETWORK))
                                       .start())) {
        cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, t int) with compaction={'class': 'SizeTieredCompactionStrategy'}");
        // Write a base data set while all three nodes are up.
        for (int row = 0; row < 10000; row++) {
            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (id, t) values (?,?)", ConsistencyLevel.ALL, row, row);
        }
        cluster.forEach(node -> node.flush(KEYSPACE));

        // Take node 2 down and overwrite a subset at QUORUM; with hinted handoff
        // disabled, node 2 permanently misses these updates until repaired.
        cluster.get(2).shutdown().get();
        for (int row = 0; row < 2000; row++) {
            cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (id, t) values (?,?)", ConsistencyLevel.QUORUM, row, row * 2 + 2);
        }
        cluster.get(2).startup();
        Thread.sleep(1000);
        cluster.forEach(node -> node.flush(KEYSPACE));
        cluster.forEach(node -> node.forceCompact(KEYSPACE, "tbl"));

        // Run the optimised-streams repair and wait for it to fully finish in the logs.
        long[] marks = PreviewRepairTest.logMark(cluster);
        NodeToolResult res = cluster.get(1).nodetoolResult("repair", KEYSPACE, "-os");
        res.asserts().success();
        PreviewRepairTest.waitLogsRepairFullyFinished(cluster, marks);

        // Validation repair must report repaired data in sync...
        res = cluster.get(1).nodetoolResult("repair", KEYSPACE, "-vd");
        res.asserts().success();
        res.asserts().notificationContains("Repaired data is in sync");
        // ...and a full preview repair must find nothing left to stream.
        res = cluster.get(1).nodetoolResult("repair", KEYSPACE, "--preview", "--full");
        res.asserts().success();
        res.asserts().notificationContains("Previewed data was in sync");
    }
}
Example use of org.apache.cassandra.distributed.api.NodeToolResult in the Apache Cassandra project: class RepairCoordinatorFast, method missingTable().
/**
 * Repairing a table that was never created must fail fast: nodetool reports
 * failure, the expected error is surfaced via notifications, no parent repair
 * session is recorded, and the repair-exception counter increments by one.
 */
@Test
public void missingTable() {
    assertTimeoutPreemptively(Duration.ofMinutes(1), () -> {
        long repairExceptions = getRepairExceptions(CLUSTER, 2);
        String tableName = tableName("doesnotexist");
        NodeToolResult result = repair(2, KEYSPACE, tableName);
        result.asserts().failure();
        if (withNotifications) {
            // Build the expected message from the KEYSPACE constant rather than
            // hard-coding "distributed_test_keyspace", so this stays consistent
            // with the rest of the suite and survives a keyspace rename.
            String expectedError = "Unknown keyspace/cf pair (" + KEYSPACE + "." + tableName + ")";
            result.asserts().errorContains(expectedError)
                  .notificationContains(ProgressEventType.ERROR, "failed with error " + expectedError)
                  .notificationContains(ProgressEventType.COMPLETE, "finished with error");
        }
        assertParentRepairNotExist(CLUSTER, KEYSPACE, "doesnotexist");
        // The failed repair must bump the exception counter exactly once.
        Assert.assertEquals(repairExceptions + 1, getRepairExceptions(CLUSTER, 2));
    });
}
Example use of org.apache.cassandra.distributed.api.NodeToolResult in the Apache Cassandra project: class RepairCoordinatorTimeout, method prepareRPCTimeout().
/**
 * Drops all PREPARE messages so the repair coordinator times out waiting for
 * replies, then verifies the failure surfaces through nodetool, notifications,
 * the parent-repair table, and the repair-exception counter.
 */
@Test
public void prepareRPCTimeout() {
    String table = tableName("preparerpctimeout");
    assertTimeoutPreemptively(Duration.ofMinutes(1), () -> {
        CLUSTER.schemaChange(format("CREATE TABLE %s.%s (key text, value text, PRIMARY KEY (key))", KEYSPACE, table));
        // Drop every PREPARE message cluster-wide; the coordinator can never hear back.
        CLUSTER.verbs(Verb.PREPARE_MSG).drop();

        long exceptionsBefore = getRepairExceptions(CLUSTER, 1);
        NodeToolResult result = repair(1, KEYSPACE, table);
        result.asserts().failure().errorContains("Did not get replies from all endpoints.");
        if (withNotifications) {
            // The repair starts normally, then errors out and completes with failure.
            result.asserts().notificationContains(NodeToolResult.ProgressEventType.START, "Starting repair command");
            result.asserts().notificationContains(NodeToolResult.ProgressEventType.START, "repairing keyspace " + KEYSPACE + " with repair options");
            result.asserts().notificationContains(NodeToolResult.ProgressEventType.ERROR, "Did not get replies from all endpoints.");
            result.asserts().notificationContains(NodeToolResult.ProgressEventType.COMPLETE, "finished with error");
        }
        // Non-preview repairs persist the failure in the parent-repair table; preview leaves no trace.
        if (repairType != RepairType.PREVIEW) {
            assertParentRepairFailedWithMessageContains(CLUSTER, KEYSPACE, table, "Did not get replies from all endpoints.");
        } else {
            assertParentRepairNotExist(CLUSTER, KEYSPACE, table);
        }
        Assert.assertEquals(exceptionsBefore + 1, getRepairExceptions(CLUSTER, 1));
    });
}
Example use of org.apache.cassandra.distributed.api.NodeToolResult in the Apache Cassandra project: class RepairCoordinatorFast, method snapshotFailure().
// Verifies that an exception thrown while handling the validation snapshot message
// fails the repair and is reported to the caller. Incremental repair never takes
// snapshots and parallel repair skips them, so both variants are excluded up front.
@Test
public void snapshotFailure() {
Assume.assumeFalse("incremental does not do snapshot", repairType == RepairType.INCREMENTAL);
Assume.assumeFalse("Parallel repair does not perform snapshots", parallelism == RepairParallelism.PARALLEL);
String table = tableName("snapshotfailure");
assertTimeoutPreemptively(Duration.ofMinutes(1), () -> {
CLUSTER.schemaChange(format("CREATE TABLE %s.%s (key text, value text, PRIMARY KEY (key))", KEYSPACE, table));
// Make every SNAPSHOT_MSG blow up on the matching side so the snapshot step fails.
IMessageFilters.Filter filter = CLUSTER.verbs(Verb.SNAPSHOT_MSG).messagesMatching(of(m -> {
throw new RuntimeException("snapshot fail");
})).drop();
try {
long repairExceptions = getRepairExceptions(CLUSTER, 1);
NodeToolResult result = repair(1, KEYSPACE, table);
result.asserts().failure();
// Right now coordination doesn't propagate the first exception, so we only know "there exists an issue".
// With notifications on, nodetool will see the error then complete, so the cmd state (what nodetool
// polls on) is ignored. With notifications off or dropped, the poll await fails and queries cmd
// state, and that will have the below error.
// NOTE: this isn't desirable; it would be good to propagate the original exception.
// TODO replace with errorContainsAny once the dtest api is updated
Throwable error = result.getError();
Assert.assertNotNull("Error was null", error);
if (!(error.getMessage().contains("Could not create snapshot") || error.getMessage().contains("Some repair failed")))
throw new AssertionError("Unexpected error, expected to contain 'Could not create snapshot' or 'Some repair failed'", error);
if (withNotifications) {
result.asserts().notificationContains(ProgressEventType.START, "Starting repair command").notificationContains(ProgressEventType.START, "repairing keyspace " + KEYSPACE + " with repair options").notificationContains(ProgressEventType.ERROR, "Could not create snapshot ").notificationContains(ProgressEventType.COMPLETE, "finished with error");
}
// The failure must bump the repair-exception counter exactly once.
Assert.assertEquals(repairExceptions + 1, getRepairExceptions(CLUSTER, 1));
if (repairType != RepairType.PREVIEW) {
assertParentRepairFailedWithMessageContains(CLUSTER, KEYSPACE, table, "Could not create snapshot");
} else {
assertParentRepairNotExist(CLUSTER, KEYSPACE, table);
}
} finally {
// Always remove the message filter so later tests see normal snapshot traffic.
filter.off();
}
});
}
Aggregations