Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.
Class PairOfSequencesPaxosSimulation, method plan().
public ActionPlan plan() {
    ActionPlan plan = new KeyspaceActions(simulated, KEYSPACE, TABLE, CREATE_TABLE, cluster, clusterOptions,
                                          serialConsistency, this, primaryKeys, debug).plan();

    plan = plan.encapsulate(ActionPlan.setUpTearDown(
        ActionList.of(cluster.stream().map(i -> simulated.run("Insert Partitions", i, executeForPrimaryKeys(INSERT1, primaryKeys)))),
        ActionList.of(cluster.stream().map(i -> simulated.run("Delete Partitions", i, executeForPrimaryKeys(DELETE1, primaryKeys))))));

    final int nodes = cluster.size();
    for (int primaryKey : primaryKeys)
        historyCheckers.add(new HistoryChecker(primaryKey));

    List<Supplier<Action>> primaryKeyActions = new ArrayList<>();
    for (int pki = 0; pki < primaryKeys.length; ++pki) {
        int primaryKey = primaryKeys[pki];
        HistoryChecker historyChecker = historyCheckers.get(pki);
        Supplier<Action> supplier = new Supplier<Action>() {
            int i = 0;

            @Override
            public Action get() {
                int node = simulated.random.uniform(1, nodes + 1);
                IInvokableInstance instance = cluster.get(node);
                switch (serialConsistency) {
                    default:
                        throw new AssertionError();
                    case LOCAL_SERIAL:
                        if (simulated.snitch.dcOf(node) > 0) {
                            // perform some queries against these nodes but don't expect them to be linearizable
                            return new NonVerifyingOperation(i++, instance, serialConsistency, primaryKey, historyChecker);
                        }
                    case SERIAL:
                        return simulated.random.decide(readRatio)
                               ? new VerifyingOperation(i++, instance, serialConsistency, primaryKey, historyChecker)
                               : new ModifyingOperation(i++, instance, ANY, serialConsistency, primaryKey, historyChecker);
                }
            }

            @Override
            public String toString() {
                return Integer.toString(primaryKey);
            }
        };

        final ActionListener listener = debug.debug(PARTITION, simulated.time, cluster, KEYSPACE, primaryKey);
        if (listener != null) {
            Supplier<Action> wrap = supplier;
            supplier = new Supplier<Action>() {
                @Override
                public Action get() {
                    Action action = wrap.get();
                    action.register(listener);
                    return action;
                }

                @Override
                public String toString() {
                    return wrap.toString();
                }
            };
        }
        primaryKeyActions.add(supplier);
    }

    List<Integer> available = IntStream.range(0, primaryKeys.length).boxed().collect(Collectors.toList());
    Action stream = Actions.infiniteStream(concurrency, new Supplier<Action>() {
        @Override
        public Action get() {
            int i = simulated.random.uniform(0, available.size());
            int next = available.get(i);
            available.set(i, available.get(available.size() - 1));
            available.remove(available.size() - 1);
            long untilNanos = simulated.time.nanoTime() + SECONDS.toNanos(simulateKeyForSeconds.select(simulated.random));
            int concurrency = withinKeyConcurrency.select(simulated.random);
            Supplier<Action> supplier = primaryKeyActions.get(next);
            // while this stream is finite, it participates in an infinite stream via its parent,
            // so we want to permit termination while it's running
            return Actions.infiniteStream(concurrency, new Supplier<Action>() {
                @Override
                public Action get() {
                    if (simulated.time.nanoTime() >= untilNanos) {
                        available.add(next);
                        return null;
                    }
                    return supplier.get();
                }

                @Override
                public String toString() {
                    return supplier.toString();
                }
            });
        }

        @Override
        public String toString() {
            return "Primary Key Actions";
        }
    });

    return simulated.execution.plan()
                              .encapsulate(plan)
                              .encapsulate(ActionPlan.interleave(singletonList(ActionList.of(stream))));
}
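All of the IInvokableInstance usage in this plan reduces to picking a 1-based node index at random and addressing that node via cluster.get(node). A minimal sketch of that pattern outside the simulation framework, using a hypothetical keyspace and table (an illustration only, not part of the original test):

import java.util.concurrent.ThreadLocalRandom;

import org.apache.cassandra.distributed.Cluster;
import org.apache.cassandra.distributed.api.IInvokableInstance;

public class RandomNodeSketch {
    public static void main(String[] args) throws Exception {
        try (Cluster cluster = Cluster.build(3).start()) {
            cluster.schemaChange("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}");
            cluster.schemaChange("CREATE TABLE ks.tbl (pk int PRIMARY KEY)");
            // Node indexes in the in-JVM dtest API are 1-based.
            int node = ThreadLocalRandom.current().nextInt(1, cluster.size() + 1);
            IInvokableInstance instance = cluster.get(node);
            // executeInternal applies the statement to this node's local storage only.
            instance.executeInternal("INSERT INTO ks.tbl (pk) VALUES (?)", 1);
        }
    }
}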
Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.
Class ClientNetworkStopStartTest, method stopStartNative().
/**
* @see <a href="https://issues.apache.org/jira/browse/CASSANDRA-16127">CASSANDRA-16127</a>
*/
@Test
public void stopStartNative() throws IOException {
    // TODO why does trunk need GOSSIP for native to work but no other branch does?
    try (Cluster cluster = init(Cluster.build(1).withConfig(c -> c.with(Feature.GOSSIP, Feature.NATIVE_PROTOCOL)).start())) {
        IInvokableInstance node = cluster.get(1);
        assertTransportStatus(node, "binary", true);
        node.nodetoolResult("disablebinary").asserts().success();
        assertTransportStatus(node, "binary", false);
        node.nodetoolResult("enablebinary").asserts().success();
        assertTransportStatus(node, "binary", true);

        // now use it to make sure it still works!
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, value int, PRIMARY KEY (pk))");
        try (com.datastax.driver.core.Cluster client = com.datastax.driver.core.Cluster.builder()
                                                           .addContactPoints(node.broadcastAddress().getAddress()).build();
             Session session = client.connect()) {
            session.execute("INSERT INTO " + KEYSPACE + ".tbl (pk, value) VALUES (?, ?)", 0, 0);
        }
        SimpleQueryResult qr = cluster.coordinator(1).executeWithResult("SELECT * FROM " + KEYSPACE + ".tbl", ConsistencyLevel.ALL);
        AssertUtils.assertRows(qr, QueryResults.builder().row(0, 0).build());
    }
}
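assertTransportStatus is a private helper of ClientNetworkStopStartTest and its body is not shown above. Checks like it are normally made by calling into the node's own classloader with callOnInstance; a hedged sketch of that general pattern (not the actual helper, and assuming StorageService exposes isNativeTransportRunning() on the branch under test):

static boolean isBinaryTransportRunning(IInvokableInstance node) {
    // Runs inside the instance's classloader, so it can read node-local singletons directly.
    return node.callOnInstance(() -> org.apache.cassandra.service.StorageService.instance.isNativeTransportRunning());
}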
Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.
Class DistributedRepairUtils, method queryParentRepairHistory().
public static QueryResult queryParentRepairHistory(ICluster<IInvokableInstance> cluster, int coordinator, String ks, String table) {
    // This is somewhat brittle: the caller never gets the repair ID and can't ask for it, so it has to be inferred.
    // The logic assumes ks/table pairs are unique (they should be, or else CREATE should fail), so any repair
    // for that pair will be the repair ID.
    Set<String> tableNames = table == null ? Collections.emptySet() : ImmutableSet.of(table);
    QueryResult rs = retryWithBackoffBlocking(10, () -> cluster.coordinator(coordinator)
                                                               .executeWithResult("SELECT * FROM system_distributed.parent_repair_history", ConsistencyLevel.QUORUM)
                                                               .filter(row -> ks.equals(row.getString("keyspace_name")))
                                                               .filter(row -> tableNames.equals(row.getSet("columnfamily_names"))));
    return rs;
}
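Callers usually just walk the filtered result, since the filters above already narrow it to the keyspace/table of interest. A small usage sketch with hypothetical names, assuming QueryResult's iterator-style hasNext()/next() accessors:

QueryResult history = queryParentRepairHistory(cluster, 1, "ks", "tbl");
while (history.hasNext()) {
    Row row = history.next();
    // The same columns the helper filters on: keyspace_name and columnfamily_names.
    System.out.println(row.getString("keyspace_name") + " -> " + row.getSet("columnfamily_names"));
}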
Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.
Class FailingRepairTest, method testFailingMessage().
@Test(timeout = 10 * 60 * 1000)
public void testFailingMessage() throws IOException {
    final int replica = 1;
    final int coordinator = 2;
    String tableName = getCfName(messageType, parallelism, withTracing);
    String fqtn = KEYSPACE + "." + tableName;

    CLUSTER.schemaChange("CREATE TABLE " + fqtn + " (k INT, PRIMARY KEY (k))");

    // create data which will NOT conflict
    int lhsOffset = 10;
    int rhsOffset = 20;
    int limit = rhsOffset + (rhsOffset - lhsOffset);

    // setup data which is consistent on both sides
    for (int i = 0; i < lhsOffset; i++)
        CLUSTER.coordinator(replica).execute("INSERT INTO " + fqtn + " (k) VALUES (?)", ConsistencyLevel.ALL, i);
    // create data on LHS which does NOT exist in RHS
    for (int i = lhsOffset; i < rhsOffset; i++)
        CLUSTER.get(replica).executeInternal("INSERT INTO " + fqtn + " (k) VALUES (?)", i);
    // create data on RHS which does NOT exist in LHS
    for (int i = rhsOffset; i < limit; i++)
        CLUSTER.get(coordinator).executeInternal("INSERT INTO " + fqtn + " (k) VALUES (?)", i);

    // at this point, the two nodes should be out of sync, so confirm missing data
    // node 1
    Object[][] node1Records = toRows(IntStream.range(0, rhsOffset));
    Object[][] node1Actuals = toNaturalOrder(CLUSTER.get(replica).executeInternal("SELECT k FROM " + fqtn));
    Assert.assertArrayEquals(node1Records, node1Actuals);
    // node 2
    Object[][] node2Records = toRows(IntStream.concat(IntStream.range(0, lhsOffset), IntStream.range(rhsOffset, limit)));
    Object[][] node2Actuals = toNaturalOrder(CLUSTER.get(coordinator).executeInternal("SELECT k FROM " + fqtn));
    Assert.assertArrayEquals(node2Records, node2Actuals);

    // Inject the failure
    CLUSTER.get(replica).runOnInstance(() -> setup.run());

    // run a repair which is expected to fail
    List<String> repairStatus = CLUSTER.get(coordinator).callOnInstance(() -> {
        // need all ranges on the host
        String ranges = StorageService.instance.getLocalAndPendingRanges(KEYSPACE).stream()
                                               .map(r -> r.left + ":" + r.right)
                                               .collect(Collectors.joining(","));
        Map<String, String> args = new HashMap<String, String>() {
            {
                put(RepairOption.PARALLELISM_KEY, parallelism.getName());
                put(RepairOption.PRIMARY_RANGE_KEY, "false");
                put(RepairOption.INCREMENTAL_KEY, "false");
                put(RepairOption.TRACE_KEY, Boolean.toString(withTracing));
                put(RepairOption.PULL_REPAIR_KEY, "false");
                put(RepairOption.FORCE_REPAIR_KEY, "false");
                put(RepairOption.RANGES_KEY, ranges);
                put(RepairOption.COLUMNFAMILIES_KEY, tableName);
            }
        };
        int cmd = StorageService.instance.repairAsync(KEYSPACE, args);
        Assert.assertFalse("repair return status was 0; expected a non-zero status, since 0 indicates the repair was not submitted", cmd == 0);
        List<String> status;
        do {
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
            status = StorageService.instance.getParentRepairStatus(cmd);
        } while (status == null || status.get(0).equals(ParentRepairStatus.IN_PROGRESS.name()));
        return status;
    });
    Assert.assertEquals(repairStatus.toString(), ParentRepairStatus.FAILED, ParentRepairStatus.valueOf(repairStatus.get(0)));

    // it's possible that the coordinator gets the message that the replica failed before the replica completes
    // shutting down; this then means that isKilled could be updated after the fact
    IInvokableInstance replicaInstance = CLUSTER.get(replica);
    Awaitility.await().atMost(Duration.ofSeconds(30)).until(replicaInstance::isShutdown);
    Assert.assertEquals("coordinator should not be killed", 0, CLUSTER.get(coordinator).killAttempts());
}
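The divergent state this test repairs is created by contrasting coordinated and node-local writes; the hedged sketch below isolates just that trick, reusing the test's CLUSTER scaffolding and a hypothetical ks.tbl:

// A write coordinated at ConsistencyLevel.ALL reaches every replica,
// while executeInternal writes only to that node's local storage.
CLUSTER.coordinator(1).execute("INSERT INTO ks.tbl (k) VALUES (?)", ConsistencyLevel.ALL, 1); // both nodes
CLUSTER.get(1).executeInternal("INSERT INTO ks.tbl (k) VALUES (?)", 2);                       // node 1 only
CLUSTER.get(2).executeInternal("INSERT INTO ks.tbl (k) VALUES (?)", 3);                       // node 2 only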
Use of org.apache.cassandra.distributed.api.IInvokableInstance in project cassandra by apache.
Class MessageFiltersTest, method testMessageMatching().
@Test
public void testMessageMatching() throws Throwable {
    String read = "SELECT * FROM " + KEYSPACE + ".tbl";
    String write = "INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1)";
    try (ICluster<IInvokableInstance> cluster = builder().withNodes(2).withConfig(c -> c.set("range_request_timeout", "2000ms")).start()) {
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': " + cluster.size() + "};");
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");

        AtomicInteger counter = new AtomicInteger();
        Set<Integer> verbs = Sets.newHashSet(Arrays.asList(Verb.RANGE_REQ.id, Verb.RANGE_RSP.id, Verb.MUTATION_REQ.id, Verb.MUTATION_RSP.id));
        for (boolean inbound : Arrays.asList(true, false)) {
            counter.set(0);
            // Reads and writes are going to time out in both directions
            IMessageFilters.Filter filter = cluster.filters().allVerbs().inbound(inbound).from(1).to(2).messagesMatching((from, to, msg) -> {
                // Decode and verify message on instance; return the result back here
                Integer id = cluster.get(1).callsOnInstance((IIsolatedExecutor.SerializableCallable<Integer>) () -> {
                    Message decoded = Instance.deserializeMessage(msg);
                    return (Integer) decoded.verb().id;
                }).call();
                Assert.assertTrue(verbs.contains(id));
                counter.incrementAndGet();
                return false;
            }).drop();

            for (int i : new int[] { 1, 2 })
                cluster.coordinator(i).execute(read, ConsistencyLevel.ALL);
            for (int i : new int[] { 1, 2 })
                cluster.coordinator(i).execute(write, ConsistencyLevel.ALL);

            filter.off();
            Assert.assertEquals(4, counter.get());
        }
    }
}
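Besides messagesMatching, the same filter builder is commonly used simply to drop traffic for selected verbs. A hedged sketch of dropping mutations from node 1 to node 2 inside the try-with-resources block above (verbs(int...) is assumed to be available alongside allVerbs() in the dtest filters API):

IMessageFilters.Filter drop = cluster.filters().verbs(Verb.MUTATION_REQ.id).from(1).to(2).drop();
try {
    // With mutations to the only other replica dropped, a write at ALL should time out.
    cluster.coordinator(1).execute(write, ConsistencyLevel.ALL);
} catch (RuntimeException expectedTimeout) {
    // expected: node 2 never acknowledges the mutation while the filter is active
} finally {
    drop.off();
}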