Use of org.apache.cassandra.simulator.ActionPlan in project cassandra by apache.
The plan() method of the KeyspaceActions class.
public ActionPlan plan() {
    ActionList pre = ActionList.of(pre(createKeyspaceCql(keyspace), createTableCql));
    ActionList interleave = stream();
    ActionList post = ActionList.empty();
    return new ActionPlan(pre, singletonList(interleave), post);
}
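The constructor call above splits the plan into three phases: a pre list that runs first (schema setup here), a list of ActionLists whose actions are interleaved as the body, and a post list that runs last. As a minimal sketch, assuming hypothetical Action values setupAction and workloadAction (and singletonList from java.util.Collections, as in the snippets on this page), the same shape looks like this:

// Sketch only: setupAction and workloadAction are hypothetical Action instances,
// reusing just the constructor and factory calls visible in the snippets above.
ActionList pre        = ActionList.of(setupAction);                    // runs before the body
ActionList interleave = ActionList.of(workloadAction);                 // interleaved body phase
ActionList post       = ActionList.empty();                            // nothing to run afterwards
ActionPlan sketchPlan = new ActionPlan(pre, singletonList(interleave), post);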
Use of org.apache.cassandra.simulator.ActionPlan in project cassandra by apache.
The plan() method of the PairOfSequencesPaxosSimulation class.
public ActionPlan plan() {
    // Build the keyspace/table setup plan, then wrap it so that every node inserts the
    // primary-key partitions before the body runs and deletes them afterwards.
    ActionPlan plan = new KeyspaceActions(simulated, KEYSPACE, TABLE, CREATE_TABLE, cluster,
                                          clusterOptions, serialConsistency, this, primaryKeys, debug).plan();

    plan = plan.encapsulate(ActionPlan.setUpTearDown(
        ActionList.of(cluster.stream().map(i -> simulated.run("Insert Partitions", i, executeForPrimaryKeys(INSERT1, primaryKeys)))),
        ActionList.of(cluster.stream().map(i -> simulated.run("Delete Partitions", i, executeForPrimaryKeys(DELETE1, primaryKeys))))
    ));

    final int nodes = cluster.size();
    for (int primaryKey : primaryKeys)
        historyCheckers.add(new HistoryChecker(primaryKey));

    // One Action supplier per primary key: each call picks a random node and issues a
    // verifying read, a modifying write, or a non-verifying query, depending on consistency.
    List<Supplier<Action>> primaryKeyActions = new ArrayList<>();
    for (int pki = 0; pki < primaryKeys.length; ++pki) {
        int primaryKey = primaryKeys[pki];
        HistoryChecker historyChecker = historyCheckers.get(pki);
        Supplier<Action> supplier = new Supplier<Action>() {
            int i = 0;

            @Override
            public Action get() {
                int node = simulated.random.uniform(1, nodes + 1);
                IInvokableInstance instance = cluster.get(node);
                switch (serialConsistency) {
                    default:
                        throw new AssertionError();
                    case LOCAL_SERIAL:
                        if (simulated.snitch.dcOf(node) > 0) {
                            // perform some queries against these nodes but don't expect them to be linearizable
                            return new NonVerifyingOperation(i++, instance, serialConsistency, primaryKey, historyChecker);
                        }
                        // otherwise (local datacenter) fall through and treat the node as SERIAL
                    case SERIAL:
                        return simulated.random.decide(readRatio)
                               ? new VerifyingOperation(i++, instance, serialConsistency, primaryKey, historyChecker)
                               : new ModifyingOperation(i++, instance, ANY, serialConsistency, primaryKey, historyChecker);
                }
            }

            @Override
            public String toString() {
                return Integer.toString(primaryKey);
            }
        };

        final ActionListener listener = debug.debug(PARTITION, simulated.time, cluster, KEYSPACE, primaryKey);
        if (listener != null) {
            // wrap the supplier so every generated action also registers the debug listener
            Supplier<Action> wrap = supplier;
            supplier = new Supplier<Action>() {
                @Override
                public Action get() {
                    Action action = wrap.get();
                    action.register(listener);
                    return action;
                }

                @Override
                public String toString() {
                    return wrap.toString();
                }
            };
        }
        primaryKeyActions.add(supplier);
    }

    // The outer stream keeps `concurrency` primary keys active at once; each pick spawns an
    // inner stream that exercises a single key until its deadline, then returns the key to `available`.
    List<Integer> available = IntStream.range(0, primaryKeys.length).boxed().collect(Collectors.toList());
    Action stream = Actions.infiniteStream(concurrency, new Supplier<Action>() {
        @Override
        public Action get() {
            int i = simulated.random.uniform(0, available.size());
            int next = available.get(i);
            available.set(i, available.get(available.size() - 1));
            available.remove(available.size() - 1);
            long untilNanos = simulated.time.nanoTime() + SECONDS.toNanos(simulateKeyForSeconds.select(simulated.random));
            int concurrency = withinKeyConcurrency.select(simulated.random);
            Supplier<Action> supplier = primaryKeyActions.get(next);
            // while this stream is finite, it participates in an infinite stream via its parent, so we want to permit termination while it's running
            return Actions.infiniteStream(concurrency, new Supplier<Action>() {
                @Override
                public Action get() {
                    if (simulated.time.nanoTime() >= untilNanos) {
                        available.add(next);
                        return null;
                    }
                    return supplier.get();
                }

                @Override
                public String toString() {
                    return supplier.toString();
                }
            });
        }

        @Override
        public String toString() {
            return "Primary Key Actions";
        }
    });
    return simulated.execution.plan().encapsulate(plan).encapsulate(ActionPlan.interleave(singletonList(ActionList.of(stream))));
}
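The nested Actions.infiniteStream calls form a two-level scheduler: the outer stream keeps a fixed number of primary keys in flight, while each inner stream drives one key until its deadline and then signals completion by returning null from its supplier (mirroring the untilNanos check above). A minimal sketch of that inner, self-terminating pattern, assuming a hypothetical delegate supplier, clock, deadline, and concurrency value (names not taken from the project):

// Sketch only: innerConcurrency, clock, deadlineNanos and delegateSupplier are hypothetical.
// Returning null from the supplier is what lets this otherwise "infinite" stream finish,
// following the pattern used in the method above.
Action boundedStream = Actions.infiniteStream(innerConcurrency, new Supplier<Action>() {
    @Override
    public Action get() {
        if (clock.nanoTime() >= deadlineNanos)
            return null;                // stop producing actions once the deadline passes
        return delegateSupplier.get();  // otherwise hand out the next action for this key
    }

    @Override
    public String toString() {
        return "Bounded " + delegateSupplier;
    }
});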