Use of org.apache.cassandra.distributed.api.IInvokableInstance in the Apache Cassandra project:
class ReprepareTestOldBehaviour, method testReprepareUsingOldBehavior.
@Test
public void testReprepareUsingOldBehavior() throws Throwable {
    // Fork of testReprepareMixedVersionWithoutReset; guarantees oldBehavior starts from a clean state.
    try (ICluster<IInvokableInstance> testCluster = init(builder().withNodes(2)
                                                                  .withConfig(cfg -> cfg.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
                                                                  .withInstanceInitializer(PrepareBehaviour::oldBehaviour)
                                                                  .start())) {
        ForceHostLoadBalancingPolicy policy = new ForceHostLoadBalancingPolicy();
        testCluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck));"));

        try (com.datastax.driver.core.Cluster driverCluster = com.datastax.driver.core.Cluster.builder()
                                                                                              .addContactPoint("127.0.0.1")
                                                                                              .addContactPoint("127.0.0.2")
                                                                                              .withLoadBalancingPolicy(policy)
                                                                                              .build();
             Session session = driverCluster.connect()) {
            session.execute(withKeyspace("USE %s"));

            // Prepare the statement against node 2 first...
            policy.setPrimary(2);
            final PreparedStatement select = session.prepare(withKeyspace("SELECT * FROM %s.tbl"));
            session.execute(select.bind());

            // ...then execute it through node 1, forcing a re-prepare under the old behaviour.
            policy.setPrimary(1);
            session.execute(select.bind());
        }
    }
}
Use of org.apache.cassandra.distributed.api.IInvokableInstance in the Apache Cassandra project:
class ReprepareTestBase, method testReprepareTwoKeyspaces.
public void testReprepareTwoKeyspaces(BiConsumer<ClassLoader, Integer> instanceInitializer) throws Throwable {
    try (ICluster<IInvokableInstance> testCluster = init(builder().withNodes(2)
                                                                  .withConfig(cfg -> cfg.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
                                                                  .withInstanceInitializer(instanceInitializer)
                                                                  .start())) {
        testCluster.schemaChange(withKeyspace("CREATE KEYSPACE %s2 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2};"));
        testCluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck));"));
        ForceHostLoadBalancingPolicy policy = new ForceHostLoadBalancingPolicy();

        // Run the scenario starting from each of the two nodes.
        for (int firstContact : new int[] { 1, 2 }) {
            try (com.datastax.driver.core.Cluster driverCluster = com.datastax.driver.core.Cluster.builder()
                                                                                                  .addContactPoint("127.0.0.1")
                                                                                                  .addContactPoint("127.0.0.2")
                                                                                                  .withLoadBalancingPolicy(policy)
                                                                                                  .build();
                 Session session = driverCluster.connect()) {
                session.execute(withKeyspace("USE %s"));
                testCluster.stream().forEach(node -> node.runOnInstance(QueryProcessor::clearPreparedStatementsCache));

                // Prepare against the first-contact node.
                policy.setPrimary(firstContact);
                final PreparedStatement select = session.prepare(withKeyspace("SELECT * FROM %s.tbl"));
                session.execute(select.bind());

                testCluster.stream().forEach(node -> node.runOnInstance(QueryProcessor::clearPreparedStatementsCache));

                // Switch to the other node and a different keyspace: the re-prepare must be rejected.
                policy.setPrimary(firstContact == 1 ? 2 : 1);
                session.execute(withKeyspace("USE %s2"));
                try {
                    session.execute(select.bind());
                    fail("Should have thrown");
                } catch (DriverInternalError e) {
                    Assert.assertTrue(e.getCause().getMessage().contains("can't execute it on"));
                }
            }
        }
    }
}
Use of org.apache.cassandra.distributed.api.IInvokableInstance in the Apache Cassandra project:
class ReprepareTestBase, method testReprepare.
public void testReprepare(BiConsumer<ClassLoader, Integer> instanceInitializer, ReprepareTestConfiguration... configs) throws Throwable {
    try (ICluster<IInvokableInstance> testCluster = init(builder().withNodes(2)
                                                                  .withConfig(cfg -> cfg.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
                                                                  .withInstanceInitializer(instanceInitializer)
                                                                  .start())) {
        ForceHostLoadBalancingPolicy policy = new ForceHostLoadBalancingPolicy();
        testCluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck));"));

        for (ReprepareTestConfiguration testConfig : configs) {
            // Node 1 has the old behaviour.
            for (int firstContact : new int[] { 1, 2 }) {
                try (com.datastax.driver.core.Cluster driverCluster = com.datastax.driver.core.Cluster.builder()
                                                                                                      .addContactPoint("127.0.0.1")
                                                                                                      .addContactPoint("127.0.0.2")
                                                                                                      .withLoadBalancingPolicy(policy)
                                                                                                      .build();
                     Session session = driverCluster.connect()) {
                    // Prepare on the first-contact node.
                    policy.setPrimary(firstContact);
                    final PreparedStatement select = session.prepare(withKeyspace("SELECT * FROM %s.tbl"));
                    session.execute(select.bind());

                    testCluster.stream().forEach(node -> node.runOnInstance(QueryProcessor::clearPreparedStatementsCache));

                    int otherNode = firstContact == 1 ? 2 : 1;
                    policy.setPrimary(otherNode);
                    if (testConfig.withUse) {
                        session.execute(withKeyspace("USE %s"));
                    }

                    // Re-preparing on the node
                    if (!testConfig.skipBrokenBehaviours && firstContact == 1) {
                        session.execute(select.bind());
                    }

                    testCluster.stream().forEach(node -> node.runOnInstance(QueryProcessor::clearPreparedStatementsCache));

                    policy.setPrimary(firstContact);
                    // Re-preparing on the node with old behaviour will break no matter where the statement was initially prepared
                    if (!testConfig.skipBrokenBehaviours) {
                        session.execute(select.bind());
                    }

                    testCluster.stream().forEach(node -> node.runOnInstance(QueryProcessor::clearPreparedStatementsCache));
                }
            }
        }
    }
}
Use of org.apache.cassandra.distributed.api.IInvokableInstance in the Apache Cassandra project:
class SnapshotsTest, method testSnapshotCleanupAfterRestart.
@Test
public void testSnapshotCleanupAfterRestart() throws Exception {
    // TTL long enough for the snapshot to survive the node restart below.
    int TWENTY_SECONDS = 20;
    IInvokableInstance instance = cluster.get(1);

    // Create a TTL'd snapshot and verify it is listed.
    instance.nodetoolResult("snapshot", "--ttl", String.format("%ds", TWENTY_SECONDS), "-t", "basic").asserts().success();
    instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains("basic");

    // Restart the node.
    stopUnchecked(instance);
    instance.startup();

    // The snapshot must still exist after the restart.
    instance.nodetoolResult("listsnapshots").asserts().success().stdoutContains("basic");

    // Sleep for 2*TTL and then check the snapshot is gone. Sleeping exactly 1*TTL
    // (as the code previously did, contradicting its own comment) races with the
    // periodic cleanup task; doubling the wait makes expiry-and-cleanup certain.
    Thread.sleep(2 * TWENTY_SECONDS * 1000L);
    instance.nodetoolResult("listsnapshots").asserts().success().stdoutNotContains("basic");
}
Use of org.apache.cassandra.distributed.api.IInvokableInstance in the Apache Cassandra project:
class StreamingTest, method registerSink.
public static void registerSink(Cluster cluster, int initiatorNodeId) {
    IInvokableInstance initiatorNode = cluster.get(initiatorNodeId);
    InetSocketAddress initiator = initiatorNode.broadcastAddress();
    MessageStateSinkImpl initiatorSink = new MessageStateSinkImpl();

    // Wire a sink pair between the initiator and every other node in the cluster.
    for (int nodeId = 1; nodeId <= cluster.size(); nodeId++) {
        if (nodeId == initiatorNodeId)
            continue;

        IInvokableInstance followerNode = cluster.get(nodeId);
        InetSocketAddress follower = followerNode.broadcastAddress();

        // verify on initiator's stream session
        initiatorSink.messages(follower, Arrays.asList(PREPARE_SYNACK, STREAM, StreamMessage.Type.COMPLETE));
        initiatorSink.states(follower, Arrays.asList(PREPARING, STREAMING, WAIT_COMPLETE, StreamSession.State.COMPLETE));

        // verify on follower's stream session
        MessageStateSinkImpl followerSink = new MessageStateSinkImpl();
        followerSink.messages(initiator, Arrays.asList(STREAM_INIT, PREPARE_SYN, PREPARE_ACK, RECEIVED));
        // why 2 completes? There is a race condition bug with sending COMPLETE where the socket gets closed
        // by the initator, which then triggers a ClosedChannelException, which then checks the current state (PREPARING)
        // to solve this, COMPLETE is set before sending the message, and reset when closing the stream
        followerSink.states(initiator, Arrays.asList(PREPARING, STREAMING, StreamSession.State.COMPLETE, StreamSession.State.COMPLETE));

        followerNode.runOnInstance(() -> StreamSession.sink = followerSink);
    }

    initiatorNode.runOnInstance(() -> StreamSession.sink = initiatorSink);
}
Aggregations