Usage example of org.apache.cassandra.distributed.api.Feature.GOSSIP in the Apache Cassandra project:
class HintedHandoffAddRemoveNodesTest, method shouldStreamHintsDuringDecommission.
/**
 * Verifies that hints accumulated for a down node are streamed off a decommissioning node
 * and ultimately delivered once the down node comes back up.
 *
 * Replaces Python dtest {@code hintedhandoff_test.py:TestHintedHandoff.test_hintedhandoff_decom()}.
 * Ignored for now as there is some in-jvm bug which needs to be fixed, otherwise the test is flaky
 * For more information see CASSANDRA-16679
 */
@Ignore
@Test
public void shouldStreamHintsDuringDecommission() throws Exception {
// 4 nodes, RF=2: enough to survive decommissioning down to a single node at the end.
try (Cluster cluster = builder().withNodes(4).withConfig(config -> config.with(NETWORK, GOSSIP, NATIVE_PROTOCOL)).start()) {
cluster.schemaChange(withKeyspace("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2}"));
cluster.schemaChange(withKeyspace("CREATE TABLE %s.decom_hint_test (key int PRIMARY KEY, value int)"));
// Take node 4 down so that writes targeting its replicas generate hints.
cluster.get(4).shutdown().get();
// Write data using the second node as the coordinator...
populate(cluster, "decom_hint_test", 2, 0, 128, ConsistencyLevel.ONE);
Long totalHints = countTotalHints(cluster);
// ...and verify that we've accumulated hints intended for node 4, which is down.
assertThat(totalHints).isGreaterThan(0);
// Decommission node 1...
assertEquals(4, endpointsKnownTo(cluster, 2));
cluster.run(decommission(), 1);
// Wait until node 2's view of the ring no longer includes the decommissioned node.
await().pollDelay(1, SECONDS).until(() -> endpointsKnownTo(cluster, 2) == 3);
// ...and verify that all data still exists on either node 2 or 3.
verify(cluster, "decom_hint_test", 2, 0, 128, ConsistencyLevel.ONE);
// Start node 4 back up and verify that all hints were delivered.
cluster.get(4).startup();
// Hint delivery is asynchronous; poll until node 4's row count matches the hint total.
await().atMost(30, SECONDS).pollDelay(3, SECONDS).until(() -> count(cluster, "decom_hint_test", 4).equals(totalHints));
// Now decommission both nodes 2 and 3...
cluster.run(GossipHelper.decommission(true), 2);
cluster.run(GossipHelper.decommission(true), 3);
await().pollDelay(1, SECONDS).until(() -> endpointsKnownTo(cluster, 4) == 1);
// ...and verify that even if we drop below the replication factor of 2, all data has been preserved.
verify(cluster, "decom_hint_test", 4, 0, 128, ConsistencyLevel.ONE);
}
}
Usage example of org.apache.cassandra.distributed.api.Feature.GOSSIP in the Apache Cassandra project:
class NodeNotInRingTest, method nodeNotInRingTest.
// Verifies that a node removed from the ring (as seen by the other nodes) receives no
// writes, while the remaining nodes still satisfy consistency level ALL between them.
@Test
public void nodeNotInRingTest() throws Throwable {
try (Cluster cluster = builder().withNodes(3).withConfig(config -> config.with(NETWORK, GOSSIP)).start()) {
cluster.schemaChange("CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2};");
cluster.schemaChange("CREATE TABLE IF NOT EXISTS " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
// Silence node 3's outbound gossip so it cannot re-announce itself after removal.
cluster.filters().verbs(Verb.GOSSIP_DIGEST_ACK.id, Verb.GOSSIP_DIGEST_SYN.id).from(3).outbound().drop().on();
// Remove node 3 from the ring as seen by nodes 1 and 2.
cluster.run(GossipHelper.removeFromRing(cluster.get(3)), 1, 2);
cluster.run(inst -> inst.runsOnInstance(() -> {
Assert.assertEquals("There should be 2 remaining nodes in ring", 2, StorageService.instance.effectiveOwnershipWithPort(KEYSPACE).size());
}), 1, 2);
// Write through both remaining coordinators at ALL; with node 3 out of the ring,
// RF=2 must be satisfied entirely by nodes 1 and 2.
populate(cluster, 0, 50, 1, ConsistencyLevel.ALL);
populate(cluster, 50, 100, 2, ConsistencyLevel.ALL);
Map<Integer, Long> counts = BootstrapTest.count(cluster);
// Node 3 received nothing; nodes 1 and 2 each hold all 100 rows.
Assert.assertEquals(0L, counts.get(3).longValue());
Assert.assertEquals(100L, counts.get(2).longValue());
Assert.assertEquals(100L, counts.get(1).longValue());
}
}
Usage example of org.apache.cassandra.distributed.api.Feature.GOSSIP in the Apache Cassandra project:
class AutoBootstrapTest, method autoBootstrapTest.
// Originally part of BootstrapTest. Broken out into separate test as the in-JVM dtests fail
// if too many instances are created in the same JVM. Bug in the JVM is suspected.
/**
 * Verifies that a node auto-bootstrapping into an existing 2-node cluster ends up with the
 * expected data: after bootstrap and join, every node reports the full 100 rows.
 */
@Test
public void autoBootstrapTest() throws Throwable {
int originalNodeCount = 2;
int expandedNodeCount = originalNodeCount + 1;
// Token supplier and topology are sized for the expanded cluster so the third node can join.
try (Cluster cluster = builder().withNodes(originalNodeCount).withTokenSupplier(TokenSupplier.evenlyDistributedTokens(expandedNodeCount)).withNodeIdTopology(NetworkTopology.singleDcNetworkTopology(expandedNodeCount, "dc0", "rack0")).withConfig(config -> config.with(NETWORK, GOSSIP)).start()) {
populate(cluster, 0, 100);
bootstrapAndJoinNode(cluster);
// JUnit's assertEquals(message, expected, actual): the expected row count (100L) goes first.
// The original call had the arguments swapped, which yields misleading failure messages.
for (Map.Entry<Integer, Long> e : count(cluster).entrySet()) Assert.assertEquals("Node " + e.getKey() + " has incorrect row state", 100L, e.getValue().longValue());
}
}
Usage example of org.apache.cassandra.distributed.api.Feature.GOSSIP in the Apache Cassandra project:
class CommunicationDuringDecommissionTest, method internodeConnectionsDuringDecom.
// Verifies that after node 1 is decommissioned, the remaining nodes neither receive
// messages from it nor attempt any new outbound connections to it.
@Test
public void internodeConnectionsDuringDecom() throws Throwable {
try (Cluster cluster = builder().withNodes(4).withConfig(config -> config.with(NETWORK, GOSSIP, NATIVE_PROTOCOL)).start()) {
BootstrapTest.populate(cluster, 0, 100);
cluster.run(decommission(), 1);
// Any message originating from the decommissioned node fails the test immediately.
cluster.filters().allVerbs().from(1).messagesMatching((i, i1, iMessage) -> {
throw new AssertionError("Decomissioned node should not send any messages");
}).drop();
// Per-node snapshot of connection_attempts toward the decommissioned node, taken on
// first observation; subsequent observations must not exceed it.
Map<Integer, Long> connectionAttempts = new HashMap<>();
long deadline = currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
// Wait 10 seconds and check if there are any new connection attempts to the decommissioned node
while (currentTimeMillis() <= deadline) {
for (int i = 2; i <= cluster.size(); i++) {
// 127.0.0.1:7012 is node 1's internode address; query the virtual table for its outbound state.
Object[][] res = cluster.get(i).executeInternal("SELECT active_connections, connection_attempts FROM system_views.internode_outbound WHERE address = '127.0.0.1' AND port = 7012");
Assert.assertEquals(1, res.length);
// No live connections to node 1 may exist.
Assert.assertEquals(0L, ((Long) res[0][0]).longValue());
long attempts = ((Long) res[0][1]).longValue();
if (connectionAttempts.get(i) == null)
connectionAttempts.put(i, attempts);
else
// The attempt counter must stay frozen at its first observed value.
Assert.assertEquals(connectionAttempts.get(i), (Long) attempts);
}
// Brief pause between polling rounds.
LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
}
}
}
Usage example of org.apache.cassandra.distributed.api.Feature.GOSSIP in the Apache Cassandra project:
class TopologyChangeTest, method testRestartNode.
// Verifies that a client driver observes a node restart: host count drops to 2 on shutdown,
// returns to 3 on startup, and a Down followed by an Up event is delivered for that node.
@Test
public void testRestartNode() throws Throwable {
// NOTE(review): `strategy` is a field defined outside this method — presumably a test
// parameter selecting the node provision strategy; confirm in the enclosing class.
try (Cluster control = init(Cluster.build().withNodes(3).withNodeProvisionStrategy(strategy).withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL)).start());
com.datastax.driver.core.Cluster cluster = com.datastax.driver.core.Cluster.builder().addContactPoint("127.0.0.1").build();
Session session = cluster.connect()) {
// Register a listener so driver topology events (Down/Up) are recorded for later assertion.
EventStateListener eventStateListener = new EventStateListener();
session.getCluster().register(eventStateListener);
control.get(3).shutdown().get();
// Driver metadata should soon report only 2 hosts up.
await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> Assert.assertEquals(2, cluster.getMetadata().getAllHosts().stream().filter(h -> h.isUp()).count()));
control.get(3).startup();
// After restart, all 3 hosts should be seen as up again.
await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> Assert.assertEquals(3, cluster.getMetadata().getAllHosts().stream().filter(h -> h.isUp()).count()));
// DOWN UP can also be seen if the jvm is slow and connections are closed, but make sure it at least happens once
// given the node restarts
assertThat(eventStateListener.events).containsSequence(new Event(Down, control.get(3)), new Event(Up, control.get(3)));
}
}
Aggregations