Search in sources :

Example 36 with Cluster

use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.

Example from the class AutoBootstrapTest, method autoBootstrapTest.

// Originally part of BootstrapTest. Broken out into a separate test because the in-JVM
// dtests fail if too many instances are created in the same JVM (a bug in the JVM is suspected).
@Test
public void autoBootstrapTest() throws Throwable {
    int originalNodeCount = 2;
    int expandedNodeCount = originalNodeCount + 1;
    // Pre-compute tokens and topology for the expanded cluster so the extra node
    // can auto-bootstrap into a known token range once it joins.
    try (Cluster cluster = builder().withNodes(originalNodeCount)
                                    .withTokenSupplier(TokenSupplier.evenlyDistributedTokens(expandedNodeCount))
                                    .withNodeIdTopology(NetworkTopology.singleDcNetworkTopology(expandedNodeCount, "dc0", "rack0"))
                                    .withConfig(config -> config.with(NETWORK, GOSSIP))
                                    .start()) {
        populate(cluster, 0, 100);
        bootstrapAndJoinNode(cluster);
        // After bootstrap every node must report all 100 rows.
        // Fix: in JUnit's assertEquals(message, expected, actual) the expected value
        // (100L) comes first; the original had the arguments swapped, which would
        // produce misleading failure messages (pass/fail outcome is unchanged).
        for (Map.Entry<Integer, Long> e : count(cluster).entrySet())
            Assert.assertEquals("Node " + e.getKey() + " has incorrect row state", 100L, e.getValue().longValue());
    }
}
Also used : TokenSupplier(org.apache.cassandra.distributed.api.TokenSupplier) Map(java.util.Map) Test(org.junit.Test) Cluster(org.apache.cassandra.distributed.Cluster) Assert(org.junit.Assert) NetworkTopology(org.apache.cassandra.distributed.shared.NetworkTopology) NETWORK(org.apache.cassandra.distributed.api.Feature.NETWORK) GOSSIP(org.apache.cassandra.distributed.api.Feature.GOSSIP) Cluster(org.apache.cassandra.distributed.Cluster) Map(java.util.Map) Test(org.junit.Test)

Example 37 with Cluster

use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.

Example from the class CommunicationDuringDecommissionTest, method internodeConnectionsDuringDecom.

@Test
public void internodeConnectionsDuringDecom() throws Throwable {
    try (Cluster cluster = builder().withNodes(4)
                                    .withConfig(config -> config.with(NETWORK, GOSSIP, NATIVE_PROTOCOL))
                                    .start()) {
        BootstrapTest.populate(cluster, 0, 100);
        cluster.run(decommission(), 1);
        // After decommissioning, node 1 must be completely silent: fail the test the
        // moment it emits a message on any verb.
        cluster.filters().allVerbs().from(1).messagesMatching((from, to, message) -> {
            throw new AssertionError("Decomissioned node should not send any messages");
        }).drop();
        Map<Integer, Long> attemptsByNode = new HashMap<>();
        long deadline = currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
        // Poll for 10 seconds: each surviving node's outbound connection-attempt
        // counter toward the decommissioned node must stay frozen at its first
        // observed value, and there must be no active connections.
        while (currentTimeMillis() <= deadline) {
            for (int node = 2; node <= cluster.size(); node++) {
                Object[][] res = cluster.get(node).executeInternal("SELECT active_connections, connection_attempts FROM system_views.internode_outbound WHERE address = '127.0.0.1' AND port = 7012");
                Assert.assertEquals(1, res.length);
                Assert.assertEquals(0L, ((Long) res[0][0]).longValue());
                long attempts = ((Long) res[0][1]).longValue();
                // Record the first reading; on later iterations the count must not have moved.
                Long baseline = attemptsByNode.putIfAbsent(node, attempts);
                if (baseline != null)
                    Assert.assertEquals(baseline, (Long) attempts);
            }
            LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
        }
    }
}
Also used : Global.currentTimeMillis(org.apache.cassandra.utils.Clock.Global.currentTimeMillis) HashMap(java.util.HashMap) Test(org.junit.Test) NATIVE_PROTOCOL(org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL) TimeUnit(java.util.concurrent.TimeUnit) LockSupport(java.util.concurrent.locks.LockSupport) Map(java.util.Map) TestBaseImpl(org.apache.cassandra.distributed.test.TestBaseImpl) Cluster(org.apache.cassandra.distributed.Cluster) Assert(org.junit.Assert) NETWORK(org.apache.cassandra.distributed.api.Feature.NETWORK) GossipHelper.decommission(org.apache.cassandra.distributed.action.GossipHelper.decommission) GOSSIP(org.apache.cassandra.distributed.api.Feature.GOSSIP) HashMap(java.util.HashMap) Cluster(org.apache.cassandra.distributed.Cluster) Test(org.junit.Test)

Example 38 with Cluster

use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.

Example from the class StreamingTest, method testStreaming.

/**
 * Writes {@code rowCount} rows to every node but the last, then rebuilds the last node
 * and verifies it received every row intact via streaming.
 *
 * @param nodes               cluster size; node {@code nodes} is the stream target
 * @param replicationFactor   RF for the SimpleStrategy keyspace
 * @param rowCount            number of rows written and expected after streaming
 * @param compactionStrategy  compaction class name for the test table
 */
private void testStreaming(int nodes, int replicationFactor, int rowCount, String compactionStrategy) throws Throwable {
    // Single data directory: this test expects exactly one sstable to stream
    // (with ddirs = 3 we would get 3 sstables).
    try (Cluster cluster = builder().withNodes(nodes)
                                    .withDataDirCount(1)
                                    .withConfig(config -> config.with(NETWORK)).start()) {
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': " + replicationFactor + "};");
        cluster.schemaChange(String.format("CREATE TABLE %s.cf (k text, c1 text, c2 text, PRIMARY KEY (k)) WITH compaction = {'class': '%s', 'enabled': 'true'}", KEYSPACE, compactionStrategy));
        for (int i = 0; i < rowCount; ++i) {
            for (int n = 1; n < nodes; ++n) cluster.get(n).executeInternal(String.format("INSERT INTO %s.cf (k, c1, c2) VALUES (?, 'value1', 'value2');", KEYSPACE), Integer.toString(i));
        }
        // Forget prior range availability so rebuild streams the full range again.
        cluster.get(nodes).executeInternal("TRUNCATE system.available_ranges;");
        {
            // Sanity check: the target node starts empty.
            Object[][] results = cluster.get(nodes).executeInternal(String.format("SELECT k, c1, c2 FROM %s.cf;", KEYSPACE));
            Assert.assertEquals(0, results.length);
        }
        // collect message and state
        registerSink(cluster, nodes);
        cluster.get(nodes).runOnInstance(() -> StorageService.instance.rebuild(null, KEYSPACE, null, null));
        {
            Object[][] results = cluster.get(nodes).executeInternal(String.format("SELECT k, c1, c2 FROM %s.cf;", KEYSPACE));
            // Fix: assert against the rowCount parameter rather than the hard-coded 1000,
            // so the method verifies correctly for any caller-supplied row count.
            Assert.assertEquals(rowCount, results.length);
            Arrays.sort(results, Comparator.comparingInt(a -> Integer.parseInt((String) a[0])));
            for (int i = 0; i < results.length; ++i) {
                Assert.assertEquals(Integer.toString(i), results[i][0]);
                Assert.assertEquals("value1", results[i][1]);
                Assert.assertEquals("value2", results[i][2]);
            }
        }
    }
}
Also used : RECEIVED(org.apache.cassandra.streaming.messages.StreamMessage.Type.RECEIVED) InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort) Arrays(java.util.Arrays) PREPARE_SYNACK(org.apache.cassandra.streaming.messages.StreamMessage.Type.PREPARE_SYNACK) STREAM_INIT(org.apache.cassandra.streaming.messages.StreamMessage.Type.STREAM_INIT) STREAMING(org.apache.cassandra.streaming.StreamSession.State.STREAMING) InetAddress(java.net.InetAddress) PREPARE_ACK(org.apache.cassandra.streaming.messages.StreamMessage.Type.PREPARE_ACK) StreamSession(org.apache.cassandra.streaming.StreamSession) PREPARE_SYN(org.apache.cassandra.streaming.messages.StreamMessage.Type.PREPARE_SYN) Map(java.util.Map) StreamMessage(org.apache.cassandra.streaming.messages.StreamMessage) LinkedList(java.util.LinkedList) NETWORK(org.apache.cassandra.distributed.api.Feature.NETWORK) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StorageService(org.apache.cassandra.service.StorageService) Test(org.junit.Test) InetSocketAddress(java.net.InetSocketAddress) Collectors(java.util.stream.Collectors) Serializable(java.io.Serializable) List(java.util.List) IInvokableInstance(org.apache.cassandra.distributed.api.IInvokableInstance) PREPARING(org.apache.cassandra.streaming.StreamSession.State.PREPARING) STREAM(org.apache.cassandra.streaming.messages.StreamMessage.Type.STREAM) Cluster(org.apache.cassandra.distributed.Cluster) Queue(java.util.Queue) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) Assert(org.junit.Assert) WAIT_COMPLETE(org.apache.cassandra.streaming.StreamSession.State.WAIT_COMPLETE) Cluster(org.apache.cassandra.distributed.Cluster)

Example 39 with Cluster

use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.

Example from the class RestartTest, method test.

@Test
public void test() throws Exception {
    // Shut both nodes down (node 2 first, then node 1) and bring them back in the
    // reverse order, verifying a full-cluster stop/start round-trip succeeds.
    try (Cluster cluster = init(Cluster.build(2).withDataDirCount(1).start())) {
        for (int node : new int[] { 2, 1 })
            FBUtilities.waitOnFuture(cluster.get(node).shutdown());
        for (int node : new int[] { 1, 2 })
            cluster.get(node).startup();
    }
}
Also used : Cluster(org.apache.cassandra.distributed.Cluster) Test(org.junit.Test)

Example 40 with Cluster

use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.

Example from the class SSTableSkippingReadTest, method skippedSSTableWithPartitionDeletionShadowingDataOnAnotherNode2.

// Verifies that an sstable skipped by min/max-clustering checks is still considered
// during the merge when its partition tombstone must shadow data on another replica.
@Test
public void skippedSSTableWithPartitionDeletionShadowingDataOnAnotherNode2() throws Throwable {
    try (Cluster cluster = init(Cluster.create(2))) {
        cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (pk int, ck int, v int, PRIMARY KEY(pk, ck))"));
        // insert a partition tombstone on node 1, the deletion timestamp should end up being the sstable's minTimestamp
        cluster.get(1).executeInternal(withKeyspace("DELETE FROM %s.tbl USING TIMESTAMP 1 WHERE pk = 0"));
        // and a row from a different partition, to provide the sstable's min/max clustering
        cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (1, 1, 1) USING TIMESTAMP 3"));
        cluster.get(1).flush(KEYSPACE);
        // sstable 1 has minTimestamp == maxTimestamp == 1 and is skipped due to its min/max clusterings. Now we
        // insert a row which is not shadowed by the partition delete and flush to a second sstable. The first sstable
        // has a maxTimestamp > than the min timestamp of all sstables, so it is a candidate for reinclusion to the
        // merge. However, the second sstable's minTimestamp is > than the partition delete. This would cause the
        // first sstable not to be reincluded in the merge input, but we can't really make that decision as we don't
        // know what data and/or tombstones are present on other nodes
        cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (0, 6, 6) USING TIMESTAMP 2"));
        cluster.get(1).flush(KEYSPACE);
        // on node 2, add a row for the deleted partition with an older timestamp than the deletion so it should be shadowed
        cluster.get(2).executeInternal(withKeyspace("INSERT INTO %s.tbl (pk, ck, v) VALUES (0, 10, 10) USING TIMESTAMP 0"));
        Object[][] rows = cluster.coordinator(1).execute(withKeyspace("SELECT * FROM %s.tbl WHERE pk=0 AND ck > 5"), ALL);
        // we expect that the row from node 2 (0, 10, 10) was shadowed by the partition delete, but the row from
        // node 1 (0, 6, 6) was not.
        assertRows(rows, new Object[] { 0, 6, 6 });
    }
}
Also used : Cluster(org.apache.cassandra.distributed.Cluster) Test(org.junit.Test)

Aggregations

Cluster (org.apache.cassandra.distributed.Cluster)161 Test (org.junit.Test)151 IInvokableInstance (org.apache.cassandra.distributed.api.IInvokableInstance)37 Assert (org.junit.Assert)37 IOException (java.io.IOException)36 Feature (org.apache.cassandra.distributed.api.Feature)34 GOSSIP (org.apache.cassandra.distributed.api.Feature.GOSSIP)30 NETWORK (org.apache.cassandra.distributed.api.Feature.NETWORK)30 ConsistencyLevel (org.apache.cassandra.distributed.api.ConsistencyLevel)29 List (java.util.List)22 ImmutableMap (com.google.common.collect.ImmutableMap)21 InetAddress (java.net.InetAddress)20 TokenSupplier (org.apache.cassandra.distributed.api.TokenSupplier)20 StorageService (org.apache.cassandra.service.StorageService)18 Arrays (java.util.Arrays)17 Collections (java.util.Collections)17 Assertions (org.assertj.core.api.Assertions)17 Map (java.util.Map)16 TestBaseImpl (org.apache.cassandra.distributed.test.TestBaseImpl)15 ICoordinator (org.apache.cassandra.distributed.api.ICoordinator)14