
Example 26 with Cluster

Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.

Class RepairDigestTrackingTest, method testPurgeableTombstonesAreIgnored.

@SuppressWarnings("Convert2MethodRef")
@Test
public void testPurgeableTombstonesAreIgnored() throws Throwable {
    try (Cluster cluster = init(builder().withNodes(2).start())) {
        cluster.get(1).runOnInstance(() -> StorageProxy.instance.enableRepairedDataTrackingForRangeReads());
        cluster.schemaChange("CREATE TABLE " + KS_TABLE + " (k INT, c INT, v1 INT, v2 INT, PRIMARY KEY (k,c)) WITH gc_grace_seconds=0");
        // on node1 only insert some tombstones, then flush
        for (int i = 0; i < 10; i++) {
            cluster.get(1).executeInternal("DELETE v1 FROM " + KS_TABLE + " USING TIMESTAMP 0 WHERE k=? and c=? ", i, i);
        }
        cluster.get(1).flush(KEYSPACE);
        // insert data on both nodes and flush
        for (int i = 0; i < 10; i++) {
            cluster.coordinator(1).execute("INSERT INTO " + KS_TABLE + " (k, c, v2) VALUES (?, ?, ?) USING TIMESTAMP 1", ConsistencyLevel.ALL, i, i, i);
        }
        cluster.forEach(i -> i.flush(KEYSPACE));
        // nothing is repaired yet
        cluster.forEach(i -> i.runOnInstance(assertNotRepaired()));
        // mark everything repaired
        cluster.forEach(i -> i.runOnInstance(markAllRepaired()));
        cluster.forEach(i -> i.runOnInstance(assertRepaired()));
        // now overwrite on node2 only to generate digest mismatches, but don't flush so the repaired dataset is not affected
        for (int i = 0; i < 10; i++) {
            cluster.get(2).executeInternal("INSERT INTO " + KS_TABLE + " (k, c, v2) VALUES (?, ?, ?) USING TIMESTAMP 2", i, i, i * 2);
        }
        long ccBefore = getConfirmedInconsistencies(cluster.get(1));
        // Unfortunately we need to sleep here to ensure that nowInSec > the local deletion time of the tombstones
        TimeUnit.SECONDS.sleep(2);
        cluster.coordinator(1).execute("SELECT * FROM " + KS_TABLE, ConsistencyLevel.ALL);
        long ccAfter = getConfirmedInconsistencies(cluster.get(1));
        Assert.assertEquals("No repaired data inconsistencies should be detected", ccBefore, ccAfter);
    }
}
Also used: Cluster (org.apache.cassandra.distributed.Cluster), Test (org.junit.Test)
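
The helpers assertNotRepaired, assertRepaired, markAllRepaired and getConfirmedInconsistencies referenced above are defined elsewhere in RepairDigestTrackingTest and are not part of this excerpt. A minimal sketch of two of them, assuming KEYSPACE and TABLE constants naming the test table, the confirmedRepairedInconsistencies table metric, and the 4.0-era IMetadataSerializer.mutateRepairMetadata call (metric and metadata APIs differ between versions, so treat this as illustrative rather than the actual implementation):

private long getConfirmedInconsistencies(IInvokableInstance instance) {
    // Read the per-table meter that counts confirmed repaired-data mismatches.
    return instance.callOnInstance(() -> Keyspace.open(KEYSPACE)
                                                 .getColumnFamilyStore(TABLE)
                                                 .metric
                                                 .confirmedRepairedInconsistencies
                                                 .table
                                                 .getCount());
}

private IIsolatedExecutor.SerializableRunnable markAllRepaired() {
    return () -> {
        ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE);
        // Rewrite each live sstable's metadata so its data counts as repaired.
        cfs.getLiveSSTables().forEach(sstable -> {
            try {
                sstable.descriptor.getMetadataSerializer()
                       .mutateRepairMetadata(sstable.descriptor, System.currentTimeMillis(), null, false);
                sstable.reloadSSTableMetadata();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    };
}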

Example 27 with Cluster

Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.

Class RepairDigestTrackingTest, method testSnapshottingOnInconsistency.

@SuppressWarnings("Convert2MethodRef")
@Test
public void testSnapshottingOnInconsistency() throws Throwable {
    try (Cluster cluster = init(Cluster.create(2))) {
        cluster.get(1).runOnInstance(() -> StorageProxy.instance.enableRepairedDataTrackingForPartitionReads());
        cluster.schemaChange("CREATE TABLE " + KS_TABLE + " (k INT, c INT, v INT, PRIMARY KEY (k,c))");
        for (int i = 0; i < 10; i++) {
            cluster.coordinator(1).execute("INSERT INTO " + KS_TABLE + " (k, c, v) VALUES (0, ?, ?)", ConsistencyLevel.ALL, i, i);
        }
        cluster.forEach(c -> c.flush(KEYSPACE));
        for (int i = 10; i < 20; i++) {
            cluster.coordinator(1).execute("INSERT INTO " + KS_TABLE + " (k, c, v) VALUES (0, ?, ?)", ConsistencyLevel.ALL, i, i);
        }
        cluster.forEach(c -> c.flush(KEYSPACE));
        cluster.forEach(i -> i.runOnInstance(assertNotRepaired()));
        // Mark everything repaired on node2
        cluster.get(2).runOnInstance(markAllRepaired());
        cluster.get(2).runOnInstance(assertRepaired());
        // now overwrite on node1 only to generate digest mismatches
        cluster.get(1).executeInternal("INSERT INTO " + KS_TABLE + " (k, c, v) VALUES (0, ?, ?)", 5, 55);
        cluster.get(1).runOnInstance(assertNotRepaired());
        // Execute a partition read and assert inconsistency is detected (as nothing is repaired on node1)
        long ccBefore = getConfirmedInconsistencies(cluster.get(1));
        cluster.coordinator(1).execute("SELECT * FROM " + KS_TABLE + " WHERE k=0", ConsistencyLevel.ALL);
        long ccAfter = getConfirmedInconsistencies(cluster.get(1));
        Assert.assertEquals("confirmed count should increment by 1 after each partition read", ccBefore + 1, ccAfter);
        String snapshotName = DiagnosticSnapshotService.getSnapshotName(DiagnosticSnapshotService.REPAIRED_DATA_MISMATCH_SNAPSHOT_PREFIX);
        cluster.forEach(i -> i.runOnInstance(assertSnapshotNotPresent(snapshotName)));
        // re-introduce a mismatch, enable snapshotting and try again
        cluster.get(1).executeInternal("INSERT INTO " + KS_TABLE + " (k, c, v) VALUES (0, ?, ?)", 5, 555);
        cluster.get(1).runOnInstance(() -> StorageProxy.instance.enableSnapshotOnRepairedDataMismatch());
        cluster.coordinator(1).execute("SELECT * FROM " + KS_TABLE + " WHERE k=0", ConsistencyLevel.ALL);
        ccAfter = getConfirmedInconsistencies(cluster.get(1));
        Assert.assertEquals("confirmed count should increment by 1 after each partition read", ccBefore + 2, ccAfter);
        cluster.forEach(i -> i.runOnInstance(assertSnapshotPresent(snapshotName)));
    }
}
Also used: Cluster (org.apache.cassandra.distributed.Cluster), Test (org.junit.Test)
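
assertSnapshotPresent and assertSnapshotNotPresent are likewise helpers from the test class, not shown here. A hedged sketch of assertSnapshotPresent, assuming ColumnFamilyStore.snapshotExists and Guava's Uninterruptibles, and polling because diagnostic snapshots are requested asynchronously:

private IIsolatedExecutor.SerializableRunnable assertSnapshotPresent(String snapshotName) {
    return () -> {
        ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE);
        // The snapshot is taken asynchronously on mismatch, so poll briefly before failing.
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(30);
        while (!cfs.snapshotExists(snapshotName) && System.nanoTime() < deadline)
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        Assert.assertTrue("snapshot " + snapshotName + " not found", cfs.snapshotExists(snapshotName));
    };
}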

Example 28 with Cluster

Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.

Class RepairDigestTrackingTest, method testRepairedReadCountNormalizationWithInitialUnderread.

@Test
public void testRepairedReadCountNormalizationWithInitialUnderread() throws Throwable {
    // Verifies that repaired data digests stay consistent when replicas must
    // overread different amounts of repaired data to satisfy the limits of the
    // read request.
    try (Cluster cluster = init(Cluster.create(2))) {
        cluster.get(1).runOnInstance(() -> {
            StorageProxy.instance.enableRepairedDataTrackingForRangeReads();
            StorageProxy.instance.enableRepairedDataTrackingForPartitionReads();
        });
        cluster.schemaChange("CREATE TABLE " + KS_TABLE + " (k INT, c INT, v1 INT, PRIMARY KEY (k,c)) " + "WITH CLUSTERING ORDER BY (c DESC)");
        // insert data on both nodes and flush
        for (int i = 0; i < 20; i++) {
            cluster.coordinator(1).execute("INSERT INTO " + KS_TABLE + " (k, c, v1) VALUES (0, ?, ?) USING TIMESTAMP 0", ConsistencyLevel.ALL, i, i);
            cluster.coordinator(1).execute("INSERT INTO " + KS_TABLE + " (k, c, v1) VALUES (1, ?, ?) USING TIMESTAMP 1", ConsistencyLevel.ALL, i, i);
        }
        cluster.forEach(c -> c.flush(KEYSPACE));
        // nothing is repaired yet
        cluster.forEach(i -> i.runOnInstance(assertNotRepaired()));
        // mark everything repaired
        cluster.forEach(i -> i.runOnInstance(markAllRepaired()));
        cluster.forEach(i -> i.runOnInstance(assertRepaired()));
        // Add some unrepaired data to both nodes
        for (int i = 20; i < 30; i++) {
            cluster.coordinator(1).execute("INSERT INTO " + KS_TABLE + " (k, c, v1) VALUES (1, ?, ?) USING TIMESTAMP 1", ConsistencyLevel.ALL, i, i);
        }
        // And some more unrepaired data to node2 only. This causes node2 to read less repaired data than node1
        // when satisfying the limits of the read. So node2 needs to overread more repaired data than node1 when
        // calculating the repaired data digest.
        cluster.get(2).executeInternal("INSERT INTO " + KS_TABLE + " (k, c, v1) VALUES (1, ?, ?) USING TIMESTAMP 1", 30, 30);
        // Verify single partition read
        long ccBefore = getConfirmedInconsistencies(cluster.get(1));
        assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KS_TABLE + " WHERE k=1 LIMIT 20", ConsistencyLevel.ALL), rows(1, 30, 11));
        long ccAfterPartitionRead = getConfirmedInconsistencies(cluster.get(1));
        // Recreate a mismatch in unrepaired data and verify partition range read
        cluster.get(2).executeInternal("INSERT INTO " + KS_TABLE + " (k, c, v1) VALUES (1, ?, ?)", 31, 31);
        assertRows(cluster.coordinator(1).execute("SELECT * FROM " + KS_TABLE + " LIMIT 30", ConsistencyLevel.ALL), rows(1, 31, 2));
        long ccAfterRangeRead = getConfirmedInconsistencies(cluster.get(1));
        if (ccAfterPartitionRead != ccAfterRangeRead)
            if (ccAfterPartitionRead != ccBefore)
                fail("Both range and partition reads reported data inconsistencies but none were expected");
            else
                fail("Reported inconsistency during range read but none were expected");
        else if (ccAfterPartitionRead != ccBefore)
            fail("Reported inconsistency during partition read but none were expected");
    }
}
Also used: Cluster (org.apache.cassandra.distributed.Cluster), Test (org.junit.Test)
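
rows(partitionKey, start, end) is another helper not shown in this excerpt. Judging from its call sites, it builds the expected result set for one partition with clustering values running from start to end inclusive (descending when start > end, matching CLUSTERING ORDER BY (c DESC)); a hypothetical reimplementation:

private Object[][] rows(int partitionKey, int start, int end) {
    // Walk clustering values from start towards end inclusive, in either direction.
    int count = Math.abs(start - end) + 1;
    int step = start > end ? -1 : 1;
    Object[][] rows = new Object[count][];
    for (int idx = 0, c = start; idx < count; idx++, c += step)
        // v1 equals c for every row written by this test.
        rows[idx] = new Object[] { partitionKey, c, c };
    return rows;
}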

Example 29 with Cluster

Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.

Class RepairOperationalTest, method repairUnreplicatedKStest.

@Test
public void repairUnreplicatedKStest() throws IOException {
    try (Cluster cluster = init(Cluster.build(4).withDCs(2).withConfig(config -> config.with(GOSSIP).with(NETWORK)).start())) {
        cluster.schemaChange("alter keyspace " + KEYSPACE + " with replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':0}");
        cluster.schemaChange("create table " + KEYSPACE + ".tbl (id int primary key, i int)");
        for (int i = 0; i < 10; i++) cluster.coordinator(1).execute("insert into " + KEYSPACE + ".tbl (id, i) values (?, ?)", ConsistencyLevel.ALL, i, i);
        cluster.forEach(i -> i.flush(KEYSPACE));
        cluster.get(3).nodetoolResult("repair", "-full", KEYSPACE, "tbl", "-st", "0", "-et", "1000").asserts().failure().errorContains("Nothing to repair for (0,1000] in distributed_test_keyspace - aborting");
        cluster.get(3).nodetoolResult("repair", "-full", KEYSPACE, "tbl", "-st", "0", "-et", "1000", "--ignore-unreplicated-keyspaces").asserts().success().notificationContains("unreplicated keyspace is ignored since repair was called with --ignore-unreplicated-keyspaces");
    }
}
Also used: Cluster (org.apache.cassandra.distributed.Cluster)

Example 30 with Cluster

Use of org.apache.cassandra.distributed.Cluster in project cassandra by apache.

Class RepairOperationalTest, method mainDC.

@Test
public void mainDC() throws IOException {
    try (Cluster cluster = Cluster.build().withRacks(2, 1, 2).start()) {
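        // withRacks(2, 1, 2): 2 DCs, 1 rack per DC, 2 nodes per rack (4 nodes total)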
        // 1-2 : datacenter1
        // 3-4 : datacenter2
        cluster.schemaChange("CREATE KEYSPACE " + KEYSPACE + " WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':0}");
        cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (id int PRIMARY KEY, i int)");
        for (int i = 0; i < 10; i++) cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (id, i) VALUES (?, ?)", ConsistencyLevel.ALL, i, i);
        cluster.forEach(i -> i.flush(KEYSPACE));
        // choose a node in the DC that owns the replicas (datacenter1 holds all of them)
        IInvokableInstance node = cluster.get(1);
        Assertions.assertThat(node.config().localDatacenter()).isEqualTo("datacenter1");
        node.nodetoolResult("repair", "-full", "--ignore-unreplicated-keyspaces", "-st", "0", "-et", "1000", KEYSPACE, "tbl").asserts().success();
    }
}
Also used: IInvokableInstance (org.apache.cassandra.distributed.api.IInvokableInstance), Cluster (org.apache.cassandra.distributed.Cluster), Test (org.junit.Test)

Aggregations

Cluster (org.apache.cassandra.distributed.Cluster): 161 uses
Test (org.junit.Test): 151 uses
IInvokableInstance (org.apache.cassandra.distributed.api.IInvokableInstance): 37 uses
Assert (org.junit.Assert): 37 uses
IOException (java.io.IOException): 36 uses
Feature (org.apache.cassandra.distributed.api.Feature): 34 uses
GOSSIP (org.apache.cassandra.distributed.api.Feature.GOSSIP): 30 uses
NETWORK (org.apache.cassandra.distributed.api.Feature.NETWORK): 30 uses
ConsistencyLevel (org.apache.cassandra.distributed.api.ConsistencyLevel): 29 uses
List (java.util.List): 22 uses
ImmutableMap (com.google.common.collect.ImmutableMap): 21 uses
InetAddress (java.net.InetAddress): 20 uses
TokenSupplier (org.apache.cassandra.distributed.api.TokenSupplier): 20 uses
StorageService (org.apache.cassandra.service.StorageService): 18 uses
Arrays (java.util.Arrays): 17 uses
Collections (java.util.Collections): 17 uses
Assertions (org.assertj.core.api.Assertions): 17 uses
Map (java.util.Map): 16 uses
TestBaseImpl (org.apache.cassandra.distributed.test.TestBaseImpl): 15 uses
ICoordinator (org.apache.cassandra.distributed.api.ICoordinator): 14 uses