Example 26 with ManagedCluster

Use of org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster in project neo4j by neo4j.

From class TxPushStrategyConfigIT, method slaveListIsCorrectAfterMasterSwitch.

@Test
public void slaveListIsCorrectAfterMasterSwitch() throws Exception {
    ManagedCluster cluster = startCluster(3, 1, HaSettings.TxPushStrategy.fixed_ascending);
    // Kill the current master and wait for a new one to be elected
    cluster.shutdown(cluster.getMaster());
    cluster.await(masterAvailable());
    HighlyAvailableGraphDatabase newMaster = cluster.getMaster();
    cluster.await(masterSeesSlavesAsAvailable(1));
    // A commit on the new master must be pushed according to the updated slave list
    int missed = createTransaction(cluster, newMaster);
    assertLastTransactions(cluster, lastTx(FIRST_SLAVE, BASE_TX_ID + 1, missed), lastTx(SECOND_SLAVE, BASE_TX_ID + 1, missed));
}
Also used: ManagedCluster(org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster) Test(org.junit.Test)
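
The fixed_ascending value above selects one of the master's transaction push strategies. For comparison, here is a minimal sketch of wiring such a cluster up by hand with the ClusterManager.Builder pattern from Example 28 below. It is a sketch under assumptions, not a verified recipe: `life` and `directory` stand for the LifeSupport and test-directory fixtures used there, stringMap and clusterOfSize are the statically imported helpers seen in these examples, and the existence of a HaSettings.tx_push_strategy setting alongside tx_push_factor is an assumption mirroring the enum used in the test.

@Test
public void startsClusterWithFixedAscendingPushSketch() throws Throwable {
    File dir = directory.directory();
    // Sketch: 3 instances, each commit pushed to exactly one slave, lowest server id first
    ClusterManager clusterManager = life.add(new ClusterManager.Builder(dir)
            .withCluster(clusterOfSize(3))
            .withSharedConfig(stringMap(
                    HaSettings.tx_push_factor.name(), "1",
                    // assumed setting name; mirrors HaSettings.TxPushStrategy.fixed_ascending above
                    HaSettings.tx_push_strategy.name(), "fixed_ascending"))
            .build());
    ManagedCluster cluster = clusterManager.getCluster();
    cluster.await(allSeesAllAsAvailable());
}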

Example 27 with ManagedCluster

Use of org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster in project neo4j by neo4j.

From class TestBasicHaOperations, method testBasicFailover.

@Test
public void testBasicFailover() throws Throwable {
    // Given
    ManagedCluster cluster = clusterRule.startCluster();
    HighlyAvailableGraphDatabase master = cluster.getMaster();
    HighlyAvailableGraphDatabase slave1 = cluster.getAnySlave();
    HighlyAvailableGraphDatabase slave2 = cluster.getAnySlave(slave1);
    // When the master is shut down
    long start = System.nanoTime();
    RepairKit repair = cluster.shutdown(master);
    try {
        logger.getLogger().warning("Shut down master");
        cluster.await(ClusterManager.masterAvailable());
        long end = System.nanoTime();
        logger.getLogger().warning("Failover took: " + (end - start) / 1_000_000 + "ms");
        // Then exactly one of the two remaining instances must have been promoted to master
        boolean slave1Master = slave1.isMaster();
        boolean slave2Master = slave2.isMaster();
        if (slave1Master) {
            assertFalse(slave2Master);
        } else {
            assertTrue(slave2Master);
        }
    } finally {
        repair.repair();
    }
}
Also used: ManagedCluster(org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster) RepairKit(org.neo4j.kernel.impl.ha.ClusterManager.RepairKit) Test(org.junit.Test)
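
The try/finally bracket around the RepairKit is the part of this test worth reusing: whatever the assertions do, the downed master is brought back so subsequent tests see a complete cluster. A hedged sketch of hoisting that bracket into a helper follows; withInstanceDown is a hypothetical name, not ClusterManager API.

// Hypothetical helper (not part of ClusterManager): run 'test' while 'instance' is down,
// always repairing the cluster afterwards.
static void withInstanceDown(ManagedCluster cluster, HighlyAvailableGraphDatabase instance, Runnable test) throws Throwable {
    RepairKit repair = cluster.shutdown(instance);
    try {
        test.run();
    } finally {
        repair.repair(); // bring the instance back up regardless of test outcome
    }
}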

Example 28 with ManagedCluster

Use of org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster in project neo4j by neo4j.

From class TestBranchedData, method shouldCopyStoreFromMasterIfBranchedInLiveScenario.

/**
 * The main difference to {@link #shouldCopyStoreFromMasterIfBranched()} is that no instances are shut down
 * during the course of the test. This is to test the functionality of some internal components being restarted.
 */
@SuppressWarnings("unchecked")
@Test
public void shouldCopyStoreFromMasterIfBranchedInLiveScenario() throws Throwable {
    // GIVEN a cluster of 3, all having the same data (node A)
    // thor is whoever is the master to begin with
    // odin is whoever is picked as _the_ slave given thor as initial master
    File dir = directory.directory();
    ClusterManager clusterManager = life.add(new ClusterManager.Builder(dir)
            .withSharedConfig(stringMap(
                    // Effectively disable automatic transaction propagation within the cluster
                    HaSettings.tx_push_factor.name(), "0",
                    HaSettings.pull_interval.name(), "0"))
            .build());
    ManagedCluster cluster = clusterManager.getCluster();
    cluster.await(allSeesAllAsAvailable());
    HighlyAvailableGraphDatabase thor = cluster.getMaster();
    String indexName = "valhalla";
    createNode(thor, "A", andIndexInto(indexName));
    cluster.sync();
    // WHEN creating a node B1 on thor (note the disabled cluster transaction propagation)
    createNode(thor, "B1", andIndexInto(indexName));
    // and right after that failing the master so that it falls out of the cluster
    HighlyAvailableGraphDatabase odin = cluster.getAnySlave();
    cluster.info(format("%n   ==== TAMPERING WITH " + thor + "'s CABLES ====%n"));
    RepairKit thorRepairKit = cluster.fail(thor);
    // try to create a transaction on odin until it succeeds
    cluster.await(ClusterManager.masterAvailable(thor));
    cluster.await(ClusterManager.memberThinksItIsRole(odin, HighAvailabilityModeSwitcher.MASTER));
    assertTrue(odin.isMaster());
    retryOnTransactionFailure(odin, db -> createNode(db, "B2", andIndexInto(indexName)));
    // perform transactions so that the index files change under the hood
    Set<File> odinLuceneFilesBefore = Iterables.asSet(gatherLuceneFiles(odin, indexName));
    for (char prefix = 'C'; !changed(odinLuceneFilesBefore, Iterables.asSet(gatherLuceneFiles(odin, indexName))); prefix++) {
        char fixedPrefix = prefix;
        retryOnTransactionFailure(odin, db -> createNodes(odin, String.valueOf(fixedPrefix), 10_000, andIndexInto(indexName)));
        // force() will most likely cause Lucene legacy indexes to commit and change their file structure
        cluster.force();
    }
    // so anyway, when thor comes back into the cluster
    cluster.info(format("%n   ==== REPAIRING CABLES ====%n"));
    cluster.await(memberThinksItIsRole(thor, UNKNOWN));
    BranchMonitor thorHasBranched = installBranchedDataMonitor(thor);
    thorRepairKit.repair();
    cluster.await(memberThinksItIsRole(thor, SLAVE));
    cluster.await(memberThinksItIsRole(odin, MASTER));
    cluster.await(allSeesAllAsAvailable());
    assertFalse(thor.isMaster());
    assertTrue("No store-copy performed", thorHasBranched.copyCompleted);
    assertTrue("Store-copy unsuccessful", thorHasBranched.copySucessful);
    // Now do some more transactions on current master (odin) and have thor pull those
    for (int i = 0; i < 3; i++) {
        int ii = i;
        retryOnTransactionFailure(odin, db -> createNodes(odin, String.valueOf("" + ii), 10, andIndexInto(indexName)));
        cluster.sync();
        cluster.force();
    }
    // THEN thor should be a slave, having copied a store from master and good to go
    assertFalse(hasNode(thor, "B1"));
    assertTrue(hasNode(thor, "B2"));
    assertTrue(hasNode(thor, "C-0"));
    assertTrue(hasNode(thor, "0-0"));
    assertTrue(hasNode(odin, "0-0"));
}
Also used: HighlyAvailableGraphDatabase(org.neo4j.kernel.ha.HighlyAvailableGraphDatabase) ManagedCluster(org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster) RepairKit(org.neo4j.kernel.impl.ha.ClusterManager.RepairKit) File(java.io.File) ClusterManager(org.neo4j.kernel.impl.ha.ClusterManager) Test(org.junit.Test)
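
retryOnTransactionFailure is defined elsewhere in TestBranchedData and is not shown on this page. A plausible sketch of such a retry loop, under the assumption that a write attempted while odin is still switching to master surfaces as org.neo4j.graphdb.TransactionFailureException and that the work fits a java.util.function.Consumer; the back-off interval is arbitrary.

static void retryOnTransactionFailure(GraphDatabaseService db, Consumer<GraphDatabaseService> work) throws InterruptedException {
    while (true) {
        try {
            // attempt the transactional work, e.g. db -> createNode(db, "B2", andIndexInto(indexName))
            work.accept(db);
            return; // the transaction got through
        } catch (TransactionFailureException e) {
            Thread.sleep(100); // instance not writable yet; back off and retry
        }
    }
}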

Example 29 with ManagedCluster

Use of org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster in project neo4j by neo4j.

From class TestBranchedData, method shouldCopyStoreFromMasterIfBranched.

@Test
public void shouldCopyStoreFromMasterIfBranched() throws Throwable {
    // GIVEN
    File dir = directory.directory();
    ClusterManager clusterManager = life.add(new ClusterManager.Builder(dir).withCluster(clusterOfSize(2)).build());
    ManagedCluster cluster = clusterManager.getCluster();
    cluster.await(allSeesAllAsAvailable());
    createNode(cluster.getMaster(), "A");
    cluster.sync();
    // WHEN the slave is taken down and the master commits transactions the slave will miss
    HighlyAvailableGraphDatabase slave = cluster.getAnySlave();
    File storeDir = new File(slave.getStoreDir());
    RepairKit starter = cluster.shutdown(slave);
    HighlyAvailableGraphDatabase master = cluster.getMaster();
    createNode(master, "B1");
    createNode(master, "C");
    // ... while a conflicting transaction is committed directly into the downed slave's store, branching it
    createNodeOffline(storeDir, "B2");
    slave = starter.repair();
    // THEN the restarted slave should detect the branch, copy the master's store and serve transactions again
    cluster.await(allSeesAllAsAvailable());
    slave.beginTx().close();
}
Also used: HighlyAvailableGraphDatabase(org.neo4j.kernel.ha.HighlyAvailableGraphDatabase) ManagedCluster(org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster) RepairKit(org.neo4j.kernel.impl.ha.ClusterManager.RepairKit) File(java.io.File) ClusterManager(org.neo4j.kernel.impl.ha.ClusterManager) Test(org.junit.Test)
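
createNodeOffline, also defined outside this excerpt, is what manufactures the branch: it commits into the slave's store files while the instance is shut down, so the slave's transaction history diverges from the master's. A hedged sketch of what it plausibly does, opening the store with a plain embedded GraphDatabaseFactory; the property name used here is illustrative.

static void createNodeOffline(File storeDir, String name) {
    // Open the stopped slave's store directly, bypassing the cluster entirely
    GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase(storeDir);
    try (Transaction tx = db.beginTx()) {
        db.createNode().setProperty("name", name); // the diverging transaction
        tx.success();
    } finally {
        db.shutdown();
    }
}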

Example 30 with ManagedCluster

Use of org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster in project neo4j by neo4j.

From class TestClusterIndexDeletion, method givenClusterWithCreatedIndexWhenDeleteIndexOnMasterThenIndexIsDeletedOnSlave.

@Test
public void givenClusterWithCreatedIndexWhenDeleteIndexOnMasterThenIndexIsDeletedOnSlave() throws Throwable {
    // Given a cluster where an index is created on the master
    ManagedCluster cluster = clusterRule.startCluster();
    HighlyAvailableGraphDatabase master = cluster.getMaster();
    try (Transaction tx = master.beginTx()) {
        master.index().forNodes("Test");
        tx.success();
    }
    cluster.sync();
    HighlyAvailableGraphDatabase aSlave = cluster.getAnySlave();
    try (Transaction tx = aSlave.beginTx()) {
        assertThat(aSlave.index().existsForNodes("Test"), equalTo(true));
        tx.success();
    }
    // When
    try (Transaction tx = master.beginTx()) {
        master.index().forNodes("Test").delete();
        tx.success();
    }
    cluster.sync();
    // Then
    HighlyAvailableGraphDatabase anotherSlave = cluster.getAnySlave();
    try (Transaction tx = anotherSlave.beginTx()) {
        assertThat(anotherSlave.index().existsForNodes("Test"), equalTo(false));
        tx.success();
    }
}
Also used: Transaction(org.neo4j.graphdb.Transaction) HighlyAvailableGraphDatabase(org.neo4j.kernel.ha.HighlyAvailableGraphDatabase) ManagedCluster(org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster) Test(org.junit.Test)
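
All three transactional blocks in this test repeat the same beginTx/success/close bracket. A small hypothetical helper (not neo4j API, shown only as a readability sketch, assuming java.util.function.Consumer) that factors the bracket out:

static void inTx(GraphDatabaseService db, Consumer<GraphDatabaseService> work) {
    try (Transaction tx = db.beginTx()) {
        work.accept(db);
        tx.success(); // mark for commit; otherwise close() rolls back
    }
}

With it, the deletion step would read inTx(master, db -> db.index().forNodes("Test").delete());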

Aggregations

ManagedCluster (org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster) - 34 usages
Test (org.junit.Test) - 32 usages
HighlyAvailableGraphDatabase (org.neo4j.kernel.ha.HighlyAvailableGraphDatabase) - 16 usages
Transaction (org.neo4j.graphdb.Transaction) - 15 usages
Node (org.neo4j.graphdb.Node) - 13 usages
InstanceId (org.neo4j.cluster.InstanceId) - 6 usages
File (java.io.File) - 4 usages
IndexDefinition (org.neo4j.graphdb.schema.IndexDefinition) - 4 usages
WriteOperationsNotAllowedException (org.neo4j.graphdb.security.WriteOperationsNotAllowedException) - 4 usages
ClusterManager (org.neo4j.kernel.impl.ha.ClusterManager) - 4 usages
RepairKit (org.neo4j.kernel.impl.ha.ClusterManager.RepairKit) - 4 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) - 3 usages
GraphDatabaseAPI (org.neo4j.kernel.internal.GraphDatabaseAPI) - 3 usages
CountDownLatch (java.util.concurrent.CountDownLatch) - 2 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) - 2 usages
CountsTracker (org.neo4j.kernel.impl.store.counts.CountsTracker) - 2 usages
DbRepresentation (org.neo4j.test.DbRepresentation) - 2 usages
Random (java.util.Random) - 1 usage
ThreadLocalRandom (java.util.concurrent.ThreadLocalRandom) - 1 usage
AtomicLong (java.util.concurrent.atomic.AtomicLong) - 1 usage