
Example 6 with ClusterManager

Use of org.neo4j.kernel.impl.ha.ClusterManager in project neo4j by neo4j.

From the class ClusterTest, method givenClusterWhenMasterGoesDownAndTxIsRunningThenDontWaitToSwitch: a write transaction is opened on a slave, the master is shut down while that transaction is still running, and the test asserts that the transaction is terminated (with status Status.General.DatabaseUnavailable) instead of delaying the switch to a new master.

@Test
public void givenClusterWhenMasterGoesDownAndTxIsRunningThenDontWaitToSwitch() throws Throwable {
    ClusterManager clusterManager = new ClusterManager.Builder(testDirectory.directory("waitfortx"))
            .withCluster(ClusterManager.clusterOfSize(3))
            .build();
    try {
        clusterManager.start();
        ClusterManager.ManagedCluster cluster = clusterManager.getCluster();
        cluster.await(allSeesAllAsAvailable());
        HighlyAvailableGraphDatabase slave = cluster.getAnySlave();
        Transaction tx = slave.beginTx();
        // Do a little write operation so that all "write" aspects of this tx are initialized properly
        slave.createNode();
        // Shut down master while we're keeping this transaction open
        cluster.shutdown(cluster.getMaster());
        cluster.await(masterAvailable());
        cluster.await(masterSeesSlavesAsAvailable(1));
        // Ending up here means that we didn't wait for this transaction to complete
        tx.success();
        try {
            tx.close();
            fail("Exception expected");
        } catch (Exception e) {
            assertThat(e, instanceOf(TransientTransactionFailureException.class));
            Throwable rootCause = rootCause(e);
            assertThat(rootCause, instanceOf(TransactionTerminatedException.class));
            assertThat(((TransactionTerminatedException) rootCause).status(), Matchers.equalTo(Status.General.DatabaseUnavailable));
        }
    } finally {
        clusterManager.stop();
    }
}
Also used : TransactionTerminatedException(org.neo4j.graphdb.TransactionTerminatedException) Transaction(org.neo4j.graphdb.Transaction) HighlyAvailableGraphDatabase(org.neo4j.kernel.ha.HighlyAvailableGraphDatabase) ClusterManager(org.neo4j.kernel.impl.ha.ClusterManager) TransientTransactionFailureException(org.neo4j.graphdb.TransientTransactionFailureException) IOException(java.io.IOException) Test(org.junit.Test)
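
The rootCause helper used in the catch block above is statically imported from the neo4j utilities (most likely org.neo4j.helpers.Exceptions.rootCause). For readers following along without those utilities, a minimal equivalent that simply walks the cause chain:

// Minimal stand-in for the statically imported rootCause helper
// (likely org.neo4j.helpers.Exceptions.rootCause); walks the cause
// chain down to the bottom-most Throwable.
private static Throwable rootCause(Throwable caught) {
    Throwable cause = caught;
    while (cause.getCause() != null) {
        cause = cause.getCause();
    }
    return cause;
}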

Example 7 with ClusterManager

Use of org.neo4j.kernel.impl.ha.ClusterManager in project neo4j by neo4j.

From the class ClusterTest, method testCluster: builds a three-instance cluster with a shared ha_server port range (localhost:6001-9999) and a tx_push_factor of 2, then delegates to the createClusterWithNode helper to start the cluster and create a node.

@Test
public void testCluster() throws Throwable {
    ClusterManager clusterManager = new ClusterManager.Builder(testDirectory.directory("testCluster"))
            .withSharedConfig(MapUtil.stringMap(
                    HaSettings.ha_server.name(), "localhost:6001-9999",
                    HaSettings.tx_push_factor.name(), "2"))
            .withCluster(clusterOfSize(3))
            .build();
    createClusterWithNode(clusterManager);
}
Also used : ClusterManager(org.neo4j.kernel.impl.ha.ClusterManager) Test(org.junit.Test)
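
The createClusterWithNode helper is defined elsewhere in ClusterTest and not shown on this page. A minimal sketch of what such a helper plausibly does, assuming it starts the cluster, performs one committed write on the master, and stops the manager (the body below is a reconstruction, not the project's actual code):

// Hypothetical reconstruction of the createClusterWithNode helper used above;
// the real implementation in ClusterTest may differ.
private static void createClusterWithNode(ClusterManager clusterManager) throws Throwable {
    try {
        clusterManager.start();
        ClusterManager.ManagedCluster cluster = clusterManager.getCluster();
        cluster.await(allSeesAllAsAvailable());
        // One committed write on the master proves the cluster can service writes.
        HighlyAvailableGraphDatabase master = cluster.getMaster();
        try (Transaction tx = master.beginTx()) {
            master.createNode();
            tx.success();
        }
    } finally {
        clusterManager.stop();
    }
}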

Example 8 with ClusterManager

Use of org.neo4j.kernel.impl.ha.ClusterManager in project neo4j by neo4j.

From the class LabelScanStoreHaIT, method setUp: the default label scan store kernel extension is replaced with a test extension, and the store directory of server 1 is seeded with labeled nodes before the cluster starts, so the test can observe how the label scan store behaves across the HA cluster.

@Before
public void setUp() {
    KernelExtensionFactory<?> testExtension = labelScanStoreExtension(monitor);
    TestHighlyAvailableGraphDatabaseFactory factory = new TestHighlyAvailableGraphDatabaseFactory();
    factory.removeKernelExtensions(extension -> extension.getClass().getName().contains("LabelScan"));
    factory.addKernelExtension(testExtension);
    ClusterManager clusterManager = new ClusterManager.Builder(testDirectory.directory("root"))
            .withDbFactory(factory)
            .withSharedSetting(GraphDatabaseSettings.label_index, labelIndexSettingName())
            .withStoreDirInitializer((serverId, storeDir) -> {
                if (serverId == 1) {
                    GraphDatabaseService db = new TestGraphDatabaseFactory()
                            .addKernelExtension(testExtension)
                            .newEmbeddedDatabaseBuilder(storeDir.getAbsoluteFile())
                            .setConfig(GraphDatabaseSettings.label_index, labelIndexSettingName())
                            .newGraphDatabase();
                    try {
                        createSomeLabeledNodes(db, new Label[]{Labels.First},
                                new Label[]{Labels.First, Labels.Second}, new Label[]{Labels.Second});
                    } finally {
                        db.shutdown();
                    }
                }
            })
            .build();
    life.add(clusterManager);
    life.start();
    cluster = clusterManager.getCluster();
    cluster.await(allSeesAllAsAvailable());
    cluster.await(allAvailabilityGuardsReleased());
}
Also used : Label(org.neo4j.graphdb.Label) TestDirectory(org.neo4j.test.rule.TestDirectory) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) LabelScanStore(org.neo4j.kernel.api.labelscan.LabelScanStore) ManagedCluster(org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster) LifeSupport(org.neo4j.kernel.lifecycle.LifeSupport) TestGraphDatabaseFactory(org.neo4j.test.TestGraphDatabaseFactory) Iterators.count(org.neo4j.helpers.collection.Iterators.count) TestHighlyAvailableGraphDatabaseFactory(org.neo4j.graphdb.factory.TestHighlyAvailableGraphDatabaseFactory) Rule(org.junit.Rule) GraphDatabaseService(org.neo4j.graphdb.GraphDatabaseService) ClusterManager.allSeesAllAsAvailable(org.neo4j.kernel.impl.ha.ClusterManager.allSeesAllAsAvailable) After(org.junit.After) KernelExtensionFactory(org.neo4j.kernel.extension.KernelExtensionFactory) ClusterManager.allAvailabilityGuardsReleased(org.neo4j.kernel.impl.ha.ClusterManager.allAvailabilityGuardsReleased) GraphDatabaseSettings(org.neo4j.graphdb.factory.GraphDatabaseSettings) Transaction(org.neo4j.graphdb.Transaction) ClusterManager(org.neo4j.kernel.impl.ha.ClusterManager) Assert.assertEquals(org.junit.Assert.assertEquals) Before(org.junit.Before)
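
Because the ClusterManager is registered with a LifeSupport rather than started directly, teardown reduces to a single call. A plausible @After counterpart, assuming life and cluster are fields of LabelScanStoreHaIT (the test's real teardown may differ):

@After
public void tearDown() {
    // LifeSupport stops and shuts down everything registered with it,
    // including the ClusterManager added in setUp().
    life.shutdown();
}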

Example 9 with ClusterManager

Use of org.neo4j.kernel.impl.ha.ClusterManager in project neo4j by neo4j.

From the class ClusterPartitionIT, method losingQuorumAbruptlyShouldMakeAllInstancesPendingAndReadOnly: in a five-instance cluster, three members are killed abruptly so that quorum is lost; the two survivors must switch to the PENDING state and become read-only, and repairing a single failed member must bring back an elected master and writable instances.

@Test
public void losingQuorumAbruptlyShouldMakeAllInstancesPendingAndReadOnly() throws Throwable {
    // we need 5 to differentiate between all other instances gone and just quorum being gone
    int clusterSize = 5;
    assumeTrue(TestRunConditions.shouldRunAtClusterSize(clusterSize));
    ClusterManager manager = new ClusterManager.Builder()
            .withRootDirectory(dir.cleanDirectory("testcluster"))
            .withCluster(ClusterManager.clusterOfSize(clusterSize))
            .withSharedConfig(config())
            .build();
    try {
        manager.start();
        ClusterManager.ManagedCluster cluster = manager.getCluster();
        cluster.await(allSeesAllAsAvailable());
        cluster.await(masterAvailable());
        HighlyAvailableGraphDatabase master = cluster.getMaster();
        addSomeData(master);
        /*
             * we need 3 failures. We'll end up with the old master and a slave connected. They should both be in
             * PENDING state, allowing reads but not writes. Repairing just one of the removed instances should
             * result in a master being elected and all instances becoming readable and writable.
             * The instances we remove do not need additional verification for their state. Their behaviour is already
             * known by other tests.
             */
        HighlyAvailableGraphDatabase failed1 = cluster.getAnySlave();
        HighlyAvailableGraphDatabase failed2 = cluster.getAnySlave(failed1);
        HighlyAvailableGraphDatabase failed3 = cluster.getAnySlave(failed1, failed2);
        HighlyAvailableGraphDatabase remainingSlave = cluster.getAnySlave(failed1, failed2, failed3);
        CountDownLatch masterTransitionLatch = new CountDownLatch(1);
        CountDownLatch slaveTransitionLatch = new CountDownLatch(1);
        setupForWaitOnSwitchToDetached(master, masterTransitionLatch);
        setupForWaitOnSwitchToDetached(remainingSlave, slaveTransitionLatch);
        ClusterManager.RepairKit rk1 = killAbruptly(cluster, failed1, failed2, failed3);
        cluster.await(memberSeesOtherMemberAsFailed(remainingSlave, failed1));
        cluster.await(memberSeesOtherMemberAsFailed(remainingSlave, failed2));
        cluster.await(memberSeesOtherMemberAsFailed(remainingSlave, failed3));
        cluster.await(memberSeesOtherMemberAsFailed(master, failed1));
        cluster.await(memberSeesOtherMemberAsFailed(master, failed2));
        cluster.await(memberSeesOtherMemberAsFailed(master, failed3));
        masterTransitionLatch.await();
        slaveTransitionLatch.await();
        ensureInstanceIsReadOnlyInPendingState(master);
        ensureInstanceIsReadOnlyInPendingState(remainingSlave);
        rk1.repair();
        cluster.await(masterAvailable(failed2, failed3));
        cluster.await(masterSeesSlavesAsAvailable(2));
        ensureInstanceIsWritable(master);
        ensureInstanceIsWritable(remainingSlave);
        ensureInstanceIsWritable(failed1);
    } finally {
        manager.shutdown();
    }
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) ClusterManager(org.neo4j.kernel.impl.ha.ClusterManager) Test(org.junit.Test)
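
The assertion helpers ensureInstanceIsWritable and ensureInstanceIsReadOnlyInPendingState are defined elsewhere in ClusterPartitionIT. A minimal sketch of what they plausibly check, attempting a read and a write and asserting on the outcome (the method bodies and the caught exception type are assumptions, not the project's actual code):

private static void ensureInstanceIsWritable(HighlyAvailableGraphDatabase db) {
    // On a writable instance a small write transaction must commit cleanly.
    try (Transaction tx = db.beginTx()) {
        db.createNode();
        tx.success();
    }
}

private static void ensureInstanceIsReadOnlyInPendingState(HighlyAvailableGraphDatabase db) {
    // Reads should still succeed while the instance is detached...
    try (Transaction tx = db.beginTx()) {
        db.getAllNodes().iterator().hasNext(); // touching the store proves reads work
        tx.success();
    }
    // ...but any write should be rejected.
    try {
        try (Transaction tx = db.beginTx()) {
            db.createNode();
            tx.success();
        }
        fail("Expected the write to fail on a read-only, PENDING instance");
    } catch (Exception expected) {
        // The exact exception type depends on where the write is rejected;
        // asserting on a broad type keeps this sketch honest.
    }
}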

Example 10 with ClusterManager

Use of org.neo4j.kernel.impl.ha.ClusterManager in project neo4j by neo4j.

From the class ClusterPartitionIT, method isolatedSlaveShouldRemoveSelfFromClusterAndBecomeReadOnly: a slave is cut off on all network channels; it must notice its eviction, switch to the PENDING state and serve reads only, then become writable again once the network failure is repaired.

@Test
public void isolatedSlaveShouldRemoveSelfFromClusterAndBecomeReadOnly() throws Throwable {
    int clusterSize = 3;
    ClusterManager manager = new ClusterManager.Builder()
            .withRootDirectory(dir.cleanDirectory("testcluster"))
            .withCluster(ClusterManager.clusterOfSize(clusterSize))
            .build();
    try {
        manager.start();
        ClusterManager.ManagedCluster cluster = manager.getCluster();
        cluster.await(allSeesAllAsAvailable());
        cluster.await(masterAvailable());
        HighlyAvailableGraphDatabase slave = cluster.getAnySlave();
        CountDownLatch slaveTransitionLatch = new CountDownLatch(1);
        setupForWaitOnSwitchToDetached(slave, slaveTransitionLatch);
        addSomeData(slave);
        ClusterManager.RepairKit fail = cluster.fail(slave, NetworkFlag.values());
        cluster.await(instanceEvicted(slave), 20);
        slaveTransitionLatch.await();
        ensureInstanceIsReadOnlyInPendingState(slave);
        fail.repair();
        cluster.await(allSeesAllAsAvailable());
        ensureInstanceIsWritable(slave);
    } finally {
        manager.safeShutdown();
    }
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) ClusterManager(org.neo4j.kernel.impl.ha.ClusterManager) Test(org.junit.Test)
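
Note the teardown difference: Example 9 calls manager.shutdown(), while this test calls manager.safeShutdown(). A plausible reading, assuming the "safe" variant simply swallows teardown errors so they cannot mask the test's real failure (a sketch only; the actual ClusterManager method may differ):

// Hypothetical sketch of what a "safe" shutdown wrapper would look like;
// the real ClusterManager.safeShutdown() implementation may differ.
public void safeShutdown() {
    try {
        shutdown();
    } catch (Throwable ignored) {
        // Suppress teardown failures so they don't hide the assertion
        // that actually failed inside the test body.
    }
}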

Aggregations

ClusterManager (org.neo4j.kernel.impl.ha.ClusterManager): 17
Test (org.junit.Test): 14
File (java.io.File): 5
HighlyAvailableGraphDatabase (org.neo4j.kernel.ha.HighlyAvailableGraphDatabase): 5
CountDownLatch (java.util.concurrent.CountDownLatch): 4
Transaction (org.neo4j.graphdb.Transaction): 3
ManagedCluster (org.neo4j.kernel.impl.ha.ClusterManager.ManagedCluster): 3
RepairKit (org.neo4j.kernel.impl.ha.ClusterManager.RepairKit): 3
IOException (java.io.IOException): 2
GraphDatabaseService (org.neo4j.graphdb.GraphDatabaseService): 2
LifeSupport (org.neo4j.kernel.lifecycle.LifeSupport): 2
ArrayList (java.util.ArrayList): 1
Map (java.util.Map): 1
After (org.junit.After): 1
Assert.assertEquals (org.junit.Assert.assertEquals): 1
Assert.assertTrue (org.junit.Assert.assertTrue): 1
Before (org.junit.Before): 1
Rule (org.junit.Rule): 1
Label (org.neo4j.graphdb.Label): 1
TransactionTerminatedException (org.neo4j.graphdb.TransactionTerminatedException): 1