Use of org.neo4j.graphdb.factory.TestHighlyAvailableGraphDatabaseFactory in project neo4j by neo4j.
The class TestInstanceJoin, method start.
private static HighlyAvailableGraphDatabase start(File storeDir, int i, Map<String, String> additionalConfig) {
    HighlyAvailableGraphDatabase db = (HighlyAvailableGraphDatabase) new TestHighlyAvailableGraphDatabaseFactory()
            .newEmbeddedDatabaseBuilder(storeDir)
            .setConfig(ClusterSettings.cluster_server, "127.0.0.1:" + (5001 + i))
            .setConfig(ClusterSettings.server_id, i + "")
            .setConfig(HaSettings.ha_server, "127.0.0.1:" + (6666 + i))
            .setConfig(HaSettings.pull_interval, "0ms")
            .setConfig(additionalConfig)
            .newGraphDatabase();
    awaitStart(db);
    return db;
}
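The awaitStart helper is defined elsewhere in TestInstanceJoin and is not shown here. A minimal sketch of what such a helper might look like, assuming it simply blocks until the instance reports itself available (the timeout and failure behavior are assumptions, not the actual neo4j implementation):

// Hypothetical sketch (not the actual neo4j helper): block until the HA
// instance reports itself available, or fail fast.
private static void awaitStart(HighlyAvailableGraphDatabase db) {
    // isAvailable(timeoutMillis) is part of the public GraphDatabaseService API;
    // the one-minute timeout is an assumption for illustration.
    if (!db.isAvailable(60_000)) {
        throw new IllegalStateException("HA instance did not become available within 60 seconds");
    }
}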
Use of org.neo4j.graphdb.factory.TestHighlyAvailableGraphDatabaseFactory in project neo4j by neo4j.
The class ClusterTest, method testInstancesWithConflictingClusterPorts.
@Test
public void testInstancesWithConflictingClusterPorts() throws Throwable {
    HighlyAvailableGraphDatabase first = null;
    try {
        File masterStoreDir = testDirectory.directory("testConflictingClusterPortsMaster");
        first = (HighlyAvailableGraphDatabase) new TestHighlyAvailableGraphDatabaseFactory()
                .newEmbeddedDatabaseBuilder(masterStoreDir)
                .setConfig(ClusterSettings.initial_hosts, "127.0.0.1:5001")
                .setConfig(ClusterSettings.cluster_server, "127.0.0.1:5001")
                .setConfig(ClusterSettings.server_id, "1")
                .setConfig(HaSettings.ha_server, "127.0.0.1:6666")
                .newGraphDatabase();
        try {
            File slaveStoreDir = testDirectory.directory("testConflictingClusterPortsSlave");
            HighlyAvailableGraphDatabase failed = (HighlyAvailableGraphDatabase) new TestHighlyAvailableGraphDatabaseFactory()
                    .newEmbeddedDatabaseBuilder(slaveStoreDir)
                    .setConfig(ClusterSettings.initial_hosts, "127.0.0.1:5001")
                    // same cluster port as the first instance: startup must fail
                    .setConfig(ClusterSettings.cluster_server, "127.0.0.1:5001")
                    .setConfig(ClusterSettings.server_id, "2")
                    .setConfig(HaSettings.ha_server, "127.0.0.1:6667")
                    .newGraphDatabase();
            failed.shutdown();
            fail("Should not start when ports conflict");
        } catch (Exception e) {
            // expected: the second instance cannot bind the already-taken cluster port
        }
    } finally {
        if (first != null) {
            first.shutdown();
        }
    }
}
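For contrast, a working two-instance setup gives each member its own cluster_server port and lists the seed host in initial_hosts. A minimal sketch of the second instance's configuration (the ports, store directory variable, and host list are illustrative assumptions):

// Hypothetical: a distinct cluster port, so both instances can bind and join.
HighlyAvailableGraphDatabase second = (HighlyAvailableGraphDatabase) new TestHighlyAvailableGraphDatabaseFactory()
        .newEmbeddedDatabaseBuilder(slaveStoreDir)
        .setConfig(ClusterSettings.initial_hosts, "127.0.0.1:5001,127.0.0.1:5002")
        .setConfig(ClusterSettings.cluster_server, "127.0.0.1:5002") // unique per instance
        .setConfig(ClusterSettings.server_id, "2")
        .setConfig(HaSettings.ha_server, "127.0.0.1:6667")
        .newGraphDatabase();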
Use of org.neo4j.graphdb.factory.TestHighlyAvailableGraphDatabaseFactory in project neo4j by neo4j.
The class RollingUpgradeIT, method rollOver.
private void rollOver(LegacyDatabase legacyDb, int i, int masterServerId, int authorativeSlaveId) throws Exception {
    String storeDir = legacyDb.getStoreDir();
    if (i == 0) {
        storeDir += "new";
    }
    stop(i);
    File storeDirFile = new File(storeDir);
    debug("Starting " + i + " as current version");
    switch (authorativeSlaveId) {
        case -1:
            break;
        case -2:
            debug("At last master starting, deleting store so that it fetches from the new master");
            FileUtils.deleteRecursively(storeDirFile);
            break;
        default:
            debug("Consecutive slave starting, making it so that I will copy store from " + authorativeSlaveId);
            FileUtils.deleteRecursively(storeDirFile);
            storeDirFile.mkdirs();
            backup(authorativeSlaveId, storeDirFile);
            break;
    }
    startStandaloneDbToRunUpgrade(storeDirFile, i);
    // start that db up in this JVM
    newDbs[i] = (GraphDatabaseAPI) new TestHighlyAvailableGraphDatabaseFactory()
            .newEmbeddedDatabaseBuilder(storeDirFile)
            .setConfig(config(i))
            .newGraphDatabase();
    debug("Started " + i + " as current version");
    legacyDbs[i] = null;
    // issue transaction and see that it propagates
    if (i != masterServerId) {
        // if the instance is not the old master, create on the old master
        legacyDbs[masterServerId].doComplexLoad(centralNode);
        debug("Node created on " + i);
    } else {
        doComplexLoad(newDbs[1], centralNode);
    }
    for (int j = 0; j < legacyDbs.length; j++) {
        if (legacyDbs[j] != null) {
            legacyDbs[j].verifyComplexLoad(centralNode);
            debug("Verified on legacy db " + j);
        }
    }
    for (int j = 0; j < newDbs.length; j++) {
        if (newDbs[j] != null) {
            // check the database being verified in this iteration (newDbs[j], not newDbs[i])
            assertTrue("Rolled over database " + j + " not available within 1 minute",
                    newDbs[j].isAvailable(MINUTES.toMillis(1)));
            verifyComplexLoad(newDbs[j], centralNode);
            debug("Verified on new db " + j);
        }
    }
}
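The config(i) helper builds the per-instance HA configuration map passed to setConfig(Map) above; it is not shown in this excerpt. A plausible reconstruction, assuming it mirrors the port scheme used by the other tests on this page (every name and port offset here is an assumption, not the actual RollingUpgradeIT code):

// Hypothetical reconstruction of the config(i) helper used above.
private Map<String, String> config(int serverId) {
    Map<String, String> config = new HashMap<>();
    config.put(ClusterSettings.server_id.name(), String.valueOf(serverId));
    config.put(ClusterSettings.cluster_server.name(), "127.0.0.1:" + (5001 + serverId));
    config.put(ClusterSettings.initial_hosts.name(), "127.0.0.1:5001");
    config.put(HaSettings.ha_server.name(), "127.0.0.1:" + (6666 + serverId));
    return config;
}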
Use of org.neo4j.graphdb.factory.TestHighlyAvailableGraphDatabaseFactory in project neo4j by neo4j.
The class TestBranchedData, method migrationOfBranchedDataDirectories.
@Test
public void migrationOfBranchedDataDirectories() throws Exception {
    long[] timestamps = new long[3];
    for (int i = 0; i < timestamps.length; i++) {
        startDbAndCreateNode();
        timestamps[i] = moveAwayToLookLikeOldBranchedDirectory();
        // To make sure we get different timestamps
        Thread.sleep(1);
    }
    File dir = directory.directory();
    new TestHighlyAvailableGraphDatabaseFactory()
            .newEmbeddedDatabaseBuilder(dir)
            .setConfig(ClusterSettings.server_id, "1")
            .setConfig(ClusterSettings.initial_hosts, "localhost:5001")
            .newGraphDatabase()
            .shutdown();
    // It should have migrated those to the new location. Verify that.
    for (long timestamp : timestamps) {
        assertFalse("directory branched-" + timestamp + " still exists.", new File(dir, "branched-" + timestamp).exists());
        assertTrue("directory " + timestamp + " is not there", StoreUtil.getBranchedDataDirectory(dir, timestamp).exists());
    }
}
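The moveAwayToLookLikeOldBranchedDirectory helper is not shown in this excerpt. Judging from the assertions, it fakes the pre-migration layout by moving the current store into a "branched-<timestamp>" directory directly under the store dir, which startup then migrates to the location StoreUtil.getBranchedDataDirectory points at. A minimal sketch under those assumptions (the body is illustrative, not the actual neo4j test code):

// Hypothetical sketch: move current store files into the legacy
// "branched-<timestamp>" location directly under the store dir.
private long moveAwayToLookLikeOldBranchedDirectory() throws IOException {
    File dir = directory.directory();
    long timestamp = System.currentTimeMillis();
    File branchDir = new File(dir, "branched-" + timestamp);
    branchDir.mkdirs();
    for (File file : dir.listFiles()) {
        // leave previously created branched directories in place
        if (!file.getName().startsWith("branched-")) {
            FileUtils.moveFileToDirectory(file, branchDir);
        }
    }
    return timestamp;
}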
Use of org.neo4j.graphdb.factory.TestHighlyAvailableGraphDatabaseFactory in project neo4j by neo4j.
The class TestPullUpdates, method shouldPullUpdatesOnStartupNoMatterWhat.
@Test
public void shouldPullUpdatesOnStartupNoMatterWhat() throws Exception {
    HighlyAvailableGraphDatabase slave = null;
    HighlyAvailableGraphDatabase master = null;
    try {
        File testRootDir = clusterRule.cleanDirectory("shouldPullUpdatesOnStartupNoMatterWhat");
        File masterDir = new File(testRootDir, "master");
        master = (HighlyAvailableGraphDatabase) new TestHighlyAvailableGraphDatabaseFactory()
                .newEmbeddedDatabaseBuilder(masterDir)
                .setConfig(ClusterSettings.server_id, "1")
                .setConfig(ClusterSettings.initial_hosts, "localhost:5001")
                .newGraphDatabase();
        // Copy the store, then shutdown, so update pulling later makes sense
        File slaveDir = new File(testRootDir, "slave");
        slave = (HighlyAvailableGraphDatabase) new TestHighlyAvailableGraphDatabaseFactory()
                .newEmbeddedDatabaseBuilder(slaveDir)
                .setConfig(ClusterSettings.server_id, "2")
                .setConfig(ClusterSettings.initial_hosts, "localhost:5001")
                .newGraphDatabase();
        // Required to block until the slave has left for sure
        final CountDownLatch slaveLeftLatch = new CountDownLatch(1);
        final ClusterClient masterClusterClient = master.getDependencyResolver().resolveDependency(ClusterClient.class);
        masterClusterClient.addClusterListener(new ClusterListener.Adapter() {
            @Override
            public void leftCluster(InstanceId instanceId, URI member) {
                slaveLeftLatch.countDown();
                masterClusterClient.removeClusterListener(this);
            }
        });
        master.getDependencyResolver().resolveDependency(LogService.class).getInternalLog(getClass()).info("SHUTTING DOWN SLAVE");
        slave.shutdown();
        slave = null;
        // Make sure that the slave has left, because shutdown() may return before the master knows
        assertTrue("Timeout waiting for slave to leave", slaveLeftLatch.await(60, TimeUnit.SECONDS));
        long nodeId;
        try (Transaction tx = master.beginTx()) {
            Node node = master.createNode();
            node.setProperty("from", "master");
            nodeId = node.getId();
            tx.success();
        }
        // Store is already in place, should pull updates
        slave = (HighlyAvailableGraphDatabase) new TestHighlyAvailableGraphDatabaseFactory()
                .newEmbeddedDatabaseBuilder(slaveDir)
                .setConfig(ClusterSettings.server_id, "2")
                .setConfig(ClusterSettings.initial_hosts, "localhost:5001")
                // no pull updates, should pull on startup
                .setConfig(HaSettings.pull_interval, "0")
                .newGraphDatabase();
        // Make sure switch to slave completes and so does the update pulling on startup
        slave.beginTx().close();
        try (Transaction tx = slave.beginTx()) {
            assertEquals("master", slave.getNodeById(nodeId).getProperty("from"));
            tx.success();
        }
    } finally {
        if (slave != null) {
            slave.shutdown();
        }
        if (master != null) {
            master.shutdown();
        }
    }
}
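The slave.beginTx().close() trick works because beginning a transaction on a slave blocks until the switch to the slave role, and with it the startup update pull, has completed. A more explicit alternative a test could use is to poll for the data with a deadline; a sketch of such a helper (this is an illustration, not part of the neo4j test code):

// Hypothetical polling helper: wait until the node is visible on the slave.
private static void awaitPropagation(HighlyAvailableGraphDatabase db, long nodeId, long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
        try (Transaction tx = db.beginTx()) {
            db.getNodeById(nodeId); // throws NotFoundException if not yet pulled
            tx.success();
            return;
        } catch (NotFoundException e) {
            Thread.sleep(100); // not there yet, retry
        }
    }
    throw new AssertionError("Node " + nodeId + " did not propagate within " + timeoutMillis + " ms");
}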