Use of org.neo4j.kernel.lifecycle.LifeSupport in project neo4j by neo4j.
The class StoreCopyClient, method writeTransactionsToActiveLogFile:
private void writeTransactionsToActiveLogFile(File tempStoreDir, Response<?> response) throws Exception {
    LifeSupport life = new LifeSupport();
    try {
        // Start the log and appender
        PhysicalLogFiles logFiles = new PhysicalLogFiles(tempStoreDir, fs);
        LogHeaderCache logHeaderCache = new LogHeaderCache(10);
        ReadOnlyLogVersionRepository logVersionRepository = new ReadOnlyLogVersionRepository(pageCache, tempStoreDir);
        ReadOnlyTransactionIdStore readOnlyTransactionIdStore = new ReadOnlyTransactionIdStore(pageCache, tempStoreDir);
        LogFile logFile = life.add(new PhysicalLogFile(fs, logFiles, Long.MAX_VALUE, /* don't rotate */
                readOnlyTransactionIdStore::getLastCommittedTransactionId, logVersionRepository,
                new Monitors().newMonitor(PhysicalLogFile.Monitor.class), logHeaderCache));
        life.start();

        // Just write all transactions to the active log version. Remember that this is after a store copy
        // where there are no logs, and the transaction stream we're about to write will probably contain
        // transactions that go back some time, before the last committed transaction id. So we cannot
        // use a TransactionAppender, since it has checks for which transactions may be appended.
        FlushableChannel channel = logFile.getWriter();
        final TransactionLogWriter writer = new TransactionLogWriter(new LogEntryWriter(channel));
        final AtomicLong firstTxId = new AtomicLong(BASE_TX_ID);
        response.accept(new Response.Handler() {

            @Override
            public void obligation(long txId) throws IOException {
                throw new UnsupportedOperationException("Shouldn't be called");
            }

            @Override
            public Visitor<CommittedTransactionRepresentation, Exception> transactions() {
                return transaction -> {
                    long txId = transaction.getCommitEntry().getTxId();
                    if (firstTxId.compareAndSet(BASE_TX_ID, txId)) {
                        monitor.startReceivingTransactions(txId);
                    }
                    writer.append(transaction.getTransactionRepresentation(), txId);
                    return false;
                };
            }
        });
        long endTxId = firstTxId.get();
        if (endTxId != BASE_TX_ID) {
            monitor.finishReceivingTransactions(endTxId);
        }
        long currentLogVersion = logVersionRepository.getCurrentLogVersion();
        writer.checkPoint(new LogPosition(currentLogVersion, LOG_HEADER_SIZE));

        // And since we write this manually we need to set the correct transaction id in the
        // header of the log that we just wrote.
        File currentLogFile = logFiles.getLogFileForVersion(currentLogVersion);
        writeLogHeader(fs, currentLogFile, currentLogVersion, max(BASE_TX_ID, endTxId - 1));
        if (!forensics) {
            // Since we just created a new log and put a checkpoint into it at offset LOG_HEADER_SIZE,
            // we need to update the last transaction offset to this newly defined offset. Otherwise the
            // next checkpoint that uses the last transaction offset would be created for a non-existing
            // offset that is, in most cases, bigger than the new log size. Recovery would treat that as
            // the last checkpoint and would not try to recover the store until the new last closed
            // transaction offset overtakes the old one. Until that happens it would be impossible for
            // the recovery process to restore the store.
            File neoStore = new File(tempStoreDir, MetaDataStore.DEFAULT_NAME);
            MetaDataStore.setRecord(pageCache, neoStore,
                    MetaDataStore.Position.LAST_CLOSED_TRANSACTION_LOG_BYTE_OFFSET, LOG_HEADER_SIZE);
        }
    } finally {
        life.shutdown();
    }
}
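Every example in this listing follows the same lifecycle pattern: components are registered with LifeSupport.add, started together with start(), and torn down in a finally block with shutdown(). The following is a minimal, self-contained sketch of that pattern; the anonymous LifecycleAdapter component and the class name LifeSupportUsageSketch are placeholders for illustration, not part of the original code.

import org.neo4j.kernel.lifecycle.Lifecycle;
import org.neo4j.kernel.lifecycle.LifeSupport;
import org.neo4j.kernel.lifecycle.LifecycleAdapter;

public class LifeSupportUsageSketch {
    public static void main(String[] args) {
        LifeSupport life = new LifeSupport();
        try {
            // add() registers a component and returns it, so the result can be captured inline,
            // just like life.add(new PhysicalLogFile(...)) in the example above
            Lifecycle component = life.add(new LifecycleAdapter() {
                @Override
                public void start() {
                    System.out.println("component started");
                }

                @Override
                public void stop() {
                    System.out.println("component stopped");
                }
            });
            // start() brings every registered component up, in registration order
            life.start();
            // ... use the started components here ...
        } finally {
            // shutdown() stops and shuts down all components, even if the body above threw
            life.shutdown();
        }
    }
}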
Use of org.neo4j.kernel.lifecycle.LifeSupport in project neo4j by neo4j.
The class SwitchToSlave, method copyStoreFromMasterIfNeeded:
private void copyStoreFromMasterIfNeeded(URI masterUri, URI me, CancellationRequest cancellationRequest) throws Throwable {
    if (!isStorePresent(pageCache, storeDir)) {
        boolean success = false;
        monitor.storeCopyStarted();
        LifeSupport copyLife = new LifeSupport();
        try {
            MasterClient masterClient = newMasterClient(masterUri, me, null, copyLife);
            copyLife.start();
            boolean masterIsOld = MasterClient.CURRENT.compareTo(masterClient.getProtocolVersion()) > 0;
            if (masterIsOld) {
                throw new UnableToCopyStoreFromOldMasterException(MasterClient.CURRENT.getApplicationProtocol(),
                        masterClient.getProtocolVersion().getApplicationProtocol());
            } else {
                copyStoreFromMaster(masterClient, cancellationRequest, MoveAfterCopy.moveReplaceExisting());
                success = true;
            }
        } finally {
            monitor.storeCopyCompleted(success);
            copyLife.shutdown();
        }
    }
}
Use of org.neo4j.kernel.lifecycle.LifeSupport in project neo4j by neo4j.
The class SwitchToSlaveCopyThenBranch, method copyStore:
private void copyStore(URI masterUri, URI me, CancellationRequest cancellationRequest, MoveAfterCopy moveAfterCopy) throws Throwable {
    boolean success = false;
    monitor.storeCopyStarted();
    LifeSupport copyLife = new LifeSupport();
    try {
        MasterClient masterClient = newMasterClient(masterUri, me, null, copyLife);
        copyLife.start();
        boolean masterIsOld = MasterClient.CURRENT.compareTo(masterClient.getProtocolVersion()) > 0;
        if (masterIsOld) {
            throw new UnableToCopyStoreFromOldMasterException(MasterClient.CURRENT.getApplicationProtocol(),
                    masterClient.getProtocolVersion().getApplicationProtocol());
        } else {
            copyStoreFromMaster(masterClient, cancellationRequest, moveAfterCopy);
            success = true;
        }
    } finally {
        monitor.storeCopyCompleted(success);
        copyLife.shutdown();
    }
}
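The two SwitchToSlave variants above differ only in the MoveAfterCopy strategy passed to copyStoreFromMaster; both share the same success-flag pattern, in which the monitor is notified from the finally block whether or not the copy succeeded, and the LifeSupport holding the MasterClient is shut down afterwards. A minimal sketch of that pattern follows; the StoreCopyMonitor interface and copyWithMonitor method are hypothetical, introduced only to isolate the pattern.

// Hypothetical monitor interface, mirroring the storeCopyStarted/storeCopyCompleted callbacks above.
interface StoreCopyMonitor {
    void storeCopyStarted();
    void storeCopyCompleted(boolean wasSuccessful);
}

class StoreCopySketch {
    static void copyWithMonitor(StoreCopyMonitor monitor, Runnable copyAction) {
        boolean success = false;
        monitor.storeCopyStarted();
        try {
            copyAction.run();
            // reached only if copyAction completed without throwing
            success = true;
        } finally {
            // always reports completion; success stays false if an exception propagated
            monitor.storeCopyCompleted(success);
        }
    }
}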
Use of org.neo4j.kernel.lifecycle.LifeSupport in project neo4j by neo4j.
The class MultiPaxosServer, method start:
public void start() throws IOException {
    broadcastSerializer = new AtomicBroadcastSerializer(new ObjectStreamFactory(), new ObjectStreamFactory());
    final LifeSupport life = new LifeSupport();
    try {
        MessageTimeoutStrategy timeoutStrategy = new MessageTimeoutStrategy(new FixedTimeoutStrategy(5000))
                .timeout(HeartbeatMessage.sendHeartbeat, 200);
        Monitors monitors = new Monitors();
        NetworkedServerFactory serverFactory = new NetworkedServerFactory(life,
                new MultiPaxosServerFactory(new ClusterConfiguration("default", NullLogProvider.getInstance()),
                        NullLogProvider.getInstance(), monitors.newMonitor(StateMachines.Monitor.class)),
                timeoutStrategy, NullLogProvider.getInstance(), new ObjectStreamFactory(), new ObjectStreamFactory(),
                monitors.newMonitor(NetworkReceiver.Monitor.class), monitors.newMonitor(NetworkSender.Monitor.class),
                monitors.newMonitor(NamedThreadFactory.Monitor.class));
        ServerIdElectionCredentialsProvider electionCredentialsProvider = new ServerIdElectionCredentialsProvider();
        server = serverFactory.newNetworkedServer(Config.embeddedDefaults(), new InMemoryAcceptorInstanceStore(),
                electionCredentialsProvider);
        server.addBindingListener(electionCredentialsProvider);
        server.addBindingListener(new BindingListener() {

            @Override
            public void listeningAt(URI me) {
                System.out.println("Listening at:" + me);
            }
        });
        cluster = server.newClient(Cluster.class);
        cluster.addClusterListener(new ClusterListener() {

            @Override
            public void enteredCluster(ClusterConfiguration clusterConfiguration) {
                System.out.println("Entered cluster:" + clusterConfiguration);
            }

            @Override
            public void joinedCluster(InstanceId instanceId, URI member) {
                System.out.println("Joined cluster:" + instanceId + " (at URI " + member + ")");
            }

            @Override
            public void leftCluster(InstanceId instanceId, URI member) {
                System.out.println("Left cluster:" + instanceId);
            }

            @Override
            public void leftCluster() {
                System.out.println("Left cluster");
            }

            @Override
            public void elected(String role, InstanceId instanceId, URI electedMember) {
                System.out.println(instanceId + " at URI " + electedMember + " was elected as " + role);
            }

            @Override
            public void unelected(String role, InstanceId instanceId, URI electedMember) {
                System.out.println(instanceId + " at URI " + electedMember + " was removed from " + role);
            }
        });
        Heartbeat heartbeat = server.newClient(Heartbeat.class);
        heartbeat.addHeartbeatListener(new HeartbeatListener() {

            @Override
            public void failed(InstanceId server) {
                System.out.println(server + " failed");
            }

            @Override
            public void alive(InstanceId server) {
                System.out.println(server + " alive");
            }
        });
        broadcast = server.newClient(AtomicBroadcast.class);
        broadcast.addAtomicBroadcastListener(new AtomicBroadcastListener() {

            @Override
            public void receive(Payload value) {
                try {
                    System.out.println(broadcastSerializer.receive(value));
                } catch (IOException | ClassNotFoundException e) {
                    e.printStackTrace();
                }
            }
        });
        life.start();
        String command;
        BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
        while (!(command = reader.readLine()).equals("quit")) {
            String[] arguments = command.split(" ");
            Method method = getCommandMethod(arguments[0]);
            if (method != null) {
                String[] realArgs = new String[arguments.length - 1];
                System.arraycopy(arguments, 1, realArgs, 0, realArgs.length);
                try {
                    method.invoke(this, (Object[]) realArgs);
                } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
                    e.printStackTrace();
                }
            }
        }
        cluster.leave();
    } finally {
        life.shutdown();
        System.out.println("Done");
    }
}
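The read loop at the end of start() dispatches console input reflectively: the first token selects a command method via getCommandMethod, and the remaining tokens become its String arguments; the cast to Object[] in invoke spreads them as individual parameters. Below is a self-contained sketch of that dispatch under assumptions: the greet method and the CommandDispatchSketch class are hypothetical stand-ins for the real command methods, and the lookup-by-name-and-arity loop approximates what getCommandMethod presumably does.

import java.lang.reflect.Method;

public class CommandDispatchSketch {
    // hypothetical command method: its two String parameters are filled from the tokens after the name
    public void greet(String name, String greeting) {
        System.out.println(greeting + ", " + name);
    }

    public static void main(String[] args) throws Exception {
        CommandDispatchSketch target = new CommandDispatchSketch();
        String command = "greet Neo4j hello";
        String[] tokens = command.split(" ");
        String[] realArgs = new String[tokens.length - 1];
        System.arraycopy(tokens, 1, realArgs, 0, realArgs.length);
        // resolve a public method by name and argument count
        Method method = null;
        for (Method candidate : target.getClass().getMethods()) {
            if (candidate.getName().equals(tokens[0]) && candidate.getParameterCount() == realArgs.length) {
                method = candidate;
                break;
            }
        }
        if (method != null) {
            // the cast to Object[] spreads realArgs as individual String arguments, as in the loop above
            method.invoke(target, (Object[]) realArgs);
        }
    }
}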
Use of org.neo4j.kernel.lifecycle.LifeSupport in project neo4j by neo4j.
The class NetworkSenderReceiverTest, method shouldSendAMessageFromAClientWhichIsReceivedByAServer:
@Test
public void shouldSendAMessageFromAClientWhichIsReceivedByAServer() throws Exception {
    // given
    CountDownLatch latch = new CountDownLatch(1);
    LifeSupport life = new LifeSupport();
    Server server1 = new Server(latch, MapUtil.stringMap(
            ClusterSettings.cluster_server.name(), "localhost:1234",
            ClusterSettings.server_id.name(), "1",
            ClusterSettings.initial_hosts.name(), "localhost:1234,localhost:1235"));
    life.add(server1);
    Server server2 = new Server(latch, MapUtil.stringMap(
            ClusterSettings.cluster_server.name(), "localhost:1235",
            ClusterSettings.server_id.name(), "2",
            ClusterSettings.initial_hosts.name(), "localhost:1234,localhost:1235"));
    life.add(server2);
    life.start();
    // when
    server1.process(Message.to(TestMessage.helloWorld, URI.create("cluster://127.0.0.1:1235"), "Hello World"));
    // then
    assertTrue(latch.await(5, TimeUnit.SECONDS));
    assertTrue("server1 should have processed the message", server1.processedMessage());
    assertTrue("server2 should have processed the message", server2.processedMessage());
    life.shutdown();
}
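The test synchronizes on a CountDownLatch that the receiving side counts down once the message has been processed; awaiting with a timeout keeps the test from hanging if delivery never happens. A minimal sketch of that hand-off, with a plain thread standing in for the networked server (the LatchHandOffSketch class is hypothetical):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchHandOffSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        // stands in for the server thread that processes the incoming message
        Thread receiver = new Thread(latch::countDown);
        receiver.start();
        // mirrors assertTrue(latch.await(5, TimeUnit.SECONDS)): false means nothing arrived in time
        boolean arrived = latch.await(5, TimeUnit.SECONDS);
        System.out.println("message processed: " + arrived);
    }
}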