Use of org.neo4j.kernel.lifecycle.LifeSupport in project neo4j by neo4j.
In the class RecoveryTest, method shouldRecoverExistingData:
@Test
public void shouldRecoverExistingData() throws Exception {
    final PhysicalLogFiles logFiles = new PhysicalLogFiles(directory.directory(), "log", fileSystemRule.get());
    File file = logFiles.getLogFileForVersion(logVersion);
    writeSomeData(file, new Visitor<Pair<LogEntryWriter, Consumer<LogPositionMarker>>, IOException>() {
        @Override
        public boolean visit(Pair<LogEntryWriter, Consumer<LogPositionMarker>> pair) throws IOException {
            LogEntryWriter writer = pair.first();
            Consumer<LogPositionMarker> consumer = pair.other();
            LogPositionMarker marker = new LogPositionMarker();
            // last committed tx
            consumer.accept(marker);
            LogPosition lastCommittedTxPosition = marker.newPosition();
            writer.writeStartEntry(0, 1, 2L, 3L, new byte[0]);
            lastCommittedTxStartEntry = new LogEntryStart(0, 1, 2L, 3L, new byte[0], lastCommittedTxPosition);
            writer.writeCommitEntry(4L, 5L);
            lastCommittedTxCommitEntry = new OnePhaseCommit(4L, 5L);
            // check point pointing to the previously committed transaction
            writer.writeCheckPointEntry(lastCommittedTxPosition);
            expectedCheckPointEntry = new CheckPoint(lastCommittedTxPosition);
            // tx committed after checkpoint
            consumer.accept(marker);
            writer.writeStartEntry(0, 1, 6L, 4L, new byte[0]);
            expectedStartEntry = new LogEntryStart(0, 1, 6L, 4L, new byte[0], marker.newPosition());
            writer.writeCommitEntry(5L, 7L);
            expectedCommitEntry = new OnePhaseCommit(5L, 7L);
            return true;
        }
    });
    LifeSupport life = new LifeSupport();
    Recovery.Monitor monitor = mock(Recovery.Monitor.class);
    final AtomicBoolean recoveryRequired = new AtomicBoolean();
    try {
        StorageEngine storageEngine = mock(StorageEngine.class);
        final LogEntryReader<ReadableClosablePositionAwareChannel> reader = new VersionAwareLogEntryReader<>();
        LatestCheckPointFinder finder = new LatestCheckPointFinder(logFiles, fileSystemRule.get(), reader);
        LogHeaderCache logHeaderCache = new LogHeaderCache(10);
        TransactionMetadataCache metadataCache = new TransactionMetadataCache(100);
        LogFile logFile = life.add(new PhysicalLogFile(fileSystemRule.get(), logFiles, 50,
                () -> transactionIdStore.getLastCommittedTransactionId(), logVersionRepository,
                mock(PhysicalLogFile.Monitor.class), logHeaderCache));
        LogicalTransactionStore txStore = new PhysicalLogicalTransactionStore(logFile, metadataCache, reader);
        life.add(new Recovery(new DefaultRecoverySPI(storageEngine, logFiles, fileSystemRule.get(),
                logVersionRepository, finder, transactionIdStore, txStore, NO_MONITOR) {
            private int nr = 0;

            @Override
            public Visitor<CommittedTransactionRepresentation, Exception> startRecovery() {
                recoveryRequired.set(true);
                final Visitor<CommittedTransactionRepresentation, Exception> actual = super.startRecovery();
                return new Visitor<CommittedTransactionRepresentation, Exception>() {
                    @Override
                    public boolean visit(CommittedTransactionRepresentation tx) throws Exception {
                        actual.visit(tx);
                        switch (nr++) {
                            case 0:
                                assertEquals(lastCommittedTxStartEntry, tx.getStartEntry());
                                assertEquals(lastCommittedTxCommitEntry, tx.getCommitEntry());
                                break;
                            case 1:
                                assertEquals(expectedStartEntry, tx.getStartEntry());
                                assertEquals(expectedCommitEntry, tx.getCommitEntry());
                                break;
                            default:
                                fail("Too many recovered transactions");
                        }
                        return false;
                    }
                };
            }
        }, monitor));
        life.start();
        InOrder order = inOrder(monitor);
        order.verify(monitor, times(1)).recoveryRequired(any(LogPosition.class));
        order.verify(monitor, times(1)).recoveryCompleted(2);
        assertTrue(recoveryRequired.get());
    } finally {
        life.shutdown();
    }
}
Use of org.neo4j.kernel.lifecycle.LifeSupport in project neo4j by neo4j.
In the class ImportTool, method doImport:
public static void doImport(PrintStream out, PrintStream err, File storeDir, File logsDir, File badFile,
        FileSystemAbstraction fs, Collection<Option<File[]>> nodesFiles, Collection<Option<File[]>> relationshipsFiles,
        boolean enableStacktrace, Input input, Config dbConfig, OutputStream badOutput,
        org.neo4j.unsafe.impl.batchimport.Configuration configuration) throws IOException {
    boolean success;
    LifeSupport life = new LifeSupport();
    LogService logService = life.add(StoreLogService.inLogsDirectory(fs, logsDir));
    life.start();
    //TODO: add file watcher here?
    BatchImporter importer = new ParallelBatchImporter(storeDir, fs, configuration, logService,
            ExecutionMonitors.defaultVisible(), dbConfig);
    printOverview(storeDir, nodesFiles, relationshipsFiles, configuration, out);
    success = false;
    try {
        importer.doImport(input);
        success = true;
    } catch (Exception e) {
        throw andPrintError("Import error", e, enableStacktrace, err);
    } finally {
        Collector collector = input.badCollector();
        int numberOfBadEntries = collector.badEntries();
        collector.close();
        badOutput.close();
        if (numberOfBadEntries > 0) {
            out.println("There were bad entries which were skipped and logged into " + badFile.getAbsolutePath());
        }
        life.shutdown();
        if (!success) {
            try {
                StoreFile.fileOperation(FileOperation.DELETE, fs, storeDir, null,
                        Iterables.<StoreFile, StoreFile>iterable(StoreFile.values()), false,
                        ExistingTargetStrategy.FAIL, StoreFileType.values());
            } catch (IOException e) {
                err.println("Unable to delete store files after an aborted import " + e);
                if (enableStacktrace) {
                    e.printStackTrace();
                }
            }
        }
    }
}
Use of org.neo4j.kernel.lifecycle.LifeSupport in project neo4j by neo4j.
In the class NeoStoreDataSource, method start:
@Override
public void start() throws IOException {
    dependencies = new Dependencies();
    life = new LifeSupport();
    schemaIndexProvider = dependencyResolver.resolveDependency(SchemaIndexProvider.class,
            HighestSelectionStrategy.getInstance());
    labelScanStoreProvider = dependencyResolver.resolveDependency(LabelScanStoreProvider.class,
            new NamedLabelScanStoreSelectionStrategy(config));
    dependencyResolver.resolveDependency(LabelScanStoreProvider.class,
            new DeleteStoresFromOtherLabelScanStoreProviders(labelScanStoreProvider));
    IndexConfigStore indexConfigStore = new IndexConfigStore(storeDir, fs);
    dependencies.satisfyDependency(lockService);
    dependencies.satisfyDependency(indexConfigStore);
    life.add(indexConfigStore);
    // Monitor listeners
    LoggingLogFileMonitor loggingLogMonitor = new LoggingLogFileMonitor(msgLog);
    monitors.addMonitorListener(loggingLogMonitor);
    life.add(new Delegate(Lifecycles.multiple(indexProviders.values())));
    // Upgrade the store before we begin
    RecordFormats formats = selectStoreFormats(config, storeDir, fs, pageCache, logService);
    upgradeStore(formats);
    // Build all modules and their services
    StorageEngine storageEngine = null;
    try {
        UpdateableSchemaState updateableSchemaState = new KernelSchemaStateStore(logProvider);
        SynchronizedArrayIdOrderingQueue legacyIndexTransactionOrdering = new SynchronizedArrayIdOrderingQueue(20);
        storageEngine = buildStorageEngine(propertyKeyTokenHolder, labelTokens, relationshipTypeTokens,
                legacyIndexProviderLookup, indexConfigStore, updateableSchemaState::clear,
                legacyIndexTransactionOrdering);
        LogEntryReader<ReadableClosablePositionAwareChannel> logEntryReader =
                new VersionAwareLogEntryReader<>(storageEngine.commandReaderFactory());
        TransactionIdStore transactionIdStore = dependencies.resolveDependency(TransactionIdStore.class);
        LogVersionRepository logVersionRepository = dependencies.resolveDependency(LogVersionRepository.class);
        NeoStoreTransactionLogModule transactionLogModule = buildTransactionLogs(storeDir, config, logProvider,
                scheduler, fs, storageEngine, logEntryReader, legacyIndexTransactionOrdering,
                transactionIdStore, logVersionRepository);
        transactionLogModule.satisfyDependencies(dependencies);
        buildRecovery(fs, transactionIdStore, logVersionRepository, monitors.newMonitor(Recovery.Monitor.class),
                monitors.newMonitor(PositionToRecoverFrom.Monitor.class), transactionLogModule.logFiles(),
                startupStatistics, storageEngine, logEntryReader, transactionLogModule.logicalTransactionStore());
        // At the time of writing this comes from the storage engine (IndexStoreView)
        PropertyAccessor propertyAccessor = dependencies.resolveDependency(PropertyAccessor.class);
        final NeoStoreKernelModule kernelModule = buildKernel(transactionLogModule.transactionAppender(),
                dependencies.resolveDependency(IndexingService.class), storageEngine.storeReadLayer(),
                updateableSchemaState, dependencies.resolveDependency(LabelScanStore.class), storageEngine,
                indexConfigStore, transactionIdStore, availabilityGuard, clock, propertyAccessor);
        kernelModule.satisfyDependencies(dependencies);
        // Do these assignments last so that we can ensure no cyclical dependencies exist
        this.storageEngine = storageEngine;
        this.transactionLogModule = transactionLogModule;
        this.kernelModule = kernelModule;
        dependencies.satisfyDependency(this);
        dependencies.satisfyDependency(updateableSchemaState);
        dependencies.satisfyDependency(storageEngine.storeReadLayer());
        dependencies.satisfyDependency(logEntryReader);
        dependencies.satisfyDependency(storageEngine);
    } catch (Throwable e) {
        // Something unexpected happened during startup
        msgLog.warn("Exception occurred while setting up store modules. Attempting to close things down.", e);
        try {
            // Close the neostore, so that locks are released properly
            if (storageEngine != null) {
                storageEngine.forceClose();
            }
        } catch (Exception closeException) {
            msgLog.error("Couldn't close neostore after startup failure", closeException);
        }
        throw Exceptions.launderedException(e);
    }
    // NOTE: please make sure this is performed after having added everything to the life, in fact we would like
    // to perform the checkpointing as first step when the life is shutdown.
    life.add(lifecycleToTriggerCheckPointOnShutdown());
    try {
        life.start();
    } catch (Throwable e) {
        // Something unexpected happened during startup
        msgLog.warn("Exception occurred while starting the datasource. Attempting to close things down.", e);
        try {
            life.shutdown();
            // Close the neostore, so that locks are released properly
            storageEngine.forceClose();
        } catch (Exception closeException) {
            msgLog.error("Couldn't close neostore after startup failure", closeException);
        }
        throw Exceptions.launderedException(e);
    }
    /*
     * At this point recovery has completed and the datasource is ready for use. Whatever panic might have
     * happened before has been healed. So we can safely set the kernel health to ok.
     * This right now has any real effect only in the case of internal restarts (for example, after a store copy
     * in the case of HA). Standalone instances will have to be restarted by the user, as is proper for all
     * kernel panics.
     */
    databaseHealth.healed();
}
Use of org.neo4j.kernel.lifecycle.LifeSupport in project neo4j by neo4j.
In the class SwitchToSlave, method executeConsistencyChecks:
private boolean executeConsistencyChecks(TransactionIdStore txIdStore, URI masterUri, URI me, StoreId storeId,
        CancellationRequest cancellationRequest) throws Throwable {
    LifeSupport consistencyCheckLife = new LifeSupport();
    try {
        MasterClient masterClient = newMasterClient(masterUri, me, storeId, consistencyCheckLife);
        consistencyCheckLife.start();
        if (cancellationRequest.cancellationRequested()) {
            return false;
        }
        checkDataConsistency(masterClient, txIdStore, storeId, masterUri, me, cancellationRequest);
    } finally {
        consistencyCheckLife.shutdown();
    }
    return true;
}
Use of org.neo4j.kernel.lifecycle.LifeSupport in project neo4j by neo4j.
In the class ArbiterBootstrapperIT, method before:
@Before
public void before() throws Exception {
    directory = testDirectory.directory("temp");
    life = new LifeSupport();
    // So that the clients get started as they are added
    life.start();
    clients = new ClusterClient[2];
    for (int i = 1; i <= clients.length; i++) {
        Map<String, String> config = stringMap();
        config.put(cluster_server.name(), ":" + (5000 + i));
        config.put(server_id.name(), "" + i);
        config.put(initial_hosts.name(), ":5001");
        LifeSupport moduleLife = new LifeSupport();
        ClusterClientModule clusterClientModule = new ClusterClientModule(moduleLife, new Dependencies(),
                new Monitors(), Config.embeddedDefaults(config), NullLogService.getInstance(),
                new ServerIdElectionCredentialsProvider());
        ClusterClient client = clusterClientModule.clusterClient;
        CountDownLatch latch = new CountDownLatch(1);
        client.addClusterListener(new ClusterListener.Adapter() {
            @Override
            public void enteredCluster(ClusterConfiguration configuration) {
                latch.countDown();
                client.removeClusterListener(this);
            }
        });
        life.add(moduleLife);
        clients[i - 1] = client;
        assertTrue("Didn't join the cluster", latch.await(20, SECONDS));
    }
}
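All of the examples above follow the same lifecycle pattern: components are registered with add(), brought up together with start(), and torn down with shutdown(), typically from a finally block so that cleanup runs even when startup or the guarded work fails. The sketch below shows that pattern in isolation, assuming only the LifeSupport and LifecycleAdapter classes from org.neo4j.kernel.lifecycle; the anonymous component and its printed messages are illustrative and do not appear in the neo4j sources above.

import org.neo4j.kernel.lifecycle.LifeSupport;
import org.neo4j.kernel.lifecycle.LifecycleAdapter;

public class LifeSupportUsageSketch {
    public static void main(String[] args) {
        LifeSupport life = new LifeSupport();

        // add() returns the registered instance, so it can be assigned and used directly
        // (the same idiom as life.add(new PhysicalLogFile(...)) in the RecoveryTest example)
        LifecycleAdapter component = life.add(new LifecycleAdapter() {
            @Override
            public void start() {
                System.out.println("component started");   // illustrative only
            }

            @Override
            public void shutdown() {
                System.out.println("component shut down"); // illustrative only
            }
        });

        try {
            // start() drives every registered component through its init/start phases
            life.start();
            // ... use the started components here ...
        } finally {
            // shutdown() tears the registered components down again, even if start() failed partway
            life.shutdown();
        }
    }
}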