Use of org.neo4j.kernel.lifecycle.Lifecycle in project neo4j by neo4j.
From the class HighAvailabilitySlavesTest, method shouldSupportConcurrentConsumptionOfSlaves.
@Test
public void shouldSupportConcurrentConsumptionOfSlaves() throws Exception {
    // Given
    LogEntryReader<ReadableClosablePositionAwareChannel> logEntryReader = new VersionAwareLogEntryReader<>();
    HighAvailabilitySlaves haSlaves = new HighAvailabilitySlaves(clusterMembersOfSize(1000), mock(Cluster.class),
            new DefaultSlaveFactory(NullLogProvider.getInstance(), new Monitors(), 42, Suppliers.singleton(logEntryReader)),
            new HostnamePort(null, 0));

    // When
    ExecutorService executor = Executors.newFixedThreadPool(5);
    for (int i = 0; i < 5; i++) {
        executor.submit(slavesConsumingRunnable(haSlaves));
    }
    executor.shutdown();
    executor.awaitTermination(30, SECONDS);

    // Then
    int slavesCount = 0;
    LifeSupport life = ReflectionUtil.getPrivateField(haSlaves, "life", LifeSupport.class);
    for (Lifecycle lifecycle : life.getLifecycleInstances()) {
        if (lifecycle instanceof Slave) {
            slavesCount++;
        }
    }
    // One instance is master
    assertEquals("Unexpected number of slaves", 1000 - 1, slavesCount);
}
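For context, the instances counted above implement org.neo4j.kernel.lifecycle.Lifecycle, the four-phase contract driven by LifeSupport. The sketch below reflects the interface as commonly found in the Neo4j codebase; check the exact source for the version in use.

public interface Lifecycle {
    void init() throws Throwable;
    void start() throws Throwable;
    void stop() throws Throwable;
    void shutdown() throws Throwable;
}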
Use of org.neo4j.kernel.lifecycle.Lifecycle in project neo4j by neo4j.
From the class BoltKernelExtension, method newInstance.
@Override
public Lifecycle newInstance(KernelContext context, Dependencies dependencies) throws Throwable {
    Config config = dependencies.config();
    GraphDatabaseService gdb = dependencies.db();
    GraphDatabaseAPI api = (GraphDatabaseAPI) gdb;
    LogService logService = dependencies.logService();
    Clock clock = dependencies.clock();
    Log log = logService.getInternalLog(WorkerFactory.class);
    LifeSupport life = new LifeSupport();
    JobScheduler scheduler = dependencies.scheduler();
    InternalLoggerFactory.setDefaultFactory(new Netty4LoggerFactory(logService.getInternalLogProvider()));
    Authentication authentication = authentication(dependencies.authManager(), dependencies.userManagerSupplier());
    BoltFactory boltFactory = life.add(new BoltFactoryImpl(api, dependencies.usageData(), logService,
            dependencies.txBridge(), authentication, dependencies.sessionTracker(), config));
    WorkerFactory workerFactory = createWorkerFactory(boltFactory, scheduler, dependencies, logService, clock);
    List<ProtocolInitializer> connectors = config.enabledBoltConnectors().stream().map((connConfig) -> {
        ListenSocketAddress listenAddress = config.get(connConfig.listen_address);
        AdvertisedSocketAddress advertisedAddress = config.get(connConfig.advertised_address);
        SslContext sslCtx;
        boolean requireEncryption;
        final BoltConnector.EncryptionLevel encryptionLevel = config.get(connConfig.encryption_level);
        switch (encryptionLevel) {
            case REQUIRED:
                // Encrypted connections are mandatory, a self-signed certificate may be generated.
                requireEncryption = true;
                sslCtx = createSslContext(config, log, advertisedAddress);
                break;
            case OPTIONAL:
                // Encrypted connections are optional, a self-signed certificate may be generated.
                requireEncryption = false;
                sslCtx = createSslContext(config, log, advertisedAddress);
                break;
            case DISABLED:
                // Encryption is turned off, no self-signed certificate will be generated.
                requireEncryption = false;
                sslCtx = null;
                break;
            default:
                // In the unlikely event that we happen to fall through to the default option here,
                // there is a mismatch between the BoltConnector.EncryptionLevel enum and the options
                // handled in this switch statement. In this case, we'll log a warning and default to
                // disabling encryption, since this mirrors the functionality introduced in 3.0.
                log.warn(format("Unhandled encryption level %s - assuming DISABLED.", encryptionLevel.name()));
                requireEncryption = false;
                sslCtx = null;
                break;
        }
        final Map<Long, BiFunction<Channel, Boolean, BoltProtocol>> versions = newVersions(logService, workerFactory);
        return new SocketTransport(listenAddress, sslCtx, requireEncryption, logService.getInternalLogProvider(), versions);
    }).collect(toList());
    if (connectors.size() > 0 && !config.get(GraphDatabaseSettings.disconnected)) {
        life.add(new NettyServer(scheduler.threadFactory(boltNetworkIO), connectors));
        log.info("Bolt Server extension loaded.");
        for (ProtocolInitializer connector : connectors) {
            logService.getUserLog(WorkerFactory.class).info("Bolt enabled on %s.", connector.address());
        }
    }
    return life;
}
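The method above returns a LifeSupport, which is itself a Lifecycle that forwards each phase to the components registered via life.add(...). A minimal sketch of how such a composite is driven (the anonymous component is hypothetical; LifecycleAdapter provides no-op defaults to override selectively):

LifeSupport life = new LifeSupport();
life.add(new LifecycleAdapter() {
    @Override
    public void start() throws Throwable {
        // start-up work for one component goes here
    }
});
life.init();     // init every registered instance, in registration order
life.start();    // then start them
life.stop();     // stop in reverse order
life.shutdown(); // finally shut everything down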
Use of org.neo4j.kernel.lifecycle.Lifecycle in project neo4j by neo4j.
From the class KernelExtensions, method init.
@Override
public void init() throws Throwable {
    for (KernelExtensionFactory kernelExtensionFactory : kernelExtensionFactories) {
        Object kernelExtensionDependencies = getKernelExtensionDependencies(kernelExtensionFactory);
        try {
            Lifecycle dependency = kernelExtensionFactory.newInstance(kernelContext, kernelExtensionDependencies);
            Objects.requireNonNull(dependency, kernelExtensionFactory.toString() + " returned a null KernelExtension");
            life.add(dependencies.satisfyDependency(dependency));
        } catch (UnsatisfiedDependencyException e) {
            unsatisfiedDepencyStrategy.handle(kernelExtensionFactory, e);
        }
    }
    life.init();
}
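Each kernelExtensionFactory.newInstance(...) call above is expected to return a Lifecycle, which is then registered with the LifeSupport. A hedged sketch of such a factory, assuming the 3.x-style KernelExtensionFactory API shown in the BoltKernelExtension example; the class name, extension key and dependency interface below are illustrative, not taken from the Neo4j source:

public class MyExtensionFactory extends KernelExtensionFactory<MyExtensionFactory.Dependencies> {
    public interface Dependencies {
        LogService logService(); // resolved from the kernel's dependency resolver
    }

    public MyExtensionFactory() {
        super("my-extension");
    }

    @Override
    public Lifecycle newInstance(KernelContext context, Dependencies dependencies) {
        // LifecycleAdapter supplies no-op init/start/stop/shutdown to override selectively.
        return new LifecycleAdapter() {
            @Override
            public void init() {
                dependencies.logService().getInternalLog(MyExtensionFactory.class).info("my-extension initialised");
            }
        };
    }
}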
Use of org.neo4j.kernel.lifecycle.Lifecycle in project neo4j by neo4j.
From the class AbstractExtensions, method init.
@Override
public void init() {
    for (ExtensionFactory<?> extensionFactory : extensionFactories) {
        try {
            Object extensionDependencies = getExtensionDependencies(extensionFactory);
            Lifecycle dependency = newInstance(extensionContext, extensionFactory, extensionDependencies);
            Objects.requireNonNull(dependency, extensionFactory + " returned a null extension.");
            life.add(dependencies.satisfyDependency(dependency));
        } catch (UnsatisfiedDependencyException exception) {
            extensionFailureStrategy.handle(extensionFactory, exception);
        } catch (Throwable throwable) {
            extensionFailureStrategy.handle(extensionFactory, throwable);
        }
    }
    life.init();
}
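AbstractExtensions is the newer counterpart of KernelExtensions and consumes ExtensionFactory instances instead. A rough sketch of such a factory, under the assumption that the newer API passes an ExtensionContext to newInstance(...); the class below is illustrative only and exact signatures may differ between Neo4j versions:

public class MyExtensionFactory extends ExtensionFactory<MyExtensionFactory.Dependencies> {
    public interface Dependencies {
    }

    public MyExtensionFactory() {
        super("my-extension");
    }

    @Override
    public Lifecycle newInstance(ExtensionContext context, Dependencies dependencies) {
        // Returning a plain LifecycleAdapter registers a no-op extension with the LifeSupport above.
        return new LifecycleAdapter();
    }
}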
Use of org.neo4j.kernel.lifecycle.Lifecycle in project neo4j by neo4j.
From the class TransactionLogsRecoveryTest, method shouldInitSchemaLifeWhenRecoveryNotRequired.
@Test
void shouldInitSchemaLifeWhenRecoveryNotRequired() throws Exception {
    Lifecycle schemaLife = mock(Lifecycle.class);
    RecoveryService recoveryService = mock(RecoveryService.class);
    when(recoveryService.getRecoveryStartInformation()).thenReturn(NO_RECOVERY_REQUIRED);
    CorruptedLogsTruncator logPruner = new CorruptedLogsTruncator(storeDir, logFiles, fileSystem, INSTANCE);
    RecoveryMonitor monitor = mock(RecoveryMonitor.class);
    TransactionLogsRecovery logsRecovery = new TransactionLogsRecovery(recoveryService, logPruner, schemaLife,
            monitor, ProgressReporter.SILENT, true, EMPTY_CHECKER, NULL);

    logsRecovery.init();

    verify(monitor, never()).recoveryRequired(any());
    verify(schemaLife).init();
}
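The verification passes because TransactionLogsRecovery is itself a Lifecycle that wraps the schema lifecycle. A simplified sketch of the no-recovery path, given as an assumption for illustration rather than the actual implementation:

public void init() throws Throwable {
    RecoveryStartInformation recoveryStartInformation = recoveryService.getRecoveryStartInformation();
    if (!recoveryStartInformation.isRecoveryRequired()) {
        // Nothing to replay: just initialise the wrapped schema lifecycle and return,
        // so the monitor never sees recoveryRequired(...).
        schemaLife.init();
        return;
    }
    // ... otherwise replay the transaction logs, reporting progress and notifying the monitor.
}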