Use of org.elasticsearch.env.NodeMetadata in project crate by crate.
From the class ElasticsearchNodeCommand, method createPersistedClusterStateService:
public static PersistedClusterStateService createPersistedClusterStateService(Settings settings, Path[] dataPaths) throws IOException {
    final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths);
    if (nodeMetadata == null) {
        throw new ElasticsearchException(NO_NODE_METADATA_FOUND_MSG);
    }
    String nodeId = nodeMetadata.nodeId();
    return new PersistedClusterStateService(
        dataPaths,
        nodeId,
        NAMED_X_CONTENT_REGISTRY,
        BigArrays.NON_RECYCLING_INSTANCE,
        new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
        () -> 0L,   // relative-time supplier stubbed out; there is no running node clock here
        true
    );
}
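This helper backs command-line tooling that inspects the on-disk cluster state without a running node. A minimal usage sketch, assuming an example data path (the path and the surrounding caller are hypothetical; loadBestOnDiskState is the same loader used in GatewayMetaState.start further down):

// Hypothetical caller: point at the node's data paths, then open the on-disk state.
Path[] dataPaths = new Path[] { Path.of("/var/lib/crate/nodes/0") }; // example path, not from the source
PersistedClusterStateService clusterStateService =
    createPersistedClusterStateService(Settings.EMPTY, dataPaths);

// loadBestOnDiskState() returns the freshest state found across the data paths.
PersistedClusterStateService.OnDiskState onDiskState = clusterStateService.loadBestOnDiskState();
System.out.println("currentTerm=" + onDiskState.currentTerm
    + " lastAcceptedVersion=" + onDiskState.lastAcceptedVersion);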
Use of org.elasticsearch.env.NodeMetadata in project crate by crate.
From the class PersistedClusterStateService, method nodeMetadata:
/**
 * Returns the node metadata for the given data paths, and checks that the node ids are unique.
 * @param dataPaths the data paths to scan
 * @return the node metadata, or {@code null} if no on-disk metadata was found
 */
@Nullable
public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException {
    String nodeId = null;
    Version version = null;
    for (final Path dataPath : dataPaths) {
        final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME);
        if (Files.exists(indexPath)) {
            try (DirectoryReader reader = DirectoryReader.open(new SimpleFSDirectory(indexPath))) {
                final Map<String, String> userData = reader.getIndexCommit().getUserData();
                assert userData.get(NODE_VERSION_KEY) != null;

                final String thisNodeId = userData.get(NODE_ID_KEY);
                assert thisNodeId != null;
                if (nodeId != null && nodeId.equals(thisNodeId) == false) {
                    // the metadata on this path belongs to a different node: fail the uniqueness check
                    throw new IllegalStateException("unexpected node ID in metadata, found [" + thisNodeId
                        + "] in [" + dataPath + "] but expected [" + nodeId + "]");
                } else if (nodeId == null) {
                    nodeId = thisNodeId;
                    version = Version.fromId(Integer.parseInt(userData.get(NODE_VERSION_KEY)));
                }
            } catch (IndexNotFoundException e) {
                LOGGER.debug(new ParameterizedMessage("no on-disk state at {}", indexPath), e);
            }
        }
    }
    if (nodeId == null) {
        return null;
    }
    return new NodeMetadata(nodeId, version);
}
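A short usage sketch for this scanner, assuming an example data path; nodeId() and nodeVersion() are the same accessors exercised by the assertions in Node.start below:

// Scan one or more data paths; null means no node metadata exists on disk yet.
Path[] dataPaths = new Path[] { Path.of("/var/lib/crate/nodes/0") }; // example path, not from the source
NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(dataPaths);
if (metadata == null) {
    throw new IllegalStateException("no node metadata found in " + Arrays.toString(dataPaths));
}
System.out.println("nodeId=" + metadata.nodeId() + " version=" + metadata.nodeVersion());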
Use of org.elasticsearch.env.NodeMetadata in project crate by crate.
From the class Node, method start:
/**
 * Start the node. If the node is already started, this method is a no-op.
 */
public Node start() throws NodeValidationException {
    if (!lifecycle.moveToStarted()) {
        return this;
    }
    logger.info("starting ...");
    pluginLifecycleComponents.forEach(LifecycleComponent::start);

    injector.getInstance(BlobService.class).start();
    injector.getInstance(DecommissioningService.class).start();
    injector.getInstance(NodeDisconnectJobMonitorService.class).start();
    injector.getInstance(JobsLogService.class).start();
    injector.getInstance(PostgresNetty.class).start();
    injector.getInstance(TasksService.class).start();
    injector.getInstance(Schemas.class).start();
    injector.getInstance(ArrayMapperService.class).start();
    injector.getInstance(DanglingArtifactsService.class).start();
    injector.getInstance(SslContextProviderService.class).start();
    injector.getInstance(MappingUpdatedAction.class).setClient(client);
    injector.getInstance(IndicesService.class).start();
    injector.getInstance(IndicesClusterStateService.class).start();
    injector.getInstance(SnapshotsService.class).start();
    injector.getInstance(SnapshotShardsService.class).start();
    nodeService.getMonitorService().start();

    final ClusterService clusterService = injector.getInstance(ClusterService.class);
    final NodeConnectionsService nodeConnectionsService = injector.getInstance(NodeConnectionsService.class);
    nodeConnectionsService.start();
    clusterService.setNodeConnectionsService(nodeConnectionsService);

    injector.getInstance(GatewayService.class).start();
    Discovery discovery = injector.getInstance(Discovery.class);
    clusterService.getMasterService().setClusterStatePublisher(discovery::publish);

    HttpServerTransport httpServerTransport = injector.getInstance(HttpServerTransport.class);
    httpServerTransport.start();
    // CRATE_PATCH: add http publish address to the discovery node
    TransportAddress publishAddress = httpServerTransport.info().address().publishAddress();
    localNodeFactory.httpPublishAddress = publishAddress.getAddress() + ':' + publishAddress.getPort();

    // Start the transport service now so the publish address will be added to the local disco node in ClusterService
    TransportService transportService = injector.getInstance(TransportService.class);
    transportService.start();
    assert localNodeFactory.getNode() != null;
    assert transportService.getLocalNode().equals(localNodeFactory.getNode())
        : "transportService has a different local node than the factory provided";
    injector.getInstance(PeerRecoverySourceService.class).start();

    // Load (and maybe upgrade) the metadata stored on disk
    final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class);
    gatewayMetaState.start(
        settings(),
        transportService,
        clusterService,
        injector.getInstance(MetaStateService.class),
        injector.getInstance(MetadataIndexUpgradeService.class),
        injector.getInstance(MetadataUpgrader.class),
        injector.getInstance(PersistedClusterStateService.class));
    if (Assertions.ENABLED) {
        try {
            assert injector.getInstance(MetaStateService.class).loadFullState().v1().isEmpty();
            final NodeMetadata nodeMetaData = NodeMetadata.FORMAT.loadLatestState(
                logger, NamedXContentRegistry.EMPTY, nodeEnvironment.nodeDataPaths());
            assert nodeMetaData != null;
            assert nodeMetaData.nodeVersion().equals(Version.CURRENT);
            assert nodeMetaData.nodeId().equals(localNodeFactory.getNode().getId());
        } catch (IOException e) {
            assert false : e;
        }
    }
    // we load the global state here (the persistent part of the cluster state stored on disk) to
    // pass it to the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state.
    final Metadata onDiskMetadata = gatewayMetaState.getPersistedState().getLastAcceptedState().metadata();
    // this is never null
    assert onDiskMetadata != null : "metadata is null but shouldn't";

    validateNodeBeforeAcceptingRequests(
        transportService.boundAddress(),
        pluginsService.filterPlugins(Plugin.class).stream()
            .flatMap(p -> p.getBootstrapChecks().stream())
            .collect(Collectors.toList()));

    // start after transport service so the local disco node is known
    // start before cluster service so that it can set initial state on ClusterApplierService
    discovery.start();
    clusterService.start();
    assert clusterService.localNode().equals(localNodeFactory.getNode())
        : "clusterService has a different local node than the factory provided";
    transportService.acceptIncomingRequests();
    discovery.startInitialJoin();

    final TimeValue initialStateTimeout = INITIAL_STATE_TIMEOUT_SETTING.get(settings);
    configureNodeAndClusterIdStateListener(clusterService);
    if (initialStateTimeout.millis() > 0) {
        final ThreadPool thread = injector.getInstance(ThreadPool.class);
        ClusterState clusterState = clusterService.state();
        ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger);
        if (clusterState.nodes().getMasterNodeId() == null) {
            logger.debug("waiting to join the cluster. timeout [{}]", initialStateTimeout);
            final CountDownLatch latch = new CountDownLatch(1);
            observer.waitForNextChange(new ClusterStateObserver.Listener() {

                @Override
                public void onNewClusterState(ClusterState state) {
                    latch.countDown();
                }

                @Override
                public void onClusterServiceClose() {
                    latch.countDown();
                }

                @Override
                public void onTimeout(TimeValue timeout) {
                    logger.warn("timed out while waiting for initial discovery state - timeout: {}", initialStateTimeout);
                    latch.countDown();
                }
            }, state -> state.nodes().getMasterNodeId() != null, initialStateTimeout);
            try {
                latch.await();
            } catch (InterruptedException e) {
                throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state");
            }
        }
    }
    if (WRITE_PORTS_FILE_SETTING.get(settings)) {
        TransportService transport = injector.getInstance(TransportService.class);
        writePortsFile("transport", transport.boundAddress());
        HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
        writePortsFile("http", http.boundAddress());
    }
    logger.info("started");
    pluginsService.filterPlugins(ClusterPlugin.class).forEach(ClusterPlugin::onNodeStarted);
    return this;
}
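For orientation, start() is reached after constructing a Node. A minimal embedding sketch; the Environment wiring and the exact constructor shape are assumptions, not taken from this source:

// Hypothetical embedding: build a node, start it, and close it on shutdown.
// Node implements Closeable in upstream Elasticsearch; the constructor shown is an assumption.
try (Node node = new Node(environment)) { // 'environment' wiring elided for this sketch
    node.start(); // no-op if the node is already started (see above)
    // ... the node accepts requests once start() returns ...
}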
Use of org.elasticsearch.env.NodeMetadata in project crate by crate.
From the class GatewayMetaState, method start:
public void start(Settings settings,
                  TransportService transportService,
                  ClusterService clusterService,
                  MetaStateService metaStateService,
                  MetadataIndexUpgradeService metadataIndexUpgradeService,
                  MetadataUpgrader metadataUpgrader,
                  PersistedClusterStateService persistedClusterStateService) {
    assert persistedState.get() == null : "should only start once, but already have " + persistedState.get();
    if (DiscoveryNode.isMasterEligibleNode(settings) || DiscoveryNode.isDataNode(settings)) {
        try {
            final PersistedClusterStateService.OnDiskState onDiskState = persistedClusterStateService.loadBestOnDiskState();
            Metadata metadata = onDiskState.metadata;
            long lastAcceptedVersion = onDiskState.lastAcceptedVersion;
            long currentTerm = onDiskState.currentTerm;
            if (onDiskState.empty()) {
                final Tuple<Manifest, Metadata> legacyState = metaStateService.loadFullState();
                if (legacyState.v1().isEmpty() == false) {
                    metadata = legacyState.v2();
                    lastAcceptedVersion = legacyState.v1().getClusterStateVersion();
                    currentTerm = legacyState.v1().getCurrentTerm();
                }
            }
            PersistedState persistedState = null;
            boolean success = false;
            try {
                final ClusterState clusterState = prepareInitialClusterState(
                    transportService,
                    clusterService,
                    ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings))
                        .version(lastAcceptedVersion)
                        .metadata(upgradeMetadataForNode(metadata, metadataIndexUpgradeService, metadataUpgrader))
                        .build());
                if (DiscoveryNode.isMasterEligibleNode(settings)) {
                    persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState);
                } else {
                    persistedState = new AsyncLucenePersistedState(
                        settings,
                        transportService.getThreadPool(),
                        new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState));
                }
                if (DiscoveryNode.isDataNode(settings)) {
                    // unreference legacy files (only keep them for dangling indices functionality)
                    metaStateService.unreferenceAll();
                } else {
                    // delete legacy files
                    metaStateService.deleteAll();
                }
                // write legacy node metadata to prevent accidental downgrades from spawning empty cluster state
                NodeMetadata.FORMAT.writeAndCleanup(
                    new NodeMetadata(persistedClusterStateService.getNodeId(), Version.CURRENT),
                    persistedClusterStateService.getDataPaths());
                success = true;
            } finally {
                if (success == false) {
                    IOUtils.closeWhileHandlingException(persistedState);
                }
            }
            this.persistedState.set(persistedState);
        } catch (IOException e) {
            throw new ElasticsearchException("failed to load metadata", e);
        }
    } else {
        final long currentTerm = 0L;
        final ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)).build();
        if (persistedClusterStateService.getDataPaths().length > 0) {
            // write an empty cluster state just so that we have a persistent node id; there is no need to write
            // out global metadata with a cluster uuid, as coordinating-only nodes do not snap into a cluster
            // since they carry no state
            try (PersistedClusterStateService.Writer persistenceWriter = persistedClusterStateService.createWriter()) {
                persistenceWriter.writeFullStateAndCommit(currentTerm, clusterState);
            } catch (IOException e) {
                throw new ElasticsearchException("failed to load metadata", e);
            }
            try {
                // delete legacy cluster state files
                metaStateService.deleteAll();
                // write legacy node metadata to prevent downgrades from spawning empty cluster state
                NodeMetadata.FORMAT.writeAndCleanup(
                    new NodeMetadata(persistedClusterStateService.getNodeId(), Version.CURRENT),
                    persistedClusterStateService.getDataPaths());
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
        persistedState.set(new InMemoryPersistedState(currentTerm, clusterState));
    }
}
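Both branches finish by persisting NodeMetadata through NodeMetadata.FORMAT, which is exactly what the assertion block in Node.start above reads back. A condensed sketch of that round-trip check, where logger, dataPaths, and expectedNodeId are stand-ins for this illustration:

// Read back the NodeMetadata that start() just wrote, mirroring the assertions in Node.start.
NodeMetadata nodeMetadata = NodeMetadata.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, dataPaths);
assert nodeMetadata != null;
assert nodeMetadata.nodeVersion().equals(Version.CURRENT); // written with Version.CURRENT above
assert nodeMetadata.nodeId().equals(expectedNodeId); // should match persistedClusterStateService.getNodeId()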