Use of org.elasticsearch.discovery.Discovery in project elasticsearch by elastic.
The class Node, method close().
// During concurrent close() calls we want to make sure that all of them return after the node has completed its shutdown cycle.
// If not, the shutdown hook that is added in Bootstrap#setup() will be useless: close() might not be executed, in case another
// (for example API) call to close() has already set some lifecycles to stopped. In this case the process will be terminated
// even if the first call to close() has not finished yet.
@Override
public synchronized void close() throws IOException {
    if (lifecycle.started()) {
        stop();
    }
    if (!lifecycle.moveToClosed()) {
        return;
    }
    Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
    logger.info("closing ...");
    List<Closeable> toClose = new ArrayList<>();
    StopWatch stopWatch = new StopWatch("node_close");
    toClose.add(() -> stopWatch.start("tribe"));
    toClose.add(injector.getInstance(TribeService.class));
    toClose.add(() -> stopWatch.stop().start("node_service"));
    toClose.add(injector.getInstance(NodeService.class));
    toClose.add(() -> stopWatch.stop().start("http"));
    if (NetworkModule.HTTP_ENABLED.get(settings)) {
        toClose.add(injector.getInstance(HttpServerTransport.class));
    }
    toClose.add(() -> stopWatch.stop().start("snapshot_service"));
    toClose.add(injector.getInstance(SnapshotsService.class));
    toClose.add(injector.getInstance(SnapshotShardsService.class));
    toClose.add(() -> stopWatch.stop().start("client"));
    Releasables.close(injector.getInstance(Client.class));
    toClose.add(() -> stopWatch.stop().start("indices_cluster"));
    toClose.add(injector.getInstance(IndicesClusterStateService.class));
    toClose.add(() -> stopWatch.stop().start("indices"));
    toClose.add(injector.getInstance(IndicesService.class));
    // close filter/fielddata caches after indices
    toClose.add(injector.getInstance(IndicesStore.class));
    toClose.add(() -> stopWatch.stop().start("routing"));
    toClose.add(injector.getInstance(RoutingService.class));
    toClose.add(() -> stopWatch.stop().start("cluster"));
    toClose.add(injector.getInstance(ClusterService.class));
    toClose.add(() -> stopWatch.stop().start("node_connections_service"));
    toClose.add(injector.getInstance(NodeConnectionsService.class));
    toClose.add(() -> stopWatch.stop().start("discovery"));
    toClose.add(injector.getInstance(Discovery.class));
    toClose.add(() -> stopWatch.stop().start("monitor"));
    toClose.add(injector.getInstance(MonitorService.class));
    toClose.add(() -> stopWatch.stop().start("gateway"));
    toClose.add(injector.getInstance(GatewayService.class));
    toClose.add(() -> stopWatch.stop().start("search"));
    toClose.add(injector.getInstance(SearchService.class));
    toClose.add(() -> stopWatch.stop().start("transport"));
    toClose.add(injector.getInstance(TransportService.class));
    toClose.add(() -> stopWatch.stop().start("search_transport_service"));
    toClose.add(injector.getInstance(SearchTransportService.class));
    for (LifecycleComponent plugin : pluginLifecycleComponents) {
        toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getClass().getName() + ")"));
        toClose.add(plugin);
    }
    toClose.addAll(pluginsService.filterPlugins(Plugin.class));
    toClose.add(() -> stopWatch.stop().start("script"));
    toClose.add(injector.getInstance(ScriptService.class));
    toClose.add(() -> stopWatch.stop().start("thread_pool"));
    // TODO this should really use ThreadPool.terminate()
    toClose.add(() -> injector.getInstance(ThreadPool.class).shutdown());
    toClose.add(() -> {
        try {
            injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // ignore
        }
    });
    toClose.add(() -> stopWatch.stop().start("thread_pool_force_shutdown"));
    toClose.add(() -> injector.getInstance(ThreadPool.class).shutdownNow());
    toClose.add(() -> stopWatch.stop());
    toClose.add(injector.getInstance(NodeEnvironment.class));
    toClose.add(injector.getInstance(BigArrays.class));
    if (logger.isTraceEnabled()) {
        logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
    }
    IOUtils.close(toClose);
    logger.info("closed");
}
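The pattern in this close() implementation is to build an ordered list of Closeables in which timing lambdas and the services themselves alternate, then hand the whole list to IOUtils.close(...) so every element is closed in turn even if some of them fail. A minimal, self-contained sketch of that idea using only JDK types (NodeCloseSketch and FakeService are hypothetical stand-ins, not Elasticsearch classes; the real code uses StopWatch and Lucene's IOUtils instead of the plain loop shown here):

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class NodeCloseSketch {

    // stand-in for a service that needs to be shut down
    static class FakeService implements Closeable {
        private final String name;

        FakeService(String name) {
            this.name = name;
        }

        @Override
        public void close() {
            System.out.println("closed " + name);
        }
    }

    public static void main(String[] args) throws IOException {
        List<Closeable> toClose = new ArrayList<>();
        long[] phaseStart = new long[1];

        // a timing "phase marker" is itself a Closeable, so it runs when its slot in the list is reached,
        // just like the stopWatch.stop().start(...) lambdas above
        toClose.add(() -> phaseStart[0] = System.nanoTime());
        toClose.add(new FakeService("indices"));
        toClose.add(() -> System.out.printf("indices phase took %d ns%n", System.nanoTime() - phaseStart[0]));
        toClose.add(new FakeService("discovery"));

        // close everything in list order, remembering the first failure and suppressing the rest,
        // which approximates what IOUtils.close(toClose) does in the method above
        IOException firstFailure = null;
        for (Closeable closeable : toClose) {
            try {
                closeable.close();
            } catch (IOException e) {
                if (firstFailure == null) {
                    firstFailure = e;
                } else {
                    firstFailure.addSuppressed(e);
                }
            }
        }
        if (firstFailure != null) {
            throw firstFailure;
        }
    }
}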
Use of org.elasticsearch.discovery.Discovery in project elasticsearch by elastic.
The class ZenDiscoveryUnitTests, method testNodesUpdatedAfterClusterStatePublished().
public void testNodesUpdatedAfterClusterStatePublished() throws Exception {
    ThreadPool threadPool = new TestThreadPool(getClass().getName());
    // randomly make minimum_master_nodes a value higher than we have nodes for, so it will force failure
    int minMasterNodes = randomBoolean() ? 3 : 1;
    Settings settings = Settings.builder()
        .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes))
        .build();
    ArrayDeque<Closeable> toClose = new ArrayDeque<>();
    try {
        Set<DiscoveryNode> expectedFDNodes = null;
        final MockTransportService masterTransport = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null);
        masterTransport.start();
        DiscoveryNode masterNode = masterTransport.getLocalNode();
        toClose.addFirst(masterTransport);
        ClusterState state = ClusterStateCreationUtils.state(masterNode, masterNode, masterNode);
        // build the zen discovery and cluster service
        ClusterService masterClusterService = createClusterService(threadPool, masterNode);
        toClose.addFirst(masterClusterService);
        // TODO: clustername shouldn't be stored twice in cluster service, but for now, work around it
        state = ClusterState.builder(masterClusterService.getClusterName()).nodes(state.nodes()).build();
        setState(masterClusterService, state);
        ZenDiscovery masterZen = buildZenDiscovery(settings, masterTransport, masterClusterService, threadPool);
        toClose.addFirst(masterZen);
        masterTransport.acceptIncomingRequests();
        final MockTransportService otherTransport = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null);
        otherTransport.start();
        toClose.addFirst(otherTransport);
        DiscoveryNode otherNode = otherTransport.getLocalNode();
        final ClusterState otherState = ClusterState.builder(masterClusterService.getClusterName())
            .nodes(DiscoveryNodes.builder().add(otherNode).localNodeId(otherNode.getId()))
            .build();
        ClusterService otherClusterService = createClusterService(threadPool, masterNode);
        toClose.addFirst(otherClusterService);
        setState(otherClusterService, otherState);
        ZenDiscovery otherZen = buildZenDiscovery(settings, otherTransport, otherClusterService, threadPool);
        toClose.addFirst(otherZen);
        otherTransport.acceptIncomingRequests();
        masterTransport.connectToNode(otherNode);
        otherTransport.connectToNode(masterNode);
        // a new cluster state with a new discovery node (we will test if the cluster state
        // was updated by the presence of this node in NodesFaultDetection)
        ClusterState newState = ClusterState.builder(masterClusterService.state())
            .incrementVersion()
            .nodes(DiscoveryNodes.builder(state.nodes()).add(otherNode).masterNodeId(masterNode.getId()))
            .build();
        try {
            // publishing a new cluster state
            ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent("testing", newState, state);
            AssertingAckListener listener = new AssertingAckListener(newState.nodes().getSize() - 1);
            expectedFDNodes = masterZen.getFaultDetectionNodes();
            masterZen.publish(clusterChangedEvent, listener);
            listener.await(1, TimeUnit.HOURS);
            // publish was a success, update expected FD nodes based on new cluster state
            expectedFDNodes = fdNodesForState(newState, masterNode);
        } catch (Discovery.FailedToCommitClusterStateException e) {
            // not successful, so expectedFDNodes above should remain what it was originally assigned
            // ensure min master nodes is the higher value, otherwise we shouldn't fail
            assertEquals(3, minMasterNodes);
        }
        assertEquals(expectedFDNodes, masterZen.getFaultDetectionNodes());
    } finally {
        IOUtils.close(toClose);
        terminate(threadPool);
    }
}
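The test compares masterZen.getFaultDetectionNodes() against fdNodesForState(newState, masterNode), a helper that is not shown on this page. Assuming NodesFaultDetection pings every node in the published state except the local node, a plausible reconstruction of that helper looks roughly like this (a sketch, not the verbatim implementation):

// plausible sketch only: collect every node from the state except the local one,
// since a node never runs fault detection against itself
private Set<DiscoveryNode> fdNodesForState(ClusterState clusterState, DiscoveryNode localNode) {
    final Set<DiscoveryNode> nodes = new HashSet<>();
    for (DiscoveryNode node : clusterState.nodes()) {
        if (node.getId().equals(localNode.getId()) == false) {
            nodes.add(node);
        }
    }
    return nodes;
}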
Use of org.elasticsearch.discovery.Discovery in project elasticsearch by elastic.
The class ESIntegTestCase, method afterInternal().
protected final void afterInternal(boolean afterClass) throws Exception {
    boolean success = false;
    try {
        final Scope currentClusterScope = getCurrentClusterScope();
        clearDisruptionScheme();
        try {
            if (cluster() != null) {
                if (currentClusterScope != Scope.TEST) {
                    MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData();
                    final Map<String, String> persistent = metaData.persistentSettings().getAsMap();
                    assertThat("test leaves persistent cluster metadata behind: " + persistent, persistent.size(), equalTo(0));
                    final Map<String, String> transientSettings = new HashMap<>(metaData.transientSettings().getAsMap());
                    if (isInternalCluster() && internalCluster().getAutoManageMinMasterNode()) {
                        // this is set by the test infra
                        transientSettings.remove(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey());
                    }
                    assertThat("test leaves transient cluster metadata behind: " + transientSettings, transientSettings.keySet(), empty());
                }
                ensureClusterSizeConsistency();
                ensureClusterStateConsistency();
                if (isInternalCluster()) {
                    // check no pending cluster states are leaked
                    for (Discovery discovery : internalCluster().getInstances(Discovery.class)) {
                        if (discovery instanceof ZenDiscovery) {
                            final ZenDiscovery zenDiscovery = (ZenDiscovery) discovery;
                            assertBusy(() -> {
                                final ClusterState[] states = zenDiscovery.pendingClusterStates();
                                assertThat(zenDiscovery.localNode().getName() + " still having pending states:\n"
                                        + Stream.of(states).map(ClusterState::toString).collect(Collectors.joining("\n")),
                                    states, emptyArray());
                            });
                        }
                    }
                }
                beforeIndexDeletion();
                // wipe after to make sure we fail in the test that didn't ack the delete
                cluster().wipe(excludeTemplates());
                if (afterClass || currentClusterScope == Scope.TEST) {
                    cluster().close();
                }
                cluster().assertAfterTest();
            }
        } finally {
            if (currentClusterScope == Scope.TEST) {
                // it is ok to leave persistent / transient cluster state behind if scope is TEST
                clearClusters();
            }
        }
        success = true;
    } finally {
        if (!success) {
            // if we failed here that means that something broke horribly so we should clear all clusters
            // TODO: just let the exception happen, WTF is all this horseshit
            // afterTestRule.forceFailure();
        }
    }
}
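afterInternal() wipes everything via cluster().wipe(excludeTemplates()), so a test that installs its own index templates is expected to opt them out through the excludeTemplates() hook. A hedged example of how a subclass might do that, assuming excludeTemplates() is the overridable extension point implied by the call above (the class and template names are hypothetical):

import java.util.Collections;
import java.util.Set;

public class MyPluginIT extends ESIntegTestCase {

    @Override
    protected Set<String> excludeTemplates() {
        // keep this template from being wiped between tests; the name is made up for illustration
        return Collections.singleton("my-plugin-template");
    }
}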
Use of org.elasticsearch.discovery.Discovery in project crate by crate.
The class Node, method close().
// During concurrent close() calls we want to make sure that all of them return after the node has completed its shutdown cycle.
// If not, the shutdown hook that is added in Bootstrap#setup() will be useless:
// close() might not be executed, in case another (for example API) call to close() has already set some lifecycles to stopped.
// In this case the process will be terminated even if the first call to close() has not finished yet.
@Override
public synchronized void close() throws IOException {
    synchronized (lifecycle) {
        if (lifecycle.started()) {
            stop();
        }
        if (!lifecycle.moveToClosed()) {
            return;
        }
    }
    logger.info("closing ...");
    List<Closeable> toClose = new ArrayList<>();
    StopWatch stopWatch = new StopWatch("node_close");
    toClose.add(() -> stopWatch.start("node_service"));
    toClose.add(nodeService);
    toClose.add(() -> stopWatch.stop().start("http"));
    toClose.add(injector.getInstance(HttpServerTransport.class));
    toClose.add(() -> stopWatch.stop().start("snapshot_service"));
    toClose.add(injector.getInstance(SnapshotsService.class));
    toClose.add(injector.getInstance(SnapshotShardsService.class));
    toClose.add(() -> stopWatch.stop().start("client"));
    Releasables.close(injector.getInstance(Client.class));
    toClose.add(() -> stopWatch.stop().start("indices_cluster"));
    toClose.add(injector.getInstance(IndicesClusterStateService.class));
    toClose.add(() -> stopWatch.stop().start("indices"));
    toClose.add(injector.getInstance(IndicesService.class));
    // close filter/fielddata caches after indices
    toClose.add(injector.getInstance(IndicesStore.class));
    toClose.add(injector.getInstance(PeerRecoverySourceService.class));
    toClose.add(() -> stopWatch.stop().start("routing"));
    toClose.add(() -> stopWatch.stop().start("cluster"));
    toClose.add(injector.getInstance(ClusterService.class));
    toClose.add(() -> stopWatch.stop().start("node_connections_service"));
    toClose.add(injector.getInstance(NodeConnectionsService.class));
    toClose.add(() -> stopWatch.stop().start("discovery"));
    toClose.add(injector.getInstance(Discovery.class));
    toClose.add(() -> stopWatch.stop().start("monitor"));
    toClose.add(nodeService.getMonitorService());
    toClose.add(() -> stopWatch.stop().start("gateway"));
    toClose.add(injector.getInstance(GatewayService.class));
    toClose.add(() -> stopWatch.stop().start("transport"));
    toClose.add(injector.getInstance(TransportService.class));
    toClose.add(() -> stopWatch.stop().start("gateway_meta_state"));
    toClose.add(injector.getInstance(GatewayMetaState.class));
    toClose.add(() -> stopWatch.stop().start("node_environment"));
    toClose.add(injector.getInstance(NodeEnvironment.class));
    toClose.add(() -> stopWatch.stop().start("decommission_service"));
    toClose.add(injector.getInstance(DecommissioningService.class));
    toClose.add(() -> stopWatch.stop().start("node_disconnect_job_monitor_service"));
    toClose.add(injector.getInstance(NodeDisconnectJobMonitorService.class));
    toClose.add(() -> stopWatch.stop().start("jobs_log_service"));
    toClose.add(injector.getInstance(JobsLogService.class));
    toClose.add(() -> stopWatch.stop().start("postgres_netty"));
    toClose.add(injector.getInstance(PostgresNetty.class));
    toClose.add(() -> stopWatch.stop().start("tasks_service"));
    toClose.add(injector.getInstance(TasksService.class));
    toClose.add(() -> stopWatch.stop().start("schemas"));
    toClose.add(injector.getInstance(Schemas.class));
    toClose.add(() -> stopWatch.stop().start("array_mapper_service"));
    toClose.add(injector.getInstance(ArrayMapperService.class));
    toClose.add(() -> stopWatch.stop().start("dangling_artifacts_service"));
    toClose.add(injector.getInstance(DanglingArtifactsService.class));
    toClose.add(() -> stopWatch.stop().start("ssl_context_provider_service"));
    toClose.add(injector.getInstance(SslContextProviderService.class));
    toClose.add(() -> stopWatch.stop().start("blob_service"));
    toClose.add(injector.getInstance(BlobService.class));
    for (LifecycleComponent plugin : pluginLifecycleComponents) {
        toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getClass().getName() + ")"));
        toClose.add(plugin);
    }
    toClose.addAll(pluginsService.filterPlugins(Plugin.class));
    toClose.add(() -> stopWatch.stop().start("thread_pool"));
    toClose.add(() -> injector.getInstance(ThreadPool.class).shutdown());
    // Don't call shutdownNow here, it might break ongoing operations on Lucene indices.
    // See https://issues.apache.org/jira/browse/LUCENE-7248. We call shutdownNow in
    // awaitClose if the node doesn't finish closing within the specified time.
    toClose.add(() -> stopWatch.stop());
    if (logger.isTraceEnabled()) {
        logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
    }
    IOUtils.close(toClose);
    logger.info("closed");
}
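The comment above defers the hard thread-pool shutdown to an awaitClose step that is not shown on this page. A plausible shape for that companion method, assuming it simply waits for the pool to drain and only then falls back to shutdownNow (the exact signature and error handling in CrateDB may differ):

// sketch under the assumptions stated above, not the verbatim CrateDB implementation
public boolean awaitClose(long timeout, TimeUnit timeUnit) throws InterruptedException {
    ThreadPool threadPool = injector.getInstance(ThreadPool.class);
    // give in-flight work (e.g. ongoing Lucene index operations) a chance to finish
    boolean terminated = threadPool.awaitTermination(timeout, timeUnit);
    if (terminated == false) {
        // only now force-interrupt whatever is still running
        threadPool.shutdownNow();
    }
    return terminated;
}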