Use of org.elasticsearch.common.transport.TransportAddress in project crate by crate.
The class Netty4HttpServerTransport, method bindAddress:
private TransportAddress bindAddress(final InetAddress hostAddress) {
    final AtomicReference<Exception> lastException = new AtomicReference<>();
    final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
    boolean success = port.iterate(portNumber -> {
        try {
            synchronized (serverChannels) {
                ChannelFuture future = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber)).sync();
                serverChannels.add(future.channel());
                boundSocket.set((InetSocketAddress) future.channel().localAddress());
            }
        } catch (Exception e) {
            lastException.set(e);
            return false;
        }
        return true;
    });
    if (!success) {
        throw new BindHttpException("Failed to bind to [" + port.getPortRangeString() + "]", lastException.get());
    }
    if (logger.isDebugEnabled()) {
        logger.debug("Bound http to address {{}}", NetworkAddress.format(boundSocket.get()));
    }
    return new TransportAddress(boundSocket.get());
}
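The method above walks a configured port range and keeps the first port that binds successfully, remembering the last exception so it can be reported if the whole range fails. Below is a minimal standalone sketch of that retry-over-a-port-range idea using a plain java.net.ServerSocket instead of Netty and CrateDB's PortsRange helper; the class and method names are illustrative, not part of the project.

import java.io.IOException;
import java.net.InetAddress;
import java.net.ServerSocket;

public final class PortRangeBinder {

    /**
     * Tries every port in [fromPort, toPort] until one bind succeeds, mirroring the
     * port.iterate(...) loop above: remember the last failure and surface it only
     * after the whole range has been exhausted.
     */
    static ServerSocket bindFirstFree(InetAddress host, int fromPort, int toPort) throws IOException {
        IOException lastException = null;
        for (int port = fromPort; port <= toPort; port++) {
            try {
                return new ServerSocket(port, 0, host); // bound socket; the caller owns and closes it
            } catch (IOException e) {
                lastException = e; // port busy or not permitted, try the next one
            }
        }
        throw new IOException("Failed to bind to [" + fromPort + "-" + toPort + "]", lastException);
    }
}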
Use of org.elasticsearch.common.transport.TransportAddress in project crate by crate.
The class SeedHostsResolver, method resolveHosts:
@Override
public List<TransportAddress> resolveHosts(final List<String> hosts) {
    Objects.requireNonNull(hosts);
    if (resolveTimeout.nanos() < 0) {
        throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]");
    }
    // create tasks to submit to the executor service; we will wait up to resolveTimeout for these tasks to complete
    final List<Callable<TransportAddress[]>> callables = hosts.stream()
        .map(hn -> (Callable<TransportAddress[]>) () -> transportService.addressesFromString(hn))
        .collect(Collectors.toList());
    final SetOnce<List<Future<TransportAddress[]>>> futures = new SetOnce<>();
    try {
        cancellableThreads.execute(
            () -> futures.set(executorService.get().invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS)));
    } catch (CancellableThreads.ExecutionCancelledException e) {
        return Collections.emptyList();
    }
    final List<TransportAddress> transportAddresses = new ArrayList<>();
    final Set<TransportAddress> localAddresses = new HashSet<>();
    localAddresses.add(transportService.boundAddress().publishAddress());
    localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses()));
    // ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can
    // associate the hostname with the corresponding task by iterating together
    final Iterator<String> it = hosts.iterator();
    for (final Future<TransportAddress[]> future : futures.get()) {
        assert future.isDone();
        final String hostname = it.next();
        if (!future.isCancelled()) {
            try {
                final TransportAddress[] addresses = future.get();
                LOGGER.trace("resolved host [{}] to {}", hostname, addresses);
                for (int addressId = 0; addressId < addresses.length; addressId++) {
                    final TransportAddress address = addresses[addressId];
                    // no point in pinging ourselves
                    if (localAddresses.contains(address) == false) {
                        transportAddresses.add(address);
                    }
                }
            } catch (final ExecutionException e) {
                assert e.getCause() != null;
                final String message = "failed to resolve host [" + hostname + "]";
                LOGGER.warn(message, e.getCause());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                // ignore
            }
        } else {
            LOGGER.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname);
        }
    }
    return Collections.unmodifiableList(transportAddresses);
}
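The core technique here is ExecutorService#invokeAll with a timeout: futures come back in task order, so each result (or cancellation) can be matched to its hostname by index. A minimal standalone sketch of the same pattern, using InetAddress.getAllByName instead of transportService.addressesFromString; the class name and pool sizing are illustrative assumptions.

import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

public final class HostResolver {

    /**
     * Resolves each hostname on a worker pool, waiting at most timeoutMillis for all
     * lookups. invokeAll returns futures in task order, so results can be matched back
     * to their hostnames by index, the same trick resolveHosts relies on.
     */
    static List<InetAddress> resolveAll(List<String> hostnames, long timeoutMillis) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(Math.max(1, hostnames.size()));
        try {
            List<Callable<InetAddress[]>> tasks = hostnames.stream()
                .map(hn -> (Callable<InetAddress[]>) () -> InetAddress.getAllByName(hn))
                .collect(Collectors.toList());
            List<Future<InetAddress[]>> futures = executor.invokeAll(tasks, timeoutMillis, TimeUnit.MILLISECONDS);
            List<InetAddress> resolved = new ArrayList<>();
            for (int i = 0; i < futures.size(); i++) {
                Future<InetAddress[]> future = futures.get(i);
                if (future.isCancelled()) {
                    System.err.println("timed out resolving host [" + hostnames.get(i) + "]");
                    continue;
                }
                try {
                    for (InetAddress address : future.get()) {
                        resolved.add(address);
                    }
                } catch (ExecutionException e) {
                    System.err.println("failed to resolve host [" + hostnames.get(i) + "]: " + e.getCause());
                }
            }
            return resolved;
        } finally {
            executor.shutdownNow();
        }
    }
}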
Use of org.elasticsearch.common.transport.TransportAddress in project crate by crate.
The class Node, method start:
/**
 * Start the node. If the node is already started, this method is a no-op.
 */
public Node start() throws NodeValidationException {
    if (!lifecycle.moveToStarted()) {
        return this;
    }
    logger.info("starting ...");
    pluginLifecycleComponents.forEach(LifecycleComponent::start);
    injector.getInstance(BlobService.class).start();
    injector.getInstance(DecommissioningService.class).start();
    injector.getInstance(NodeDisconnectJobMonitorService.class).start();
    injector.getInstance(JobsLogService.class).start();
    injector.getInstance(PostgresNetty.class).start();
    injector.getInstance(TasksService.class).start();
    injector.getInstance(Schemas.class).start();
    injector.getInstance(ArrayMapperService.class).start();
    injector.getInstance(DanglingArtifactsService.class).start();
    injector.getInstance(SslContextProviderService.class).start();
    injector.getInstance(MappingUpdatedAction.class).setClient(client);
    injector.getInstance(IndicesService.class).start();
    injector.getInstance(IndicesClusterStateService.class).start();
    injector.getInstance(SnapshotsService.class).start();
    injector.getInstance(SnapshotShardsService.class).start();
    nodeService.getMonitorService().start();
    final ClusterService clusterService = injector.getInstance(ClusterService.class);
    final NodeConnectionsService nodeConnectionsService = injector.getInstance(NodeConnectionsService.class);
    nodeConnectionsService.start();
    clusterService.setNodeConnectionsService(nodeConnectionsService);
    injector.getInstance(GatewayService.class).start();
    Discovery discovery = injector.getInstance(Discovery.class);
    clusterService.getMasterService().setClusterStatePublisher(discovery::publish);
    HttpServerTransport httpServerTransport = injector.getInstance(HttpServerTransport.class);
    httpServerTransport.start();
    // CRATE_PATCH: add http publish address to the discovery node
    TransportAddress publishAddress = httpServerTransport.info().address().publishAddress();
    localNodeFactory.httpPublishAddress = publishAddress.getAddress() + ':' + publishAddress.getPort();
    // Start the transport service now so the publish address will be added to the local disco node in ClusterService
    TransportService transportService = injector.getInstance(TransportService.class);
    transportService.start();
    assert localNodeFactory.getNode() != null;
    assert transportService.getLocalNode().equals(localNodeFactory.getNode())
        : "transportService has a different local node than the factory provided";
    injector.getInstance(PeerRecoverySourceService.class).start();
    // Load (and maybe upgrade) the metadata stored on disk
    final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class);
    gatewayMetaState.start(
        settings(),
        transportService,
        clusterService,
        injector.getInstance(MetaStateService.class),
        injector.getInstance(MetadataIndexUpgradeService.class),
        injector.getInstance(MetadataUpgrader.class),
        injector.getInstance(PersistedClusterStateService.class));
    if (Assertions.ENABLED) {
        try {
            assert injector.getInstance(MetaStateService.class).loadFullState().v1().isEmpty();
            final NodeMetadata nodeMetaData = NodeMetadata.FORMAT.loadLatestState(
                logger, NamedXContentRegistry.EMPTY, nodeEnvironment.nodeDataPaths());
            assert nodeMetaData != null;
            assert nodeMetaData.nodeVersion().equals(Version.CURRENT);
            assert nodeMetaData.nodeId().equals(localNodeFactory.getNode().getId());
        } catch (IOException e) {
            assert false : e;
        }
    }
    // we load the global state here (the persistent part of the cluster state stored on disk) to
    // pass it to the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state.
    final Metadata onDiskMetadata = gatewayMetaState.getPersistedState().getLastAcceptedState().metadata();
    // this is never null
    assert onDiskMetadata != null : "metadata is null but shouldn't";
    validateNodeBeforeAcceptingRequests(
        transportService.boundAddress(),
        pluginsService.filterPlugins(Plugin.class).stream()
            .flatMap(p -> p.getBootstrapChecks().stream())
            .collect(Collectors.toList()));
    // start after transport service so the local disco is known
    // start before cluster service so that it can set initial state on ClusterApplierService
    discovery.start();
    clusterService.start();
    assert clusterService.localNode().equals(localNodeFactory.getNode())
        : "clusterService has a different local node than the factory provided";
    transportService.acceptIncomingRequests();
    discovery.startInitialJoin();
    final TimeValue initialStateTimeout = INITIAL_STATE_TIMEOUT_SETTING.get(settings);
    configureNodeAndClusterIdStateListener(clusterService);
    if (initialStateTimeout.millis() > 0) {
        final ThreadPool thread = injector.getInstance(ThreadPool.class);
        ClusterState clusterState = clusterService.state();
        ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger);
        if (clusterState.nodes().getMasterNodeId() == null) {
            logger.debug("waiting to join the cluster. timeout [{}]", initialStateTimeout);
            final CountDownLatch latch = new CountDownLatch(1);
            observer.waitForNextChange(new ClusterStateObserver.Listener() {

                @Override
                public void onNewClusterState(ClusterState state) {
                    latch.countDown();
                }

                @Override
                public void onClusterServiceClose() {
                    latch.countDown();
                }

                @Override
                public void onTimeout(TimeValue timeout) {
                    logger.warn("timed out while waiting for initial discovery state - timeout: {}", initialStateTimeout);
                    latch.countDown();
                }
            }, state -> state.nodes().getMasterNodeId() != null, initialStateTimeout);
            try {
                latch.await();
            } catch (InterruptedException e) {
                throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state");
            }
        }
    }
    if (WRITE_PORTS_FILE_SETTING.get(settings)) {
        TransportService transport = injector.getInstance(TransportService.class);
        writePortsFile("transport", transport.boundAddress());
        HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
        writePortsFile("http", http.boundAddress());
    }
    logger.info("started");
    pluginsService.filterPlugins(ClusterPlugin.class).forEach(ClusterPlugin::onNodeStarted);
    return this;
}
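The tail of start() shows a common blocking pattern: register a one-shot listener with the ClusterStateObserver, count a CountDownLatch down when a master is elected (or on close/timeout), and block the starting thread on that latch. A minimal standalone sketch of the same shape, with registerMasterListener as a hypothetical stand-in for ClusterStateObserver#waitForNextChange:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

public final class MasterWaitExample {

    /**
     * Mirrors the wait-for-master block above: register a one-shot callback that counts
     * the latch down when a master is elected (or when the wait gives up), then block
     * the caller on the latch until that happens or timeoutMillis elapses.
     */
    static void waitForMaster(Consumer<Runnable> registerMasterListener, long timeoutMillis) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        registerMasterListener.accept(latch::countDown); // fired on "master elected", "service closed" or "timed out"
        if (!latch.await(timeoutMillis, TimeUnit.MILLISECONDS)) {
            System.err.println("timed out while waiting for initial discovery state");
        }
    }
}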
Use of org.elasticsearch.common.transport.TransportAddress in project crate by crate.
The class Node, method writePortsFile:
/**
 * Writes a file to the logs dir containing the ports for the given transport type
 */
private void writePortsFile(String type, BoundTransportAddress boundAddress) {
    Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp");
    try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) {
        for (TransportAddress address : boundAddress.boundAddresses()) {
            InetAddress inetAddress = InetAddress.getByName(address.getAddress());
            writer.write(NetworkAddress.format(new InetSocketAddress(inetAddress, address.getPort())) + "\n");
        }
    } catch (IOException e) {
        throw new RuntimeException("Failed to write ports file", e);
    }
    Path portsFile = environment.logsFile().resolve(type + ".ports");
    try {
        Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE);
    } catch (IOException e) {
        throw new RuntimeException("Failed to rename ports file", e);
    }
}
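writePortsFile uses the write-to-temp-file-then-atomic-rename pattern so that readers of the ports file never see a partially written version. A minimal standalone sketch of that pattern with java.nio; the class and method names are illustrative:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.List;

public final class AtomicFileWriter {

    /**
     * Same temp-file-plus-ATOMIC_MOVE pattern as writePortsFile: the target file either
     * keeps its old contents or appears fully written, never half-written.
     */
    static void writeAtomically(Path target, List<String> lines) throws IOException {
        Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
        Files.write(tmp, lines, StandardCharsets.UTF_8);
        Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE);
    }
}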
Use of org.elasticsearch.common.transport.TransportAddress in project crate by crate.
The class PeerFinderTests, method testDoesNotReturnDuplicateNodesWithDistinctAddresses:
public void testDoesNotReturnDuplicateNodesWithDistinctAddresses() {
    final DiscoveryNode otherNode = newDiscoveryNode("node-from-hosts-list");
    final TransportAddress alternativeAddress = buildNewFakeTransportAddress();
    providedAddresses.add(otherNode.getAddress());
    providedAddresses.add(alternativeAddress);
    transportAddressConnector.addReachableNode(otherNode);
    transportAddressConnector.reachableNodes.put(alternativeAddress, otherNode);
    peerFinder.activate(lastAcceptedNodes);
    runAllRunnableTasks();
    assertFoundPeers(otherNode);
}
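The test asserts that a node reachable through two distinct transport addresses is still reported as a single peer. One way to get that behaviour is to deduplicate discovered nodes by a stable node identity rather than by address; a small illustrative sketch, where nodeId is a hypothetical stand-in for DiscoveryNode#getId():

import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;

public final class PeerDeduplication {

    /**
     * Collapses nodes discovered via several addresses to one entry per node identity,
     * the property asserted by the test above.
     */
    static <N> Collection<N> dedupeByNodeId(Collection<N> discovered, Function<N, String> nodeId) {
        Map<String, N> byId = new LinkedHashMap<>();
        for (N node : discovered) {
            byId.putIfAbsent(nodeId.apply(node), node);
        }
        return byId.values();
    }
}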