Use of org.elasticsearch.node.Node in project elasticsearch by elastic.
In class AbstractSimpleTransportTestCase, method testConcurrentSendRespondAndDisconnect:
public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierException, InterruptedException {
    Set<Exception> sendingErrors = ConcurrentCollections.newConcurrentSet();
    Set<Exception> responseErrors = ConcurrentCollections.newConcurrentSet();
    serviceA.registerRequestHandler("test", TestRequest::new,
        randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC, (request, channel) -> {
            try {
                channel.sendResponse(new TestResponse());
            } catch (Exception e) {
                logger.info("caught exception while responding", e);
                responseErrors.add(e);
            }
        });
    final TransportRequestHandler<TestRequest> ignoringRequestHandler = (request, channel) -> {
        try {
            channel.sendResponse(new TestResponse());
        } catch (Exception e) {
            // we don't really care what's going on on B, we're testing through A
            logger.trace("caught exception while responding from node B", e);
        }
    };
    serviceB.registerRequestHandler("test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler);
    int halfSenders = scaledRandomIntBetween(3, 10);
    final CyclicBarrier go = new CyclicBarrier(halfSenders * 2 + 1);
    final CountDownLatch done = new CountDownLatch(halfSenders * 2);
    for (int i = 0; i < halfSenders; i++) {
        // B senders just generate activity so serviceA can respond, we don't test what's going on there
        final int sender = i;
        threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() {
            @Override
            public void onFailure(Exception e) {
                logger.trace("caught exception while sending from B", e);
            }

            @Override
            protected void doRun() throws Exception {
                go.await();
                for (int iter = 0; iter < 10; iter++) {
                    PlainActionFuture<TestResponse> listener = new PlainActionFuture<>();
                    final String info = sender + "_B_" + iter;
                    serviceB.sendRequest(nodeA, "test", new TestRequest(info),
                        new ActionListenerResponseHandler<>(listener, TestResponse::new));
                    try {
                        listener.actionGet();
                    } catch (Exception e) {
                        logger.trace((Supplier<?>) () -> new ParameterizedMessage("caught exception while sending to node {}", nodeA), e);
                    }
                }
            }

            @Override
            public void onAfter() {
                done.countDown();
            }
        });
    }
    for (int i = 0; i < halfSenders; i++) {
        final int sender = i;
        threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() {
            @Override
            public void onFailure(Exception e) {
                logger.error("unexpected error", e);
                sendingErrors.add(e);
            }

            @Override
            protected void doRun() throws Exception {
                go.await();
                for (int iter = 0; iter < 10; iter++) {
                    PlainActionFuture<TestResponse> listener = new PlainActionFuture<>();
                    final String info = sender + "_" + iter;
                    // capture now
                    final DiscoveryNode node = nodeB;
                    try {
                        serviceA.sendRequest(node, "test", new TestRequest(info),
                            new ActionListenerResponseHandler<>(listener, TestResponse::new));
                        try {
                            listener.actionGet();
                        } catch (ConnectTransportException e) {
                            // ok!
                        } catch (Exception e) {
                            logger.error((Supplier<?>) () -> new ParameterizedMessage("caught exception while sending to node {}", node), e);
                            sendingErrors.add(e);
                        }
                    } catch (NodeNotConnectedException ex) {
                        // ok
                    }
                }
            }

            @Override
            public void onAfter() {
                done.countDown();
            }
        });
    }
    go.await();
    for (int i = 0; i <= 10; i++) {
        if (i % 3 == 0) {
            // simulate restart of nodeB
            serviceB.close();
            MockTransportService newService = buildService("TS_B_" + i, version1, null);
            newService.registerRequestHandler("test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler);
            serviceB = newService;
            nodeB = newService.getLocalDiscoNode();
            serviceB.connectToNode(nodeA);
            serviceA.connectToNode(nodeB);
        } else if (serviceA.nodeConnected(nodeB)) {
            serviceA.disconnectFromNode(nodeB);
        } else {
            serviceA.connectToNode(nodeB);
        }
    }
    done.await();
    assertThat("found non connection errors while sending", sendingErrors, empty());
    assertThat("found non connection errors while responding", responseErrors, empty());
}
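The concurrency and the simulated restarts above are what make this test interesting; the underlying request/response round trip is small. Below is a minimal sketch of that round trip in isolation, assuming the same AbstractSimpleTransportTestCase fixtures (serviceA, serviceB, nodeB, TestRequest, TestResponse). The action name "test-sketch" and the test method name are hypothetical, not part of the original test.

    public void testSingleRoundTripSketch() throws Exception {
        // register a handler on B that immediately answers with an empty response
        serviceB.registerRequestHandler("test-sketch", TestRequest::new, ThreadPool.Names.SAME,
            (request, channel) -> channel.sendResponse(new TestResponse()));
        // send from A and block until the response (or a failure) arrives
        PlainActionFuture<TestResponse> future = new PlainActionFuture<>();
        serviceA.sendRequest(nodeB, "test-sketch", new TestRequest("ping"),
            new ActionListenerResponseHandler<>(future, TestResponse::new));
        assertNotNull(future.actionGet());
    }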
Use of org.elasticsearch.node.Node in project elasticsearch by elastic.
In class Bootstrap, method setup:
private void setup(boolean addShutdownHook, Environment environment) throws BootstrapException {
    Settings settings = environment.settings();
    try {
        spawner.spawnNativePluginControllers(environment);
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                try {
                    spawner.close();
                } catch (IOException e) {
                    throw new ElasticsearchException("Failed to destroy spawned controllers", e);
                }
            }
        });
    } catch (IOException e) {
        throw new BootstrapException(e);
    }
    initializeNatives(
        environment.tmpFile(),
        BootstrapSettings.MEMORY_LOCK_SETTING.get(settings),
        BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings),
        BootstrapSettings.CTRLHANDLER_SETTING.get(settings));
    // initialize probes before the security manager is installed
    initializeProbes();
    if (addShutdownHook) {
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                try {
                    IOUtils.close(node);
                    LoggerContext context = (LoggerContext) LogManager.getContext(false);
                    Configurator.shutdown(context);
                } catch (IOException ex) {
                    throw new ElasticsearchException("failed to stop node", ex);
                }
            }
        });
    }
    try {
        // look for jar hell
        JarHell.checkJarHell();
    } catch (IOException | URISyntaxException e) {
        throw new BootstrapException(e);
    }
    // Log ifconfig output before SecurityManager is installed
    IfConfig.logIfNecessary();
    // install SM after natives, shutdown hooks, etc.
    try {
        Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
    } catch (IOException | NoSuchAlgorithmException e) {
        throw new BootstrapException(e);
    }
    node = new Node(environment) {
        @Override
        protected void validateNodeBeforeAcceptingRequests(final Settings settings, final BoundTransportAddress boundTransportAddress,
                List<BootstrapCheck> checks) throws NodeValidationException {
            BootstrapChecks.check(settings, boundTransportAddress, checks);
        }
    };
}
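The anonymous subclass above only wires the bootstrap checks into Node; starting and closing the node happen elsewhere in Bootstrap. A rough sketch of that lifecycle, assuming an Environment has already been built (Node.start() and IOUtils.close() are the same calls used elsewhere in this codebase; the helper method name is hypothetical):

    static void runNodeSketch(Environment environment) throws Exception {
        Node node = new Node(environment);   // or an anonymous subclass, as above
        try {
            node.start();                    // may throw NodeValidationException if a bootstrap check fails
            // ... the node is now bound and accepting requests ...
        } finally {
            IOUtils.close(node);             // stops the node and releases its resources
        }
    }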
Use of org.elasticsearch.node.Node in project elasticsearch by elastic.
In class AzureRepositoryF, method main:
public static void main(String[] args) throws Throwable {
    Settings.Builder settings = Settings.builder();
    settings.put("http.cors.enabled", "true");
    settings.put("http.cors.allow-origin", "*");
    settings.put("cluster.name", AzureRepositoryF.class.getSimpleName());
    // Example for azure repo settings
    // settings.put("cloud.azure.storage.my_account1.account", "account_name");
    // settings.put("cloud.azure.storage.my_account1.key", "account_key");
    // settings.put("cloud.azure.storage.my_account1.default", true);
    // settings.put("cloud.azure.storage.my_account2.account", "account_name");
    // settings.put("cloud.azure.storage.my_account2.key", "account_key_secondary");
    final CountDownLatch latch = new CountDownLatch(1);
    final Node node = new MockNode(settings.build(), Collections.singletonList(AzureRepositoryPlugin.class));
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                IOUtils.close(node);
            } catch (IOException e) {
                throw new ElasticsearchException(e);
            } finally {
                latch.countDown();
            }
        }
    });
    node.start();
    latch.await();
}
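Once a node like this is running, a repository backed by the plugin can be registered through the node's client. A rough sketch, not taken from AzureRepositoryF: the repository name, the "azure" type, and the "container" setting below are assumptions based on the repository-azure plugin's documented usage.

    node.client().admin().cluster().preparePutRepository("my_backup")
        .setType("azure")                                              // repository type registered by the plugin
        .setSettings(Settings.builder().put("container", "backup-container"))
        .get();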
Use of org.elasticsearch.node.Node in project elasticsearch by elastic.
In class PublishClusterStateActionTests, method testUnexpectedDiffPublishing:
public void testUnexpectedDiffPublishing() throws Exception {
    MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, event -> {
        fail("Shouldn't send cluster state to myself");
    }).setAsMaster();
    MockNode nodeB = createMockNode("nodeB");
    // Initial cluster state with both nodes - the second node still shouldn't get a diff
    // even though it's present in the previous cluster state
    DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build();
    ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build();
    ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
    assertSameStateFromFull(nodeB.clusterState, clusterState);
    // cluster state update - add block
    previousClusterState = clusterState;
    clusterState = ClusterState.builder(clusterState)
        .blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK))
        .incrementVersion()
        .build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
    assertSameStateFromDiff(nodeB.clusterState, clusterState);
}
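The full-state versus diff behaviour asserted above rests on the standard Diffable machinery of ClusterState. A small standalone sketch of that mechanism (not part of the test class), assuming two states named as in the test and org.elasticsearch.cluster.Diff on the classpath:

    // a node that already holds previousClusterState can apply a diff instead of
    // deserializing the full cluster state off the wire
    Diff<ClusterState> diff = clusterState.diff(previousClusterState);
    ClusterState rebuilt = diff.apply(previousClusterState);
    assert rebuilt.version() == clusterState.version();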
Use of org.elasticsearch.node.Node in project elasticsearch by elastic.
In class InternalTestCluster, method fullRestart:
/**
 * Restarts all nodes in the cluster: it first stops every node and then starts them all again.
 */
public synchronized void fullRestart(RestartCallback callback) throws Exception {
    int numNodesRestarted = 0;
    Map<Set<Role>, List<NodeAndClient>> nodesByRoles = new HashMap<>();
    Set[] rolesOrderedByOriginalStartupOrder = new Set[nextNodeId.get()];
    for (NodeAndClient nodeAndClient : nodes.values()) {
        callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
        logger.info("Stopping node [{}] ", nodeAndClient.name);
        if (activeDisruptionScheme != null) {
            activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
        }
        nodeAndClient.closeNode();
        // delete data folders now, before we start other nodes that may claim them
        nodeAndClient.clearDataIfNeeded(callback);
        DiscoveryNode discoveryNode = getInstanceFromNode(ClusterService.class, nodeAndClient.node()).localNode();
        rolesOrderedByOriginalStartupOrder[nodeAndClient.nodeAndClientId] = discoveryNode.getRoles();
        nodesByRoles.computeIfAbsent(discoveryNode.getRoles(), k -> new ArrayList<>()).add(nodeAndClient);
    }
    assert nodesByRoles.values().stream().collect(Collectors.summingInt(List::size)) == nodes.size();
    // shuffle within each role group, so data folders will still belong to data nodes
    for (List<NodeAndClient> sameRoleNodes : nodesByRoles.values()) {
        Collections.shuffle(sameRoleNodes, random);
    }
    List<NodeAndClient> startUpOrder = new ArrayList<>();
    for (Set roles : rolesOrderedByOriginalStartupOrder) {
        if (roles == null) {
            // if some nodes were stopped, we won't have a role for that ordinal
            continue;
        }
        final List<NodeAndClient> nodesByRole = nodesByRoles.get(roles);
        startUpOrder.add(nodesByRole.remove(0));
    }
    assert nodesByRoles.values().stream().collect(Collectors.summingInt(List::size)) == 0;
    // do two rounds to minimize pinging (mock zen pings ping with no delay and can create a lot of logs)
    for (NodeAndClient nodeAndClient : startUpOrder) {
        logger.info("resetting node [{}] ", nodeAndClient.name);
        // we already cleared data folders before starting nodes up
        nodeAndClient.recreateNodeOnRestart(callback, false,
            autoManageMinMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1);
    }
    startAndPublishNodesAndClients(startUpOrder);
    if (callback.validateClusterForming()) {
        validateClusterFormed();
    }
}
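A caller only needs to supply a RestartCallback; for example, an integration test can restart the whole cluster while keeping each node's settings unchanged. A minimal sketch, assuming the usual ESIntegTestCase helper internalCluster(), which returns the InternalTestCluster:

    internalCluster().fullRestart(new InternalTestCluster.RestartCallback() {
        @Override
        public Settings onNodeStopped(String nodeName) throws Exception {
            // hook point while the node is down; returning extra settings is optional
            return Settings.EMPTY;
        }
    });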