Use of org.elasticsearch.threadpool.ThreadPool in project elasticsearch by elastic.
In class Netty4ScheduledPingTests, method testScheduledPing:
public void testScheduledPing() throws Exception {
    ThreadPool threadPool = new TestThreadPool(getClass().getName());
    Settings settings = Settings.builder()
        .put(TcpTransport.PING_SCHEDULE.getKey(), "5ms")
        .put(TransportSettings.PORT.getKey(), 0)
        .put("cluster.name", "test")
        .build();
    CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
    NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList());
    final Netty4Transport nettyA = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
        BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService);
    MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
    serviceA.start();
    serviceA.acceptIncomingRequests();
    final Netty4Transport nettyB = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()),
        BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService);
    MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
    serviceB.start();
    serviceB.acceptIncomingRequests();
    DiscoveryNode nodeA = serviceA.getLocalDiscoNode();
    DiscoveryNode nodeB = serviceB.getLocalDiscoNode();
    serviceA.connectToNode(nodeB);
    serviceB.connectToNode(nodeA);
    assertBusy(new Runnable() {
        @Override
        public void run() {
            assertThat(nettyA.getPing().getSuccessfulPings(), greaterThan(100L));
            assertThat(nettyB.getPing().getSuccessfulPings(), greaterThan(100L));
        }
    });
    assertThat(nettyA.getPing().getFailedPings(), equalTo(0L));
    assertThat(nettyB.getPing().getFailedPings(), equalTo(0L));
    serviceA.registerRequestHandler("sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC,
        new TransportRequestHandler<TransportRequest.Empty>() {
            @Override
            public void messageReceived(TransportRequest.Empty request, TransportChannel channel) {
                try {
                    channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.EMPTY);
                } catch (IOException e) {
                    logger.error("Unexpected failure", e);
                    fail(e.getMessage());
                }
            }
        });
    int rounds = scaledRandomIntBetween(100, 5000);
    for (int i = 0; i < rounds; i++) {
        serviceB.submitRequest(nodeA, "sayHello", TransportRequest.Empty.INSTANCE,
            TransportRequestOptions.builder().withCompress(randomBoolean()).build(),
            new TransportResponseHandler<TransportResponse.Empty>() {
                @Override
                public TransportResponse.Empty newInstance() {
                    return TransportResponse.Empty.INSTANCE;
                }

                @Override
                public String executor() {
                    return ThreadPool.Names.GENERIC;
                }

                @Override
                public void handleResponse(TransportResponse.Empty response) {
                }

                @Override
                public void handleException(TransportException exp) {
                    logger.error("Unexpected failure", exp);
                    fail("got exception instead of a response: " + exp.getMessage());
                }
            }).txGet();
    }
    assertBusy(() -> {
        assertThat(nettyA.getPing().getSuccessfulPings(), greaterThan(200L));
        assertThat(nettyB.getPing().getSuccessfulPings(), greaterThan(200L));
    });
    assertThat(nettyA.getPing().getFailedPings(), equalTo(0L));
    assertThat(nettyB.getPing().getFailedPings(), equalTo(0L));
    Releasables.close(serviceA, serviceB);
    terminate(threadPool);
}
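The test above relies on the test-framework pattern of creating a dedicated TestThreadPool for the duration of the test and tearing it down at the end. A minimal, standalone sketch of that lifecycle follows; it assumes only the TestThreadPool constructor, ThreadPool.generic(), and the static ThreadPool.terminate helper, and the class name is illustrative.

import java.util.concurrent.TimeUnit;

import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;

public class ThreadPoolLifecycleSketch {
    public static void main(String[] args) {
        // create a throwaway pool named after the test; the name shows up in thread names
        ThreadPool threadPool = new TestThreadPool("thread-pool-lifecycle-sketch");
        try {
            // hand work to the generic executor, the same named pool the handlers above run on
            threadPool.generic().execute(() -> System.out.println("running on " + Thread.currentThread().getName()));
        } finally {
            // always shut the pool down; terminate returns true if it stopped within the timeout
            ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
        }
    }
}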
Use of org.elasticsearch.threadpool.ThreadPool in project elasticsearch by elastic.
In class TransportMultiSearchActionTests, method testBatchExecute:
public void testBatchExecute() throws Exception {
    // Initialize dependencies of TransportMultiSearchAction
    Settings settings = Settings.builder()
        .put("node.name", TransportMultiSearchActionTests.class.getSimpleName())
        .build();
    ActionFilters actionFilters = mock(ActionFilters.class);
    when(actionFilters.filters()).thenReturn(new ActionFilter[0]);
    ThreadPool threadPool = new ThreadPool(settings);
    TaskManager taskManager = mock(TaskManager.class);
    TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null) {
        @Override
        public TaskManager getTaskManager() {
            return taskManager;
        }
    };
    ClusterService clusterService = mock(ClusterService.class);
    when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build());
    IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY);
    // Keep track of the number of concurrent searches started by the multi search api,
    // and record an error if more searches run than are allowed.
    int maxAllowedConcurrentSearches = scaledRandomIntBetween(1, 16);
    AtomicInteger counter = new AtomicInteger();
    AtomicReference<AssertionError> errorHolder = new AtomicReference<>();
    // randomize whether or not requests are executed asynchronously
    final List<String> threadPoolNames = Arrays.asList(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME);
    Randomness.shuffle(threadPoolNames);
    final ExecutorService commonExecutor = threadPool.executor(threadPoolNames.get(0));
    final ExecutorService rarelyExecutor = threadPool.executor(threadPoolNames.get(1));
    final Set<SearchRequest> requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>()));
    TransportAction<SearchRequest, SearchResponse> searchAction =
        new TransportAction<SearchRequest, SearchResponse>(Settings.EMPTY, "action", threadPool, actionFilters, resolver, taskManager) {
            @Override
            protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
                requests.add(request);
                int currentConcurrentSearches = counter.incrementAndGet();
                if (currentConcurrentSearches > maxAllowedConcurrentSearches) {
                    errorHolder.set(new AssertionError("Current concurrent search [" + currentConcurrentSearches
                        + "] is higher than is allowed [" + maxAllowedConcurrentSearches + "]"));
                }
                final ExecutorService executorService = rarely() ? rarelyExecutor : commonExecutor;
                executorService.execute(() -> {
                    counter.decrementAndGet();
                    listener.onResponse(new SearchResponse());
                });
            }
        };
    TransportMultiSearchAction action =
        new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, resolver, 10);
    // Execute the multi search api and fail if we find an error after executing:
    try {
        /*
         * Allow for a large number of search requests in a single batch, as previous implementations could stack overflow
         * when the number of requests in a single batch was large.
         */
        int numSearchRequests = scaledRandomIntBetween(1, 8192);
        MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
        multiSearchRequest.maxConcurrentSearchRequests(maxAllowedConcurrentSearches);
        for (int i = 0; i < numSearchRequests; i++) {
            multiSearchRequest.add(new SearchRequest());
        }
        MultiSearchResponse response = action.execute(multiSearchRequest).actionGet();
        assertThat(response.getResponses().length, equalTo(numSearchRequests));
        assertThat(requests.size(), equalTo(numSearchRequests));
        assertThat(errorHolder.get(), nullValue());
    } finally {
        assertTrue(ESTestCase.terminate(threadPool));
    }
}
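The batching test above randomizes between ThreadPool.Names.GENERIC and ThreadPool.Names.SAME to exercise both asynchronous and caller-thread completion of the search listener. A minimal sketch of that distinction, assuming only ThreadPool.executor(String), the standard Names constants, and the Settings-based ThreadPool constructor used by the test; the class name and printed messages are illustrative.

import java.util.concurrent.TimeUnit;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;

public class ExecutorSelectionSketch {
    public static void main(String[] args) {
        // ThreadPool requires node.name, just like the test's settings above
        ThreadPool threadPool = new ThreadPool(Settings.builder().put("node.name", "executor-selection-sketch").build());
        try {
            // SAME runs the task on the calling thread (synchronously), GENERIC forks it onto a pooled thread
            threadPool.executor(ThreadPool.Names.SAME)
                .execute(() -> System.out.println("SAME runs on " + Thread.currentThread().getName()));
            threadPool.executor(ThreadPool.Names.GENERIC)
                .execute(() -> System.out.println("GENERIC runs on " + Thread.currentThread().getName()));
        } finally {
            ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
        }
    }
}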
Use of org.elasticsearch.threadpool.ThreadPool in project elasticsearch by elastic.
In class ListenableActionFutureTests, method testListenerIsCallableFromNetworkThreads:
public void testListenerIsCallableFromNetworkThreads() throws Throwable {
    ThreadPool threadPool = new TestThreadPool("testListenerIsCallableFromNetworkThreads");
    try {
        final PlainListenableActionFuture<Object> future = new PlainListenableActionFuture<>(threadPool);
        final CountDownLatch listenerCalled = new CountDownLatch(1);
        final AtomicReference<Throwable> error = new AtomicReference<>();
        final Object response = new Object();
        future.addListener(new ActionListener<Object>() {
            @Override
            public void onResponse(Object o) {
                listenerCalled.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                error.set(e);
                listenerCalled.countDown();
            }
        });
        Thread networkThread = new Thread(new AbstractRunnable() {
            @Override
            public void onFailure(Exception e) {
                error.set(e);
                listenerCalled.countDown();
            }

            @Override
            protected void doRun() throws Exception {
                future.onResponse(response);
            }
        }, Transports.TEST_MOCK_TRANSPORT_THREAD_PREFIX + "_testListenerIsCallableFromNetworkThread");
        networkThread.start();
        networkThread.join();
        listenerCalled.await();
        if (error.get() != null) {
            throw error.get();
        }
    } finally {
        ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
    }
}
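The test above surfaces failures from the simulated network thread back onto the test thread with a CountDownLatch plus an AtomicReference. The same pattern, reduced to plain JDK types (all names here are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class BackgroundErrorPropagationSketch {
    public static void main(String[] args) throws Throwable {
        final CountDownLatch done = new CountDownLatch(1);
        final AtomicReference<Throwable> error = new AtomicReference<>();
        Thread worker = new Thread(() -> {
            try {
                // the work that might fail goes here (the test above completes a future instead)
                System.out.println("doing background work");
            } catch (Throwable t) {
                error.set(t);
            } finally {
                done.countDown();
            }
        }, "sketch-worker");
        worker.start();
        done.await();
        worker.join();
        if (error.get() != null) {
            // rethrow on the main/test thread so the background failure is not swallowed
            throw error.get();
        }
        System.out.println("background work completed without error");
    }
}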
Use of org.elasticsearch.threadpool.ThreadPool in project elasticsearch by elastic.
In class UnicastZenPingTests, method testSimplePings:
public void testSimplePings() throws IOException, InterruptedException, ExecutionException {
    // use ephemeral ports
    final Settings settings = Settings.builder()
        .put("cluster.name", "test")
        .put(TransportSettings.PORT.getKey(), 0)
        .build();
    final Settings settingsMismatch = Settings.builder()
        .put(settings)
        .put("cluster.name", "mismatch")
        .put(TransportSettings.PORT.getKey(), 0)
        .build();
    NetworkService networkService = new NetworkService(settings, Collections.emptyList());
    final BiFunction<Settings, Version, Transport> supplier = (s, v) -> new MockTcpTransport(s, threadPool,
        BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()),
        networkService, v) {
        @Override
        public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile,
                                  CheckedBiConsumer<Connection, ConnectionProfile, IOException> connectionValidator)
            throws ConnectTransportException {
            throw new AssertionError("zen pings should never connect to node (got [" + node + "])");
        }
    };
    NetworkHandle handleA = startServices(settings, threadPool, "UZP_A", Version.CURRENT, supplier);
    closeables.push(handleA.transportService);
    NetworkHandle handleB = startServices(settings, threadPool, "UZP_B", Version.CURRENT, supplier);
    closeables.push(handleB.transportService);
    NetworkHandle handleC = startServices(settingsMismatch, threadPool, "UZP_C", Version.CURRENT, supplier);
    closeables.push(handleC.transportService);
    final Version versionD;
    if (randomBoolean()) {
        versionD = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT);
    } else {
        versionD = Version.CURRENT;
    }
    logger.info("UZP_D version set to [{}]", versionD);
    NetworkHandle handleD = startServices(settingsMismatch, threadPool, "UZP_D", versionD, supplier);
    closeables.push(handleD.transportService);
    final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build();
    final ClusterState stateMismatch = ClusterState.builder(new ClusterName("mismatch")).version(randomNonNegativeLong()).build();
    Settings hostsSettings = Settings.builder()
        .putArray("discovery.zen.ping.unicast.hosts",
            NetworkAddress.format(new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort())),
            NetworkAddress.format(new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort())),
            NetworkAddress.format(new InetSocketAddress(handleC.address.address().getAddress(), handleC.address.address().getPort())),
            NetworkAddress.format(new InetSocketAddress(handleD.address.address().getAddress(), handleD.address.address().getPort())))
        .put("cluster.name", "test")
        .build();
    Settings hostsSettingsMismatch = Settings.builder().put(hostsSettings).put(settingsMismatch).build();
    TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);
    zenPingA.start(new PingContextProvider() {
        @Override
        public DiscoveryNodes nodes() {
            return DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A").build();
        }

        @Override
        public ClusterState clusterState() {
            return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
        }
    });
    closeables.push(zenPingA);
    TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);
    zenPingB.start(new PingContextProvider() {
        @Override
        public DiscoveryNodes nodes() {
            return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build();
        }

        @Override
        public ClusterState clusterState() {
            return state;
        }
    });
    closeables.push(zenPingB);
    TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleC, EMPTY_HOSTS_PROVIDER) {
        @Override
        protected Version getVersion() {
            return versionD;
        }
    };
    zenPingC.start(new PingContextProvider() {
        @Override
        public DiscoveryNodes nodes() {
            return DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C").build();
        }

        @Override
        public ClusterState clusterState() {
            return stateMismatch;
        }
    });
    closeables.push(zenPingC);
    TestUnicastZenPing zenPingD = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleD, EMPTY_HOSTS_PROVIDER);
    zenPingD.start(new PingContextProvider() {
        @Override
        public DiscoveryNodes nodes() {
            return DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D").build();
        }

        @Override
        public ClusterState clusterState() {
            return stateMismatch;
        }
    });
    closeables.push(zenPingD);
    logger.info("ping from UZP_A");
    Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();
    assertThat(pingResponses.size(), equalTo(1));
    ZenPing.PingResponse ping = pingResponses.iterator().next();
    assertThat(ping.node().getId(), equalTo("UZP_B"));
    assertThat(ping.getClusterStateVersion(), equalTo(state.version()));
    assertPingCount(handleA, handleB, 3);
    // mismatch, shouldn't ping
    assertPingCount(handleA, handleC, 0);
    // mismatch, shouldn't ping
    assertPingCount(handleA, handleD, 0);
    // ping again, this time from B
    logger.info("ping from UZP_B");
    pingResponses = zenPingB.pingAndWait().toList();
    assertThat(pingResponses.size(), equalTo(1));
    ping = pingResponses.iterator().next();
    assertThat(ping.node().getId(), equalTo("UZP_A"));
    assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION));
    assertPingCount(handleB, handleA, 3);
    // mismatch, shouldn't ping
    assertPingCount(handleB, handleC, 0);
    // mismatch, shouldn't ping
    assertPingCount(handleB, handleD, 0);
    logger.info("ping from UZP_C");
    pingResponses = zenPingC.pingAndWait().toList();
    assertThat(pingResponses.size(), equalTo(1));
    assertPingCount(handleC, handleA, 0);
    assertPingCount(handleC, handleB, 0);
    assertPingCount(handleC, handleD, 3);
    logger.info("ping from UZP_D");
    pingResponses = zenPingD.pingAndWait().toList();
    assertThat(pingResponses.size(), equalTo(1));
    assertPingCount(handleD, handleA, 0);
    assertPingCount(handleD, handleB, 0);
    assertPingCount(handleD, handleC, 3);
}
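The long hostsSettings builder above boils down to supplying a list of transport addresses under discovery.zen.ping.unicast.hosts and a cluster name. A minimal sketch, assuming only the Settings builder calls already used by the test plus getAsArray for readback, and using placeholder addresses instead of the handles' real bound addresses:

import org.elasticsearch.common.settings.Settings;

public class UnicastHostsSettingsSketch {
    public static void main(String[] args) {
        // the addresses here are placeholders; the test formats the real bound addresses of the handles
        Settings hostsSettings = Settings.builder()
            .putArray("discovery.zen.ping.unicast.hosts", "127.0.0.1:9301", "127.0.0.1:9302")
            .put("cluster.name", "test")
            .build();
        // prints 2, the number of configured unicast hosts
        System.out.println(hostsSettings.getAsArray("discovery.zen.ping.unicast.hosts").length);
    }
}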
Use of org.elasticsearch.threadpool.ThreadPool in project elasticsearch by elastic.
In class UnicastZenPingTests, method startServices:
private NetworkHandle startServices(final Settings settings, final ThreadPool threadPool, final String nodeId, final Version version,
                                    final BiFunction<Settings, Version, Transport> supplier, final Set<Role> nodeRoles) {
    final Settings nodeSettings = Settings.builder()
        .put(settings)
        .put("node.name", nodeId)
        .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "internal:discovery/zen/unicast")
        .build();
    final Transport transport = supplier.apply(nodeSettings, version);
    final MockTransportService transportService = new MockTransportService(nodeSettings, transport, threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        boundAddress -> new DiscoveryNode(nodeId, nodeId, boundAddress.publishAddress(), emptyMap(), nodeRoles, version), null);
    transportService.start();
    transportService.acceptIncomingRequests();
    final ConcurrentMap<TransportAddress, AtomicInteger> counters = ConcurrentCollections.newConcurrentMap();
    transportService.addTracer(new MockTransportService.Tracer() {
        @Override
        public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {
            counters.computeIfAbsent(node.getAddress(), k -> new AtomicInteger());
            counters.get(node.getAddress()).incrementAndGet();
        }
    });
    return new NetworkHandle(transport.boundAddress().publishAddress(), transportService, transportService.getLocalNode(), counters);
}
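The tracer in startServices counts outbound requests per target address with a ConcurrentMap of AtomicIntegers. Reduced to plain JDK types, the same pattern looks like this; note that computeIfAbsent already returns the mapped counter, so the lookup and increment can be collapsed into one call (all names here are illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class RequestCounterSketch {
    public static void main(String[] args) {
        ConcurrentMap<String, AtomicInteger> counters = new ConcurrentHashMap<>();
        String address = "127.0.0.1:9300"; // placeholder for a transport address key
        // computeIfAbsent returns the existing or newly created counter, so increment it in the same expression
        counters.computeIfAbsent(address, k -> new AtomicInteger()).incrementAndGet();
        counters.computeIfAbsent(address, k -> new AtomicInteger()).incrementAndGet();
        System.out.println(counters.get(address).get()); // prints 2
    }
}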