Use of org.elasticsearch.threadpool.ThreadPool in project elasticsearch by elastic.
From the class UnicastZenPingTests, method testUnknownHostNotCached.
public void testUnknownHostNotCached() throws ExecutionException, InterruptedException {
    // use ephemeral ports
    final Settings settings = Settings.builder()
        .put("cluster.name", "test")
        .put(TransportSettings.PORT.getKey(), 0)
        .build();
    final NetworkService networkService = new NetworkService(settings, Collections.emptyList());
    final Map<String, TransportAddress[]> addresses = new HashMap<>();
    final BiFunction<Settings, Version, Transport> supplier = (s, v) -> new MockTcpTransport(
        s,
        threadPool,
        BigArrays.NON_RECYCLING_INSTANCE,
        new NoneCircuitBreakerService(),
        new NamedWriteableRegistry(Collections.emptyList()),
        networkService,
        v) {

        @Override
        public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException {
            final TransportAddress[] transportAddresses = addresses.get(address);
            if (transportAddresses == null) {
                throw new UnknownHostException(address);
            } else {
                return transportAddresses;
            }
        }
    };
    final NetworkHandle handleA = startServices(settings, threadPool, "UZP_A", Version.CURRENT, supplier);
    closeables.push(handleA.transportService);
    final NetworkHandle handleB = startServices(settings, threadPool, "UZP_B", Version.CURRENT, supplier);
    closeables.push(handleB.transportService);
    final NetworkHandle handleC = startServices(settings, threadPool, "UZP_C", Version.CURRENT, supplier);
    closeables.push(handleC.transportService);
    addresses.put("UZP_A", new TransportAddress[] {
        new TransportAddress(new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort())) });
    addresses.put("UZP_C", new TransportAddress[] {
        new TransportAddress(new InetSocketAddress(handleC.address.address().getAddress(), handleC.address.address().getPort())) });
    final Settings hostsSettings = Settings.builder()
        .putArray("discovery.zen.ping.unicast.hosts", "UZP_A", "UZP_B", "UZP_C")
        .put("cluster.name", "test")
        .build();
    final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build();
    final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);
    zenPingA.start(new PingContextProvider() {
        @Override
        public DiscoveryNodes nodes() {
            return DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A").build();
        }

        @Override
        public ClusterState clusterState() {
            return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
        }
    });
    closeables.push(zenPingA);
    TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);
    zenPingB.start(new PingContextProvider() {
        @Override
        public DiscoveryNodes nodes() {
            return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build();
        }

        @Override
        public ClusterState clusterState() {
            return state;
        }
    });
    closeables.push(zenPingB);
    TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettings, threadPool, handleC, EMPTY_HOSTS_PROVIDER);
    zenPingC.start(new PingContextProvider() {
        @Override
        public DiscoveryNodes nodes() {
            return DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C").build();
        }

        @Override
        public ClusterState clusterState() {
            return state;
        }
    });
    closeables.push(zenPingC);
    // the presence of an unresolvable host should not prevent resolvable hosts from being pinged
    {
        final Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();
        assertThat(pingResponses.size(), equalTo(1));
        ZenPing.PingResponse ping = pingResponses.iterator().next();
        assertThat(ping.node().getId(), equalTo("UZP_C"));
        assertThat(ping.getClusterStateVersion(), equalTo(state.version()));
        assertPingCount(handleA, handleB, 0);
        assertPingCount(handleA, handleC, 3);
        assertNull(handleA.counters.get(handleB.address));
    }
    final HashMap<TransportAddress, Integer> moreThan = new HashMap<>();
    // we should see at least one ping to UZP_B, and one more ping than we have already seen to UZP_C
    moreThan.put(handleB.address, 0);
    moreThan.put(handleC.address, handleA.counters.get(handleC.address).intValue());
    // now allow UZP_B to be resolvable
    addresses.put("UZP_B", new TransportAddress[] {
        new TransportAddress(new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort())) });
    // now we should see pings to UZP_B; this establishes that host resolutions are not cached
    {
        handleA.counters.clear();
        final Collection<ZenPing.PingResponse> secondPingResponses = zenPingA.pingAndWait().toList();
        assertThat(secondPingResponses.size(), equalTo(2));
        final Set<String> ids = new HashSet<>(secondPingResponses.stream().map(p -> p.node().getId()).collect(Collectors.toList()));
        assertThat(ids, equalTo(new HashSet<>(Arrays.asList("UZP_B", "UZP_C"))));
        assertPingCount(handleA, handleB, 3);
        assertPingCount(handleA, handleC, 3);
    }
}
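The test relies on fixtures defined elsewhere in UnicastZenPingTests: the shared threadPool field, the closeables stack, and the startServices/assertPingCount helpers. A minimal sketch of how such a shared ThreadPool fixture is typically created and torn down, assuming a JUnit-style test class; the class name and hook methods below are illustrative, not the actual UnicastZenPingTests code:

import java.util.concurrent.TimeUnit;

import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;

public class ThreadPoolFixtureSketchTests extends ESTestCase {

    private ThreadPool threadPool;

    @Before
    public void startThreadPool() {
        // one shared pool backs every transport and ping created by a test method
        threadPool = new TestThreadPool(getClass().getName());
    }

    @After
    public void stopThreadPool() {
        // shut the pool down and wait a bounded time for its workers to exit
        ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
    }
}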
Use of org.elasticsearch.threadpool.ThreadPool in project elasticsearch by elastic.
From the class ZenDiscoveryUnitTests, method testNodesUpdatedAfterClusterStatePublished.
public void testNodesUpdatedAfterClusterStatePublished() throws Exception {
    ThreadPool threadPool = new TestThreadPool(getClass().getName());
    // randomly make minimum_master_nodes a value higher than we have nodes for, so it will force failure
    int minMasterNodes = randomBoolean() ? 3 : 1;
    Settings settings = Settings.builder()
        .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes))
        .build();
    ArrayDeque<Closeable> toClose = new ArrayDeque<>();
    try {
        Set<DiscoveryNode> expectedFDNodes = null;
        final MockTransportService masterTransport = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null);
        masterTransport.start();
        DiscoveryNode masterNode = masterTransport.getLocalNode();
        toClose.addFirst(masterTransport);
        ClusterState state = ClusterStateCreationUtils.state(masterNode, masterNode, masterNode);
        // build the zen discovery and cluster service
        ClusterService masterClusterService = createClusterService(threadPool, masterNode);
        toClose.addFirst(masterClusterService);
        // TODO: clustername shouldn't be stored twice in cluster service, but for now, work around it
        state = ClusterState.builder(masterClusterService.getClusterName()).nodes(state.nodes()).build();
        setState(masterClusterService, state);
        ZenDiscovery masterZen = buildZenDiscovery(settings, masterTransport, masterClusterService, threadPool);
        toClose.addFirst(masterZen);
        masterTransport.acceptIncomingRequests();
        final MockTransportService otherTransport = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null);
        otherTransport.start();
        toClose.addFirst(otherTransport);
        DiscoveryNode otherNode = otherTransport.getLocalNode();
        final ClusterState otherState = ClusterState.builder(masterClusterService.getClusterName())
            .nodes(DiscoveryNodes.builder().add(otherNode).localNodeId(otherNode.getId()))
            .build();
        ClusterService otherClusterService = createClusterService(threadPool, masterNode);
        toClose.addFirst(otherClusterService);
        setState(otherClusterService, otherState);
        ZenDiscovery otherZen = buildZenDiscovery(settings, otherTransport, otherClusterService, threadPool);
        toClose.addFirst(otherZen);
        otherTransport.acceptIncomingRequests();
        masterTransport.connectToNode(otherNode);
        otherTransport.connectToNode(masterNode);
        // a new cluster state with a new discovery node (we will test if the cluster state
        // was updated by the presence of this node in NodesFaultDetection)
        ClusterState newState = ClusterState.builder(masterClusterService.state())
            .incrementVersion()
            .nodes(DiscoveryNodes.builder(state.nodes()).add(otherNode).masterNodeId(masterNode.getId()))
            .build();
        try {
            // publishing a new cluster state
            ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent("testing", newState, state);
            AssertingAckListener listener = new AssertingAckListener(newState.nodes().getSize() - 1);
            expectedFDNodes = masterZen.getFaultDetectionNodes();
            masterZen.publish(clusterChangedEvent, listener);
            listener.await(1, TimeUnit.HOURS);
            // publish was a success, update expected FD nodes based on new cluster state
            expectedFDNodes = fdNodesForState(newState, masterNode);
        } catch (Discovery.FailedToCommitClusterStateException e) {
            // not successful, so expectedFDNodes above should remain what it was originally assigned
            // ensure min master nodes is the higher value, otherwise we shouldn't fail
            assertEquals(3, minMasterNodes);
        }
        assertEquals(expectedFDNodes, masterZen.getFaultDetectionNodes());
    } finally {
        IOUtils.close(toClose);
        terminate(threadPool);
    }
}
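The toClose deque gives LIFO cleanup: addFirst places each newly started service at the head, so IOUtils.close in the finally block releases resources in reverse creation order before the ThreadPool itself is terminated. A minimal sketch of that pattern in isolation; the resource names are illustrative:

import java.io.Closeable;
import java.util.ArrayDeque;

import org.apache.lucene.util.IOUtils;

public final class ReverseOrderCleanupSketch {

    public static void main(String[] args) throws Exception {
        ArrayDeque<Closeable> toClose = new ArrayDeque<>();
        try {
            Closeable transport = () -> System.out.println("closing transport");
            toClose.addFirst(transport);
            Closeable clusterService = () -> System.out.println("closing cluster service");
            toClose.addFirst(clusterService);
            // ... exercise the services here ...
        } finally {
            // the deque iterates head to tail, so the most recently added resource closes first
            IOUtils.close(toClose);
        }
    }
}

Run as-is this prints the cluster service line before the transport line, mirroring the teardown order in the test.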
Use of org.elasticsearch.threadpool.ThreadPool in project elasticsearch by elastic.
From the class IndexingMemoryControllerTests, method testDeletesAloneCanTriggerRefresh.
// #10312
public void testDeletesAloneCanTriggerRefresh() throws Exception {
    createIndex("index", Settings.builder()
        .put("index.number_of_shards", 1)
        .put("index.number_of_replicas", 0)
        .put("index.refresh_interval", -1)
        .build());
    ensureGreen();
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    IndexService indexService = indicesService.indexService(resolveIndex("index"));
    IndexShard shard = indexService.getShardOrNull(0);
    assertNotNull(shard);
    for (int i = 0; i < 100; i++) {
        String id = Integer.toString(i);
        client().prepareIndex("index", "type", id).setSource("field", "value").get();
    }
    // Force merge so we know all merges are done before we start deleting:
    ForceMergeResponse r = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();
    assertNoFailures(r);
    // Make a shell of an IMC to check up on indexing buffer usage:
    Settings settings = Settings.builder().put("indices.memory.index_buffer_size", "1kb").build();
    // TODO: would be cleaner if I could pass this 1kb setting to the single node this test created....
    IndexingMemoryController imc = new IndexingMemoryController(settings, null, null) {
        @Override
        protected List<IndexShard> availableShards() {
            return Collections.singletonList(shard);
        }

        @Override
        protected long getIndexBufferRAMBytesUsed(IndexShard shard) {
            return shard.getIndexBufferRAMBytesUsed();
        }

        @Override
        protected void writeIndexingBufferAsync(IndexShard shard) {
            // just do it sync'd for this test
            shard.writeIndexingBuffer();
        }

        @Override
        protected Cancellable scheduleTask(ThreadPool threadPool) {
            return null;
        }
    };
    for (int i = 0; i < 100; i++) {
        String id = Integer.toString(i);
        client().prepareDelete("index", "type", id).get();
    }
    final long indexingBufferBytes1 = shard.getIndexBufferRAMBytesUsed();
    imc.forceCheck();
    // We must assertBusy because the writeIndexingBufferAsync is done in background (REFRESH) thread pool:
    assertBusy(new Runnable() {
        @Override
        public void run() {
            try (Engine.Searcher s2 = shard.acquireSearcher("index")) {
                // 100 buffered deletes will easily exceed our 1 KB indexing buffer so it should trigger a write:
                final long indexingBufferBytes2 = shard.getIndexBufferRAMBytesUsed();
                assertTrue(indexingBufferBytes2 < indexingBufferBytes1);
            }
        }
    });
}
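The scheduleTask override above returns null, which disables the controller's periodic check so the test can drive it directly through forceCheck(). For contrast, a hedged sketch of what a scheduling override could look like if the periodic check were wanted; the five-second interval and the generic executor are illustrative assumptions, not what IndexingMemoryController actually configures:

@Override
protected Cancellable scheduleTask(ThreadPool threadPool) {
    // re-run the indexing-buffer check on the given pool at a fixed delay;
    // the interval and executor name here are illustrative assumptions
    return threadPool.scheduleWithFixedDelay(this::forceCheck, TimeValue.timeValueSeconds(5), ThreadPool.Names.GENERIC);
}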
Use of org.elasticsearch.threadpool.ThreadPool in project elasticsearch by elastic.
From the class IndicesClusterStateServiceRandomUpdatesTests, method createIndicesClusterStateService.
private IndicesClusterStateService createIndicesClusterStateService(DiscoveryNode discoveryNode, final Supplier<MockIndicesService> indicesServiceSupplier) {
    final ThreadPool threadPool = mock(ThreadPool.class);
    when(threadPool.generic()).thenReturn(mock(ExecutorService.class));
    final MockIndicesService indicesService = indicesServiceSupplier.get();
    final Settings settings = Settings.builder().put("node.name", discoveryNode.getName()).build();
    final TransportService transportService = new TransportService(settings, null, threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null);
    final ClusterService clusterService = mock(ClusterService.class);
    final RepositoriesService repositoriesService = new RepositoriesService(settings, clusterService, transportService, null);
    final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(settings, threadPool, transportService, null, clusterService);
    final ShardStateAction shardStateAction = mock(ShardStateAction.class);
    return new IndicesClusterStateService(settings, indicesService, clusterService, threadPool, recoveryTargetService, shardStateAction,
        null, repositoriesService, null, null, null, null, shardId -> {
    });
}
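Because threadPool.generic() is stubbed to return a bare mock ExecutorService, anything submitted to the generic pool is silently dropped, which is fine for this construction-only helper. If a test needed those submitted tasks to actually run, one option is to have the mock execute Runnables inline on the calling thread; a sketch using Mockito, assuming static imports of mock, when, doAnswer, and any from org.mockito.Mockito (the variable names are illustrative):

final ThreadPool threadPool = mock(ThreadPool.class);
final ExecutorService genericExecutor = mock(ExecutorService.class);
doAnswer(invocation -> {
    // run the submitted task on the calling thread so the test stays deterministic
    ((Runnable) invocation.getArguments()[0]).run();
    return null;
}).when(genericExecutor).execute(any(Runnable.class));
when(threadPool.generic()).thenReturn(genericExecutor);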
Use of org.elasticsearch.threadpool.ThreadPool in project elasticsearch by elastic.
From the class ResourceWatcherServiceTests, method testHandle.
public void testHandle() throws Exception {
    ThreadPool threadPool = new TestThreadPool("test");
    Settings settings = Settings.builder().build();
    ResourceWatcherService service = new ResourceWatcherService(settings, threadPool);
    ResourceWatcher watcher = new ResourceWatcher() {
        @Override
        public void init() {
        }

        @Override
        public void checkAndNotify() {
        }
    };
    // checking default freq
    WatcherHandle handle = service.add(watcher);
    assertThat(handle, notNullValue());
    assertThat(handle.frequency(), equalTo(ResourceWatcherService.Frequency.MEDIUM));
    assertThat(service.lowMonitor.watchers.size(), is(0));
    assertThat(service.highMonitor.watchers.size(), is(0));
    assertThat(service.mediumMonitor.watchers.size(), is(1));
    handle.stop();
    assertThat(service.mediumMonitor.watchers.size(), is(0));
    handle.resume();
    assertThat(service.mediumMonitor.watchers.size(), is(1));
    handle.stop();
    // checking custom freq
    handle = service.add(watcher, ResourceWatcherService.Frequency.HIGH);
    assertThat(handle, notNullValue());
    assertThat(handle.frequency(), equalTo(ResourceWatcherService.Frequency.HIGH));
    assertThat(service.lowMonitor.watchers.size(), is(0));
    assertThat(service.mediumMonitor.watchers.size(), is(0));
    assertThat(service.highMonitor.watchers.size(), is(1));
    handle.stop();
    assertThat(service.highMonitor.watchers.size(), is(0));
    handle.resume();
    assertThat(service.highMonitor.watchers.size(), is(1));
    terminate(threadPool);
}
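A watcher registered without an explicit frequency lands in the MEDIUM monitor, and each monitor's tick interval is read from the service's settings. A hedged sketch of registering at HIGH frequency with a shortened interval, assuming the resource.reload.* setting keys read by ResourceWatcherService, reusing the watcher defined above, and running inside a test method that declares throws Exception; the 100ms value is illustrative:

Settings fastSettings = Settings.builder()
    .put("resource.reload.interval.high", "100ms")  // assumed setting key; shortens the HIGH tick interval
    .build();
ThreadPool pool = new TestThreadPool("resource-watcher-sketch");
try {
    ResourceWatcherService fastService = new ResourceWatcherService(fastSettings, pool);
    WatcherHandle fastHandle = fastService.add(watcher, ResourceWatcherService.Frequency.HIGH);
    // ... exercise the watched resource ...
    fastHandle.stop();
} finally {
    terminate(pool);
}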