Use of org.elasticsearch.test.InternalTestCluster in project crate by crate.
The class InternalTestClusterTests, method testDataFolderAssignmentAndCleaning.
public void testDataFolderAssignmentAndCleaning() throws IOException, InterruptedException {
    long clusterSeed = randomLong();
    boolean masterNodes = randomBoolean();
    // we need one stable node
    final int minNumDataNodes = 2;
    final int maxNumDataNodes = 2;
    final int numClientNodes = randomIntBetween(0, 2);
    final String clusterName1 = "shared1";
    String transportClient = getTestTransportType();
    NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {

        @Override
        public Settings nodeSettings(int nodeOrdinal) {
            return Settings.builder()
                .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
                .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
                .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
                .build();
        }

        @Override
        public Path nodeConfigPath(int nodeOrdinal) {
            return null;
        }

        @Override
        public Settings transportClientSettings() {
            return Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build();
        }
    };
    String nodePrefix = "test";
    Path baseDir = createTempDir();
    InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes, true,
        minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
        nodePrefix, mockPlugins());
    try {
        cluster.beforeTest(random());
        final int originalMasterCount = cluster.numMasterNodes();
        final Map<String, Path[]> shardNodePaths = new HashMap<>();
        for (String name : cluster.getNodeNames()) {
            shardNodePaths.put(name, getNodePaths(cluster, name));
        }
        String poorNode = randomValueOtherThanMany(
            n -> originalMasterCount == 1 && n.equals(cluster.getMasterName()),
            () -> randomFrom(cluster.getNodeNames()));
        Path dataPath = getNodePaths(cluster, poorNode)[0];
        final Settings poorNodeDataPathSettings = cluster.dataPathSettings(poorNode);
        final Path testMarker = dataPath.resolve("testMarker");
        Files.createDirectories(testMarker);
        cluster.stopRandomNode(InternalTestCluster.nameFilter(poorNode));
        // stopping a node halfway through a test shouldn't clean its data
        assertFileExists(testMarker);
        final String stableNode = randomFrom(cluster.getNodeNames());
        final Path stableDataPath = getNodePaths(cluster, stableNode)[0];
        final Path stableTestMarker = stableDataPath.resolve("stableTestMarker");
        assertThat(stableDataPath, not(dataPath));
        Files.createDirectories(stableTestMarker);
        final String newNode1 = cluster.startNode();
        assertThat(getNodePaths(cluster, newNode1)[0], not(dataPath));
        // starting a node should re-use existing data folders, not clean them
        assertFileExists(testMarker);
        final String newNode2 = cluster.startNode();
        final Path newDataPath = getNodePaths(cluster, newNode2)[0];
        final Path newTestMarker = newDataPath.resolve("newTestMarker");
        assertThat(newDataPath, not(dataPath));
        Files.createDirectories(newTestMarker);
        final String newNode3 = cluster.startNode(poorNodeDataPathSettings);
        assertThat(getNodePaths(cluster, newNode3)[0], equalTo(dataPath));
        cluster.beforeTest(random());
        // the cluster should be reset for a new test, cleaning up the extra path we made
        assertFileNotExists(newTestMarker);
        // a new, unknown node used this path, so it should be cleaned
        assertFileNotExists(testMarker);
        // but the structure of existing, reused nodes is left intact
        assertFileExists(stableTestMarker);
        for (String name : cluster.getNodeNames()) {
            assertThat("data paths for " + name + " changed", getNodePaths(cluster, name),
                equalTo(shardNodePaths.get(name)));
        }
        cluster.beforeTest(random());
        // the structure of existing, reused nodes is still left intact
        assertFileExists(stableTestMarker);
        for (String name : cluster.getNodeNames()) {
            assertThat("data paths for " + name + " changed", getNodePaths(cluster, name),
                equalTo(shardNodePaths.get(name)));
        }
    } finally {
        cluster.close();
    }
}
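The test repeatedly calls a getNodePaths helper that is not shown in the snippet. A minimal sketch of what it needs to do, assuming the standard org.elasticsearch.env.NodeEnvironment API (the upstream test class defines an equivalent helper; treat this as an illustration rather than the exact crate source):

private Path[] getNodePaths(InternalTestCluster cluster, String name) {
    // Ask the named node for its NodeEnvironment and return its data paths.
    final NodeEnvironment nodeEnvironment = cluster.getInstance(NodeEnvironment.class, name);
    if (nodeEnvironment.hasNodeFile()) {
        return nodeEnvironment.nodeDataPaths();
    } else {
        // The node has not acquired a node lock yet, so it owns no data folders.
        return new Path[0];
    }
}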
Use of org.elasticsearch.test.InternalTestCluster in project elasticsearch by elastic.
The class ClusterInfoServiceIT, method testClusterInfoServiceInformationClearOnError.
public void testClusterInfoServiceInformationClearOnError() throws InterruptedException, ExecutionException {
    internalCluster().startNodes(2,
        // manually control publishing
        Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build());
    prepareCreate("test").setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).get();
    ensureGreen("test");
    InternalTestCluster internalTestCluster = internalCluster();
    InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster
        .getInstance(ClusterInfoService.class, internalTestCluster.getMasterName());
    // get one healthy sample
    ClusterInfo info = infoService.refresh();
    assertNotNull("failed to collect info", info);
    assertThat("some usages are populated", info.getNodeLeastAvailableDiskUsages().size(), Matchers.equalTo(2));
    assertThat("some shard sizes are populated", info.shardSizes.size(), greaterThan(0));
    MockTransportService mockTransportService = (MockTransportService) internalCluster()
        .getInstance(TransportService.class, internalTestCluster.getMasterName());
    final AtomicBoolean timeout = new AtomicBoolean(false);
    final Set<String> blockedActions = newHashSet(NodesStatsAction.NAME, NodesStatsAction.NAME + "[n]",
        IndicesStatsAction.NAME, IndicesStatsAction.NAME + "[n]");
    // drop all outgoing stats requests to force a timeout
    for (DiscoveryNode node : internalTestCluster.clusterService().state().getNodes()) {
        mockTransportService.addDelegate(internalTestCluster.getInstance(TransportService.class, node.getName()),
            new MockTransportService.DelegateTransport(mockTransportService.original()) {

                @Override
                protected void sendRequest(Connection connection, long requestId, String action,
                                           TransportRequest request, TransportRequestOptions options) throws IOException {
                    if (blockedActions.contains(action)) {
                        if (timeout.get()) {
                            logger.info("dropping [{}] to [{}]", action, node);
                            return;
                        }
                    }
                    super.sendRequest(connection, requestId, action, request, options);
                }
            });
    }
    // timeouts shouldn't clear the info
    timeout.set(true);
    info = infoService.refresh();
    assertNotNull("info should not be null", info);
    // node stats will time out both on the request level and on the count-down latch, so the
    // service is likely to update node disk usage based only on the one response that came
    // back from the local node
    assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThanOrEqualTo(1));
    assertThat(info.getNodeMostAvailableDiskUsages().size(), greaterThanOrEqualTo(1));
    // indices stats are guaranteed to time out on the latch, not updating anything
    assertThat(info.shardSizes.size(), greaterThan(1));
    // now we cause an exception
    timeout.set(false);
    ActionFilters actionFilters = internalTestCluster.getInstance(ActionFilters.class, internalTestCluster.getMasterName());
    BlockingActionFilter blockingActionFilter = null;
    for (ActionFilter filter : actionFilters.filters()) {
        if (filter instanceof BlockingActionFilter) {
            blockingActionFilter = (BlockingActionFilter) filter;
            break;
        }
    }
    assertNotNull("failed to find BlockingActionFilter", blockingActionFilter);
    blockingActionFilter.blockActions(blockedActions.toArray(Strings.EMPTY_ARRAY));
    info = infoService.refresh();
    assertNotNull("info should not be null", info);
    assertThat(info.getNodeLeastAvailableDiskUsages().size(), equalTo(0));
    assertThat(info.getNodeMostAvailableDiskUsages().size(), equalTo(0));
    assertThat(info.shardSizes.size(), equalTo(0));
    // check we recover
    blockingActionFilter.blockActions();
    info = infoService.refresh();
    assertNotNull("info should not be null", info);
    assertThat(info.getNodeLeastAvailableDiskUsages().size(), equalTo(2));
    assertThat(info.getNodeMostAvailableDiskUsages().size(), equalTo(2));
    assertThat(info.shardSizes.size(), greaterThan(0));
}
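The "cause an exception" step relies on a BlockingActionFilter registered on every node through a test plugin; blocking an action makes the stats request fail outright instead of timing out, which is what clears the ClusterInfo. A hedged sketch of how such a filter can be written (the upstream test ships a similar inner class; the exception message and field names here are illustrative):

public static class BlockingActionFilter extends ActionFilter.Simple {

    private Set<String> blockedActions = Collections.emptySet();

    @Override
    protected boolean apply(String action, ActionRequest request, ActionListener<?> listener) {
        if (blockedActions.contains(action)) {
            // Fail the action with an exception rather than a timeout.
            throw new ElasticsearchException("force exception on [" + action + "]");
        }
        return true;
    }

    @Override
    public int order() {
        return 0;
    }

    public void blockActions(String... actions) {
        // Calling this with no arguments unblocks everything again.
        blockedActions = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(actions)));
    }
}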
Use of org.elasticsearch.test.InternalTestCluster in project elasticsearch by elastic.
The class ClusterInfoServiceIT, method testClusterInfoServiceCollectsInformation.
public void testClusterInfoServiceCollectsInformation() throws Exception {
    internalCluster().startNodes(2);
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0)
        .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
        .build()));
    ensureGreen("test");
    InternalTestCluster internalTestCluster = internalCluster();
    // get the cluster info service on the master node
    final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster
        .getInstance(ClusterInfoService.class, internalTestCluster.getMasterName());
    infoService.setUpdateFrequency(TimeValue.timeValueMillis(200));
    infoService.onMaster();
    ClusterInfo info = infoService.refresh();
    assertNotNull("info should not be null", info);
    ImmutableOpenMap<String, DiskUsage> leastUsages = info.getNodeLeastAvailableDiskUsages();
    ImmutableOpenMap<String, DiskUsage> mostUsages = info.getNodeMostAvailableDiskUsages();
    ImmutableOpenMap<String, Long> shardSizes = info.shardSizes;
    assertNotNull(leastUsages);
    assertNotNull(shardSizes);
    assertThat("some usages are populated", leastUsages.values().size(), Matchers.equalTo(2));
    assertThat("some shard sizes are populated", shardSizes.values().size(), greaterThan(0));
    for (ObjectCursor<DiskUsage> usage : leastUsages.values()) {
        logger.info("--> usage: {}", usage.value);
        assertThat("usage has been retrieved", usage.value.getFreeBytes(), greaterThan(0L));
    }
    for (ObjectCursor<DiskUsage> usage : mostUsages.values()) {
        logger.info("--> usage: {}", usage.value);
        assertThat("usage has been retrieved", usage.value.getFreeBytes(), greaterThan(0L));
    }
    for (ObjectCursor<Long> size : shardSizes.values()) {
        logger.info("--> shard size: {}", size.value);
        assertThat("shard size is non-negative", size.value, greaterThanOrEqualTo(0L));
    }
    ClusterService clusterService = internalTestCluster.getInstance(ClusterService.class, internalTestCluster.getMasterName());
    ClusterState state = clusterService.state();
    for (ShardRouting shard : state.routingTable().allShards()) {
        String dataPath = info.getDataPath(shard);
        assertNotNull(dataPath);
        String nodeId = shard.currentNodeId();
        DiscoveryNode discoveryNode = state.getNodes().get(nodeId);
        IndicesService indicesService = internalTestCluster.getInstance(IndicesService.class, discoveryNode.getName());
        IndexService indexService = indicesService.indexService(shard.index());
        IndexShard indexShard = indexService.getShardOrNull(shard.id());
        assertEquals(indexShard.shardPath().getRootDataPath().toString(), dataPath);
    }
}
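Both ClusterInfoServiceIT tests depend on test plugins being installed on every node: MockTransportService for the transport delegate above, and a plugin exposing the BlockingActionFilter. A sketch of the usual nodePlugins() override, assuming the filter is registered by an inner TestPlugin class (the class names are illustrative):

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
    // TestPlugin registers the BlockingActionFilter; MockTransportService.TestPlugin
    // swaps in the mock transport used to drop outgoing stats requests.
    return Arrays.asList(TestPlugin.class, MockTransportService.TestPlugin.class);
}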
Use of org.elasticsearch.test.InternalTestCluster in project elasticsearch by elastic.
The class TribeIT, method testClusterStateNodes.
/**
 * Test that the tribe node's cluster state correctly reflects the number of nodes
 * of the remote clusters the tribe node is connected to.
 */
public void testClusterStateNodes() throws Exception {
    List<Predicate<InternalTestCluster>> predicates = Arrays.asList(NONE, CLUSTER1_ONLY, CLUSTER2_ONLY, ALL);
    Collections.shuffle(predicates, random());
    for (Predicate<InternalTestCluster> predicate : predicates) {
        try (Releasable tribeNode = startTribeNode(predicate, Settings.EMPTY)) {
            assertNodes(predicate);
        }
    }
}
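NONE, CLUSTER1_ONLY, CLUSTER2_ONLY and ALL are java.util.function.Predicate constants selecting which remote clusters the tribe node should connect to. One plausible way to define them over the static cluster1/cluster2 fields (the upstream TribeIT defines equivalents; the exact bodies here are an assumption for illustration):

private static final Predicate<InternalTestCluster> NONE = c -> false;
private static final Predicate<InternalTestCluster> CLUSTER1_ONLY = c -> c == cluster1;
private static final Predicate<InternalTestCluster> CLUSTER2_ONLY = c -> c == cluster2;
private static final Predicate<InternalTestCluster> ALL = c -> true;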
Use of org.elasticsearch.test.InternalTestCluster in project elasticsearch by elastic.
The class TribeIT, method startRemoteClusters.
@Before
public void startRemoteClusters() {
    final int minNumDataNodes = 2;
    final int maxNumDataNodes = 4;
    final NodeConfigurationSource nodeConfigurationSource = getNodeConfigSource();
    final Collection<Class<? extends Plugin>> plugins = nodePlugins();
    if (cluster1 == null) {
        cluster1 = new InternalTestCluster(randomLong(), createTempDir(), true, true,
            minNumDataNodes, maxNumDataNodes, UUIDs.randomBase64UUID(random()),
            nodeConfigurationSource, 0, false, "cluster_1", plugins, Function.identity());
    }
    if (cluster2 == null) {
        cluster2 = new InternalTestCluster(randomLong(), createTempDir(), true, true,
            minNumDataNodes, maxNumDataNodes, UUIDs.randomBase64UUID(random()),
            nodeConfigurationSource, 0, false, "cluster_2", plugins, Function.identity());
    }
    doWithAllClusters(c -> {
        try {
            c.beforeTest(random(), 0.1);
            c.ensureAtLeastNumDataNodes(minNumDataNodes);
        } catch (Exception e) {
            throw new RuntimeException("Failed to set up remote cluster [" + c.getClusterName() + "]", e);
        }
    });
}
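doWithAllClusters applies a consumer to every remote cluster that has been created so far. A minimal sketch under that assumption (the upstream helper is equivalent; it must skip clusters that were never created, e.g. after an aborted test):

private static void doWithAllClusters(Consumer<InternalTestCluster> consumer) {
    // Only operate on clusters that actually exist.
    Stream.of(cluster1, cluster2).filter(Objects::nonNull).forEach(consumer);
}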