Use of org.elasticsearch.test.gateway.TestGatewayAllocator in project elasticsearch by elastic.
From the class ClusterStateHealthTests, the method testClusterHealthWaitsForClusterStateApplication:
public void testClusterHealthWaitsForClusterStateApplication() throws InterruptedException, ExecutionException {
    final CountDownLatch applyLatch = new CountDownLatch(1);
    final CountDownLatch listenerCalled = new CountDownLatch(1);
    setState(clusterService, ClusterState.builder(clusterService.state())
        .nodes(DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null)).build());
    clusterService.addStateApplier(event -> {
        listenerCalled.countDown();
        try {
            applyLatch.await();
        } catch (InterruptedException e) {
            logger.debug("interrupted", e);
        }
    });
    logger.info("--> submit task to restore master");
    clusterService.submitStateUpdateTask("restore master", new LocalClusterUpdateTask() {

        @Override
        public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) throws Exception {
            return newState(ClusterState.builder(currentState)
                .nodes(DiscoveryNodes.builder(currentState.nodes())
                    .masterNodeId(currentState.nodes().getLocalNodeId()))
                .build());
        }

        @Override
        public void onFailure(String source, Exception e) {
            logger.warn("unexpected failure", e);
        }
    });
    logger.info("--> waiting for listener to be called and cluster state being blocked");
    listenerCalled.await();
    TransportClusterHealthAction action = new TransportClusterHealthAction(Settings.EMPTY, transportService, clusterService,
        threadPool, new ActionFilters(new HashSet<>()), indexNameExpressionResolver, new TestGatewayAllocator());
    PlainActionFuture<ClusterHealthResponse> listener = new PlainActionFuture<>();
    action.execute(new ClusterHealthRequest().waitForGreenStatus(), listener);
    assertFalse(listener.isDone());
    logger.info("--> releasing task to restore master");
    applyLatch.countDown();
    listener.get();
}
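The test coordinates with two CountDownLatches: one signals that the blocking state applier has been entered, the other holds cluster state application until the health request has been issued and checked. For readers unfamiliar with this idiom, here is a minimal standalone sketch in plain Java, with no Elasticsearch types and purely illustrative names:

import java.util.concurrent.CountDownLatch;

public class LatchGateExample {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch applierEntered = new CountDownLatch(1);  // plays the role of "listenerCalled" above
        CountDownLatch releaseApplier = new CountDownLatch(1);  // plays the role of "applyLatch" above

        Thread applier = new Thread(() -> {
            applierEntered.countDown();      // tell the test we are inside the applier
            try {
                releaseApplier.await();      // block "state application" until the test says go
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        applier.start();

        applierEntered.await();              // test: wait until the applier is blocked
        // ... issue the request under test here and assert that it has not completed yet ...
        releaseApplier.countDown();          // test: unblock the applier so the request can finish
        applier.join();
    }
}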
Use of org.elasticsearch.test.gateway.TestGatewayAllocator in project elasticsearch by elastic.
From the class MetaDataCreateIndexServiceTests, the method testValidateShrinkIndex:
public void testValidateShrinkIndex() {
    int numShards = randomIntBetween(2, 42);
    ClusterState state = createClusterState("source", numShards, randomIntBetween(0, 10),
        Settings.builder().put("index.blocks.write", true).build());
    assertEquals("index [source] already exists",
        expectThrows(ResourceAlreadyExistsException.class, () ->
            MetaDataCreateIndexService.validateShrinkIndex(state, "target", Collections.emptySet(), "source", Settings.EMPTY)
        ).getMessage());
    assertEquals("no such index",
        expectThrows(IndexNotFoundException.class, () ->
            MetaDataCreateIndexService.validateShrinkIndex(state, "no such index", Collections.emptySet(), "target", Settings.EMPTY)
        ).getMessage());
    assertEquals("can't shrink an index with only one shard",
        expectThrows(IllegalArgumentException.class, () ->
            MetaDataCreateIndexService.validateShrinkIndex(
                createClusterState("source", 1, 0, Settings.builder().put("index.blocks.write", true).build()),
                "source", Collections.emptySet(), "target", Settings.EMPTY)
        ).getMessage());
    assertEquals("the number of target shards must be less that the number of source shards",
        expectThrows(IllegalArgumentException.class, () ->
            MetaDataCreateIndexService.validateShrinkIndex(
                createClusterState("source", 5, 0, Settings.builder().put("index.blocks.write", true).build()),
                "source", Collections.emptySet(), "target", Settings.builder().put("index.number_of_shards", 10).build())
        ).getMessage());
    assertEquals("index source must be read-only to shrink index. use \"index.blocks.write=true\"",
        expectThrows(IllegalStateException.class, () ->
            MetaDataCreateIndexService.validateShrinkIndex(
                createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY),
                "source", Collections.emptySet(), "target", Settings.EMPTY)
        ).getMessage());
    assertEquals("index source must have all shards allocated on the same node to shrink index",
        expectThrows(IllegalStateException.class, () ->
            MetaDataCreateIndexService.validateShrinkIndex(state, "source", Collections.emptySet(), "target", Settings.EMPTY)
        ).getMessage());
    assertEquals("the number of source shards [8] must be a must be a multiple of [3]",
        expectThrows(IllegalArgumentException.class, () ->
            MetaDataCreateIndexService.validateShrinkIndex(
                createClusterState("source", 8, randomIntBetween(0, 10), Settings.builder().put("index.blocks.write", true).build()),
                "source", Collections.emptySet(), "target", Settings.builder().put("index.number_of_shards", 3).build())
        ).getMessage());
    assertEquals("mappings are not allowed when shrinking indices, all mappings are copied from the source index",
        expectThrows(IllegalArgumentException.class, () -> {
            MetaDataCreateIndexService.validateShrinkIndex(state, "source", Collections.singleton("foo"), "target", Settings.EMPTY);
        }).getMessage());
    // create one that won't fail
    ClusterState clusterState = ClusterState.builder(
            createClusterState("source", numShards, 0, Settings.builder().put("index.blocks.write", true).build()))
        .nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
    AllocationService service = new AllocationService(Settings.builder().build(),
        new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
        new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
    RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    // now we start the shard
    routingTable = service.applyStartedShards(clusterState,
        routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    int targetShards;
    do {
        targetShards = randomIntBetween(1, numShards / 2);
    } while (isShrinkable(numShards, targetShards) == false);
    MetaDataCreateIndexService.validateShrinkIndex(clusterState, "source", Collections.emptySet(), "target",
        Settings.builder().put("index.number_of_shards", targetShards).build());
}
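The loop keeps drawing a random target shard count until isShrinkable accepts it. isShrinkable is a private helper of the test class that is not shown on this page; judging from the "must be a multiple of" message asserted above, it presumably checks divisibility, roughly along these lines (a sketch, not the actual helper):

// Hypothetical reconstruction of the helper used above: a shrink target is valid
// when the source shard count is an exact multiple of the target shard count.
private static boolean isShrinkable(int sourceShards, int targetShards) {
    return targetShards > 0 && sourceShards % targetShards == 0;
}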
Use of org.elasticsearch.test.gateway.TestGatewayAllocator in project crate by crate.
From the class DiskThresholdDeciderTests, the method testCanRemainWithShardRelocatingAway:
public void testCanRemainWithShardRelocatingAway() {
    Settings diskSettings = Settings.builder()
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true)
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%")
        .build();
    // We have an index with 2 primary shards each taking 40 bytes. Each node has 100 bytes available
    ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
    // 80% used
    usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 20));
    // 0% used
    usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 100));
    ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
    ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
    shardSizesBuilder.put("[test][0][p]", 40L);
    shardSizesBuilder.put("[test][1][p]", 40L);
    shardSizesBuilder.put("[foo][0][p]", 10L);
    ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
    final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
    DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings);
    Metadata metadata = Metadata.builder()
        .put(IndexMetadata.builder("test").settings(Settings.builder()
            .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "false")))
        .put(IndexMetadata.builder("foo").settings(Settings.builder()
            .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "false")))
        .build();
    RoutingTable initialRoutingTable = RoutingTable.builder()
        .addAsNew(metadata.index("test"))
        .addAsNew(metadata.index("foo"))
        .build();
    DiscoveryNode discoveryNode1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT);
    DiscoveryNode discoveryNode2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT);
    DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build();
    ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metadata(metadata)
        .routingTable(initialRoutingTable)
        .nodes(discoveryNodes)
        .build();
    // Two shards consuming each 80% of disk space while 70% is allowed, so shard 0 isn't allowed here
    ShardRouting firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED);
    ShardRouting secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", null, true, ShardRoutingState.STARTED);
    RoutingNode firstRoutingNode = new RoutingNode("node1", discoveryNode1, firstRouting, secondRouting);
    RoutingTable.Builder builder = RoutingTable.builder().add(IndexRoutingTable.builder(firstRouting.index())
        .addIndexShard(new IndexShardRoutingTable.Builder(firstRouting.shardId()).addShard(firstRouting).build())
        .addIndexShard(new IndexShardRoutingTable.Builder(secondRouting.shardId()).addShard(secondRouting).build()));
    ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
    RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime());
    routingAllocation.debugDecision(true);
    Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
    assertThat(decision.type(), equalTo(Decision.Type.NO));
    assertThat(decision.getExplanation(), containsString(
        "the shard cannot remain on this node because it is above the high watermark cluster setting " +
        "[cluster.routing.allocation.disk.watermark.high=70%] and there is less than the required [30.0%] free disk on node, " +
        "actual free: [20.0%]"));
    // Two shards consuming each 80% of disk space while 70% is allowed, but one is relocating, so shard 0 can stay
    firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED);
    secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", "node2", true, ShardRoutingState.RELOCATING);
    ShardRouting fooRouting = TestShardRouting.newShardRouting("foo", 0, null, true, ShardRoutingState.UNASSIGNED);
    firstRoutingNode = new RoutingNode("node1", discoveryNode1, firstRouting, secondRouting);
    builder = RoutingTable.builder().add(IndexRoutingTable.builder(firstRouting.index())
        .addIndexShard(new IndexShardRoutingTable.Builder(firstRouting.shardId()).addShard(firstRouting).build())
        .addIndexShard(new IndexShardRoutingTable.Builder(secondRouting.shardId()).addShard(secondRouting).build()));
    clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
    routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime());
    routingAllocation.debugDecision(true);
    decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
    assertThat(decision.type(), equalTo(Decision.Type.YES));
    assertEquals("there is enough disk on this node for the shard to remain, free: [60b]", decision.getExplanation());
    decision = diskThresholdDecider.canAllocate(fooRouting, firstRoutingNode, routingAllocation);
    assertThat(decision.type(), equalTo(Decision.Type.NO));
    if (fooRouting.recoverySource().getType() == RecoverySource.Type.EMPTY_STORE) {
        assertThat(decision.getExplanation(), containsString(
            "the node is above the high watermark cluster setting [cluster.routing.allocation.disk.watermark.high=70%], using " +
            "more disk space than the maximum allowed [70.0%], actual free: [20.0%]"));
    } else {
        assertThat(decision.getExplanation(), containsString(
            "the node is above the low watermark cluster setting [cluster.routing.allocation.disk.watermark.low=60%], using more " +
            "disk space than the maximum allowed [60.0%], actual free: [20.0%]"));
    }
    // Creating AllocationService instance and the services it depends on...
    ClusterInfoService cis = () -> {
        logger.info("--> calling fake getClusterInfo");
        return clusterInfo;
    };
    AllocationDeciders deciders = new AllocationDeciders(new HashSet<>(Arrays.asList(
        new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        diskThresholdDecider)));
    AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    // Ensure that the reroute call doesn't alter the routing table, since the second primary is relocating away
    // and therefore we will have sufficient disk space on node1.
    ClusterState result = strategy.reroute(clusterState, "reroute");
    assertThat(result, equalTo(clusterState));
    assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
    assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node1"));
    assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
    assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().state(), equalTo(RELOCATING));
    assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().currentNodeId(), equalTo("node1"));
    assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), equalTo("node2"));
}
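The YES decision and its "free: [60b]" explanation follow directly from the numbers the test sets up. A small illustrative calculation in plain Java, mirroring the test data rather than the decider's actual implementation:

// Illustrative arithmetic only, using the figures from the usage above.
long totalBytes = 100;                 // node1 capacity
long freeBytes = 20;                   // node1 free space -> 80% used
long relocatingShardBytes = 40;        // [test][1] is RELOCATING from node1 to node2
long requiredFreeBytes = 30;           // high watermark of 70% used leaves 30 bytes of required free space

long effectiveFreeBytes = freeBytes + relocatingShardBytes;   // 60 bytes once the departing shard is credited back
boolean canRemain = effectiveFreeBytes >= requiredFreeBytes;  // true -> Decision.Type.YES, explanation "free: [60b]"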
Use of org.elasticsearch.test.gateway.TestGatewayAllocator in project crate by crate.
From the class EnableAllocationShortCircuitTests, the method createAllocationService:
private static AllocationService createAllocationService(Settings.Builder settings, ClusterPlugin plugin) {
    final ClusterSettings emptyClusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    List<AllocationDecider> deciders = new ArrayList<>(
        ClusterModule.createAllocationDeciders(settings.build(), emptyClusterSettings, Collections.singletonList(plugin)));
    return new MockAllocationService(new AllocationDeciders(deciders),
        new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
}
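A caller of this helper supplies allocation settings plus a ClusterPlugin whose deciders should participate in the mock allocation service. A hypothetical invocation, assuming the plugin only needs to override ClusterPlugin#createAllocationDeciders to contribute extra deciders (the setting and the no-op plugin below are illustrative, not taken from EnableAllocationShortCircuitTests):

// Hypothetical usage sketch of the helper above.
AllocationService allocationService = createAllocationService(
    Settings.builder().put("cluster.routing.allocation.enable", "none"),
    new ClusterPlugin() {
        @Override
        public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
            return Collections.emptyList(); // a real test would contribute a decider that records how often it is consulted
        }
    });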
Use of org.elasticsearch.test.gateway.TestGatewayAllocator in project crate by crate.
From the class NodeVersionAllocationDeciderTests, the method test_primary_and_replica_allocation_on_lower_hotfix_version_is_allowed:
@Test
public void test_primary_and_replica_allocation_on_lower_hotfix_version_is_allowed() {
    ShardId shard1 = new ShardId("test1", UUIDs.randomBase64UUID(), 0);
    final DiscoveryNode newNode1 = new DiscoveryNode("newNode1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.V_4_4_2);
    final DiscoveryNode newNode2 = new DiscoveryNode("newNode2", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.V_4_4_2);
    final DiscoveryNode oldNode1 = new DiscoveryNode("oldNode1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.V_4_4_1);
    final DiscoveryNode oldNode2 = new DiscoveryNode("oldNode2", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.V_4_4_1);
    AllocationId allocationId1P = AllocationId.newInitializing();
    AllocationId allocationId1R = AllocationId.newInitializing();
    Metadata metadata = Metadata.builder()
        .put(IndexMetadata.builder(shard1.getIndexName())
            .settings(settings(Version.CURRENT)
                .put(AutoExpandReplicas.SETTING.getKey(), "false")
                .put(IndexMetadata.SETTING_INDEX_UUID, shard1.getIndex().getUUID())
                .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0))
            .numberOfShards(1)
            .numberOfReplicas(1)
            .putInSyncAllocationIds(0, Set.of(allocationId1P.getId(), allocationId1R.getId())))
        .build();
    RoutingTable routingTable = RoutingTable.builder()
        .add(IndexRoutingTable.builder(shard1.getIndex())
            .addIndexShard(new IndexShardRoutingTable.Builder(shard1)
                .addShard(TestShardRouting.newShardRouting(shard1.getIndex(), shard1.getId(), newNode1.getId(), null, true, ShardRoutingState.STARTED, allocationId1P))
                .addShard(TestShardRouting.newShardRouting(shard1.getIndex(), shard1.getId(), newNode2.getId(), null, false, ShardRoutingState.STARTED, allocationId1R))
                .build()))
        .build();
    AllocationDeciders allocationDeciders = new AllocationDeciders(Collections.singleton(new NodeVersionAllocationDecider()));
    AllocationService strategy = new MockAllocationService(allocationDeciders,
        new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
    // Test downgrade to same MAJOR.MINOR but lower HOTFIX version
    logger.trace("Downgrade 1st node");
    ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metadata(metadata)
        .routingTable(routingTable)
        .nodes(DiscoveryNodes.builder().add(oldNode1).add(newNode2))
        .build();
    state = stabilize(state, strategy);
    assertThat(state.routingTable().index(shard1.getIndex()).shardsWithState(STARTED).size(), equalTo(2));
    logger.trace("Downgrade 2nd node");
    state = ClusterState.builder(state).nodes(DiscoveryNodes.builder().add(oldNode1).add(oldNode2)).build();
    state = stabilize(state, strategy);
    assertThat(state.routingTable().index(shard1.getIndex()).shardsWithState(STARTED).size(), equalTo(2));
}
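The scenario swaps nodes running 4.4.2 for nodes running 4.4.1, a downgrade within the same MAJOR.MINOR line, and the assertions show that NodeVersionAllocationDecider still lets both the primary and the replica reach STARTED. A small illustrative check of the version relationship being exercised (assertions only, not part of the original test):

// Both node versions share major and minor; only the hotfix digit differs.
assert Version.V_4_4_2.after(Version.V_4_4_1);
assert Version.V_4_4_2.major == Version.V_4_4_1.major;
assert Version.V_4_4_2.minor == Version.V_4_4_1.minor;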