Use of org.elasticsearch.test.gateway.TestGatewayAllocator in project elasticsearch by elastic.
From class DiskThresholdDeciderTests, method testShardRelocationsTakenIntoAccount.
public void testShardRelocationsTakenIntoAccount() {
Settings diskSettings = Settings.builder()
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8)
    .build();
ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
// 60% used
usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40));
// 60% used
usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 40));
// 60% used
usagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 40));
ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
// 14 bytes
shardSizesBuilder.put("[test][0][p]", 14L);
shardSizesBuilder.put("[test][0][r]", 14L);
// 1 byte each
shardSizesBuilder.put("[test2][0][p]", 1L);
shardSizesBuilder.put("[test2][0][r]", 1L);
ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
DiskThresholdDecider decider = makeDecider(diskSettings);
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(
    new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
    decider)));
ClusterInfoService cis = new ClusterInfoService() {
@Override
public ClusterInfo getClusterInfo() {
logger.info("--> calling fake getClusterInfo");
return clusterInfo;
}
@Override
public void addListener(Listener listener) {
// noop
}
};
AllocationService strategy = new AllocationService(Settings.builder()
    .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
    .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
    .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
    .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
MetaData metaData = MetaData.builder()
    .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
    .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
    .build();
RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test2")).build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// shards should be initializing
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(4));
logger.info("--> start the shards");
clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
logShardStates(clusterState);
// Assert that we're able to start the primary and replicas
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
logger.info("--> adding node3");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
AllocationCommand relocate1 = new MoveAllocationCommand("test", 0, "node2", "node3");
AllocationCommands cmds = new AllocationCommands(relocate1);
clusterState = strategy.reroute(clusterState, cmds, false, false).getClusterState();
logShardStates(clusterState);
AllocationCommand relocate2 = new MoveAllocationCommand("test2", 0, "node2", "node3");
cmds = new AllocationCommands(relocate2);
try {
// The shard for the "test" index is already being relocated to
// node3, which will put it over the low watermark when it
// completes, with shard relocations taken into account this should
// throw an exception about not being able to complete
strategy.reroute(clusterState, cmds, false, false);
fail("should not have been able to reroute the shard");
} catch (IllegalArgumentException e) {
assertThat("can't be allocated because there isn't enough room: " + e.getMessage(), e.getMessage(), containsString("the node is above the low watermark cluster setting " + "[cluster.routing.allocation.disk.watermark.low=0.7], using more disk space than the maximum " + "allowed [70.0%], actual free: [26.0%]"));
}
}
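The expected error message can be recomputed from the fixture values above. The following is a back-of-the-envelope sketch, not code from the test or the decider: with relocations taken into account, node3's 40 free bytes shrink by the 14-byte shard already moving there, leaving the 26.0% free space that the assertion looks for.
static void checkWatermarkArithmetic() {
    double totalBytes = 100;         // per-node capacity in the fake DiskUsage entries
    double freeBytes = 40;           // node3 is 60% used before any relocation
    double incomingShardBytes = 14;  // the [test][0] shard already relocating to node3
    double freeAfterRelocation = 100.0 * (freeBytes - incomingShardBytes) / totalBytes;
    assert freeAfterRelocation == 26.0;           // the "actual free: [26.0%]" in the message
    assert (100.0 - freeAfterRelocation) > 70.0;  // above the 0.7 low watermark, so the second move is rejected
}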
Use of org.elasticsearch.test.gateway.TestGatewayAllocator in project elasticsearch by elastic.
From class FilterAllocationDeciderTests, method testFilterInitialRecovery.
public void testFilterInitialRecovery() {
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
FilterAllocationDecider filterAllocationDecider = new FilterAllocationDecider(Settings.EMPTY, clusterSettings);
AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList(
    filterAllocationDecider,
    new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
    new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY)));
AllocationService service = new AllocationService(Settings.builder().build(), allocationDeciders,
    new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
ClusterState state = createInitialClusterState(service,
    Settings.builder().put("index.routing.allocation.initial_recovery._id", "node2").build());
RoutingTable routingTable = state.routingTable();
// we can initially only allocate on node2
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).currentNodeId(), "node2");
routingTable = service.applyFailedShard(state, routingTable.index("idx").shard(0).shards().get(0)).routingTable();
state = ClusterState.builder(state).routingTable(routingTable).build();
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED);
assertNull(routingTable.index("idx").shard(0).shards().get(0).currentNodeId());
// after failing the shard we are unassigned since the node is blacklisted and we can't initialize on the other node
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, 0, false);
allocation.debugDecision(true);
Decision.Single decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).primaryShard(), state.getRoutingNodes().node("node2"), allocation);
assertEquals(Type.YES, decision.type());
assertEquals("node passes include/exclude/require filters", decision.getExplanation());
ShardRouting primaryShard = routingTable.index("idx").shard(0).primaryShard();
decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).primaryShard(), state.getRoutingNodes().node("node1"), allocation);
assertEquals(Type.NO, decision.type());
if (primaryShard.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) {
assertEquals("initial allocation of the shrunken index is only allowed on nodes [_id:\"node2\"] that " + "hold a copy of every shard in the index", decision.getExplanation());
} else {
assertEquals("initial allocation of the index is only allowed on nodes [_id:\"node2\"]", decision.getExplanation());
}
state = service.reroute(state, "try allocate again");
routingTable = state.routingTable();
assertEquals(routingTable.index("idx").shard(0).primaryShard().state(), INITIALIZING);
assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node2");
state = service.applyStartedShards(state, routingTable.index("idx").shard(0).shardsWithState(INITIALIZING));
routingTable = state.routingTable();
// ok now we are started and can be allocated anywhere! let's see...
// first create another copy
assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).state(), INITIALIZING);
assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).currentNodeId(), "node1");
state = service.applyStartedShards(state, routingTable.index("idx").shard(0).replicaShardsWithState(INITIALIZING));
routingTable = state.routingTable();
assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).state(), STARTED);
assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).currentNodeId(), "node1");
// now remove the node holding the other copy and fail the current primary
DiscoveryNode node1 = state.nodes().resolveNode("node1");
state = service.deassociateDeadNodes(ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).remove("node1")).build(), true, "test");
state = service.applyFailedShard(state, routingTable.index("idx").shard(0).primaryShard());
// now bring back node1 and see that the shard gets assigned to it
state = service.reroute(ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).add(node1)).build(), "test");
routingTable = state.routingTable();
assertEquals(routingTable.index("idx").shard(0).primaryShard().state(), INITIALIZING);
assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node1");
allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, 0, false);
allocation.debugDecision(true);
decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).shards().get(0), state.getRoutingNodes().node("node2"), allocation);
assertEquals(Type.YES, decision.type());
assertEquals("node passes include/exclude/require filters", decision.getExplanation());
decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).shards().get(0), state.getRoutingNodes().node("node1"), allocation);
assertEquals(Type.YES, decision.type());
assertEquals("node passes include/exclude/require filters", decision.getExplanation());
}
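The "node passes include/exclude/require filters" explanations above come from the same filtering machinery that backs the user-facing allocation filter settings. As a hedged aside that is not part of this test, the sketch below shows how such a filter is expressed as index settings; the node name is purely illustrative.
// Pin an index to a single node by name via the standard require filter setting.
Settings pinToOneNode = Settings.builder()
    .put("index.routing.allocation.require._name", "node2")
    .build();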
Use of org.elasticsearch.test.gateway.TestGatewayAllocator in project elasticsearch by elastic.
From class NodeVersionAllocationDeciderTests, method testRebalanceDoesNotAllocatePrimaryAndReplicasOnDifferentVersionNodes.
public void testRebalanceDoesNotAllocatePrimaryAndReplicasOnDifferentVersionNodes() {
ShardId shard1 = new ShardId("test1", "_na_", 0);
ShardId shard2 = new ShardId("test2", "_na_", 0);
final DiscoveryNode newNode = new DiscoveryNode("newNode", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT);
final DiscoveryNode oldNode1 = new DiscoveryNode("oldNode1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, VersionUtils.getPreviousVersion());
final DiscoveryNode oldNode2 = new DiscoveryNode("oldNode2", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, VersionUtils.getPreviousVersion());
AllocationId allocationId1P = AllocationId.newInitializing();
AllocationId allocationId1R = AllocationId.newInitializing();
AllocationId allocationId2P = AllocationId.newInitializing();
AllocationId allocationId2R = AllocationId.newInitializing();
MetaData metaData = MetaData.builder()
    .put(IndexMetaData.builder(shard1.getIndexName())
        .settings(settings(Version.CURRENT).put(Settings.EMPTY))
        .numberOfShards(1).numberOfReplicas(1)
        .putInSyncAllocationIds(0, Sets.newHashSet(allocationId1P.getId(), allocationId1R.getId())))
    .put(IndexMetaData.builder(shard2.getIndexName())
        .settings(settings(Version.CURRENT).put(Settings.EMPTY))
        .numberOfShards(1).numberOfReplicas(1)
        .putInSyncAllocationIds(0, Sets.newHashSet(allocationId2P.getId(), allocationId2R.getId())))
    .build();
RoutingTable routingTable = RoutingTable.builder()
    .add(IndexRoutingTable.builder(shard1.getIndex())
        .addIndexShard(new IndexShardRoutingTable.Builder(shard1)
            .addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), newNode.getId(), null, true, ShardRoutingState.STARTED, allocationId1P))
            .addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), oldNode1.getId(), null, false, ShardRoutingState.STARTED, allocationId1R))
            .build()))
    .add(IndexRoutingTable.builder(shard2.getIndex())
        .addIndexShard(new IndexShardRoutingTable.Builder(shard2)
            .addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), newNode.getId(), null, true, ShardRoutingState.STARTED, allocationId2P))
            .addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), oldNode1.getId(), null, false, ShardRoutingState.STARTED, allocationId2R))
            .build()))
    .build();
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
    .metaData(metaData).routingTable(routingTable)
    .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build();
AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, Collections.singleton(new NodeVersionAllocationDecider(Settings.EMPTY)));
AllocationService strategy = new MockAllocationService(Settings.EMPTY, allocationDeciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
state = strategy.reroute(state, new AllocationCommands(), true, false).getClusterState();
// the two indices must stay as is, the replicas cannot move to oldNode2 because versions don't match
assertThat(state.routingTable().index(shard2.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(0));
assertThat(state.routingTable().index(shard1.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(0));
}
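The reason neither replica can move to oldNode2 is the decider's version rule. The snippet below is a simplified sketch of that rule, not the actual NodeVersionAllocationDecider implementation: a replica may only be allocated to a node whose version is on or after the version of the node holding the primary, so data written by a newer node never has to be recovered onto an older one.
static boolean replicaAllowed(Version targetNodeVersion, Version primaryNodeVersion) {
    // Simplified stand-in for the decider's check; org.elasticsearch.Version#onOrAfter does the comparison.
    return targetNodeVersion.onOrAfter(primaryNodeVersion);
}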
Use of org.elasticsearch.test.gateway.TestGatewayAllocator in project elasticsearch by elastic.
From class DiskThresholdDeciderTests, method testForSingleDataNode.
public void testForSingleDataNode() {
Settings diskSettings = Settings.builder()
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%")
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%")
    .build();
ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
// 0% used
usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 100));
// 80% used
usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 20));
// 0% used
usagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 100));
ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
// We have an index with 2 primary shards, each taking 40 bytes. Each node has 100 bytes available
ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
shardSizes.put("[test][0][p]", 40L);
shardSizes.put("[test][1][p]", 40L);
final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes.build());
DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings);
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)).build();
RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
logger.info("--> adding one master node, one data node");
DiscoveryNode discoveryNode1 = new DiscoveryNode("", "node1", buildNewFakeTransportAddress(), emptyMap(), singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
DiscoveryNode discoveryNode2 = new DiscoveryNode("", "node2", buildNewFakeTransportAddress(), emptyMap(), singleton(DiscoveryNode.Role.DATA), Version.CURRENT);
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build();
ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).nodes(discoveryNodes).build();
// The two shards consume 80% of the disk space on the data node, but since there is only one data node, the shards should remain.
ShardRouting firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, true, ShardRoutingState.STARTED);
ShardRouting secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", null, true, ShardRoutingState.STARTED);
RoutingNode firstRoutingNode = new RoutingNode("node2", discoveryNode2, firstRouting, secondRouting);
RoutingTable.Builder builder = RoutingTable.builder()
    .add(IndexRoutingTable.builder(firstRouting.index())
        .addIndexShard(new IndexShardRoutingTable.Builder(firstRouting.shardId()).addShard(firstRouting).build())
        .addIndexShard(new IndexShardRoutingTable.Builder(secondRouting.shardId()).addShard(secondRouting).build()));
ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime(), false);
routingAllocation.debugDecision(true);
Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
// Two shards should start happily
assertThat(decision.type(), equalTo(Decision.Type.YES));
assertThat(((Decision.Single) decision).getExplanation(), containsString("there is only a single data node present"));
ClusterInfoService cis = new ClusterInfoService() {
@Override
public ClusterInfo getClusterInfo() {
logger.info("--> calling fake getClusterInfo");
return clusterInfo;
}
@Override
public void addListener(Listener listener) {
}
};
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(
    new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
    diskThresholdDecider)));
AllocationService strategy = new AllocationService(Settings.builder()
    .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
    .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
    .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
    .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
ClusterState result = strategy.reroute(clusterState, "reroute");
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node2"));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().state(), equalTo(STARTED));
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().currentNodeId(), equalTo("node2"));
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), nullValue());
// Add another data node; one of the shards should relocate to it.
logger.info("--> adding node3");
DiscoveryNode discoveryNode3 = new DiscoveryNode("", "node3", buildNewFakeTransportAddress(), emptyMap(), singleton(DiscoveryNode.Role.DATA), Version.CURRENT);
ClusterState updateClusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(discoveryNode3)).build();
firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, true, ShardRoutingState.STARTED);
secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", "node3", true, ShardRoutingState.RELOCATING);
firstRoutingNode = new RoutingNode("node2", discoveryNode2, firstRouting, secondRouting);
builder = RoutingTable.builder()
    .add(IndexRoutingTable.builder(firstRouting.index())
        .addIndexShard(new IndexShardRoutingTable.Builder(firstRouting.shardId()).addShard(firstRouting).build())
        .addIndexShard(new IndexShardRoutingTable.Builder(secondRouting.shardId()).addShard(secondRouting).build()));
clusterState = ClusterState.builder(updateClusterState).routingTable(builder.build()).build();
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime(), false);
routingAllocation.debugDecision(true);
decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.YES));
assertThat(((Decision.Single) decision).getExplanation(), containsString("there is enough disk on this node for the shard to remain, free: [60b]"));
result = strategy.reroute(clusterState, "reroute");
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node2"));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().state(), equalTo(RELOCATING));
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().currentNodeId(), equalTo("node2"));
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), equalTo("node3"));
}
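The "free: [60b]" explanation follows directly from the fixture numbers. The helper below is a back-of-the-envelope sketch, not DiskThresholdDecider code: once [test][1][p] is RELOCATING away, its 40 bytes are credited back to node2 when deciding whether [test][0][p] can remain.
static void checkSingleDataNodeArithmetic() {
    long freeBytes = 20;            // node2 is 80% used with both 40-byte shards still on it
    long relocatingAwayBytes = 40;  // size of the shard leaving for node3
    long effectiveFree = freeBytes + relocatingAwayBytes;
    assert effectiveFree == 60;     // matches "there is enough disk ... free: [60b]"
}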
Use of org.elasticsearch.test.gateway.TestGatewayAllocator in project elasticsearch by elastic.
From class TransportShrinkActionTests, method testErrorCondition.
public void testErrorCondition() {
ClusterState state = createClusterState("source", randomIntBetween(2, 42), randomIntBetween(0, 10), Settings.builder().put("index.blocks.write", true).build());
assertTrue(expectThrows(IllegalStateException.class,
    () -> TransportShrinkAction.prepareCreateIndexRequest(new ShrinkRequest("target", "source"), state,
        (i) -> new DocsStats(Integer.MAX_VALUE, randomIntBetween(1, 1000)),
        new IndexNameExpressionResolver(Settings.EMPTY)))
    .getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards "));
assertTrue(expectThrows(IllegalStateException.class, () -> {
ShrinkRequest req = new ShrinkRequest("target", "source");
req.getShrinkIndexRequest().settings(Settings.builder().put("index.number_of_shards", 4));
ClusterState clusterState = createClusterState("source", 8, 1, Settings.builder().put("index.blocks.write", true).build());
TransportShrinkAction.prepareCreateIndexRequest(req, clusterState,
    (i) -> i == 2 || i == 3 ? new DocsStats(Integer.MAX_VALUE / 2, randomIntBetween(1, 1000)) : null,
    new IndexNameExpressionResolver(Settings.EMPTY));
}).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards "));
// create one that won't fail
ClusterState clusterState = ClusterState.builder(createClusterState("source", randomIntBetween(2, 10), 0,
        Settings.builder().put("index.blocks.write", true).build()))
    .nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
AllocationService service = new AllocationService(Settings.builder().build(),
    new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
    new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
// now we start the shards
routingTable = service.applyStartedShards(clusterState, routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
TransportShrinkAction.prepareCreateIndexRequest(new ShrinkRequest("target", "source"), clusterState,
    (i) -> new DocsStats(randomIntBetween(1, 1000), randomIntBetween(1, 1000)),
    new IndexNameExpressionResolver(Settings.EMPTY));
}
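The [2147483519] figure in the expected messages is Lucene's per-shard document ceiling (Integer.MAX_VALUE - 128). The sketch below is not part of the test; it only restates why the second expectThrows case trips that limit when 8 source shards shrink into 4 target shards and source shards 2 and 3 end up in the same target shard.
static void checkShrinkDocCount() {
    long maxDocsPerShard = 2147483519L;              // the limit quoted in the error message
    long docsPerSourceShard = Integer.MAX_VALUE / 2; // 1,073,741,823 docs reported for shards 2 and 3
    long mergedDocs = 2 * docsPerSourceShard;        // 2,147,483,646 docs in the merged target shard
    assert mergedDocs > maxDocsPerShard;             // hence the IllegalStateException
}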