
Example 1 with DiskUsage

Use of org.elasticsearch.cluster.DiskUsage in the elastic/elasticsearch project, from the stats method of the FsProbe class.

public FsInfo stats(FsInfo previous, @Nullable ClusterInfo clusterInfo) throws IOException {
    if (!nodeEnv.hasNodeFile()) {
        return new FsInfo(System.currentTimeMillis(), null, new FsInfo.Path[0]);
    }
    NodePath[] dataLocations = nodeEnv.nodePaths();
    FsInfo.Path[] paths = new FsInfo.Path[dataLocations.length];
    for (int i = 0; i < dataLocations.length; i++) {
        paths[i] = getFSInfo(dataLocations[i]);
    }
    FsInfo.IoStats ioStats = null;
    if (Constants.LINUX) {
        Set<Tuple<Integer, Integer>> devicesNumbers = new HashSet<>();
        for (int i = 0; i < dataLocations.length; i++) {
            if (dataLocations[i].majorDeviceNumber != -1 && dataLocations[i].minorDeviceNumber != -1) {
                devicesNumbers.add(Tuple.tuple(dataLocations[i].majorDeviceNumber, dataLocations[i].minorDeviceNumber));
            }
        }
        ioStats = ioStats(devicesNumbers, previous);
    }
    DiskUsage leastDiskEstimate = null;
    DiskUsage mostDiskEstimate = null;
    if (clusterInfo != null) {
        leastDiskEstimate = clusterInfo.getNodeLeastAvailableDiskUsages().get(nodeEnv.nodeId());
        mostDiskEstimate = clusterInfo.getNodeMostAvailableDiskUsages().get(nodeEnv.nodeId());
    }
    return new FsInfo(System.currentTimeMillis(), ioStats, paths, leastDiskEstimate, mostDiskEstimate);
}
Also used : NodePath(org.elasticsearch.env.NodeEnvironment.NodePath) DiskUsage(org.elasticsearch.cluster.DiskUsage) Tuple(org.elasticsearch.common.collect.Tuple) HashSet(java.util.HashSet)
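For intuition: the "least" and "most" available disk usages in ClusterInfo correspond to the data path on a node with the smallest and the largest free space, respectively. Below is a small illustrative sketch of that reading; the class name, paths, and values are hypothetical, while the DiskUsage constructor and getter match the ones used in the examples further down.

import org.elasticsearch.cluster.DiskUsage;

// Illustrative only: two data paths on one node, with different free space.
public class LeastMostAvailableExample {
    public static void main(String[] args) {
        DiskUsage leastAvailable = new DiskUsage("node1", "n1", "/data1", 100, 10); // 10% free
        DiskUsage mostAvailable  = new DiskUsage("node1", "n1", "/data2", 100, 80); // 80% free
        // stats(...) above attaches both per-node estimates to the FsInfo it returns,
        // but only when a ClusterInfo was supplied (hence the null check).
        System.out.println(leastAvailable.getFreeDiskAsPercentage()); // 10.0
        System.out.println(mostAvailable.getFreeDiskAsPercentage());  // 80.0
    }
}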

Example 2 with DiskUsage

Use of org.elasticsearch.cluster.DiskUsage in the elastic/elasticsearch project, from the canRemain method of the DiskThresholdDecider class.

@Override
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
    if (shardRouting.currentNodeId().equals(node.nodeId()) == false) {
        throw new IllegalArgumentException("Shard [" + shardRouting + "] is not allocated on node: [" + node.nodeId() + "]");
    }
    final ClusterInfo clusterInfo = allocation.clusterInfo();
    final ImmutableOpenMap<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();
    final Decision decision = earlyTerminate(allocation, usages);
    if (decision != null) {
        return decision;
    }
    // subtractLeavingShards is passed as true here: since this check only covers shards that remain,
    // we will *eventually* have enough disk as shards move away. No new shards will be incoming,
    // because canAllocate passes false for this check.
    final DiskUsage usage = getDiskUsage(node, allocation, usages, true);
    final String dataPath = clusterInfo.getDataPath(shardRouting);
    // If this node is already above the high threshold, the shard cannot remain (get it off!)
    final double freeDiskPercentage = usage.getFreeDiskAsPercentage();
    final long freeBytes = usage.getFreeBytes();
    if (logger.isTraceEnabled()) {
        logger.trace("node [{}] has {}% free disk ({} bytes)", node.nodeId(), freeDiskPercentage, freeBytes);
    }
    if (dataPath == null || usage.getPath().equals(dataPath) == false) {
        return allocation.decision(Decision.YES, NAME, "this shard is not allocated on the most utilized disk and can remain");
    }
    if (freeBytes < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
        if (logger.isDebugEnabled()) {
            logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, shard cannot remain", diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
        }
        return allocation.decision(Decision.NO, NAME,
                "the shard cannot remain on this node because it is above the high watermark cluster setting [%s=%s] "
                        + "and there is less than the required [%s] free space on node, actual free: [%s]",
                CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getHighWatermarkRaw(),
                diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytes));
    }
    if (freeDiskPercentage < diskThresholdSettings.getFreeDiskThresholdHigh()) {
        if (logger.isDebugEnabled()) {
            logger.debug("less than the required {}% free disk threshold ({}% free) on node {}, shard cannot remain", diskThresholdSettings.getFreeDiskThresholdHigh(), freeDiskPercentage, node.nodeId());
        }
        return allocation.decision(Decision.NO, NAME,
                "the shard cannot remain on this node because it is above the high watermark cluster setting [%s=%s] "
                        + "and there is less than the required [%s%%] free disk on node, actual free: [%s%%]",
                CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getHighWatermarkRaw(),
                diskThresholdSettings.getFreeDiskThresholdHigh(), freeDiskPercentage);
    }
    return allocation.decision(Decision.YES, NAME, "there is enough disk on this node for the shard to remain, free: [%s]", new ByteSizeValue(freeBytes));
}
Also used : ClusterInfo(org.elasticsearch.cluster.ClusterInfo) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) DiskUsage(org.elasticsearch.cluster.DiskUsage)
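To make the two Decision.NO branches concrete, here is a minimal standalone sketch of the same high-watermark logic. This is not Elasticsearch code; the class name and threshold constants are illustrative stand-ins for diskThresholdSettings.

// Standalone sketch of canRemain's high-watermark checks; thresholds are illustrative.
final class HighWatermarkSketch {
    static final long FREE_BYTES_THRESHOLD_HIGH = 10;       // e.g. a "10b" high watermark
    static final double FREE_PERCENT_THRESHOLD_HIGH = 10.0; // e.g. a "90%"-used high watermark

    static boolean canRemain(long freeBytes, long totalBytes) {
        double freeDiskPercentage = 100.0 * freeBytes / totalBytes;
        if (freeBytes < FREE_BYTES_THRESHOLD_HIGH) {
            return false; // mirrors the first Decision.NO branch above
        }
        if (freeDiskPercentage < FREE_PERCENT_THRESHOLD_HIGH) {
            return false; // mirrors the second Decision.NO branch above
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(canRemain(5, 100));  // false: 5 bytes free fails both thresholds
        System.out.println(canRemain(30, 100)); // true: 30% free clears a 10% high watermark
    }
}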

Example 3 with DiskUsage

Use of org.elasticsearch.cluster.DiskUsage in the elastic/elasticsearch project, from the freeDiskPercentageAfterShardAssigned method of the DiskThresholdDecider class.

/**
 * Given the DiskUsage for a node and the size of the shard, return the
 * percentage of free disk if the shard were to be allocated to the node.
 *
 * @param usage     A DiskUsage for the node to have space computed for
 * @param shardSize Size in bytes of the shard
 * @return Percentage of free space after the shard is assigned to the node
 */
double freeDiskPercentageAfterShardAssigned(DiskUsage usage, Long shardSize) {
    shardSize = (shardSize == null) ? 0 : shardSize;
    DiskUsage newUsage = new DiskUsage(usage.getNodeId(), usage.getNodeName(), usage.getPath(),
            usage.getTotalBytes(), usage.getFreeBytes() - shardSize);
    return newUsage.getFreeDiskAsPercentage();
}
Also used : DiskUsage(org.elasticsearch.cluster.DiskUsage)
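A worked example, assuming getFreeDiskAsPercentage returns 100 * free / total (consistent with the "% used" comments in Example 4): a node with 100 bytes total and 30 free drops to 20% free after a 10-byte shard is assigned. The class name is hypothetical; the DiskUsage calls match the code above.

import org.elasticsearch.cluster.DiskUsage;

// Worked example using the DiskUsage constructor shown in Example 4 (values illustrative).
public class FreePercentageExample {
    public static void main(String[] args) {
        DiskUsage usage = new DiskUsage("node1", "n1", "/dev/null", 100, 30); // 30% free
        Long shardSize = 10L;
        DiskUsage newUsage = new DiskUsage(usage.getNodeId(), usage.getNodeName(), usage.getPath(),
                usage.getTotalBytes(), usage.getFreeBytes() - shardSize);
        System.out.println(newUsage.getFreeDiskAsPercentage()); // expected: 20.0
    }
}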

Example 4 with DiskUsage

Use of org.elasticsearch.cluster.DiskUsage in the elastic/elasticsearch project, from the testDiskThresholdWithAbsoluteSizes method of the DiskThresholdDeciderTests class.

public void testDiskThresholdWithAbsoluteSizes() {
    Settings diskSettings = Settings.builder()
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "30b")
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "9b")
            .build();
    ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
    // 90% used
    usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 10));
    // 90% used
    usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 10));
    // 40% used
    usagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 60));
    // 20% used
    usagesBuilder.put("node4", new DiskUsage("node4", "n4", "/dev/null", 100, 80));
    // 15% used
    usagesBuilder.put("node5", new DiskUsage("node5", "n5", "/dev/null", 100, 85));
    ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
    ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
    // 10 bytes
    shardSizesBuilder.put("[test][0][p]", 10L);
    shardSizesBuilder.put("[test][0][r]", 10L);
    ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
    final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
    ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
            new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings))));
    ClusterInfoService cis = new ClusterInfoService() {

        @Override
        public ClusterInfo getClusterInfo() {
            logger.info("--> calling fake getClusterInfo");
            return clusterInfo;
        }

        @Override
        public void addListener(Listener listener) {
            // noop
        }
    };
    AllocationService strategy = new AllocationService(Settings.builder()
            .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
            .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
            .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
            .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
    logger.info("--> adding node1 and node2 node");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Primary should initialize, even though both nodes are over the limit
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
    String nodeWithPrimary, nodeWithoutPrimary;
    if (clusterState.getRoutingNodes().node("node1").size() == 1) {
        nodeWithPrimary = "node1";
        nodeWithoutPrimary = "node2";
    } else {
        nodeWithPrimary = "node2";
        nodeWithoutPrimary = "node1";
    }
    logger.info("--> nodeWithPrimary: {}", nodeWithPrimary);
    logger.info("--> nodeWithoutPrimary: {}", nodeWithoutPrimary);
    // Lower the disk usage on the node without the primary so it can accept a replica
    usagesBuilder = ImmutableOpenMap.builder(usages);
    // 65% used
    usagesBuilder.put(nodeWithoutPrimary, new DiskUsage(nodeWithoutPrimary, "", "/dev/null", 100, 35));
    usages = usagesBuilder.build();
    final ClusterInfo clusterInfo2 = new DevNullClusterInfo(usages, usages, shardSizes);
    cis = new ClusterInfoService() {

        @Override
        public ClusterInfo getClusterInfo() {
            logger.info("--> calling fake getClusterInfo");
            return clusterInfo2;
        }

        @Override
        public void addListener(Listener listener) {
            // noop
        }
    };
    strategy = new AllocationService(Settings.builder()
            .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
            .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
            .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
            .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Now the replica should be able to initialize
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
    logger.info("--> start the shards (primaries)");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    logShardStates(clusterState);
    // Assert that we're able to start the primary and replica, since they were both initializing
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
    // Assert that node1 got a single shard (the primary), even though its disk usage is too high
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
    // Assert that node2 got a single shard (a replica)
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
    // Assert that one replica is still unassigned
    //assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
    logger.info("--> adding node3");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Assert that the replica is initialized now that node3 is available with enough space
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
    logger.info("--> start the shards (replicas)");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    logShardStates(clusterState);
    // Assert that all replicas could be started
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
    logger.info("--> changing decider settings");
    // Raise the low watermark to require 40 free bytes (was 30b)
    // and the high watermark to require 30 free bytes (was 9b)
    // node2 now should not have new shards allocated to it, but shards can remain
    diskSettings = Settings.builder()
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "40b")
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "30b")
            .build();
    deciders = new AllocationDeciders(Settings.EMPTY,
            new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings))));
    strategy = new AllocationService(Settings.builder()
            .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
            .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
            .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
            .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Shards remain started
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(3));
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
    logger.info("--> changing settings again");
    // Raise the low watermark to require 50 free bytes (was 40b)
    // and the high watermark to require 40 free bytes (was 30b)
    // node2 now should not have new shards allocated to it, and shards cannot remain
    diskSettings = Settings.builder()
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "50b")
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "40b")
            .build();
    deciders = new AllocationDeciders(Settings.EMPTY,
            new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings))));
    strategy = new AllocationService(Settings.builder()
            .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
            .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
            .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
            .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Shards remain started
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(3));
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
    // Shard hasn't been moved off of node2 yet because there's nowhere for it to go
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
    logger.info("--> adding node4");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build();
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Shards remain started
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
    // One shard is relocating off of node1
    assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
    logger.info("--> apply INITIALIZING shards");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    logShardStates(clusterState);
    // primary shard already has been relocated away
    assertThat(clusterState.getRoutingNodes().node(nodeWithPrimary).size(), equalTo(0));
    // node with increased space still has its shard
    assertThat(clusterState.getRoutingNodes().node(nodeWithoutPrimary).size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
    logger.info("--> adding node5");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node5"))).build();
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Shards remain started on node3 and node4
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
    // One shard is relocating off of node2 now
    assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(1));
    // Initializing on node5
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
    logger.info("--> apply INITIALIZING shards");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    logger.info("--> final cluster state:");
    logShardStates(clusterState);
    // Node1 still has no shards because it has no space for them
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
    // Node5 is available now, so the shard is moved off of node2
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
    assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node5").size(), equalTo(1));
}
Also used : ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) DevNullClusterInfo(org.elasticsearch.cluster.MockInternalClusterInfoService.DevNullClusterInfo) Matchers.containsString(org.hamcrest.Matchers.containsString) DiskUsage(org.elasticsearch.cluster.DiskUsage) ImmutableOpenMap(org.elasticsearch.common.collect.ImmutableOpenMap) MetaData(org.elasticsearch.cluster.metadata.MetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) DiskThresholdSettings(org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings) Settings(org.elasticsearch.common.settings.Settings) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) HashSet(java.util.HashSet) TestGatewayAllocator(org.elasticsearch.test.gateway.TestGatewayAllocator) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterInfoService(org.elasticsearch.cluster.ClusterInfoService) BalancedShardsAllocator(org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator) ClusterInfo(org.elasticsearch.cluster.ClusterInfo) IndexShardRoutingTable(org.elasticsearch.cluster.routing.IndexShardRoutingTable) IndexRoutingTable(org.elasticsearch.cluster.routing.IndexRoutingTable) RoutingTable(org.elasticsearch.cluster.routing.RoutingTable)
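The byte-based watermarks drive every transition in this test. Below is a minimal sketch of that arithmetic, assuming (as the test's transitions suggest) that a byte-valued watermark is the minimum free space a node must keep. The class and method names are illustrative, not Elasticsearch APIs.

// Illustrative sketch, not Elasticsearch code: byte watermarks as minimum-free-space floors.
final class ByteWatermarkSketch {
    // A node below the low watermark receives no new shards.
    static boolean canAllocate(long freeBytes, long lowWatermarkBytes) {
        return freeBytes >= lowWatermarkBytes;
    }
    // A node below the high watermark must shed its shards once a target node exists.
    static boolean canRemain(long freeBytes, long highWatermarkBytes) {
        return freeBytes >= highWatermarkBytes;
    }

    public static void main(String[] args) {
        long node2Free = 35; // bytes free after the test raises node2's free space
        System.out.println(canAllocate(node2Free, 40)); // false: no new shards under low=40b
        System.out.println(canRemain(node2Free, 30));   // true: shard stays under high=30b
        System.out.println(canRemain(node2Free, 40));   // false: shard must move under high=40b
    }
}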

Example 5 with DiskUsage

Use of org.elasticsearch.cluster.DiskUsage in the elastic/elasticsearch project, from the testDiskThresholdWithShardSizes method of the DiskThresholdDeciderTests class.

public void testDiskThresholdWithShardSizes() {
    Settings diskSettings = Settings.builder()
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7)
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "71%")
            .build();
    ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
    // 69% used
    usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 31));
    // 99% used
    usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 1));
    ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
    ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
    // 10 bytes
    shardSizesBuilder.put("[test][0][p]", 10L);
    ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
    final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
    AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
            new HashSet<>(Arrays.asList(
                    new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
                    makeDecider(diskSettings))));
    ClusterInfoService cis = new ClusterInfoService() {

        @Override
        public ClusterInfo getClusterInfo() {
            logger.info("--> calling fake getClusterInfo");
            return clusterInfo;
        }

        @Override
        public void addListener(Listener listener) {
            // noop
        }
    };
    AllocationService strategy = new AllocationService(Settings.builder()
            .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
            .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
            .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
            .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)).build();
    RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
    logger.info("--> adding node1");
    // node2 is added because DiskThresholdDecider automatically ignores single-node clusters
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
    routingTable = strategy.reroute(clusterState, "reroute").routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    logger.info("--> start the shards (primaries)");
    routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    logShardStates(clusterState);
    // Shard can't be allocated to node1 (or node2) because it would cause too much usage
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
    // No shards are started, no nodes have enough disk for allocation
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(0));
}
Also used : TestGatewayAllocator(org.elasticsearch.test.gateway.TestGatewayAllocator) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) ClusterInfoService(org.elasticsearch.cluster.ClusterInfoService) BalancedShardsAllocator(org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator) DevNullClusterInfo(org.elasticsearch.cluster.MockInternalClusterInfoService.DevNullClusterInfo) Matchers.containsString(org.hamcrest.Matchers.containsString) DiskUsage(org.elasticsearch.cluster.DiskUsage) ImmutableOpenMap(org.elasticsearch.common.collect.ImmutableOpenMap) ClusterInfo(org.elasticsearch.cluster.ClusterInfo) IndexShardRoutingTable(org.elasticsearch.cluster.routing.IndexShardRoutingTable) IndexRoutingTable(org.elasticsearch.cluster.routing.IndexRoutingTable) RoutingTable(org.elasticsearch.cluster.routing.RoutingTable) MetaData(org.elasticsearch.cluster.metadata.MetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) DiskThresholdSettings(org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings) Settings(org.elasticsearch.common.settings.Settings) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService)
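The final assertions follow from the shard-size-aware check in Example 3: node1 is at 69% used, but assigning the 10-byte shard would leave only 21 bytes free, below the 30% free that a 0.7 (i.e. 70%-used) low watermark requires. A worked version of that arithmetic, with a hypothetical class name and values taken from the test:

// Worked arithmetic behind the final assertions (values from the test above).
public class WatermarkArithmetic {
    public static void main(String[] args) {
        long totalBytes = 100, freeBytes = 31, shardSize = 10;           // node1 in the test
        double freeAfter = 100.0 * (freeBytes - shardSize) / totalBytes; // free % after assignment
        boolean allowedByLowWatermark = freeAfter >= 30.0;               // 70% used => keep 30% free
        System.out.println(freeAfter);             // 21.0
        System.out.println(allowedByLowWatermark); // false: allocation is refused
    }
}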

Aggregations

DiskUsage (org.elasticsearch.cluster.DiskUsage): 36 uses
ClusterInfo (org.elasticsearch.cluster.ClusterInfo): 24 uses
Matchers.containsString (org.hamcrest.Matchers.containsString): 22 uses
ImmutableOpenMap (org.elasticsearch.common.collect.ImmutableOpenMap): 21 uses
ClusterState (org.elasticsearch.cluster.ClusterState): 19 uses
ClusterSettings (org.elasticsearch.common.settings.ClusterSettings): 19 uses
RoutingTable (org.elasticsearch.cluster.routing.RoutingTable): 18 uses
ClusterInfoService (org.elasticsearch.cluster.ClusterInfoService): 14 uses
RoutingNode (org.elasticsearch.cluster.routing.RoutingNode): 14 uses
Settings (org.elasticsearch.common.settings.Settings): 14 uses
IndexRoutingTable (org.elasticsearch.cluster.routing.IndexRoutingTable): 13 uses
IndexShardRoutingTable (org.elasticsearch.cluster.routing.IndexShardRoutingTable): 13 uses
AllocationService (org.elasticsearch.cluster.routing.allocation.AllocationService): 13 uses
DiskThresholdSettings (org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings): 13 uses
BalancedShardsAllocator (org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator): 13 uses
TestGatewayAllocator (org.elasticsearch.test.gateway.TestGatewayAllocator): 13 uses
IndexMetadata (org.elasticsearch.cluster.metadata.IndexMetadata): 10 uses
ShardRouting (org.elasticsearch.cluster.routing.ShardRouting): 10 uses
Test (org.junit.Test): 10 uses
DevNullClusterInfo (org.elasticsearch.cluster.MockInternalClusterInfoService.DevNullClusterInfo): 9 uses