
Example 66 with ClusterSettings

Use of org.elasticsearch.common.settings.ClusterSettings in project elasticsearch by elastic.

From class ClusterServiceTests, method testDisconnectFromNewlyAddedNodesIfClusterStatePublishingFails.

public void testDisconnectFromNewlyAddedNodesIfClusterStatePublishingFails() throws InterruptedException {
    TimedClusterService timedClusterService = new TimedClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool, () -> new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT));
    Set<DiscoveryNode> currentNodes = new HashSet<>();
    timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) {

        @Override
        public void connectToNodes(DiscoveryNodes discoveryNodes) {
            discoveryNodes.forEach(currentNodes::add);
        }

        @Override
        public void disconnectFromNodesExcept(DiscoveryNodes nodesToKeep) {
            Set<DiscoveryNode> nodeSet = new HashSet<>();
            nodesToKeep.iterator().forEachRemaining(nodeSet::add);
            currentNodes.removeIf(node -> nodeSet.contains(node) == false);
        }
    });
    AtomicBoolean failToCommit = new AtomicBoolean();
    timedClusterService.setClusterStatePublisher((event, ackListener) -> {
        if (failToCommit.get()) {
            throw new Discovery.FailedToCommitClusterStateException("just to test this");
        }
    });
    timedClusterService.setDiscoverySettings(new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)));
    timedClusterService.start();
    ClusterState state = timedClusterService.state();
    final DiscoveryNodes nodes = state.nodes();
    final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId());
    state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
    setState(timedClusterService, state);
    assertThat(currentNodes, equalTo(Sets.newHashSet(timedClusterService.state().getNodes())));
    final CountDownLatch latch = new CountDownLatch(1);
    // try to add node when cluster state publishing fails
    failToCommit.set(true);
    timedClusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            DiscoveryNode newNode = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
            return ClusterState.builder(currentState).nodes(DiscoveryNodes.builder(currentState.nodes()).add(newNode)).build();
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            latch.countDown();
        }

        @Override
        public void onFailure(String source, Exception e) {
            latch.countDown();
        }
    });
    latch.await();
    assertThat(currentNodes, equalTo(Sets.newHashSet(timedClusterService.state().getNodes())));
    timedClusterService.close();
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) Set(java.util.Set) HashSet(java.util.HashSet) Collections.emptySet(java.util.Collections.emptySet) NodeConnectionsService(org.elasticsearch.cluster.NodeConnectionsService) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) Matchers.hasToString(org.hamcrest.Matchers.hasToString) Matchers.containsString(org.hamcrest.Matchers.containsString) DiscoverySettings(org.elasticsearch.discovery.DiscoverySettings) CountDownLatch(java.util.concurrent.CountDownLatch) BrokenBarrierException(java.util.concurrent.BrokenBarrierException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes)
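
These tests construct ClusterSettings directly from ClusterSettings.BUILT_IN_CLUSTER_SETTINGS. For orientation, here is a minimal sketch, not taken from the project, of how a custom dynamic setting can be registered alongside the built-in cluster settings and updated through the same ClusterSettings instance (the setting name is invented for illustration; the API shown is the one the tests above rely on):

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public class ClusterSettingsSketch {

    public static void main(String[] args) {
        // Hypothetical dynamic, node-scoped setting used only for this sketch.
        Setting<Integer> exampleSetting =
            Setting.intSetting("example.dynamic.value", 1, Property.Dynamic, Property.NodeScope);

        // Register the custom setting next to the built-in ones, mirroring how the tests
        // pass ClusterSettings.BUILT_IN_CLUSTER_SETTINGS to the ClusterSettings constructor.
        Set<Setting<?>> registered = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        registered.add(exampleSetting);
        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, registered);

        // Consumers registered here are invoked whenever applySettings(...) delivers a new value.
        AtomicInteger current = new AtomicInteger(exampleSetting.get(Settings.EMPTY));
        clusterSettings.addSettingsUpdateConsumer(exampleSetting, current::set);

        // Simulate a dynamic cluster settings update.
        clusterSettings.applySettings(Settings.builder().put("example.dynamic.value", 7).build());
        // current.get() is now 7
    }
}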

Example 67 with ClusterSettings

Use of org.elasticsearch.common.settings.ClusterSettings in project elasticsearch by elastic.

From class ClusterServiceTests, method createTimedClusterService.

TimedClusterService createTimedClusterService(boolean makeMaster) throws InterruptedException {
    TimedClusterService timedClusterService = new TimedClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool, () -> new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT));
    timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) {

        @Override
        public void connectToNodes(DiscoveryNodes discoveryNodes) {
        // skip
        }

        @Override
        public void disconnectFromNodesExcept(DiscoveryNodes nodesToKeep) {
        // skip
        }
    });
    timedClusterService.setClusterStatePublisher((event, ackListener) -> {
    });
    timedClusterService.setDiscoverySettings(new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)));
    timedClusterService.start();
    ClusterState state = timedClusterService.state();
    final DiscoveryNodes nodes = state.nodes();
    final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(makeMaster ? nodes.getLocalNodeId() : null);
    state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
    setState(timedClusterService, state);
    return timedClusterService;
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) NodeConnectionsService(org.elasticsearch.cluster.NodeConnectionsService) DiscoverySettings(org.elasticsearch.discovery.DiscoverySettings) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes)
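
The helper above is the fixture the other ClusterServiceTests methods build on. As a hedged illustration (the test name, assertion, and no-op task below are invented, not copied from the project), a caller would typically use it like this:

public void testMasterNodeIsSet() throws InterruptedException {
    // createTimedClusterService(true) wires a ClusterSettings instance with the built-in
    // settings and promotes the local node to master (see the helper above).
    TimedClusterService clusterService = createTimedClusterService(true);
    try {
        assertTrue(clusterService.state().nodes().isLocalNodeElectedMaster());
        // Submit a no-op update task and wait for it to be processed.
        CountDownLatch latch = new CountDownLatch(1);
        clusterService.submitStateUpdateTask("no-op", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                return currentState;
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                latch.countDown();
            }

            @Override
            public void onFailure(String source, Exception e) {
                latch.countDown();
            }
        });
        latch.await();
    } finally {
        clusterService.close();
    }
}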

Example 68 with ClusterSettings

Use of org.elasticsearch.common.settings.ClusterSettings in project elasticsearch by elastic.

From class RandomAllocationDeciderTests, method testRandomDecisions.

/* This test makes random allocation decisions on a growing and shrinking
     * cluster, leading to a random distribution of the shards. After a certain
     * number of iterations the test allows allocation unless the same shard is
     * already allocated on a node, and then balances the cluster to reach an
     * optimal balance. */
public void testRandomDecisions() {
    RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(random());
    AllocationService strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY), randomAllocationDecider))), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
    int indices = scaledRandomIntBetween(1, 20);
    Builder metaBuilder = MetaData.builder();
    int maxNumReplicas = 1;
    int totalNumShards = 0;
    for (int i = 0; i < indices; i++) {
        int replicas = scaledRandomIntBetween(0, 6);
        maxNumReplicas = Math.max(maxNumReplicas, replicas + 1);
        int numShards = scaledRandomIntBetween(1, 20);
        totalNumShards += numShards * (replicas + 1);
        metaBuilder.put(IndexMetaData.builder("INDEX_" + i).settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(replicas));
    }
    MetaData metaData = metaBuilder.build();
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    for (int i = 0; i < indices; i++) {
        routingTableBuilder.addAsNew(metaData.index("INDEX_" + i));
    }
    RoutingTable initialRoutingTable = routingTableBuilder.build();
    ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
    int numIters = scaledRandomIntBetween(5, 15);
    int nodeIdCounter = 0;
    int atMostNodes = scaledRandomIntBetween(Math.max(1, maxNumReplicas), 15);
    final boolean frequentNodes = randomBoolean();
    AllocationService.CommandsResult routingResult;
    for (int i = 0; i < numIters; i++) {
        logger.info("Start iteration [{}]", i);
        ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
        DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
        if (clusterState.nodes().getSize() <= atMostNodes && (nodeIdCounter == 0 || (frequentNodes ? frequently() : rarely()))) {
            int numNodes = scaledRandomIntBetween(1, 3);
            for (int j = 0; j < numNodes; j++) {
                logger.info("adding node [{}]", nodeIdCounter);
                newNodesBuilder.add(newNode("NODE_" + (nodeIdCounter++)));
            }
        }
        boolean nodesRemoved = false;
        if (nodeIdCounter > 1 && rarely()) {
            int nodeId = scaledRandomIntBetween(0, nodeIdCounter - 2);
            final String node = "NODE_" + nodeId;
            boolean safeToRemove = true;
            RoutingNode routingNode = clusterState.getRoutingNodes().node(node);
            for (ShardRouting shard : routingNode != null ? routingNode : Collections.<ShardRouting>emptyList()) {
                if (shard.active() && shard.primary()) {
                    // make sure there is an active replica to prevent from going red
                    if (clusterState.routingTable().shardRoutingTable(shard.shardId()).activeShards().size() <= 1) {
                        safeToRemove = false;
                        break;
                    }
                }
            }
            if (safeToRemove) {
                logger.info("removing node [{}]", nodeId);
                newNodesBuilder.remove(node);
                nodesRemoved = true;
            } else {
                logger.debug("not removing node [{}] as it holds a primary with no replacement", nodeId);
            }
        }
        stateBuilder.nodes(newNodesBuilder.build());
        clusterState = stateBuilder.build();
        if (nodesRemoved) {
            clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute");
        } else {
            clusterState = strategy.reroute(clusterState, "reroute");
        }
        if (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size() > 0) {
            clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
        }
    }
    logger.info("Fill up nodes such that every shard can be allocated");
    if (clusterState.nodes().getSize() < maxNumReplicas) {
        ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
        DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
        for (int j = 0; j < (maxNumReplicas - clusterState.nodes().getSize()); j++) {
            logger.info("adding node [{}]", nodeIdCounter);
            newNodesBuilder.add(newNode("NODE_" + (nodeIdCounter++)));
        }
        stateBuilder.nodes(newNodesBuilder.build());
        clusterState = stateBuilder.build();
    }
    randomAllocationDecider.alwaysSayYes = true;
    logger.info("now say YES to everything");
    int iterations = 0;
    do {
        iterations++;
        clusterState = strategy.reroute(clusterState, "reroute");
        if (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size() > 0) {
            clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
        }
    } while (clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size() != 0 || clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size() != 0 && iterations < 200);
    logger.info("Done Balancing after [{}] iterations. State:\n{}", iterations, clusterState);
    // we stop after 200 iterations; if it didn't stabilize by then, something is likely to be wrong
    assertThat("max num iteration exceeded", iterations, Matchers.lessThan(200));
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(0));
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0));
    int shards = clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size();
    assertThat(shards, equalTo(totalNumShards));
    final int numNodes = clusterState.nodes().getSize();
    final int upperBound = (int) Math.round(((shards / numNodes) * 1.10));
    final int lowerBound = (int) Math.round(((shards / numNodes) * 0.90));
    for (int i = 0; i < nodeIdCounter; i++) {
        if (clusterState.getRoutingNodes().node("NODE_" + i) == null) {
            continue;
        }
        assertThat(clusterState.getRoutingNodes().node("NODE_" + i).size(), Matchers.anyOf(Matchers.anyOf(equalTo((shards / numNodes) + 1), equalTo((shards / numNodes) - 1), equalTo((shards / numNodes))), Matchers.allOf(Matchers.greaterThanOrEqualTo(lowerBound), Matchers.lessThanOrEqualTo(upperBound))));
    }
}
Also used : TestGatewayAllocator(org.elasticsearch.test.gateway.TestGatewayAllocator) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) BalancedShardsAllocator(org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator) Builder(org.elasticsearch.cluster.metadata.MetaData.Builder) AllocationDeciders(org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders) ReplicaAfterPrimaryActiveAllocationDecider(org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider) SameShardAllocationDecider(org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider) RoutingTable(org.elasticsearch.cluster.routing.RoutingTable) RoutingNode(org.elasticsearch.cluster.routing.RoutingNode) MetaData(org.elasticsearch.cluster.metadata.MetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes) HashSet(java.util.HashSet)
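
RandomAllocationDecider is a test-only decider defined alongside this test. For orientation, here is a minimal hedged sketch of a custom decider written against the same AllocationDecider API (the class name and decision logic are invented for illustration); it could be added to the HashSet passed to new AllocationDeciders(...) exactly like the deciders above:

import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.settings.Settings;

// Hypothetical decider that only allows primaries to be allocated.
public class PrimariesOnlyAllocationDecider extends AllocationDecider {

    public PrimariesOnlyAllocationDecider(Settings settings) {
        super(settings);
    }

    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        if (shardRouting.primary()) {
            return allocation.decision(Decision.YES, "primaries_only", "shard is a primary");
        }
        return allocation.decision(Decision.NO, "primaries_only", "replicas are not allowed by this sketch");
    }
}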

Example 69 with ClusterSettings

Use of org.elasticsearch.common.settings.ClusterSettings in project elasticsearch by elastic.

From class DiskThresholdDeciderTests, method testUnknownDiskUsage.

public void testUnknownDiskUsage() {
    Settings diskSettings = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true).put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7).put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.85).build();
    ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
    // 50% used
    usagesBuilder.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 50));
    // 100% used
    usagesBuilder.put("node3", new DiskUsage("node3", "node3", "/dev/null", 100, 0));
    ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
    ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
    // 10 bytes
    shardSizesBuilder.put("[test][0][p]", 10L);
    // 10 bytes
    shardSizesBuilder.put("[test][0][r]", 10L);
    ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
    final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
    AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), makeDecider(diskSettings))));
    ClusterInfoService cis = new ClusterInfoService() {

        @Override
        public ClusterInfo getClusterInfo() {
            logger.info("--> calling fake getClusterInfo");
            return clusterInfo;
        }

        @Override
        public void addListener(Listener listener) {
        // noop
        }
    };
    AllocationService strategy = new AllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)).build();
    RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
    logger.info("--> adding node1");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(// node3 is added because DiskThresholdDecider automatically ignore single-node clusters
    newNode("node3"))).build();
    routingTable = strategy.reroute(clusterState, "reroute").routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    // Shard can be allocated to node1 even though its disk usage is unknown,
    // because it's a primary that's never been allocated before
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
    logger.info("--> start the shards (primaries)");
    routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    logShardStates(clusterState);
    // A single shard is started on node1, even though it normally would not
    // be allowed, because it's a primary that hasn't been allocated, and node1
    // is still below the high watermark (unlike node3)
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
}
Also used : TestGatewayAllocator(org.elasticsearch.test.gateway.TestGatewayAllocator) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) ClusterInfoService(org.elasticsearch.cluster.ClusterInfoService) BalancedShardsAllocator(org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator) DevNullClusterInfo(org.elasticsearch.cluster.MockInternalClusterInfoService.DevNullClusterInfo) Matchers.containsString(org.hamcrest.Matchers.containsString) DiskUsage(org.elasticsearch.cluster.DiskUsage) ImmutableOpenMap(org.elasticsearch.common.collect.ImmutableOpenMap) ClusterInfo(org.elasticsearch.cluster.ClusterInfo) IndexShardRoutingTable(org.elasticsearch.cluster.routing.IndexShardRoutingTable) IndexRoutingTable(org.elasticsearch.cluster.routing.IndexRoutingTable) RoutingTable(org.elasticsearch.cluster.routing.RoutingTable) MetaData(org.elasticsearch.cluster.metadata.MetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) DiskThresholdSettings(org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings) Settings(org.elasticsearch.common.settings.Settings) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService)
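
The inline percentage comments in this test follow directly from the DiskUsage constructor arguments: node id, node name, path, total bytes, then free bytes. A small hedged sketch (the getUsedDiskAsPercentage/getFreeDiskAsPercentage accessors are assumed to match this code base) makes the relationship explicit:

import org.elasticsearch.cluster.DiskUsage;

public class DiskUsageSketch {

    public static void main(String[] args) {
        // 100 bytes total, 50 bytes free => 50% used
        DiskUsage halfUsed = new DiskUsage("node2", "node2", "/dev/null", 100, 50);
        // 100 bytes total, 0 bytes free => 100% used
        DiskUsage full = new DiskUsage("node3", "node3", "/dev/null", 100, 0);

        System.out.println(halfUsed.getUsedDiskAsPercentage()); // 50.0
        System.out.println(full.getFreeDiskAsPercentage());     // 0.0
    }
}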

Example 70 with ClusterSettings

Use of org.elasticsearch.common.settings.ClusterSettings in project elasticsearch by elastic.

From class DiskThresholdDeciderTests, method testDiskThreshold.

public void testDiskThreshold() {
    Settings diskSettings = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true).put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7).put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8).build();
    ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
    // 90% used
    usagesBuilder.put("node1", new DiskUsage("node1", "node1", "/dev/null", 100, 10));
    // 65% used
    usagesBuilder.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 35));
    // 40% used
    usagesBuilder.put("node3", new DiskUsage("node3", "node3", "/dev/null", 100, 60));
    // 20% used
    usagesBuilder.put("node4", new DiskUsage("node4", "node4", "/dev/null", 100, 80));
    ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
    ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
    // 10 bytes
    shardSizesBuilder.put("[test][0][p]", 10L);
    shardSizesBuilder.put("[test][0][r]", 10L);
    ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
    final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
    ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings))));
    ClusterInfoService cis = new ClusterInfoService() {

        @Override
        public ClusterInfo getClusterInfo() {
            logger.info("--> calling fake getClusterInfo");
            return clusterInfo;
        }

        @Override
        public void addListener(Listener listener) {
        // noop
        }
    };
    AllocationService strategy = new AllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)).build();
    final RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
    logger.info("--> adding two nodes");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Primary shard should be initializing, replica should not
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
    logger.info("--> start the shards (primaries)");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    logShardStates(clusterState);
    // Assert that we're able to start the primary
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
    // Assert that node1 didn't get any shards because its disk usage is too high
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
    logger.info("--> start the shards (replicas)");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    logShardStates(clusterState);
    // Assert that the replica couldn't be started since node1 doesn't have enough space
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
    logger.info("--> adding node3");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Assert that the replica is initialized now that node3 is available with enough space
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
    logger.info("--> start the shards (replicas)");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    logShardStates(clusterState);
    // Assert that the replica is now started, since node3 has enough space for it
    assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
    logger.info("--> changing decider settings");
    // Set the low threshold to 60 instead of 70
    // Set the high threshold to 70 instead of 80
    // node2 now should not have new shards allocated to it, but shards can remain
    diskSettings = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true).put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.7).build();
    deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings))));
    strategy = new AllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Shards remain started
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
    logger.info("--> changing settings again");
    // Set the low threshold to 50 instead of 60
    // Set the high threshold to 60 instead of 70
    // node2 now should not have new shards allocated to it, and shards cannot remain
    diskSettings = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true).put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.5).put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.6).build();
    deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings))));
    strategy = new AllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Shards remain started
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2));
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
    // Shard hasn't been moved off of node2 yet because there's nowhere for it to go
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
    logger.info("--> adding node4");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build();
    clusterState = strategy.reroute(clusterState, "reroute");
    logShardStates(clusterState);
    // Shards remain started
    assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
    logger.info("--> apply INITIALIZING shards");
    clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    logShardStates(clusterState);
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
    // Node4 is available now, so the shard is moved off of node2
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
    assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
}
Also used : TestGatewayAllocator(org.elasticsearch.test.gateway.TestGatewayAllocator) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) ClusterInfoService(org.elasticsearch.cluster.ClusterInfoService) BalancedShardsAllocator(org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator) DevNullClusterInfo(org.elasticsearch.cluster.MockInternalClusterInfoService.DevNullClusterInfo) Matchers.containsString(org.hamcrest.Matchers.containsString) DiskUsage(org.elasticsearch.cluster.DiskUsage) ImmutableOpenMap(org.elasticsearch.common.collect.ImmutableOpenMap) ClusterInfo(org.elasticsearch.cluster.ClusterInfo) IndexShardRoutingTable(org.elasticsearch.cluster.routing.IndexShardRoutingTable) IndexRoutingTable(org.elasticsearch.cluster.routing.IndexRoutingTable) RoutingTable(org.elasticsearch.cluster.routing.RoutingTable) MetaData(org.elasticsearch.cluster.metadata.MetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) DiskThresholdSettings(org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings) Settings(org.elasticsearch.common.settings.Settings) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) HashSet(java.util.HashSet)
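
The test rebuilds its AllocationDeciders whenever it changes the watermarks; in production code the same keys are dynamic and flow through ClusterSettings instead. A hedged sketch of that dynamic path, assuming the DiskThresholdSettings(Settings, ClusterSettings) constructor that DiskThresholdDecider uses in this code base:

import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

public class WatermarkUpdateSketch {

    public static void main(String[] args) {
        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);

        // DiskThresholdSettings registers update consumers on the ClusterSettings instance,
        // so later applySettings(...) calls adjust its thresholds in place.
        DiskThresholdSettings thresholds = new DiskThresholdSettings(Settings.EMPTY, clusterSettings);

        // Simulate a dynamic update of the low/high disk watermarks.
        clusterSettings.applySettings(Settings.builder()
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%")
            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%")
            .build());

        // thresholds now reflects the 60% / 70% watermarks without rebuilding any deciders.
    }
}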

Aggregations

ClusterSettings (org.elasticsearch.common.settings.ClusterSettings): 109 usages
Settings (org.elasticsearch.common.settings.Settings): 58 usages
ClusterState (org.elasticsearch.cluster.ClusterState): 50 usages
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 30 usages
RoutingTable (org.elasticsearch.cluster.routing.RoutingTable): 25 usages
Matchers.containsString (org.hamcrest.Matchers.containsString): 25 usages
Test (org.junit.Test): 25 usages
MetaData (org.elasticsearch.cluster.metadata.MetaData): 21 usages
BalancedShardsAllocator (org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator): 21 usages
AllocationService (org.elasticsearch.cluster.routing.allocation.AllocationService): 20 usages
ClusterInfo (org.elasticsearch.cluster.ClusterInfo): 18 usages
DiskUsage (org.elasticsearch.cluster.DiskUsage): 18 usages
ShardRouting (org.elasticsearch.cluster.routing.ShardRouting): 18 usages
ImmutableOpenMap (org.elasticsearch.common.collect.ImmutableOpenMap): 18 usages
TestGatewayAllocator (org.elasticsearch.test.gateway.TestGatewayAllocator): 18 usages
IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData): 17 usages
CrateDummyClusterServiceUnitTest (io.crate.test.integration.CrateDummyClusterServiceUnitTest): 15 usages
RoutingNode (org.elasticsearch.cluster.routing.RoutingNode): 14 usages
HashSet (java.util.HashSet): 13 usages
DiscoveryNodes (org.elasticsearch.cluster.node.DiscoveryNodes): 13 usages