
Example 71 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project crate by crate.

From class SettingTests, method testByteSizeSettingMaxValue.

@Test
public void testByteSizeSettingMaxValue() {
    final Setting<ByteSizeValue> byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(100, ByteSizeUnit.MB), new ByteSizeValue(16, ByteSizeUnit.MB), new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES));
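    // (1L << 31) - 1 equals Integer.MAX_VALUE, so adding 1..1024 guarantees the value exceeds the max bound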
    final long value = (1L << 31) - 1 + randomIntBetween(1, 1024);
    final Settings settings = Settings.builder().put("a.byte.size", value + "b").build();
    final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> byteSizeValueSetting.get(settings));
    final String expectedMessage = "failed to parse value [" + value + "b] for setting [a.byte.size], must be <= [2147483647b]";
    assertThat(e, hasToString(containsString(expectedMessage)));
}
Also used: ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), Matchers.hasToString (org.hamcrest.Matchers.hasToString), Matchers.containsString (org.hamcrest.Matchers.containsString), Test (org.junit.Test)
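
For quick experimentation outside the test framework, here is a minimal sketch of the same max-bound failure using a fixed value instead of a random one. It assumes the Elasticsearch Setting, Settings, ByteSizeUnit, and ByteSizeValue classes used in the test are on the classpath; the class name ByteSizeMaxBoundSketch is made up for illustration.

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class ByteSizeMaxBoundSketch {
    public static void main(String[] args) {
        // Same bounds as the test: default 100mb, minimum 16mb, maximum Integer.MAX_VALUE bytes.
        Setting<ByteSizeValue> setting = Setting.byteSizeSetting(
            "a.byte.size",
            new ByteSizeValue(100, ByteSizeUnit.MB),
            new ByteSizeValue(16, ByteSizeUnit.MB),
            new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES));
        // 2147483648b is one byte over the maximum, so get(...) should throw.
        Settings settings = Settings.builder().put("a.byte.size", (1L << 31) + "b").build();
        try {
            setting.get(settings);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // "... must be <= [2147483647b]"
        }
    }
}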

Example 72 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project crate by crate.

From class SettingTests, method testByteSizeSetting.

@Test
public void testByteSizeSetting() {
    final Setting<ByteSizeValue> byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope);
    assertFalse(byteSizeValueSetting.isGroupSetting());
    final ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY);
    assertThat(byteSizeValue.getBytes(), equalTo(1024L));
}
Also used: ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), Test (org.junit.Test)
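
As a hedged usage sketch (same classpath assumption as above; the key "a.byte.size" and the values are illustrative), the default applies when the key is absent and an explicit value overrides it:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;

public class ByteSizeDefaultSketch {
    public static void main(String[] args) {
        Setting<ByteSizeValue> setting = Setting.byteSizeSetting(
            "a.byte.size", new ByteSizeValue(1024),
            Setting.Property.Dynamic, Setting.Property.NodeScope);
        // Key absent: the 1024-byte default is returned.
        System.out.println(setting.get(Settings.EMPTY).getBytes());  // 1024
        // Key present: the parsed value wins ("2kb" is 2 * 1024 bytes).
        Settings settings = Settings.builder().put("a.byte.size", "2kb").build();
        System.out.println(setting.get(settings).getBytes());        // 2048
    }
}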

Example 73 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project crate by crate.

From class SettingTests, method testByteSizeSettingMinValue.

@Test
public void testByteSizeSettingMinValue() {
    final Setting<ByteSizeValue> byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(100, ByteSizeUnit.MB), new ByteSizeValue(20_000_000, ByteSizeUnit.BYTES), new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES));
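    // pick a value randomly 1 to 1024 bytes below the 20,000,000-byte minimum, so parsing must fail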
    final long value = 20_000_000 - randomIntBetween(1, 1024);
    final Settings settings = Settings.builder().put("a.byte.size", value + "b").build();
    final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> byteSizeValueSetting.get(settings));
    final String expectedMessage = "failed to parse value [" + value + "b] for setting [a.byte.size], must be >= [20000000b]";
    assertThat(e, hasToString(containsString(expectedMessage)));
}
Also used: ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), Matchers.hasToString (org.hamcrest.Matchers.hasToString), Matchers.containsString (org.hamcrest.Matchers.containsString), Test (org.junit.Test)
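
Note that the bounds passed to byteSizeSetting are ByteSizeValue instances, so units can be mixed freely (megabyte default, byte-denominated bounds). A small sketch of how the 20,000,000-byte minimum relates to other units through ByteSizeValue's accessors (same class as above; the output comments assume binary units, where 1mb = 1,048,576 bytes):

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class MinBoundUnitsSketch {
    public static void main(String[] args) {
        ByteSizeValue min = new ByteSizeValue(20_000_000, ByteSizeUnit.BYTES);
        System.out.println(min.getBytes()); // 20000000
        System.out.println(min.getKb());    // 19531 (20000000 / 1024, truncated)
        System.out.println(min.getMb());    // 19    (20000000 / 1048576, truncated)
    }
}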

Example 74 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From class DiskThresholdDecider, method canAllocate.

@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
    ClusterInfo clusterInfo = allocation.clusterInfo();
    ImmutableOpenMap<String, DiskUsage> usages = clusterInfo.getNodeMostAvailableDiskUsages();
    final Decision decision = earlyTerminate(allocation, usages);
    if (decision != null) {
        return decision;
    }
    final double usedDiskThresholdLow = 100.0 - diskThresholdSettings.getFreeDiskThresholdLow();
    final double usedDiskThresholdHigh = 100.0 - diskThresholdSettings.getFreeDiskThresholdHigh();
    // subtractLeavingShards is passed as false here because shards that are leaving the node still use disk space,
    // so we should be extra careful and take their size into account
    DiskUsage usage = getDiskUsage(node, allocation, usages, false);
    // First, check whether the node is currently over the low watermark
    double freeDiskPercentage = usage.getFreeDiskAsPercentage();
    // Cache the used disk percentage for displaying disk percentages consistent with documentation
    double usedDiskPercentage = usage.getUsedDiskAsPercentage();
    long freeBytes = usage.getFreeBytes();
    if (logger.isTraceEnabled()) {
        logger.trace("node [{}] has {}% used disk", node.nodeId(), usedDiskPercentage);
    }
    // flag that determines whether the low threshold checks below can be skipped. We use this for a primary shard that is freshly
    // allocated and empty.
    boolean skipLowThresholdChecks = shardRouting.primary() && shardRouting.active() == false && shardRouting.recoverySource().getType() == RecoverySource.Type.EMPTY_STORE;
    // checks for exact byte comparisons
    if (freeBytes < diskThresholdSettings.getFreeBytesThresholdLow().getBytes()) {
        if (skipLowThresholdChecks == false) {
            if (logger.isDebugEnabled()) {
                logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, preventing allocation", diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId());
            }
            return allocation.decision(Decision.NO, NAME, "the node is above the low watermark cluster setting [%s=%s], having less than the minimum required [%s] free " + "space, actual free: [%s]", CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getLowWatermarkRaw(), diskThresholdSettings.getFreeBytesThresholdLow(), new ByteSizeValue(freeBytes));
        } else if (freeBytes > diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
            // allow allocation: this primary has never been allocated and the node is still under the high watermark
            if (logger.isDebugEnabled()) {
                logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, " + "but allowing allocation because primary has never been allocated", diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId());
            }
            return allocation.decision(Decision.YES, NAME, "the node is above the low watermark, but less than the high watermark, and this primary shard has " + "never been allocated before");
        } else {
            // above the high watermark, so don't allow allocating the shard
            if (logger.isDebugEnabled()) {
                logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, " + "preventing allocation even though primary has never been allocated", diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
            }
            return allocation.decision(Decision.NO, NAME, "the node is above the high watermark cluster setting [%s=%s], having less than the minimum required [%s] free " + "space, actual free: [%s]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getHighWatermarkRaw(), diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytes));
        }
    }
    // checks for percentage comparisons
    if (freeDiskPercentage < diskThresholdSettings.getFreeDiskThresholdLow()) {
        // If the shard is a replica or is a non-empty primary, check the low threshold
        if (skipLowThresholdChecks == false) {
            if (logger.isDebugEnabled()) {
                logger.debug("more than the allowed {} used disk threshold ({} used) on node [{}], preventing allocation", Strings.format1Decimals(usedDiskThresholdLow, "%"), Strings.format1Decimals(usedDiskPercentage, "%"), node.nodeId());
            }
            return allocation.decision(Decision.NO, NAME, "the node is above the low watermark cluster setting [%s=%s], using more disk space than the maximum allowed " + "[%s%%], actual free: [%s%%]", CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getLowWatermarkRaw(), usedDiskThresholdLow, freeDiskPercentage);
        } else if (freeDiskPercentage > diskThresholdSettings.getFreeDiskThresholdHigh()) {
            // allow allocation: this primary has never been allocated and the node is still under the high watermark
            if (logger.isDebugEnabled()) {
                logger.debug("more than the allowed {} used disk threshold ({} used) on node [{}], " + "but allowing allocation because primary has never been allocated", Strings.format1Decimals(usedDiskThresholdLow, "%"), Strings.format1Decimals(usedDiskPercentage, "%"), node.nodeId());
            }
            return allocation.decision(Decision.YES, NAME, "the node is above the low watermark, but less than the high watermark, and this primary shard has " + "never been allocated before");
        } else {
            // above the high watermark, so don't allow allocating the shard
            if (logger.isDebugEnabled()) {
                logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, " + "preventing allocation even though primary has never been allocated", Strings.format1Decimals(diskThresholdSettings.getFreeDiskThresholdHigh(), "%"), Strings.format1Decimals(freeDiskPercentage, "%"), node.nodeId());
            }
            return allocation.decision(Decision.NO, NAME, "the node is above the high watermark cluster setting [%s=%s], using more disk space than the maximum allowed " + "[%s%%], actual free: [%s%%]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getHighWatermarkRaw(), usedDiskThresholdHigh, freeDiskPercentage);
        }
    }
    // Secondly, check that allocating the shard to this node doesn't put it above the high watermark
    final long shardSize = getExpectedShardSize(shardRouting, allocation, 0);
    double freeSpaceAfterShard = freeDiskPercentageAfterShardAssigned(usage, shardSize);
    long freeBytesAfterShard = freeBytes - shardSize;
    if (freeBytesAfterShard < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
        logger.warn("after allocating, node [{}] would have less than the required " + "{} free bytes threshold ({} bytes free), preventing allocation", node.nodeId(), diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytesAfterShard);
        return allocation.decision(Decision.NO, NAME, "allocating the shard to this node will bring the node above the high watermark cluster setting [%s=%s] " + "and cause it to have less than the minimum required [%s] of free space (free bytes after shard added: [%s])", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getHighWatermarkRaw(), diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytesAfterShard));
    }
    if (freeSpaceAfterShard < diskThresholdSettings.getFreeDiskThresholdHigh()) {
        logger.warn("after allocating, node [{}] would have more than the allowed " + "{} free disk threshold ({} free), preventing allocation", node.nodeId(), Strings.format1Decimals(diskThresholdSettings.getFreeDiskThresholdHigh(), "%"), Strings.format1Decimals(freeSpaceAfterShard, "%"));
        return allocation.decision(Decision.NO, NAME, "allocating the shard to this node will bring the node above the high watermark cluster setting [%s=%s] " + "and cause it to use more disk space than the maximum allowed [%s%%] (free space after shard added: [%s%%])", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getHighWatermarkRaw(), usedDiskThresholdHigh, freeSpaceAfterShard);
    }
    return allocation.decision(Decision.YES, NAME, "enough disk for shard on node, free: [%s], shard size: [%s], free after allocating shard: [%s]", new ByteSizeValue(freeBytes), new ByteSizeValue(shardSize), new ByteSizeValue(freeBytesAfterShard));
}
Also used: ClusterInfo (org.elasticsearch.cluster.ClusterInfo), ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), DiskUsage (org.elasticsearch.cluster.DiskUsage)
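
Two pieces of arithmetic drive the decision above: free-space thresholds are converted to used-space thresholds (used = 100 - free), and the free percentage is re-projected after subtracting the shard's expected size, mirroring what freeDiskPercentageAfterShardAssigned computes. A self-contained sketch of that math with hypothetical numbers (no Elasticsearch classes required):

public class WatermarkMathSketch {
    public static void main(String[] args) {
        // Hypothetical node: 100 GiB total, 20 GiB free; low watermark at 85% used (15% free).
        final long totalBytes = 100L * 1024 * 1024 * 1024;
        final long freeBytes  =  20L * 1024 * 1024 * 1024;
        final double freeDiskThresholdLow = 15.0;
        // Same conversion as in canAllocate: used threshold = 100 - free threshold.
        final double usedDiskThresholdLow = 100.0 - freeDiskThresholdLow; // 85.0

        double freeDiskPercentage = 100.0 * freeBytes / totalBytes; // 20.0% free now
        System.out.println("free now: " + freeDiskPercentage + "% (used threshold " + usedDiskThresholdLow + "%)");

        // Project free space after assigning a hypothetical 10 GiB shard.
        final long shardSize = 10L * 1024 * 1024 * 1024;
        double freeAfterShard = 100.0 * (freeBytes - shardSize) / totalBytes; // 10.0% free
        System.out.println("free after shard: " + freeAfterShard + "%");

        // 10% free < 15% required free: assigning the shard would push the node over the watermark.
        System.out.println("over watermark after shard: " + (freeAfterShard < freeDiskThresholdLow));
    }
}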

Example 75 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From class HierarchyCircuitBreakerService, method checkParentLimit.

/**
 * Checks whether the parent breaker has been tripped.
 */
public void checkParentLimit(String label) throws CircuitBreakingException {
    long totalUsed = 0;
    for (CircuitBreaker breaker : this.breakers.values()) {
        totalUsed += (breaker.getUsed() * breaker.getOverhead());
    }
    long parentLimit = this.parentSettings.getLimit();
    if (totalUsed > parentLimit) {
        this.parentTripCount.incrementAndGet();
        final String message = "[parent] Data too large, data for [" + label + "]" + " would be [" + totalUsed + "/" + new ByteSizeValue(totalUsed) + "]" + ", which is larger than the limit of [" + parentLimit + "/" + new ByteSizeValue(parentLimit) + "]";
        throw new CircuitBreakingException(message, totalUsed, parentLimit);
    }
}
Also used: ChildMemoryCircuitBreaker (org.elasticsearch.common.breaker.ChildMemoryCircuitBreaker), CircuitBreaker (org.elasticsearch.common.breaker.CircuitBreaker), NoopCircuitBreaker (org.elasticsearch.common.breaker.NoopCircuitBreaker), CircuitBreakingException (org.elasticsearch.common.breaker.CircuitBreakingException), ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue)
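
The parent check is just an overhead-weighted sum over the child breakers compared against a single parent limit. A minimal sketch of the same accounting with plain arrays (the breaker figures and the 700,000,000-byte limit are made up for illustration):

public class ParentLimitSketch {
    public static void main(String[] args) {
        // Hypothetical child breakers: estimated bytes used and a constant overhead multiplier,
        // mirroring breaker.getUsed() * breaker.getOverhead() in checkParentLimit.
        long[]   used     = {400_000_000L, 350_000_000L};   // e.g. request, fielddata
        double[] overhead = {1.0, 1.03};

        long totalUsed = 0;
        for (int i = 0; i < used.length; i++) {
            totalUsed += (long) (used[i] * overhead[i]);
        }

        long parentLimit = 700_000_000L; // hypothetical parent limit in bytes
        if (totalUsed > parentLimit) {
            // Same shape as the CircuitBreakingException message in the method above.
            System.out.println("[parent] Data too large: would be [" + totalUsed
                + "], which is larger than the limit of [" + parentLimit + "]");
        }
    }
}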

Aggregations (usage count per type)

ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue): 145
Settings (org.elasticsearch.common.settings.Settings): 23
Test (org.junit.Test): 21
IOException (java.io.IOException): 16
CountDownLatch (java.util.concurrent.CountDownLatch): 13
ArrayList (java.util.ArrayList): 11
TimeValue (org.elasticsearch.common.unit.TimeValue): 11
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 9
Matchers.containsString (org.hamcrest.Matchers.containsString): 9
List (java.util.List): 8
AtomicReference (java.util.concurrent.atomic.AtomicReference): 8
Path (java.nio.file.Path): 7
Translog (org.elasticsearch.index.translog.Translog): 7
Arrays (java.util.Arrays): 6
Collections (java.util.Collections): 6
Collectors (java.util.stream.Collectors): 6
BulkProcessor (org.elasticsearch.action.bulk.BulkProcessor): 6
BulkRequest (org.elasticsearch.action.bulk.BulkRequest): 6
BytesArray (org.elasticsearch.common.bytes.BytesArray): 6
Matchers.equalTo (org.hamcrest.Matchers.equalTo): 6