Use of org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider in project elasticsearch by elastic.
From the class ClusterModule, method createAllocationDeciders:
// TODO: this is public so allocation benchmark can access the default deciders...can we do that in another way?
/** Return a new {@link AllocationDecider} instance with builtin deciders as well as those from plugins. */
public static Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings, List<ClusterPlugin> clusterPlugins) {
    // collect deciders by class so that we can detect duplicates
    Map<Class, AllocationDecider> deciders = new LinkedHashMap<>();
    addAllocationDecider(deciders, new MaxRetryAllocationDecider(settings));
    addAllocationDecider(deciders, new ReplicaAfterPrimaryActiveAllocationDecider(settings));
    addAllocationDecider(deciders, new RebalanceOnlyWhenActiveAllocationDecider(settings));
    addAllocationDecider(deciders, new ClusterRebalanceAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new ConcurrentRebalanceAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new EnableAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new NodeVersionAllocationDecider(settings));
    addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider(settings));
    addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new SameShardAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new ThrottlingAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new ShardsLimitAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new AwarenessAllocationDecider(settings, clusterSettings));
    clusterPlugins.stream()
        .flatMap(p -> p.createAllocationDeciders(settings, clusterSettings).stream())
        .forEach(d -> addAllocationDecider(deciders, d));
    return deciders.values();
}
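The duplicate detection hinted at by the comment relies on the map being keyed by each decider's concrete class. The addAllocationDecider helper is private and not shown on this page; a minimal sketch of what it plausibly does (the exact exception type and message are assumptions):

private static void addAllocationDecider(Map<Class, AllocationDecider> deciders, AllocationDecider decider) {
    // LinkedHashMap preserves registration order; put() returning non-null
    // means a decider of the same class was already registered.
    if (deciders.put(decider.getClass(), decider) != null) {
        throw new IllegalArgumentException("Cannot specify allocation decider [" + decider.getClass().getName() + "] twice");
    }
}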
Use of org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider in project crate by crate.
From the class SysNodeChecksTest, method testValidationDiskWatermarkCheckInBytes:
@Test
public void testValidationDiskWatermarkCheckInBytes() {
    DiskWatermarkNodesSysCheck highDiskWatermark = new HighDiskWatermarkNodesSysCheck(
        clusterService, mock(Provider.class), mock(FsProbe.class));
    assertThat(highDiskWatermark.id(), is(5));
    assertThat(highDiskWatermark.severity(), is(SysCheck.Severity.HIGH));

    DiskThresholdDecider decider = mock(DiskThresholdDecider.class);

    // disk.watermark.high: 170b
    // A path must have at least 170 bytes to pass the check; only 160 bytes are available.
    when(decider.getFreeDiskThresholdHigh()).thenReturn(0.0);
    when(decider.getFreeBytesThresholdHigh()).thenReturn(new ByteSizeValue(170));
    assertThat(highDiskWatermark.validate(decider, 160, 300), is(false));

    // disk.watermark.high: 130b
    // A path must have at least 130 bytes to pass the check; 140 bytes are available.
    when(decider.getFreeDiskThresholdHigh()).thenReturn(0.0);
    when(decider.getFreeBytesThresholdHigh()).thenReturn(new ByteSizeValue(130));
    assertThat(highDiskWatermark.validate(decider, 140, 300), is(true));
}
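With the percentage threshold stubbed to zero, these assertions pin down the bytes branch of the validate(decider, free, total) overload: the check passes only while the free bytes exceed the high watermark (170 > 160 fails, 140 > 130 passes). A hedged sketch of that comparison; validateBytes is an illustrative name, not the crate API:

// Illustrative only: the free bytes on the checked path must stay above
// the configured high watermark for the check to pass.
static boolean validateBytes(DiskThresholdDecider decider, long freeBytes) {
    return freeBytes > decider.getFreeBytesThresholdHigh().getBytes();
}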
Use of org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider in project crate by crate.
From the class SysNodeChecksTest, method testValidationDiskWatermarkCheckInPercents:
@Test
public void testValidationDiskWatermarkCheckInPercents() {
    DiskWatermarkNodesSysCheck lowDiskWatermark = new LowDiskWatermarkNodesSysCheck(
        clusterService, mock(Provider.class), mock(FsProbe.class));
    assertThat(lowDiskWatermark.id(), is(6));
    assertThat(lowDiskWatermark.severity(), is(SysCheck.Severity.HIGH));

    DiskThresholdDecider decider = mock(DiskThresholdDecider.class);

    // disk.watermark.low: 75%. The check must fail when at least 75% of the disk is used.
    // Free: 150 bytes, total: 300 bytes, so 50% of the disk is used.
    // freeDiskThresholdLow = 100.0 - 75.0
    when(decider.getFreeDiskThresholdLow()).thenReturn(25.0);
    when(decider.getFreeBytesThresholdLow()).thenReturn(new ByteSizeValue(0));
    assertThat(lowDiskWatermark.validate(decider, 150, 300), is(true));

    // disk.watermark.low: 45%. The check must fail when at least 45% of the disk is used.
    // Free: 30 bytes, total: 100 bytes, so 70% of the disk is used.
    // freeDiskThresholdLow = 100.0 - 45.0
    when(decider.getFreeDiskThresholdLow()).thenReturn(55.0);
    when(decider.getFreeBytesThresholdLow()).thenReturn(new ByteSizeValue(0));
    assertThat(lowDiskWatermark.validate(decider, 30, 100), is(false));
}
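These assertions pin down the percentage branch: the free-disk percentage must stay above the decider's low watermark. A hedged sketch consistent with both test cases; validatePercent is an illustrative name, not the crate API:

// Illustrative only: 150 free of 300 total is 50% free, above a 25% floor
// (passes); 30 free of 100 total is 30% free, below a 55% floor (fails).
static boolean validatePercent(DiskThresholdDecider decider, long free, long total) {
    double freeDiskAsPercentage = 100.0 * free / total;
    return freeDiskAsPercentage > decider.getFreeDiskThresholdLow();
}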
Use of org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider in project crate by crate.
From the class DiskWatermarkNodesSysCheck, method validate:
@Override
public boolean validate() {
    try {
        DiskThresholdDecider decider = deciderProvider.get();
        if (!decider.isEnabled()) {
            return false;
        }
        FsInfo.Path leastAvailablePath = getLeastAvailablePath();
        return validate(decider, leastAvailablePath.getAvailable().getBytes(), leastAvailablePath.getTotal().getBytes());
    } catch (IOException e) {
        LOGGER.error("Unable to determine the node disk usage while validating the high/low disk watermark check", e);
        return false;
    }
}
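getLeastAvailablePath() is not shown on this page. A plausible sketch under the assumption that it scans the FsInfo reported by the filesystem probe (FsInfo is iterable over its FsInfo.Path entries) and keeps the path with the fewest available bytes, so the watermark is checked against the worst-case path:

// Assumed helper, not the actual crate implementation.
private static FsInfo.Path leastAvailablePath(FsInfo fsInfo) {
    FsInfo.Path leastAvailable = null;
    for (FsInfo.Path path : fsInfo) {
        if (leastAvailable == null || path.getAvailable().getBytes() < leastAvailable.getAvailable().getBytes()) {
            leastAvailable = path;
        }
    }
    return leastAvailable;
}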
Use of org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider in project crate by crate.
From the class ClusterModule, method createAllocationDeciders:
// TODO: this is public so allocation benchmark can access the default deciders...can we do that in another way?
/**
 * Return a new {@link AllocationDecider} instance with builtin deciders as well as those from plugins.
 */
public static Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings, List<ClusterPlugin> clusterPlugins) {
    // collect deciders by class so that we can detect duplicates
    Map<Class, AllocationDecider> deciders = new LinkedHashMap<>();
    addAllocationDecider(deciders, new MaxRetryAllocationDecider());
    addAllocationDecider(deciders, new ResizeAllocationDecider());
    addAllocationDecider(deciders, new ReplicaAfterPrimaryActiveAllocationDecider());
    addAllocationDecider(deciders, new RebalanceOnlyWhenActiveAllocationDecider());
    addAllocationDecider(deciders, new ClusterRebalanceAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new ConcurrentRebalanceAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new EnableAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new NodeVersionAllocationDecider());
    addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider());
    addAllocationDecider(deciders, new RestoreInProgressAllocationDecider());
    addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new SameShardAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new ThrottlingAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new ShardsLimitAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new AwarenessAllocationDecider(settings, clusterSettings));
    addAllocationDecider(deciders, new DecommissionAllocationDecider(settings, clusterSettings));
    clusterPlugins.stream()
        .flatMap(p -> p.createAllocationDeciders(settings, clusterSettings).stream())
        .forEach(d -> addAllocationDecider(deciders, d));
    return deciders.values();
}
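The final stream is where plugin-supplied deciders enter the map; a plugin decider whose class duplicates a builtin would trip the same duplicate check. A hypothetical plugin contributing one custom decider via the ClusterPlugin extension point (MyClusterPlugin and MyAllocationDecider are illustrative stand-ins, not classes from either project):

public class MyClusterPlugin extends Plugin implements ClusterPlugin {
    @Override
    public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
        // Deciders returned here are appended after the builtin deciders above.
        return Collections.singletonList(new MyAllocationDecider());
    }
}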