Use of org.apache.hadoop.hbase.Size in project hbase by apache.
From the class TestSimpleRegionNormalizer, method setupMocksForNormalizer.
/**
 * Wires up deep-stubbed {@code MasterServices} and {@code TableDescriptor} mocks so that the
 * normalizer under test observes the supplied regions and their store file sizes.
 */
@SuppressWarnings("MockitoCast")
private void setupMocksForNormalizer(Map<byte[], Integer> regionSizes, List<RegionInfo> regionInfoList) {
  masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS);
  tableDescriptor = Mockito.mock(TableDescriptor.class, RETURNS_DEEP_STUBS);
  // All regions are colocated on a single server; the normalizer does not care which one.
  ServerName serverName = ServerName.valueOf("localhost", 0, 0L);
  when(masterServices.getAssignmentManager().getRegionStates().getRegionsOfTable(any()))
    .thenReturn(regionInfoList);
  when(masterServices.getAssignmentManager().getRegionStates().getRegionServerOfRegion(any()))
    .thenReturn(serverName);
  when(masterServices.getAssignmentManager().getRegionStates().getRegionState(any(RegionInfo.class)))
    .thenReturn(RegionState.createForTesting(null, RegionState.State.OPEN));
  when(masterServices.isSplitOrMergeEnabled(any())).thenReturn(true);
  for (Map.Entry<byte[], Integer> entry : regionSizes.entrySet()) {
    RegionMetrics metrics = Mockito.mock(RegionMetrics.class);
    when(metrics.getRegionName()).thenReturn(entry.getKey());
    when(metrics.getStoreFileSize()).thenReturn(new Size(entry.getValue(), Size.Unit.MEGABYTE));
    // The (Object) cast works around a possible MockitoCast false positive on jdk9;
    // revisit once tests run on 9. See: http://errorprone.info/bugpattern/MockitoCast
    when((Object) masterServices.getServerManager().getLoad(serverName).getRegionMetrics()
      .get(entry.getKey())).thenReturn(metrics);
  }
  when(tableDescriptor.getTableName()).thenReturn(name.getTableName());
  normalizer = new SimpleRegionNormalizer();
  normalizer.setConf(conf);
  normalizer.setMasterServices(masterServices);
}
Use of org.apache.hadoop.hbase.Size in project hbase by apache.
From the class TestSimpleRegionNormalizerOnCluster, method createTableBegsSplit.
/**
 * Creates a table of 5 regions whose sizes (1, 1, 2, 3, 5 mb) provoke a split of
 * the largest region.
 * <ul>
 * <li>total table size: 12</li>
 * <li>average region size: 2.4</li>
 * <li>split threshold: 2.4 * 2 = 4.8</li>
 * </ul>
 */
private static int createTableBegsSplit(final TableName tableName, final boolean normalizerEnabled, final boolean isMergeEnabled) throws Exception {
  final List<HRegion> regions = generateTestData(tableName, 1, 1, 2, 3, 5);
  assertEquals(5, getRegionCount(tableName));
  admin.flush(tableName).get();
  final TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName).get())
    .setNormalizationEnabled(normalizerEnabled)
    .setMergeEnabled(isMergeEnabled)
    .build();
  admin.modifyTable(td).get();
  // Wait until reasonably accurate region statistics are reported for the test table;
  // the last/largest region serves as the clue.
  TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(1), new ExplainingPredicate<IOException>() {
    @Override
    public String explainFailure() {
      return "expected largest region to be >= 4mb.";
    }

    @Override
    public boolean evaluate() {
      final boolean allSizesReported = regions.stream()
        .mapToDouble(region -> getRegionSizeMB(master, region.getRegionInfo()))
        .allMatch(sizeMb -> sizeMb > 0);
      return allSizesReported
        && getRegionSizeMB(master, regions.get(4).getRegionInfo()) >= 4.0;
    }
  });
  return 5;
}
Use of org.apache.hadoop.hbase.Size in project hbase by apache.
From the class TestSimpleRegionNormalizerOnCluster, method createTableBegsMerge.
/**
 * Creates a table of 5 regions whose sizes (1, 1, 3, 3, 5 mb) provoke a merge of
 * the smallest regions.
 * <ul>
 * <li>total table size: 13</li>
 * <li>average region size: 2.6</li>
 * <li>sum of sizes of first two regions < average</li>
 * </ul>
 */
private static int createTableBegsMerge(final TableName tableName) throws Exception {
  // 5 regions sized so the two smallest together fall below the average region size
  final List<HRegion> regions = generateTestData(tableName, 1, 1, 3, 3, 5);
  assertEquals(5, getRegionCount(tableName));
  admin.flush(tableName).get();
  final TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName).get())
    .setNormalizationEnabled(true)
    .build();
  admin.modifyTable(td).get();
  // Wait until reasonably accurate region statistics are reported for the test table;
  // the last/largest region serves as the clue.
  LOG.debug("waiting for region statistics to settle.");
  TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(1), new ExplainingPredicate<IOException>() {
    @Override
    public String explainFailure() {
      return "expected largest region to be >= 4mb.";
    }

    @Override
    public boolean evaluate() {
      final boolean allSizesReported = regions.stream()
        .mapToDouble(region -> getRegionSizeMB(master, region.getRegionInfo()))
        .allMatch(sizeMb -> sizeMb > 0);
      return allSizesReported
        && getRegionSizeMB(master, regions.get(4).getRegionInfo()) >= 4.0;
    }
  });
  return 5;
}
Use of org.apache.hadoop.hbase.Size in project hbase by apache.
From the class TestRegionSizeCalculator, method mockRegion.
/**
 * Creates a {@code RegionMetrics} mock reporting the given region name and store file size.
 *
 * @param regionName name the mock returns for both the raw-bytes and string forms
 * @param fileSizeMb store file size occupied by the region, in megabytes
 * @return mocked region metrics backed by the supplied values
 */
private RegionMetrics mockRegion(String regionName, int fileSizeMb) {
RegionMetrics region = Mockito.mock(RegionMetrics.class);
when(region.getRegionName()).thenReturn(Bytes.toBytes(regionName));
when(region.getNameAsString()).thenReturn(regionName);
when(region.getStoreFileSize()).thenReturn(new Size(fileSizeMb, Size.Unit.MEGABYTE));
return region;
}
Use of org.apache.hadoop.hbase.Size in project hbase by apache.
From the class FieldValue, method plus.
/**
 * Returns a new {@code FieldValue} holding the sum (or, for strings, concatenation) of this
 * value and {@code o}.
 *
 * @param o the value to combine with this one; must share this value's type
 * @return a new {@code FieldValue} of the same type holding the combined value
 * @throws IllegalArgumentException if the two values have different types
 */
public FieldValue plus(FieldValue o) {
  if (type != o.type) {
    throw new IllegalArgumentException("invalid type");
  }
  switch (type) {
    case STRING:
      return new FieldValue(((String) value).concat((String) o.value), type);
    case INTEGER:
      return new FieldValue(((Integer) value) + ((Integer) o.value), type);
    case LONG:
      return new FieldValue(((Long) value) + ((Long) o.value), type);
    case FLOAT:
    case PERCENT:
      // PERCENT deliberately falls through: it is stored as a Float.
      return new FieldValue(((Float) value) + ((Float) o.value), type);
    case SIZE:
      // The sum is expressed in the unit of the left-hand operand.
      final Size lhs = (Size) value;
      final Size rhs = (Size) o.value;
      final Size.Unit lhsUnit = lhs.getUnit();
      return new FieldValue(new Size(lhs.get(lhsUnit) + rhs.get(lhsUnit), lhsUnit), type);
    default:
      throw new AssertionError();
  }
}
Aggregations