Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in project hbase by apache.
From the class TestRegionHDFSBlockLocationFinder, method testGetBlockDistribution:
@Test
public void testGetBlockDistribution() {
  Map<RegionInfo, HDFSBlocksDistribution> cache = new HashMap<>();
  for (RegionInfo region : REGIONS) {
    HDFSBlocksDistribution hbd = finder.getBlockDistribution(region);
    assertHostAndWeightEquals(generate(region), hbd);
    cache.put(region, hbd);
  }
  // the instance should be cached
  for (RegionInfo region : REGIONS) {
    HDFSBlocksDistribution hbd = finder.getBlockDistribution(region);
    assertSame(cache.get(region), hbd);
  }
}
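For context, an HDFSBlocksDistribution accumulates, per host, the aggregate weight (in bytes) of the HDFS blocks stored on that host. A minimal sketch of the core API exercised by the tests on this page; the host names and block weights below are made up for illustration:

HDFSBlocksDistribution distribution = new HDFSBlocksDistribution();
// a 100-byte block replicated on host-0 and host-1, plus a 200-byte block on host-1 only
distribution.addHostsAndBlockWeight(new String[] { "host-0", "host-1" }, 100);
distribution.addHostsAndBlockWeight(new String[] { "host-1" }, 200);
distribution.getUniqueBlocksTotalWeight();   // 300
distribution.getBlockLocalityIndex("host-1"); // 1.0, all 300 bytes are local to host-1
distribution.getBlockLocalityIndex("host-0"); // ~0.33, 100 of 300 bytes are local to host-0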
Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in project hbase by apache.
From the class TestRegionHDFSBlockLocationFinder, method testRefreshRegionsWithChangedLocality:
@Test
public void testRefreshRegionsWithChangedLocality() {
  ServerName testServer = ServerName.valueOf("host-0", 12345, 12345);
  RegionInfo testRegion = REGIONS.get(0);
  Map<RegionInfo, HDFSBlocksDistribution> cache = new HashMap<>();
  for (RegionInfo region : REGIONS) {
    HDFSBlocksDistribution hbd = finder.getBlockDistribution(region);
    assertHostAndWeightEquals(generate(region), hbd);
    cache.put(region, hbd);
  }
  finder
    .setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), 0.123f));
  // everything should be cached, because metrics were null before
  for (RegionInfo region : REGIONS) {
    HDFSBlocksDistribution hbd = finder.getBlockDistribution(region);
    assertSame(cache.get(region), hbd);
  }
  finder
    .setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), 0.345f));
  // locality changed just for our test region, so it should no longer be the same
  for (RegionInfo region : REGIONS) {
    HDFSBlocksDistribution hbd = finder.getBlockDistribution(region);
    if (region.equals(testRegion)) {
      assertNotSame(cache.get(region), hbd);
    } else {
      assertSame(cache.get(region), hbd);
    }
  }
}
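Both tests above rely on helpers defined in TestRegionHDFSBlockLocationFinder but not shown here: generate(region) produces the expected distribution for a region, and assertHostAndWeightEquals compares two distributions. As a rough approximation of what such a comparison could look like using only the public getHostAndWeights() API (a hypothetical helper, not the one from the test class):

// Hypothetical comparison helper; the real assertHostAndWeightEquals lives in the test class.
private void assertSameHostsAndWeights(HDFSBlocksDistribution expected,
  HDFSBlocksDistribution actual) {
  Map<String, HDFSBlocksDistribution.HostAndWeight> expectedHosts = expected.getHostAndWeights();
  Map<String, HDFSBlocksDistribution.HostAndWeight> actualHosts = actual.getHostAndWeights();
  assertEquals(expectedHosts.keySet(), actualHosts.keySet());
  for (String host : expectedHosts.keySet()) {
    assertEquals(expectedHosts.get(host).getWeight(), actualHosts.get(host).getWeight());
  }
}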
Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in project hbase by apache.
From the class TestInputStreamBlockDistribution, method itDerivesLocalityFromFileLinkInputStream:
@Test
public void itDerivesLocalityFromFileLinkInputStream() throws Exception {
  List<Path> files = new ArrayList<Path>();
  files.add(testPath);
  FileLink link = new FileLink(files);
  try (FSDataInputStream stream = link.open(fs)) {
    HDFSBlocksDistribution initial = new HDFSBlocksDistribution();
    InputStreamBlockDistribution test =
      new InputStreamBlockDistribution(stream, getMockedStoreFileInfo(initial, true));
    assertSame(initial, test.getHDFSBlockDistribution());
    test.setLastCachedAt(test.getCachePeriodMs() + 1);
    assertNotSame(initial, test.getHDFSBlockDistribution());
  }
}
Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in project hbase by apache.
From the class TestInputStreamBlockDistribution, method itFallsBackOnLastKnownValueOnException:
@Test
public void itFallsBackOnLastKnownValueOnException() throws IOException {
  HdfsDataInputStream fakeStream = mock(HdfsDataInputStream.class);
  when(fakeStream.getAllBlocks()).thenThrow(new IOException("test"));
  HDFSBlocksDistribution initial = new HDFSBlocksDistribution();
  InputStreamBlockDistribution test =
    new InputStreamBlockDistribution(fakeStream, getMockedStoreFileInfo(initial, false));
  assertSame(initial, test.getHDFSBlockDistribution());
  test.setLastCachedAt(test.getCachePeriodMs() + 1);
  // fakeStream throws an exception, so falls back on original
  assertSame(initial, test.getHDFSBlockDistribution());
  assertFalse(test.isStreamUnsupported());
}
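Together, the last two tests pin down the caching contract of InputStreamBlockDistribution: the distribution supplied via the mocked StoreFileInfo is served until the cache period elapses, a successful refresh from the open stream replaces it, and a failed refresh silently falls back to the last known value. A minimal sketch of that contract, assuming a simple time-gated cache; this is illustrative only and not the actual HBase implementation, and the class and method names below are invented for the example:

// Illustrative time-gated cache with fallback; not the real
// org.apache.hadoop.hbase.regionserver.InputStreamBlockDistribution.
class CachedDistribution {
  private HDFSBlocksDistribution lastKnown; // last successfully computed value
  private long lastCachedAt;                // epoch millis of the last refresh
  private final long cachePeriodMs;

  CachedDistribution(HDFSBlocksDistribution initial, long cachePeriodMs) {
    this.lastKnown = initial;
    this.lastCachedAt = System.currentTimeMillis();
    this.cachePeriodMs = cachePeriodMs;
  }

  HDFSBlocksDistribution get(java.util.concurrent.Callable<HDFSBlocksDistribution> refresh) {
    if (System.currentTimeMillis() - lastCachedAt <= cachePeriodMs) {
      return lastKnown; // cache period has not elapsed: serve the cached instance
    }
    try {
      lastKnown = refresh.call(); // a successful refresh replaces the cached value
      lastCachedAt = System.currentTimeMillis();
    } catch (Exception e) {
      // refresh failed: keep serving the last known value, as the test above asserts
    }
    return lastKnown;
  }
}

Setting lastCachedAt to a tiny epoch value, as test.setLastCachedAt(test.getCachePeriodMs() + 1) does above, is simply a way to force the cache period to appear expired so the next call attempts a refresh.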