
Example 26 with HDFSBlocksDistribution

Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in project hbase by apache.

From the class TestRegionHDFSBlockLocationFinder, method testGetBlockDistribution: the first lookup computes each region's block distribution, and repeated lookups must return the same cached instance.

@Test
public void testGetBlockDistribution() {
    Map<RegionInfo, HDFSBlocksDistribution> cache = new HashMap<>();
    // first pass: each lookup computes the distribution; keep our own copy for later
    for (RegionInfo region : REGIONS) {
        HDFSBlocksDistribution hbd = finder.getBlockDistribution(region);
        assertHostAndWeightEquals(generate(region), hbd);
        cache.put(region, hbd);
    }
    // the instance should be cached
    for (RegionInfo region : REGIONS) {
        HDFSBlocksDistribution hbd = finder.getBlockDistribution(region);
        assertSame(cache.get(region), hbd);
    }
}
Also used: HashMap(java.util.HashMap) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution) Test(org.junit.Test)
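
For orientation, here is a minimal sketch of what an HDFSBlocksDistribution holds. The host names and weights below are made up for illustration, but addHostsAndBlockWeight, getWeight, getUniqueBlocksTotalWeight, and getBlockLocalityIndex are the class's public accessors (JUnit asserts assumed statically imported):

HDFSBlocksDistribution distribution = new HDFSBlocksDistribution();
// each call records one block's size against every host that stores a replica
distribution.addHostsAndBlockWeight(new String[] { "host-0", "host-1" }, 100);
distribution.addHostsAndBlockWeight(new String[] { "host-0" }, 50);
// host-0 stores replicas covering all 150 bytes of unique block weight
assertEquals(150, distribution.getWeight("host-0"));
assertEquals(150, distribution.getUniqueBlocksTotalWeight());
assertEquals(1.0f, distribution.getBlockLocalityIndex("host-0"), 0.001f);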

Example 27 with HDFSBlocksDistribution

Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in project hbase by apache.

From the class TestRegionHDFSBlockLocationFinder, method testRefreshRegionsWithChangedLocality: cached distributions survive a cluster-metrics update unless the reported locality for a region actually changes.

@Test
public void testRefreshRegionsWithChangedLocality() {
    ServerName testServer = ServerName.valueOf("host-0", 12345, 12345);
    RegionInfo testRegion = REGIONS.get(0);
    Map<RegionInfo, HDFSBlocksDistribution> cache = new HashMap<>();
    // warm the finder's cache and remember the instances it hands out
    for (RegionInfo region : REGIONS) {
        HDFSBlocksDistribution hbd = finder.getBlockDistribution(region);
        assertHostAndWeightEquals(generate(region), hbd);
        cache.put(region, hbd);
    }
    finder.setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), 0.123f));
    // everything should still be cached: the previous metrics were null, so no locality changed
    for (RegionInfo region : REGIONS) {
        HDFSBlocksDistribution hbd = finder.getBlockDistribution(region);
        assertSame(cache.get(region), hbd);
    }
    finder.setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), 0.345f));
    // locality changed just for our test region, so it should no longer be the same
    for (RegionInfo region : REGIONS) {
        HDFSBlocksDistribution hbd = finder.getBlockDistribution(region);
        if (region.equals(testRegion)) {
            assertNotSame(cache.get(region), hbd);
        } else {
            assertSame(cache.get(region), hbd);
        }
    }
}
Also used: HashMap(java.util.HashMap) ServerName(org.apache.hadoop.hbase.ServerName) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution) Test(org.junit.Test)
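
The refresh logic being tested boils down to per-region invalidation: a cached distribution is discarded only when the reported locality for that region changes. A generic sketch of that pattern follows; the class and method names (RegionLocalityCache, reportLocality) are hypothetical, not HBase's implementation:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// hypothetical sketch of locality-change invalidation, not HBase's actual code
class RegionLocalityCache<K, V> {
    private final Map<K, V> values = new HashMap<>();
    private final Map<K, Float> localities = new HashMap<>();

    // return the cached value, computing and storing it on first access
    V get(K region, Function<K, V> compute) {
        return values.computeIfAbsent(region, compute);
    }

    // drop the cached value only if the reported locality actually changed
    void reportLocality(K region, float locality) {
        Float previous = localities.put(region, locality);
        if (previous != null && previous.floatValue() != locality) {
            values.remove(region);
        }
    }
}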

Example 28 with HDFSBlocksDistribution

Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in project hbase by apache.

From the class TestInputStreamBlockDistribution, method itDerivesLocalityFromFileLinkInputStream: once the cache period elapses, the distribution is recomputed from a FileLink-backed stream.

@Test
public void itDerivesLocalityFromFileLinkInputStream() throws Exception {
    List<Path> files = new ArrayList<Path>();
    files.add(testPath);
    // a FileLink tracks a file that may live at any of several paths
    FileLink link = new FileLink(files);
    try (FSDataInputStream stream = link.open(fs)) {
        HDFSBlocksDistribution initial = new HDFSBlocksDistribution();
        InputStreamBlockDistribution test = new InputStreamBlockDistribution(stream, getMockedStoreFileInfo(initial, true));
        assertSame(initial, test.getHDFSBlockDistribution());
        // back-date the cache timestamp so the next call recomputes from the stream
        test.setLastCachedAt(test.getCachePeriodMs() + 1);
        assertNotSame(initial, test.getHDFSBlockDistribution());
    }
}
Also used: Path(org.apache.hadoop.fs.Path) ArrayList(java.util.ArrayList) FileLink(org.apache.hadoop.hbase.io.FileLink) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution) Test(org.junit.Test)
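
The setLastCachedAt call above works because InputStreamBlockDistribution recomputes only after its cache period elapses. Below is a generic sketch of that refresh-on-expiry pattern with hypothetical names (TimedCache, the Supplier-based get); only the staleness check mirrors the class under test:

import java.util.function.Supplier;

// hypothetical sketch of refresh-on-expiry, not InputStreamBlockDistribution itself
class TimedCache<T> {
    private final long cachePeriodMs;
    private T value;
    private long lastCachedAt;

    TimedCache(T initial, long cachePeriodMs) {
        this.value = initial;
        this.cachePeriodMs = cachePeriodMs;
        this.lastCachedAt = System.currentTimeMillis();
    }

    T get(Supplier<T> recompute) {
        long now = System.currentTimeMillis();
        if (now - lastCachedAt > cachePeriodMs) {
            // stale: recompute and remember when we did
            value = recompute.get();
            lastCachedAt = now;
        }
        return value;
    }

    // test hook mirroring setLastCachedAt: back-date the timestamp to force a refresh
    void setLastCachedAt(long timestamp) {
        this.lastCachedAt = timestamp;
    }
}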

Example 29 with HDFSBlocksDistribution

Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in project hbase by apache.

From the class TestInputStreamBlockDistribution, method itFallsBackOnLastKnownValueOnException: if reading block locations throws, the last known distribution is returned.

@Test
public void itFallsBackOnLastKnownValueOnException() throws IOException {
    // mock a stream whose attempt to list blocks always fails
    HdfsDataInputStream fakeStream = mock(HdfsDataInputStream.class);
    when(fakeStream.getAllBlocks()).thenThrow(new IOException("test"));
    HDFSBlocksDistribution initial = new HDFSBlocksDistribution();
    InputStreamBlockDistribution test = new InputStreamBlockDistribution(fakeStream, getMockedStoreFileInfo(initial, false));
    assertSame(initial, test.getHDFSBlockDistribution());
    test.setLastCachedAt(test.getCachePeriodMs() + 1);
    // fakeStream throws, so the lookup falls back on the last known distribution
    assertSame(initial, test.getHDFSBlockDistribution());
    assertFalse(test.isStreamUnsupported());
}
Also used: IOException(java.io.IOException) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream) Test(org.junit.Test)
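
The fallback the test asserts is simple to state on its own: attempt the refresh, and on IOException keep serving the last known value. A hedged sketch, where computeDistribution stands in for whatever reads block locations off the stream:

// hypothetical sketch of the fall-back-on-error behaviour the test asserts
HDFSBlocksDistribution refresh(HDFSBlocksDistribution lastKnown, HdfsDataInputStream stream) {
    try {
        // computeDistribution is a hypothetical helper, not an HBase API
        return computeDistribution(stream);
    } catch (IOException e) {
        // reading block locations failed; keep serving the last known distribution
        return lastKnown;
    }
}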

Aggregations

Classes used alongside HDFSBlocksDistribution across all 29 examples, with occurrence counts:

HDFSBlocksDistribution (org.apache.hadoop.hbase.HDFSBlocksDistribution): 29
Test (org.junit.Test): 11
Path (org.apache.hadoop.fs.Path): 8
ArrayList (java.util.ArrayList): 6
FileSystem (org.apache.hadoop.fs.FileSystem): 6
HashMap (java.util.HashMap): 5
ExecutionException (java.util.concurrent.ExecutionException): 5
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 5
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 5
IOException (java.io.IOException): 4
Configuration (org.apache.hadoop.conf.Configuration): 4
FileStatus (org.apache.hadoop.fs.FileStatus): 4
InterruptedIOException (java.io.InterruptedIOException): 3
Cell (org.apache.hadoop.hbase.Cell): 3
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 3
Result (org.apache.hadoop.hbase.client.Result): 3
Scan (org.apache.hadoop.hbase.client.Scan): 3
TimeoutIOException (org.apache.hadoop.hbase.exceptions.TimeoutIOException): 3
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 2