Search in sources :

Example 96 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

The following example shows the method testRefreshAndWait of the class TestRegionHDFSBlockLocationFinder.

@Test
public void testRefreshAndWait() throws Exception {
    // Start from an empty cache so the effect of the refresh is observable.
    finder.getCache().invalidateAll();
    REGIONS.forEach(r -> assertNull(finder.getCache().getIfPresent(r)));
    // Synchronously recompute the block distribution for every region.
    finder.refreshAndWait(REGIONS);
    // Every region must now have a cached entry.
    REGIONS.forEach(r -> assertNotNull(finder.getCache().getIfPresent(r)));
}
Also used : RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Test(org.junit.Test)

Example 97 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

The following example shows the method setUpBeforeClass of the class TestRegionHDFSBlockLocationFinder.

@BeforeClass
public static void setUpBeforeClass() {
    // Build a fixed table descriptor and 100 contiguous regions covering the
    // whole keyspace: [EMPTY, 2), [2, 3), ..., [100, EMPTY).
    TD = TableDescriptorBuilder.newBuilder(TableName.valueOf("RegionLocationFinder")).build();
    int numRegions = 100;
    REGIONS = new ArrayList<>(numRegions);
    for (int i = 1; i <= numRegions; i++) {
        // The loop is 1-based, so the first iteration is i == 1; the previous
        // `i == 0` check was dead code and the first region wrongly started at
        // Bytes.toBytes(1) instead of the empty start row.
        byte[] startKey = i == 1 ? HConstants.EMPTY_START_ROW : Bytes.toBytes(i);
        // The last region is open-ended (empty end key).
        byte[] endKey = i == numRegions ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes(i + 1);
        RegionInfo region = RegionInfoBuilder.newBuilder(TD.getTableName()).setStartKey(startKey).setEndKey(endKey).build();
        REGIONS.add(region);
    }
}
Also used : RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) BeforeClass(org.junit.BeforeClass)

Example 98 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

The following example shows the method testGetTopBlockLocations of the class TestRegionHDFSBlockLocationFinder.

@Test
public void testGetTopBlockLocations() {
    // Fabricate ten live servers so the finder can rank block locations.
    Map<ServerName, ServerMetrics> liveServers = new HashMap<>();
    for (int idx = 0; idx < 10; idx++) {
        liveServers.put(ServerName.valueOf("host-" + idx, 12345, 12345), null);
    }
    ClusterMetrics clusterMetrics = mock(ClusterMetrics.class);
    when(clusterMetrics.getLiveServerMetrics()).thenReturn(liveServers);
    finder.setClusterMetrics(clusterMetrics);
    // The top locations for each region must be ordered by descending HDFS block weight.
    for (RegionInfo region : REGIONS) {
        List<ServerName> topLocations = finder.getTopBlockLocations(region);
        HDFSBlocksDistribution distribution = generate(region);
        long lastSeenWeight = Long.MAX_VALUE;
        for (ServerName server : topLocations) {
            long currentWeight = distribution.getWeight(server.getHostname());
            assertTrue(currentWeight <= lastSeenWeight);
            lastSeenWeight = currentWeight;
        }
    }
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) HashMap(java.util.HashMap) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution) Test(org.junit.Test)

Example 99 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

The following example shows the method testBalanceClusterOverallStrictly of the class TestSimpleLoadBalancer.

@Test
public void testBalanceClusterOverallStrictly() throws Exception {
    // Per-server region counts for two tables; table1 is deliberately skewed.
    int[] table1Counts = { 3, 3, 4, 4, 4, 4, 5, 5, 5 };
    int[] table2Counts = { 2, 2, 2, 2, 2, 2, 2, 2, 1 };
    TreeMap<ServerName, List<RegionInfo>> regionsByServer = new TreeMap<>();
    List<ServerAndLoad> initialLoads = new ArrayList<>();
    for (int i = 0; i < table1Counts.length; i++) {
        ServerName server = ServerName.valueOf("server" + i, 1000, -1);
        // Combine both tables' regions under the same server.
        List<RegionInfo> combined = createRegions(table1Counts[i], TableName.valueOf("table1"));
        combined.addAll(createRegions(table2Counts[i], TableName.valueOf("table2")));
        regionsByServer.put(server, combined);
        initialLoads.add(new ServerAndLoad(server, table1Counts[i] + table2Counts[i]));
    }
    HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> loadOfAllTable = mockClusterServersWithTables(regionsByServer);
    loadBalancer.setClusterLoad((Map) loadOfAllTable);
    // Balance only table1; the overall per-server load must still even out.
    List<RegionPlan> partialPlans = loadBalancer.balanceTable(TableName.valueOf("table1"), loadOfAllTable.get(TableName.valueOf("table1")));
    List<ServerAndLoad> balancedLoads = reconcile(initialLoads, partialPlans, regionsByServer);
    for (ServerAndLoad balanced : balancedLoads) {
        // 9 servers x 6 regions = 54 total regions, perfectly even.
        assertEquals(6, balanced.getLoad());
    }
}
Also used : ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TreeMap(java.util.TreeMap) TableName(org.apache.hadoop.hbase.TableName) RegionPlan(org.apache.hadoop.hbase.master.RegionPlan) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List) Test(org.junit.Test)

Example 100 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

The following example shows the method testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack of the class TestFavoredNodeAssignmentHelper.

@Test
public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
    // One rack has two servers, the other only one. Secondary/tertiary replicas
    // should be placed on a rack different from the primary's; when the other
    // rack cannot host both, the tertiary falls back to the primary's rack.
    // Either way, at least two distinct racks must be used per region.
    Map<String, Integer> rackToServerCount = new HashMap<>();
    rackToServerCount.put("rack1", 2);
    rackToServerCount.put("rack2", 1);
    Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>> placement = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
    FavoredNodeAssignmentHelper helper = placement.getSecond();
    Map<RegionInfo, ServerName> primaryAssignments = placement.getFirst();
    List<RegionInfo> regions = placement.getThird();
    assertTrue(primaryAssignments.size() == 6);
    Map<RegionInfo, ServerName[]> secondaryTertiary = helper.placeSecondaryAndTertiaryRS(primaryAssignments);
    assertTrue(secondaryTertiary.size() == regions.size());
    for (RegionInfo region : regions) {
        ServerName primary = primaryAssignments.get(region);
        ServerName[] backups = secondaryTertiary.get(region);
        // Collect the racks used by primary, secondary and tertiary.
        Set<String> racksUsed = Sets.newHashSet(rackManager.getRack(primary));
        racksUsed.add(rackManager.getRack(backups[0]));
        racksUsed.add(rackManager.getRack(backups[1]));
        assertTrue(racksUsed.size() >= 2);
    }
}
Also used : HashMap(java.util.HashMap) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) NavigableMap(java.util.NavigableMap) TreeMap(java.util.TreeMap) Test(org.junit.Test)

Aggregations

RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)824 Test (org.junit.Test)416 TableName (org.apache.hadoop.hbase.TableName)311 ServerName (org.apache.hadoop.hbase.ServerName)191 ArrayList (java.util.ArrayList)175 IOException (java.io.IOException)174 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)174 Path (org.apache.hadoop.fs.Path)141 List (java.util.List)118 HashMap (java.util.HashMap)90 Table (org.apache.hadoop.hbase.client.Table)90 Map (java.util.Map)81 Put (org.apache.hadoop.hbase.client.Put)81 Configuration (org.apache.hadoop.conf.Configuration)80 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)67 TreeMap (java.util.TreeMap)66 Result (org.apache.hadoop.hbase.client.Result)59 FileSystem (org.apache.hadoop.fs.FileSystem)58 Cell (org.apache.hadoop.hbase.Cell)50 Scan (org.apache.hadoop.hbase.client.Scan)46