Search in sources :

Example 11 with RegionLoad

use of org.apache.hadoop.hbase.RegionLoad in project hbase by apache.

From class TestRegionSizeCalculator, method mockAdmin.

/**
 * Builds an {@link Admin} mock whose {@code getRegionLoad} call for table
 * "sizeTestTable" reports the supplied {@link RegionLoad} entries, keyed by
 * region name.
 */
private Admin mockAdmin(RegionLoad... regionLoadArray) throws Exception {
    // Collect the loads into a byte[]-keyed map first, then wire up the mock.
    Map<byte[], RegionLoad> loadByRegionName = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (int i = 0; i < regionLoadArray.length; i++) {
        loadByRegionName.put(regionLoadArray[i].getName(), regionLoadArray[i]);
    }
    Admin admin = Mockito.mock(Admin.class);
    when(admin.getConfiguration()).thenReturn(configuration);
    when(admin.getRegionLoad(sn, TableName.valueOf("sizeTestTable"))).thenReturn(loadByRegionName);
    return admin;
}
Also used : RegionLoad(org.apache.hadoop.hbase.RegionLoad) Admin(org.apache.hadoop.hbase.client.Admin) TreeMap(java.util.TreeMap)

Example 12 with RegionLoad

use of org.apache.hadoop.hbase.RegionLoad in project hbase by apache.

From class TestRegionSizeCalculator, method mockRegion.

/**
 * Creates a {@link RegionLoad} mock reporting the given region name and
 * store-file size.
 *
 * @param regionName name returned by both {@code getName()} (as bytes) and
 *        {@code getNameAsString()}
 * @param fileSizeMb number of megabytes occupied by the region in the file store
 */
private RegionLoad mockRegion(String regionName, int fileSizeMb) {
    RegionLoad region = Mockito.mock(RegionLoad.class);
    // Use an explicit charset: the no-arg String.getBytes() depends on the
    // platform default encoding, which would make the test environment-sensitive.
    when(region.getName()).thenReturn(regionName.getBytes(java.nio.charset.StandardCharsets.UTF_8));
    when(region.getNameAsString()).thenReturn(regionName);
    when(region.getStorefileSizeMB()).thenReturn(fileSizeMb);
    return region;
}
Also used : RegionLoad(org.apache.hadoop.hbase.RegionLoad)

Example 13 with RegionLoad

use of org.apache.hadoop.hbase.RegionLoad in project hbase by apache.

From class TestRSGroupsBase, method getTableServerRegionMap.

/**
 * Walks the cluster status and groups region names by table and then by the
 * server currently hosting them.
 *
 * @return map of table -> (server -> region names hosted there)
 * @throws IOException if the cluster status cannot be fetched
 */
public Map<TableName, Map<ServerName, List<String>>> getTableServerRegionMap() throws IOException {
    Map<TableName, Map<ServerName, List<String>>> map = Maps.newTreeMap();
    ClusterStatus status = TEST_UTIL.getHBaseClusterInterface().getClusterStatus();
    for (ServerName serverName : status.getServers()) {
        for (RegionLoad rl : status.getLoad(serverName).getRegionsLoad().values()) {
            TableName tableName;
            try {
                tableName = HRegionInfo.getTable(rl.getName());
            } catch (IllegalArgumentException e) {
                // Region name did not parse to a table; skip it but leave a trace.
                LOG.warn("Failed parse a table name from regionname=" + Bytes.toStringBinary(rl.getName()));
                continue;
            }
            // Single-pass insert: computeIfAbsent replaces the previous
            // containsKey/put pairs and repeated map.get() lookups.
            map.computeIfAbsent(tableName, t -> new TreeMap<>())
               .computeIfAbsent(serverName, s -> new LinkedList<>())
               .add(rl.getNameAsString());
        }
    }
    return map;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RegionLoad(org.apache.hadoop.hbase.RegionLoad) ServerName(org.apache.hadoop.hbase.ServerName) Map(java.util.Map) TreeMap(java.util.TreeMap) ClusterStatus(org.apache.hadoop.hbase.ClusterStatus)

Example 14 with RegionLoad

use of org.apache.hadoop.hbase.RegionLoad in project hbase by apache.

From class TestSimpleRegionNormalizer, method setupMocksForNormalizer.

/**
 * Wires deep-stubbed master mocks so the normalizer under test sees the given
 * regions (with their store-file sizes) all hosted on one fake server.
 *
 * @param regionSizes region name -> store-file size in MB, one mocked
 *        {@link RegionLoad} is created per entry
 * @param hris the region infos the mocked assignment manager reports for any table
 */
protected void setupMocksForNormalizer(Map<byte[], Integer> regionSizes, List<HRegionInfo> hris) {
    // RETURNS_DEEP_STUBS lets the when(...) chains below stub nested getters
    // without mocking each intermediate object by hand.
    masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS);
    masterRpcServices = Mockito.mock(MasterRpcServices.class, RETURNS_DEEP_STUBS);
    // for simplicity all regions are assumed to be on one server; doesn't matter to us
    ServerName sn = ServerName.valueOf("localhost", 0, 1L);
    when(masterServices.getAssignmentManager().getRegionStates().getRegionsOfTable(any(TableName.class))).thenReturn(hris);
    when(masterServices.getAssignmentManager().getRegionStates().getRegionServerOfRegion(any(HRegionInfo.class))).thenReturn(sn);
    // One RegionLoad mock per requested region, reporting its configured size.
    for (Map.Entry<byte[], Integer> region : regionSizes.entrySet()) {
        RegionLoad regionLoad = Mockito.mock(RegionLoad.class);
        when(regionLoad.getName()).thenReturn(region.getKey());
        when(regionLoad.getStorefileSizeMB()).thenReturn(region.getValue());
        when(masterServices.getServerManager().getLoad(sn).getRegionsLoad().get(region.getKey())).thenReturn(regionLoad);
    }
    try {
        // The normalizer checks the split/merge switch; report it as enabled.
        when(masterRpcServices.isSplitOrMergeEnabled(any(RpcController.class), any(IsSplitOrMergeEnabledRequest.class))).thenReturn(IsSplitOrMergeEnabledResponse.newBuilder().setEnabled(true).build());
    } catch (ServiceException se) {
        // Stubbing a mock should not actually throw; log and continue if it does.
        LOG.debug("error setting isSplitOrMergeEnabled switch", se);
    }
    normalizer.setMasterServices(masterServices);
    normalizer.setMasterRpcServices(masterRpcServices);
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) RpcController(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController) TableName(org.apache.hadoop.hbase.TableName) MasterRpcServices(org.apache.hadoop.hbase.master.MasterRpcServices) RegionLoad(org.apache.hadoop.hbase.RegionLoad) IsSplitOrMergeEnabledRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) ServerName(org.apache.hadoop.hbase.ServerName) MasterServices(org.apache.hadoop.hbase.master.MasterServices) HashMap(java.util.HashMap) Map(java.util.Map)

Example 15 with RegionLoad

use of org.apache.hadoop.hbase.RegionLoad in project cdap by caskdata.

From class HBaseTableUtil, method getTableStats.

/**
 * Collects HBase table stats by aggregating per-region loads across all live
 * region servers into per-table totals. Only CDAP-owned tables are counted.
 * //TODO: Explore the possibility of returning a {@code Map<TableId, TableStats>}
 * @param admin instance of {@link HBaseAdmin} to communicate with HBase
 * @return map of table name -> table stats
 * @throws IOException
 */
public Map<TableId, TableStats> getTableStats(HBaseAdmin admin) throws IOException {
    // The idea is to walk thru live region servers, collect table region stats and aggregate them towards table total
    // metrics.
    Map<TableId, TableStats> datasetStat = Maps.newHashMap();
    ClusterStatus clusterStatus = admin.getClusterStatus();
    for (ServerName serverName : clusterStatus.getServers()) {
        for (RegionLoad regionLoad : clusterStatus.getLoad(serverName).getRegionsLoad().values()) {
            TableName tableName = HRegionInfo.getTable(regionLoad.getName());
            HTableDescriptor tableDescriptor;
            try {
                tableDescriptor = admin.getTableDescriptor(tableName);
            } catch (TableNotFoundException exception) {
                // this can happen if the table has been deleted; the region stats get removed afterwards
                LOG.warn("Table not found for table name {}. Skipping collecting stats for it. Reason: {}", tableName, exception.getMessage());
                continue;
            }
            // Only aggregate regions belonging to tables CDAP manages.
            if (!isCDAPTable(tableDescriptor)) {
                continue;
            }
            TableId tableId = HTableNameConverter.from(tableDescriptor);
            TableStats existing = datasetStat.get(tableId);
            if (existing != null) {
                // Seen this table before: add this region's sizes to the running totals.
                existing.incStoreFileSizeMB(regionLoad.getStorefileSizeMB());
                existing.incMemStoreSizeMB(regionLoad.getMemStoreSizeMB());
            } else {
                // First region of this table: seed the totals with its sizes.
                datasetStat.put(tableId, new TableStats(regionLoad.getStorefileSizeMB(), regionLoad.getMemStoreSizeMB()));
            }
        }
    }
    return datasetStat;
}
Also used : TableId(co.cask.cdap.data2.util.TableId) TableName(org.apache.hadoop.hbase.TableName) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) RegionLoad(org.apache.hadoop.hbase.RegionLoad) ServerName(org.apache.hadoop.hbase.ServerName) ClusterStatus(org.apache.hadoop.hbase.ClusterStatus) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Aggregations

RegionLoad (org.apache.hadoop.hbase.RegionLoad)15 ServerName (org.apache.hadoop.hbase.ServerName)10 ServerLoad (org.apache.hadoop.hbase.ServerLoad)5 TreeMap (java.util.TreeMap)4 ClusterStatus (org.apache.hadoop.hbase.ClusterStatus)4 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)3 TableName (org.apache.hadoop.hbase.TableName)3 Map (java.util.Map)2 TableId (co.cask.cdap.data2.util.TableId)1 IOException (java.io.IOException)1 InterruptedIOException (java.io.InterruptedIOException)1 ArrayDeque (java.util.ArrayDeque)1 Deque (java.util.Deque)1 HashMap (java.util.HashMap)1 GET (javax.ws.rs.GET)1 Produces (javax.ws.rs.Produces)1 ResponseBuilder (javax.ws.rs.core.Response.ResponseBuilder)1 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)1 TableNotFoundException (org.apache.hadoop.hbase.TableNotFoundException)1 Admin (org.apache.hadoop.hbase.client.Admin)1