Search in sources :

Example 11 with TableName

use of org.apache.hadoop.hbase.TableName in project hbase by apache.

the class BackupManager method createBackupInfo.

/**
   * Creates a backup info based on an input backup request.
   * @param backupId backup id
   * @param type backup type (FULL or INCREMENTAL)
   * @param tableList tables to back up; null/empty for a FULL backup means
   *          "all user tables" and the list is filled from meta
   * @param targetRootDir root dir of the backup destination; must not be null
   * @param workers number of parallel workers
   * @param bandwidth bandwidth per worker in MB per sec
   * @return BackupInfo
   * @throws BackupException if the target root dir is null, table listing
   *           fails, or no table is available for a full backup
   */
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList, String targetRootDir, int workers, long bandwidth) throws BackupException {
    if (targetRootDir == null) {
        throw new BackupException("Wrong backup request parameter: target backup root directory");
    }
    if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) {
        // If table list is null for full backup, which means backup all tables. Then fill the table
        // list with all user tables from meta. If no table is available, throw the request exception.
        HTableDescriptor[] htds;
        try (Admin admin = conn.getAdmin()) {
            htds = admin.listTables();
        } catch (Exception e) {
            throw new BackupException(e);
        }
        if (htds == null || htds.length == 0) {
            throw new BackupException("No table exists for full backup of all tables.");
        }
        tableList = new ArrayList<>();
        for (HTableDescriptor htd : htds) {
            TableName tn = htd.getTableName();
            if (tn.equals(BackupSystemTable.getTableName(conf))) {
                // skip backup system table
                continue;
            }
            tableList.add(tn);
        }
        if (tableList.isEmpty()) {
            // Only the backup system table existed; there is nothing to back up.
            throw new BackupException("No table exists for full backup of all tables.");
        }
        LOG.info("Full backup all the tables available in the cluster: " + tableList);
    }
    // at this point there is at least one table in the table list
    backupInfo = new BackupInfo(backupId, type, tableList.toArray(new TableName[0]), targetRootDir);
    backupInfo.setBandwidth(bandwidth);
    backupInfo.setWorkers(workers);
    return backupInfo;
}
Also used : BackupInfo(org.apache.hadoop.hbase.backup.BackupInfo) TableName(org.apache.hadoop.hbase.TableName) Admin(org.apache.hadoop.hbase.client.Admin) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Example 12 with TableName

use of org.apache.hadoop.hbase.TableName in project hbase by apache.

the class BackupManifest method canCoverImage.

/**
   * Check whether a backup image set could cover a backup image or not.
   * @param fullImages The backup image set
   * @param image The target backup image
   * @return true if fullImages can cover image, otherwise false
   */
public static boolean canCoverImage(ArrayList<BackupImage> fullImages, BackupImage image) {
    // A covering set may contain only FULL images, and each must have started
    // no earlier than the target image.
    for (BackupImage full : fullImages) {
        if (full.getType() == BackupType.INCREMENTAL) {
            return false;
        }
        if (full.getStartTs() < image.getStartTs()) {
            return false;
        }
    }
    // Union of all table names covered by the full images.
    ArrayList<String> coveredTables = new ArrayList<String>();
    for (BackupImage full : fullImages) {
        for (TableName table : full.getTableNames()) {
            coveredTables.add(table.getNameAsString());
        }
    }
    // The union must contain every table of the target image.
    for (TableName table : image.getTableNames()) {
        if (!coveredTables.contains(table.getNameAsString())) {
            return false;
        }
    }
    LOG.debug("Full image set can cover image " + image.getBackupId());
    return true;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ArrayList(java.util.ArrayList)

Example 13 with TableName

use of org.apache.hadoop.hbase.TableName in project hbase by apache.

the class BackupSystemTable method getBackupHistoryForTableSet.

/**
 * Groups the backup history of the given root dir by table, restricted to the
 * tables in {@code set}.
 * @param set tables of interest
 * @param backupRoot backup root dir used to filter history entries
 * @return map from table name to the backups (under {@code backupRoot}) that
 *         include that table; tables with no matching backup are absent
 * @throws IOException if reading the backup history fails
 */
public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set, String backupRoot) throws IOException {
    List<BackupInfo> history = getBackupHistory(backupRoot);
    Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>();
    for (BackupInfo info : history) {
        // getBackupHistory is already root-scoped; keep the guard for safety.
        if (!backupRoot.equals(info.getBackupRootDir())) {
            continue;
        }
        for (TableName tableName : info.getTableNames()) {
            if (set.contains(tableName)) {
                // Lazily create the per-table bucket on first hit.
                tableHistoryMap.computeIfAbsent(tableName, k -> new ArrayList<>()).add(info);
            }
        }
    }
    return tableHistoryMap;
}
Also used : BackupInfo(org.apache.hadoop.hbase.backup.BackupInfo) TableName(org.apache.hadoop.hbase.TableName) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList)

Example 14 with TableName

use of org.apache.hadoop.hbase.TableName in project hbase by apache.

the class TestRSGroupBasedLoadBalancer method getTableName.

/**
 * Resolves a table whose RS group matches the group containing the given server.
 * Returns the last matching table, or null when none matches.
 */
private TableName getTableName(ServerName sn) throws IOException {
    RSGroupInfoManager manager = getMockedGroupInfoManager();
    // Locate the RS group that contains this server's address.
    RSGroupInfo serverGroup = null;
    for (RSGroupInfo candidate : manager.listRSGroups()) {
        if (candidate.containsServer(sn.getAddress())) {
            serverGroup = candidate;
            break;
        }
    }
    // NOTE(review): uses endsWith (not equals) to compare group names, and assumes
    // the server belongs to some group (serverGroup non-null) — confirm intended.
    TableName result = null;
    for (HTableDescriptor desc : tableDescs) {
        if (manager.getRSGroupOfTable(desc.getTableName()).endsWith(serverGroup.getName())) {
            result = desc.getTableName();
        }
    }
    return result;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RSGroupInfo(org.apache.hadoop.hbase.rsgroup.RSGroupInfo) RSGroupInfoManager(org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Example 15 with TableName

use of org.apache.hadoop.hbase.TableName in project hbase by apache.

the class TestRSGroupBasedLoadBalancer method assertRetainedAssignment.

/**
   * Asserts a valid retained assignment plan.
   * <p>
   * Must meet the following conditions:
   * <ul>
   * <li>Every input region has an assignment, and to an online server
   * <li>If a region had an existing assignment to a server with the same
   * address as a currently online server, it will be assigned to it
   * </ul>
   *
   * @param existing prior region-to-server assignments
   * @param servers currently online servers
   * @param assignment the new assignment plan under test
   * @throws java.io.IOException
   * @throws java.io.FileNotFoundException
   */
private void assertRetainedAssignment(Map<HRegionInfo, ServerName> existing, List<ServerName> servers, Map<ServerName, List<HRegionInfo>> assignment) throws FileNotFoundException, IOException {
    // Condition 1: every region is assigned, and only to servers listed as online.
    Set<ServerName> online = new TreeSet<>(servers);
    Set<HRegionInfo> seenRegions = new TreeSet<>();
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : assignment.entrySet()) {
        assertTrue("Region assigned to server that was not listed as online", online.contains(entry.getKey()));
        seenRegions.addAll(entry.getValue());
    }
    assertEquals(existing.size(), seenRegions.size());
    // Condition 2: each region lands inside its table's RS group, and a region whose
    // previous host is still online must not have moved to a different address.
    Set<String> onlineHosts = new TreeSet<>();
    for (ServerName server : servers) {
        onlineHosts.add(server.getHostname());
    }
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : assignment.entrySet()) {
        ServerName assignedServer = entry.getKey();
        for (HRegionInfo region : entry.getValue()) {
            TableName table = region.getTable();
            String group = getMockedGroupInfoManager().getRSGroupOfTable(table);
            assertTrue(StringUtils.isNotEmpty(group));
            RSGroupInfo groupInfo = getMockedGroupInfoManager().getRSGroup(group);
            assertTrue("Region is not correctly assigned to group servers.", groupInfo.containsServer(assignedServer.getAddress()));
            ServerName previousServer = existing.get(region);
            if (previousServer != null && onlineHosts.contains(previousServer.getHostname())) {
                // different group.
                if (!previousServer.getAddress().equals(assignedServer.getAddress())) {
                    assertFalse(groupInfo.containsServer(previousServer.getAddress()));
                }
            }
        }
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) RSGroupInfo(org.apache.hadoop.hbase.rsgroup.RSGroupInfo) TreeSet(java.util.TreeSet) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap)

Aggregations

TableName (org.apache.hadoop.hbase.TableName)1033 Test (org.junit.Test)695 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)257 Table (org.apache.hadoop.hbase.client.Table)228 IOException (java.io.IOException)225 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)215 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)203 Result (org.apache.hadoop.hbase.client.Result)125 ArrayList (java.util.ArrayList)120 Put (org.apache.hadoop.hbase.client.Put)118 Path (org.apache.hadoop.fs.Path)113 Connection (org.apache.hadoop.hbase.client.Connection)103 Scan (org.apache.hadoop.hbase.client.Scan)98 ResultScanner (org.apache.hadoop.hbase.client.ResultScanner)89 ServerName (org.apache.hadoop.hbase.ServerName)85 Admin (org.apache.hadoop.hbase.client.Admin)85 Cell (org.apache.hadoop.hbase.Cell)77 HashMap (java.util.HashMap)75 Delete (org.apache.hadoop.hbase.client.Delete)66 InterruptedIOException (java.io.InterruptedIOException)63