
Example 6 with TableName

Use of org.apache.hadoop.hbase.TableName in project hbase by apache.

From the class RSGroupInfoManagerImpl, method moveTables.

@Override
public synchronized void moveTables(Set<TableName> tableNames, String groupName) throws IOException {
    // A null groupName detaches the tables from their current groups without
    // assigning a new one; otherwise the destination group must already exist.
    if (groupName != null && !rsGroupMap.containsKey(groupName)) {
        throw new DoNotRetryIOException("Group " + groupName + " does not exist or is a special group");
    }
    // Mutate a copy of the group map; flushConfig persists and installs it.
    Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
    for (TableName tableName : tableNames) {
        // Remove the table from its current group, if it belongs to one.
        if (tableMap.containsKey(tableName)) {
            RSGroupInfo src = new RSGroupInfo(newGroupMap.get(tableMap.get(tableName)));
            src.removeTable(tableName);
            newGroupMap.put(src.getName(), src);
        }
        // Add the table to the destination group.
        if (groupName != null) {
            RSGroupInfo dst = new RSGroupInfo(newGroupMap.get(groupName));
            dst.addTable(tableName);
            newGroupMap.put(dst.getName(), dst);
        }
    }
    flushConfig(newGroupMap);
}
Also used: TableName (org.apache.hadoop.hbase.TableName), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)
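
For orientation, a minimal caller sketch (the table and group names are illustrative, and the RSGroupInfoManager instance is assumed to come from the master elsewhere):

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;

public class MoveTablesSketch {
    // Move two tables into an existing group. Passing null as the group name
    // would only detach the tables from their current groups, since the
    // destination branch in moveTables is skipped.
    static void moveToBatchGroup(RSGroupInfoManager manager) throws IOException {
        Set<TableName> tables = new HashSet<>();
        tables.add(TableName.valueOf("ns1:orders"));
        tables.add(TableName.valueOf("ns1:customers"));
        manager.moveTables(tables, "batch_group");
    }
}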

Example 7 with TableName

Use of org.apache.hadoop.hbase.TableName in project hbase by apache.

From the class BackupAdminImpl, method deleteBackups.

@Override
@Override
public int deleteBackups(String[] backupIds) throws IOException {
    // TODO: requires fault tolerance support; a failure here can leave the
    // system in an inconsistent state (see HBASE-15227).
    int totalDeleted = 0;
    // Group the affected tables by backup root directory so that
    // finalizeDelete can clean up once per root.
    Map<String, HashSet<TableName>> allTablesMap = new HashMap<>();
    try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
        for (String backupId : backupIds) {
            BackupInfo info = sysTable.readBackupInfo(backupId);
            if (info != null) {
                String rootDir = info.getBackupRootDir();
                HashSet<TableName> allTables = allTablesMap.get(rootDir);
                if (allTables == null) {
                    allTables = new HashSet<>();
                    allTablesMap.put(rootDir, allTables);
                }
                allTables.addAll(info.getTableNames());
                totalDeleted += deleteBackup(backupId, sysTable);
            }
        }
        finalizeDelete(allTablesMap, sysTable);
    }
    return totalDeleted;
}
Also used: BackupInfo (org.apache.hadoop.hbase.backup.BackupInfo), TableName (org.apache.hadoop.hbase.TableName), HashMap (java.util.HashMap), HashSet (java.util.HashSet)
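
A possible caller for deleteBackups, assuming the BackupAdminImpl(Connection) constructor from the same module; the backup ids are placeholders:

import java.io.IOException;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.client.Connection;

public class DeleteBackupsSketch {
    // Deletes two backups by id and returns how many were actually removed.
    static int deleteTwo(Connection conn) throws IOException {
        try (BackupAdmin admin = new BackupAdminImpl(conn)) {
            return admin.deleteBackups(new String[] { "backup_001", "backup_002" });
        }
    }
}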

Example 8 with TableName

Use of org.apache.hadoop.hbase.TableName in project hbase by apache.

From the class BackupAdminImpl, method getAffectedBackupInfos.

private List<BackupInfo> getAffectedBackupInfos(BackupInfo backupInfo, TableName tn, BackupSystemTable table) throws IOException {
    LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
    long ts = backupInfo.getStartTs();
    List<BackupInfo> list = new ArrayList<>();
    List<BackupInfo> history = table.getBackupHistory(backupInfo.getBackupRootDir());
    // Walk the history and stop once backupInfo itself is reached.
    for (BackupInfo info : history) {
        if (info.getStartTs() == ts) {
            break;
        }
        List<TableName> tables = info.getTableNames();
        if (tables.contains(tn)) {
            BackupType bt = info.getType();
            if (bt == BackupType.FULL) {
                // Clear the list if we encounter a FULL backup.
                list.clear();
            } else {
                LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn + " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
                list.add(info);
            }
        }
    }
    return list;
}
Also used: BackupInfo (org.apache.hadoop.hbase.backup.BackupInfo), TableName (org.apache.hadoop.hbase.TableName), ArrayList (java.util.ArrayList), BackupType (org.apache.hadoop.hbase.backup.BackupType)
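
The clear-on-FULL move is the crux of the loop above: incrementals accumulate while the history is walked, and any FULL backup of the table discards what has been collected so far, so only the incrementals with no intervening FULL (in walk order) survive. A standalone sketch of just that pattern, with the HBase types replaced by a hypothetical placeholder class:

import java.util.ArrayList;
import java.util.List;

public class ChainTrimSketch {
    enum Type { FULL, INCREMENTAL }

    static class Image {
        final String id;
        final Type type;
        Image(String id, Type type) { this.id = id; this.type = type; }
    }

    // Mirrors getAffectedBackupInfos: incrementals accumulate, and a FULL
    // backup discards everything collected before it (in walk order).
    static List<Image> affected(List<Image> history) {
        List<Image> list = new ArrayList<>();
        for (Image img : history) {
            if (img.type == Type.FULL) {
                list.clear();
            } else {
                list.add(img);
            }
        }
        return list;
    }
}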

Example 9 with TableName

Use of org.apache.hadoop.hbase.TableName in project hbase by apache.

From the class BackupManager, method createBackupInfo.

/**
   * Creates a backup info based on the input backup request.
   * @param backupId backup id
   * @param type backup type (FULL or INCREMENTAL)
   * @param tableList list of tables to back up
   * @param targetRootDir backup root directory
   * @param workers number of parallel workers
   * @param bandwidth bandwidth per worker in MB per second
   * @return the new BackupInfo
   * @throws BackupException if the request parameters are invalid
   */
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList, String targetRootDir, int workers, long bandwidth) throws BackupException {
    if (targetRootDir == null) {
        throw new BackupException("Wrong backup request parameter: target backup root directory");
    }
    if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) {
        // A null or empty table list for a full backup means "back up all
        // tables", so fill the list with every user table from meta. If no
        // table is available, reject the request.
        HTableDescriptor[] htds = null;
        try (Admin admin = conn.getAdmin()) {
            htds = admin.listTables();
        } catch (Exception e) {
            throw new BackupException(e);
        }
        if (htds == null) {
            throw new BackupException("No table exists for full backup of all tables.");
        } else {
            tableList = new ArrayList<>();
            for (HTableDescriptor hTableDescriptor : htds) {
                TableName tn = hTableDescriptor.getTableName();
                if (tn.equals(BackupSystemTable.getTableName(conf))) {
                    // Never back up the backup system table itself.
                    continue;
                }
                tableList.add(tn);
            }
            LOG.info("Full backup of all tables available in the cluster: " + tableList);
        }
    }
    // At this point the table list contains at least one table.
    backupInfo = new BackupInfo(backupId, type, tableList.toArray(new TableName[tableList.size()]), targetRootDir);
    backupInfo.setBandwidth(bandwidth);
    backupInfo.setWorkers(workers);
    return backupInfo;
}
Also used: BackupInfo (org.apache.hadoop.hbase.backup.BackupInfo), TableName (org.apache.hadoop.hbase.TableName), Admin (org.apache.hadoop.hbase.client.Admin), IOException (java.io.IOException), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
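
Callers normally do not invoke createBackupInfo directly; the parameters arrive through the module's public request builder. A sketch assuming the BackupRequest.Builder API from the same backup module (paths and table names are illustrative):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;

public class FullBackupRequestSketch {
    static BackupRequest buildRequest() {
        // An explicit table list; an empty one would trigger the
        // "back up all user tables" branch shown in createBackupInfo.
        List<TableName> tables = Arrays.asList(
            TableName.valueOf("ns1:orders"), TableName.valueOf("ns1:customers"));
        return new BackupRequest.Builder()
            .withBackupType(BackupType.FULL)
            .withTableList(tables)
            .withTargetRootDir("hdfs://namenode:8020/backup")
            .withTotalTasks(3)   // parallel workers
            .withBandwidth(100)  // MB per second per worker
            .build();
    }
}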

Example 10 with TableName

Use of org.apache.hadoop.hbase.TableName in project hbase by apache.

From the class BackupManifest, method canCoverImage.

/**
   * Check whether a backup image set can cover (fully contain the data of) a
   * target backup image.
   * @param fullImages the backup image set
   * @param image the target backup image
   * @return true if fullImages can cover image, otherwise false
   */
public static boolean canCoverImage(ArrayList<BackupImage> fullImages, BackupImage image) {
    // Every image in the set must be a FULL backup taken no earlier than the
    // target, and the union of their table sets must cover the target's tables.
    for (BackupImage fullImage : fullImages) {
        if (fullImage.getType() == BackupType.INCREMENTAL) {
            return false;
        }
        if (fullImage.getStartTs() < image.getStartTs()) {
            return false;
        }
    }
    // Union of table names across the full images.
    ArrayList<String> coveredTables = new ArrayList<>();
    for (BackupImage fullImage : fullImages) {
        for (TableName table : fullImage.getTableNames()) {
            coveredTables.add(table.getNameAsString());
        }
    }
    // Every table of the target image must appear in that union.
    for (TableName table : image.getTableNames()) {
        if (!coveredTables.contains(table.getNameAsString())) {
            return false;
        }
    }
    LOG.debug("Full image set can cover image " + image.getBackupId());
    return true;
}
Also used: TableName (org.apache.hadoop.hbase.TableName), ArrayList (java.util.ArrayList)
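
A short usage sketch: deciding whether a target image is restorable from a candidate set of full images (the BackupImage instances would come from previously loaded BackupManifest objects; their construction is omitted here):

import java.util.ArrayList;
import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;

public class CoverageCheckSketch {
    // True when every table in "target" also appears in one of "fulls" and
    // the type/timestamp preconditions checked in canCoverImage hold.
    static boolean restorable(ArrayList<BackupImage> fulls, BackupImage target) {
        return BackupManifest.canCoverImage(fulls, target);
    }
}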

Aggregations

Usage counts for TableName and the classes that most often appear alongside it in the indexed examples:

TableName (org.apache.hadoop.hbase.TableName): 1029
Test (org.junit.Test): 694
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 257
Table (org.apache.hadoop.hbase.client.Table): 227
IOException (java.io.IOException): 225
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 215
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 203
Result (org.apache.hadoop.hbase.client.Result): 124
ArrayList (java.util.ArrayList): 118
Put (org.apache.hadoop.hbase.client.Put): 118
Path (org.apache.hadoop.fs.Path): 113
Connection (org.apache.hadoop.hbase.client.Connection): 103
Scan (org.apache.hadoop.hbase.client.Scan): 97
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 88
ServerName (org.apache.hadoop.hbase.ServerName): 85
Admin (org.apache.hadoop.hbase.client.Admin): 85
Cell (org.apache.hadoop.hbase.Cell): 77
HashMap (java.util.HashMap): 75
Delete (org.apache.hadoop.hbase.client.Delete): 65
InterruptedIOException (java.io.InterruptedIOException): 63