Use of org.apache.hadoop.hbase.backup.BackupInfo in project hbase by apache.
The class BackupAdminImpl, method deleteBackups.
@Override
public int deleteBackups(String[] backupIds) throws IOException {
  // TODO: requires fault tolerance support; a failure will leave the system
  // in an inconsistent state (see HBASE-15227)
  int totalDeleted = 0;
  Map<String, HashSet<TableName>> allTablesMap = new HashMap<String, HashSet<TableName>>();
  try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
    for (int i = 0; i < backupIds.length; i++) {
      BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
      if (info != null) {
        String rootDir = info.getBackupRootDir();
        HashSet<TableName> allTables = allTablesMap.get(rootDir);
        if (allTables == null) {
          allTables = new HashSet<TableName>();
          allTablesMap.put(rootDir, allTables);
        }
        allTables.addAll(info.getTableNames());
        totalDeleted += deleteBackup(backupIds[i], sysTable);
      }
    }
    finalizeDelete(allTablesMap, sysTable);
  }
  return totalDeleted;
}
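A minimal caller sketch, assuming an open org.apache.hadoop.hbase.client.Connection named conn; the backup ids are placeholders, and the Connection-taking BackupAdminImpl constructor is assumed from its usage in this module:

// Hypothetical usage: delete two backups and report how many were removed.
// `conn` is an open Connection; the backup ids are placeholders.
try (BackupAdmin admin = new BackupAdminImpl(conn)) {
  int deleted = admin.deleteBackups(new String[] { "backup_1470000000000", "backup_1470000100000" });
  System.out.println("Deleted " + deleted + " backup(s)");
}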
Use of org.apache.hadoop.hbase.backup.BackupInfo in project hbase by apache.
The class BackupAdminImpl, method getAffectedBackupInfos.
private List<BackupInfo> getAffectedBackupInfos(BackupInfo backupInfo, TableName tn,
    BackupSystemTable table) throws IOException {
  LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
  long ts = backupInfo.getStartTs();
  List<BackupInfo> list = new ArrayList<BackupInfo>();
  List<BackupInfo> history = table.getBackupHistory(backupInfo.getBackupRootDir());
  // History is ordered newest-first; stop once we reach backupInfo itself.
  for (BackupInfo info : history) {
    if (info.getStartTs() == ts) {
      break;
    }
    List<TableName> tables = info.getTableNames();
    if (tables.contains(tn)) {
      BackupType bt = info.getType();
      if (bt == BackupType.FULL) {
        // A newer FULL backup of this table breaks the dependency chain,
        // so discard everything collected so far.
        list.clear();
      } else {
        LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn
            + " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
        list.add(info);
      }
    }
  }
  return list;
}
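A worked trace may make the clear-on-FULL rule concrete. All backup ids below are hypothetical, and the history is assumed newest-first as returned by getBackupHistory:

// Hypothetical history for table t1, newest first; the target backupInfo is b1.
//   b5 INCREMENTAL {t1}  -> added             list = [b5]
//   b4 FULL        {t1}  -> chain broken      list = []
//   b3 INCREMENTAL {t1}  -> added             list = [b3]
//   b2 INCREMENTAL {t2}  -> skipped (no t1)   list = [b3]
//   b1 (== backupInfo)   -> loop breaks
// Result: [b3], the only incremental still depending on b1's chain.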
Use of org.apache.hadoop.hbase.backup.BackupInfo in project hbase by apache.
The class BackupManager, method createBackupInfo.
/**
 * Creates a backup info based on the input backup request.
 * @param backupId backup id
 * @param type type
 * @param tableList table list
 * @param targetRootDir root dir
 * @param workers number of parallel workers
 * @param bandwidth bandwidth per worker in MB per sec
 * @return BackupInfo
 * @throws BackupException exception
 */
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
    String targetRootDir, int workers, long bandwidth) throws BackupException {
  if (targetRootDir == null) {
    throw new BackupException("Wrong backup request parameter: target backup root directory");
  }
  if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) {
    // A null or empty table list for a full backup means "back up all tables", so fill the
    // list with all user tables from meta. If no table is available, throw an exception.
    HTableDescriptor[] htds = null;
    try (Admin admin = conn.getAdmin()) {
      htds = admin.listTables();
    } catch (Exception e) {
      throw new BackupException(e);
    }
    if (htds == null) {
      throw new BackupException("No table exists for full backup of all tables.");
    } else {
      tableList = new ArrayList<>();
      for (HTableDescriptor hTableDescriptor : htds) {
        TableName tn = hTableDescriptor.getTableName();
        if (tn.equals(BackupSystemTable.getTableName(conf))) {
          // skip the backup system table
          continue;
        }
        tableList.add(hTableDescriptor.getTableName());
      }
      LOG.info("Full backup of all tables available in the cluster: " + tableList);
    }
  }
  // At this point the table list contains at least one table.
  backupInfo = new BackupInfo(backupId, type, tableList.toArray(new TableName[tableList.size()]),
      targetRootDir);
  backupInfo.setBandwidth(bandwidth);
  backupInfo.setWorkers(workers);
  return backupInfo;
}
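A hedged caller sketch; backupManager, the id scheme, and the HDFS path are placeholders, and passing null for the table list relies on the all-tables behavior shown above:

// Hypothetical request: full backup of all user tables, 3 workers,
// 100 MB/s per worker. The id and root dir are placeholders.
BackupInfo info = backupManager.createBackupInfo(
    "backup_" + System.currentTimeMillis(),  // placeholder id scheme
    BackupType.FULL,
    null,                                    // null => all user tables
    "hdfs://nn:8020/user/hbase/backup",      // placeholder root dir
    3,                                       // workers
    100L);                                   // MB/s per worker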
Use of org.apache.hadoop.hbase.backup.BackupInfo in project hbase by apache.
The class BackupSystemTable, method getBackupHistoryForTableSet.
public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
    String backupRoot) throws IOException {
  List<BackupInfo> history = getBackupHistory(backupRoot);
  Map<TableName, ArrayList<BackupInfo>> tableHistoryMap =
      new HashMap<TableName, ArrayList<BackupInfo>>();
  for (BackupInfo info : history) {
    if (!backupRoot.equals(info.getBackupRootDir())) {
      continue;
    }
    List<TableName> tables = info.getTableNames();
    for (TableName tableName : tables) {
      if (set.contains(tableName)) {
        ArrayList<BackupInfo> list = tableHistoryMap.get(tableName);
        if (list == null) {
          list = new ArrayList<BackupInfo>();
          tableHistoryMap.put(tableName, list);
        }
        list.add(info);
      }
    }
  }
  return tableHistoryMap;
}
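A hedged usage sketch; the table names and root dir are placeholders, and conn is assumed to be an open Connection:

// Hypothetical query: per-table backup history for two tables under one root.
Set<TableName> tables = new HashSet<>(Arrays.asList(
    TableName.valueOf("t1"), TableName.valueOf("t2")));  // placeholder names
try (BackupSystemTable sysTable = new BackupSystemTable(conn)) {
  Map<TableName, ArrayList<BackupInfo>> byTable =
      sysTable.getBackupHistoryForTableSet(tables, "hdfs://nn:8020/user/hbase/backup");
  for (Map.Entry<TableName, ArrayList<BackupInfo>> e : byTable.entrySet()) {
    System.out.println(e.getKey() + " -> " + e.getValue().size() + " backup(s)");
  }
}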
Use of org.apache.hadoop.hbase.backup.BackupInfo in project hbase by apache.
The class BackupAdminImpl, method getHistory.
@Override
public List<BackupInfo> getHistory(int n, BackupInfo.Filter... filters) throws IOException {
  if (filters.length == 0) {
    return getHistory(n);
  }
  try (final BackupSystemTable table = new BackupSystemTable(conn)) {
    List<BackupInfo> history = table.getBackupHistory();
    List<BackupInfo> result = new ArrayList<BackupInfo>();
    for (BackupInfo bi : history) {
      if (result.size() == n) {
        break;
      }
      boolean passed = true;
      for (int i = 0; i < filters.length; i++) {
        if (!filters[i].apply(bi)) {
          passed = false;
          break;
        }
      }
      if (passed) {
        result.add(bi);
      }
    }
    return result;
  }
}
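A hedged sketch of calling this with inline filters; it assumes BackupInfo.Filter's single apply(BackupInfo) method makes it lambda-compatible on Java 8+, and that BackupInfo exposes the type and state getters used below:

// Hypothetical query: up to 10 completed FULL backups, newest first.
// `admin` is a BackupAdmin backed by an open Connection.
List<BackupInfo> recentFull = admin.getHistory(10,
    bi -> bi.getType() == BackupType.FULL,                    // type filter
    bi -> bi.getState() == BackupInfo.BackupState.COMPLETE);  // state filter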