use of org.apache.hadoop.hbase.backup.BackupInfo in project hbase by apache.
the class BackupAdminImpl method deleteBackup.
/**
 * Delete a single backup and update all related backups. <br>
 * Algorithm: <br>
 * Backup type: FULL or INCREMENTAL <br>
 * Is this the last backup session for table T: YES or NO <br>
 * For every table T from the table list 'tables': <br>
 * if (FULL, YES) deletes only physical data (PD) <br>
 * if (FULL, NO) deletes PD, then scans all newer backups and removes T from their backup info,
 * until we either reach the most recent backup for T in the system or a FULL backup that
 * includes T <br>
 * if (INCREMENTAL, YES) deletes only physical data (PD) <br>
 * if (INCREMENTAL, NO) deletes PD and, for table T, scans all backup images between the last
 * FULL backup that is older than the backup being deleted and the next FULL backup (if it
 * exists) or the last one for table T, and removes T from their lists of backup tables.
 * @param backupId backup id
 * @param sysTable backup system table
 * @return total number of deleted backup images
 * @throws IOException if cleaning up backup data or updating the system table fails
 */
private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException {
  BackupInfo backupInfo = sysTable.readBackupInfo(backupId);
  int totalDeleted = 0;
  if (backupInfo != null) {
    LOG.info("Deleting backup " + backupInfo.getBackupId() + " ...");
    BackupUtils.cleanupBackupData(backupInfo, conn.getConfiguration());
    // list of tables in this backup
    List<TableName> tables = backupInfo.getTableNames();
    long startTime = backupInfo.getStartTs();
    for (TableName tn : tables) {
      boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime);
      if (isLastBackupSession) {
        continue;
      }
      List<BackupInfo> affectedBackups = getAffectedBackupInfos(backupInfo, tn, sysTable);
      for (BackupInfo info : affectedBackups) {
        if (info.equals(backupInfo)) {
          continue;
        }
        removeTableFromBackupImage(info, tn, sysTable);
      }
    }
    LOG.debug("Delete backup info " + backupInfo.getBackupId());
    sysTable.deleteBackupInfo(backupInfo.getBackupId());
    LOG.info("Delete backup " + backupInfo.getBackupId() + " completed.");
    totalDeleted++;
  } else {
    LOG.warn("Delete backup failed: no information found for backupID=" + backupId);
  }
  return totalDeleted;
}
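Since deleteBackup is private, deletions normally go through the public BackupAdmin interface. Below is a minimal sketch of driving it from client code; it assumes the standard BackupAdmin.deleteBackups(String[]) entry point and the BackupAdminImpl(Connection) constructor, and the backup id used is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteBackupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupAdmin admin = new BackupAdminImpl(conn)) {
      // "backup_1528891234321" is a hypothetical backup id
      int deleted = admin.deleteBackups(new String[] { "backup_1528891234321" });
      System.out.println("Deleted " + deleted + " backup image(s)");
    }
  }
}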
use of org.apache.hadoop.hbase.backup.BackupInfo in project hbase by apache.
the class BackupManager method getAncestors.
/**
 * Get the direct ancestors of the current backup.
 * @param backupInfo The backup info for the current backup
 * @return The ancestors for the current backup
 * @throws IOException if reading the backup history or a manifest fails
 * @throws BackupException if the ancestor chain cannot be constructed
 */
public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo)
    throws IOException, BackupException {
  LOG.debug("Getting the direct ancestors of the current backup " + backupInfo.getBackupId());
  ArrayList<BackupImage> ancestors = new ArrayList<BackupImage>();
  // a full backup does not have ancestors
  if (backupInfo.getType() == BackupType.FULL) {
    LOG.debug("Current backup is a full backup, no direct ancestor for it.");
    return ancestors;
  }
  // get the whole backup history in descending order
  ArrayList<BackupInfo> allHistoryList = getBackupHistory(true);
  for (BackupInfo backup : allHistoryList) {
    BackupImage.Builder builder = BackupImage.newBuilder();
    BackupImage image = builder.withBackupId(backup.getBackupId())
        .withType(backup.getType())
        .withRootDir(backup.getBackupRootDir())
        .withTableList(backup.getTableNames())
        .withStartTime(backup.getStartTs())
        .withCompleteTime(backup.getCompleteTs())
        .build();
    // add the full backup image as an ancestor until the last incremental backup
    if (backup.getType().equals(BackupType.FULL)) {
      // no need to add the image if its tables are already covered
      if (!BackupManifest.canCoverImage(ancestors, image)) {
        ancestors.add(image);
      }
    } else {
      // incremental backup
      if (BackupManifest.canCoverImage(ancestors, image)) {
        LOG.debug("Met the backup boundary of the current table set:");
        for (BackupImage image1 : ancestors) {
          LOG.debug("  BackupID=" + image1.getBackupId() + ", BackupDir=" + image1.getRootDir());
        }
      } else {
        Path logBackupPath =
            HBackupFileSystem.getLogBackupPath(backup.getBackupRootDir(), backup.getBackupId());
        LOG.debug("Current backup has an incremental backup ancestor, "
            + "touching its image manifest in " + logBackupPath.toString()
            + " to construct the dependency.");
        BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath);
        BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
        ancestors.add(lastIncrImage);
        LOG.debug("Last dependent incremental backup image: " + "{BackupID="
            + lastIncrImage.getBackupId() + "," + "BackupDir=" + lastIncrImage.getRootDir() + "}");
      }
    }
  }
  LOG.debug("Got " + ancestors.size() + " ancestors for the current backup.");
  return ancestors;
}
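A minimal usage sketch: the returned list is the chain of images the incremental backup depends on. The backupManager and incrBackupInfo variables are hypothetical stand-ins for objects obtained elsewhere.

// print the dependency chain of an incremental backup
ArrayList<BackupImage> chain = backupManager.getAncestors(incrBackupInfo);
for (BackupImage ancestor : chain) {
  System.out.println("BackupID=" + ancestor.getBackupId()
      + ", BackupDir=" + ancestor.getRootDir());
}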
use of org.apache.hadoop.hbase.backup.BackupInfo in project hbase by apache.
the class BackupManifest method toBackupInfo.
public BackupInfo toBackupInfo() {
  BackupInfo info = new BackupInfo();
  info.setType(backupImage.getType());
  List<TableName> list = backupImage.getTableNames();
  TableName[] tables = new TableName[list.size()];
  info.addTables(list.toArray(tables));
  info.setBackupId(backupImage.getBackupId());
  info.setStartTs(backupImage.getStartTs());
  info.setBackupRootDir(backupImage.getRootDir());
  if (backupImage.getType() == BackupType.INCREMENTAL) {
    info.setHLogTargetDir(
      BackupUtils.getLogBackupDir(backupImage.getRootDir(), backupImage.getBackupId()));
  }
  return info;
}
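A minimal sketch of reading a stored manifest back from a backup location and converting it to a BackupInfo, using the BackupManifest(Configuration, Path) constructor seen in getAncestors() above; the path is hypothetical.

Configuration conf = HBaseConfiguration.create();
// hypothetical backup image location
Path imagePath = new Path("hdfs://nn1/backup_root/backup_1528891234321");
BackupManifest manifest = new BackupManifest(conf, imagePath);
BackupInfo restored = manifest.toBackupInfo();
System.out.println(restored.getBackupId() + " tables=" + restored.getTableNames());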
use of org.apache.hadoop.hbase.backup.BackupInfo in project hbase by apache.
the class BackupSystemTable method getBackupHistory.
/**
 * Get backup history records filtered by a list of filters.
 * @param n maximum number of records to return
 * @param filters list of filters
 * @return backup records that pass all filters
 * @throws IOException if reading the backup history fails
 */
public List<BackupInfo> getBackupHistory(int n, BackupInfo.Filter... filters) throws IOException {
  if (filters.length == 0) {
    return getHistory(n);
  }
  List<BackupInfo> history = getBackupHistory();
  List<BackupInfo> result = new ArrayList<BackupInfo>();
  for (BackupInfo bi : history) {
    if (result.size() == n) {
      break;
    }
    boolean passed = true;
    for (int i = 0; i < filters.length; i++) {
      if (!filters[i].apply(bi)) {
        passed = false;
        break;
      }
    }
    if (passed) {
      result.add(bi);
    }
  }
  return result;
}
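Since the loop above only calls BackupInfo.Filter.apply(BackupInfo), filters can be written as lambdas if Filter is a single-method interface, which this sketch assumes. It also assumes an already-open BackupSystemTable named sysTable.

// newest 10 completed FULL backups
BackupInfo.Filter completed = info -> info.getState() == BackupState.COMPLETE;
BackupInfo.Filter fullOnly = info -> info.getType() == BackupType.FULL;
List<BackupInfo> recentFull = sysTable.getBackupHistory(10, completed, fullOnly);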
use of org.apache.hadoop.hbase.backup.BackupInfo in project hbase by apache.
the class BackupSystemTable method getBackupInfos.
/**
 * Get all backup sessions with a given state (in descending order by time).
 * @param state backup session state
 * @return list of backup info objects in the given state
 * @throws IOException if the scan of the backup system table fails
 */
public ArrayList<BackupInfo> getBackupInfos(BackupState state) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("get backup infos from backup system table");
  }
  Scan scan = createScanForBackupHistory();
  ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
  try (Table table = connection.getTable(tableName);
      ResultScanner scanner = table.getScanner(scan)) {
    Result res = null;
    while ((res = scanner.next()) != null) {
      res.advance();
      BackupInfo context = cellToBackupInfo(res.current());
      if (state != BackupState.ANY && context.getState() != state) {
        continue;
      }
      list.add(context);
    }
    return list;
  }
}
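A minimal sketch of calling this method from client code, assuming the public BackupSystemTable(Connection) constructor and standard HBase client setup:

Configuration conf = HBaseConfiguration.create();
try (Connection conn = ConnectionFactory.createConnection(conf);
    BackupSystemTable sysTable = new BackupSystemTable(conn)) {
  for (BackupInfo info : sysTable.getBackupInfos(BackupState.COMPLETE)) {
    System.out.println(info.getBackupId() + " completed at " + info.getCompleteTs());
  }
}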