use of org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage in project hbase by apache.
The class BackupManager, method getAncestors.
/**
 * Get the direct ancestors of this backup for one table involved.
 * @param backupInfo backup info
 * @param table table
 * @return backupImages on the dependency list
 * @throws BackupException exception
 * @throws IOException exception
 */
public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo, TableName table)
    throws BackupException, IOException {
  ArrayList<BackupImage> tableAncestors = new ArrayList<BackupImage>();
  for (BackupImage ancestor : getAncestors(backupInfo)) {
    if (!ancestor.hasTable(table)) {
      continue;
    }
    tableAncestors.add(ancestor);
    // A full backup is a complete restore point for the table; no older
    // ancestors are needed beyond it.
    if (ancestor.getType() == BackupType.FULL) {
      break;
    }
  }
  return tableAncestors;
}
use of org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage in project hbase by apache.
The class RestoreTablesClient, method restoreImages.
/**
 * Restore operation: handle each backup image in the array.
 * <p>
 * The first image MUST be the image of a FULL backup; any remaining images are
 * incremental backups whose WALs are replayed on top of the full restore.
 * @param images backup images to restore, oldest (the full backup) first
 * @param sTable table to be restored
 * @param tTable table to be restored to
 * @param truncateIfExists whether to truncate the target table if it already exists
 * @throws IOException if the first image is not a full backup or the restore fails
 */
private void restoreImages(BackupImage[] images, TableName sTable, TableName tTable,
    boolean truncateIfExists) throws IOException {
  // First image MUST be image of a FULL backup
  BackupImage image = images[0];
  String rootDir = image.getRootDir();
  String backupId = image.getBackupId();
  Path backupRoot = new Path(rootDir);
  RestoreTool restoreTool = new RestoreTool(conf, backupRoot, backupId);
  Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId);
  // The id of the newest incremental image, or null for a pure full restore.
  String lastIncrBackupId = images.length == 1 ? null : images[images.length - 1].getBackupId();
  // We need hFS only for full restore (see the code)
  BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId);
  if (manifest.getType() == BackupType.FULL) {
    LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from full" + " backup image "
        + tableBackupPath.toString());
    restoreTool.fullRestoreTable(conn, tableBackupPath, sTable, tTable, truncateIfExists,
        lastIncrBackupId);
  } else {
    // Bug fix: report the type that was actually checked (the manifest's),
    // not image.getType(), so the diagnostic matches the failed condition.
    throw new IOException("Unexpected backup type " + manifest.getType());
  }
  if (images.length == 1) {
    // full backup restore done
    return;
  }
  List<Path> dirList = new ArrayList<Path>();
  // full backup path comes first
  for (int i = 1; i < images.length; i++) {
    BackupImage im = images[i];
    String logBackupDir = HBackupFileSystem.getLogBackupDir(im.getRootDir(), im.getBackupId());
    dirList.add(new Path(logBackupDir));
  }
  String dirs = StringUtils.join(dirList, ",");
  LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from log dirs: " + dirs);
  Path[] paths = new Path[dirList.size()];
  dirList.toArray(paths);
  restoreTool.incrementalRestoreTable(conn, tableBackupPath, paths, new TableName[] { sTable },
      new TableName[] { tTable }, lastIncrBackupId);
  LOG.info(sTable + " has been successfully restored to " + tTable);
}
use of org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage in project hbase by apache.
The class RestoreTablesClient, method restore.
/**
 * Restore operation. Stage 2: restore tables using the resolved backup image dependencies.
 * @param backupManifestMap map from table name to that table's backup manifest
 * @param sTableArray the array of tables to be restored
 * @param tTableArray the array of mapping tables to restore to (parallel to sTableArray)
 * @param isOverwrite whether to truncate existing target tables before restoring
 * @throws IOException if restoring any table fails
 */
private void restore(HashMap<TableName, BackupManifest> backupManifestMap,
    TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
  TreeSet<BackupImage> restoreImageSet = new TreeSet<BackupImage>();
  try {
    for (int i = 0; i < sTableArray.length; i++) {
      TableName table = sTableArray[i];
      BackupManifest manifest = backupManifestMap.get(table);
      // Get the image list of this backup for restore in time order from old to new.
      List<BackupImage> list = new ArrayList<BackupImage>();
      list.add(manifest.getBackupImage());
      TreeSet<BackupImage> set = new TreeSet<BackupImage>(list);
      List<BackupImage> depList = manifest.getDependentListByTable(table);
      set.addAll(depList);
      BackupImage[] arr = new BackupImage[set.size()];
      set.toArray(arr);
      restoreImages(arr, table, tTableArray[i], isOverwrite);
      // NOTE(review): only the top-level image is recorded here, not the
      // dependent images in 'set' — confirm this is intended.
      restoreImageSet.addAll(list);
      // Fix: dropped the always-true 'restoreImageSet != null' check; the set
      // is constructed above and can never be null at this point.
      if (!restoreImageSet.isEmpty()) {
        LOG.info("Restore includes the following image(s):");
        for (BackupImage image : restoreImageSet) {
          LOG.info("Backup: " + image.getBackupId() + " "
              + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(),
                  table));
        }
      }
    }
  } catch (Exception e) {
    // Boundary: wrap anything unexpected into the declared IOException,
    // preserving the cause.
    LOG.error("Failed", e);
    throw new IOException(e);
  }
  LOG.debug("restoreStage finished");
}
use of org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage in project hbase by apache.
The class BackupUtils, method validate.
/**
 * Validate that every dependent backup image referenced by the manifests still
 * exists on the backup filesystem.
 * @param backupManifestMap map from table name to that table's backup manifest
 * @param conf configuration used to access the backup filesystem
 * @return true if all dependent images exist, false otherwise
 * @throws IOException if checking a path fails
 */
public static boolean validate(HashMap<TableName, BackupManifest> backupManifestMap,
    Configuration conf) throws IOException {
  boolean isValid = true;
  for (Entry<TableName, BackupManifest> manifestEntry : backupManifestMap.entrySet()) {
    TableName table = manifestEntry.getKey();
    // TreeSet orders the dependency chain from oldest to newest.
    TreeSet<BackupImage> imageSet = new TreeSet<BackupImage>();
    ArrayList<BackupImage> depList = manifestEntry.getValue().getDependentListByTable(table);
    if (depList != null && !depList.isEmpty()) {
      imageSet.addAll(depList);
    }
    LOG.info("Dependent image(s) from old to new:");
    for (BackupImage image : imageSet) {
      String imageDir =
          HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table);
      if (!BackupUtils.checkPathExist(imageDir, conf)) {
        LOG.error("ERROR: backup image does not exist: " + imageDir);
        isValid = false;
        // Stop checking this table's chain; remaining tables are still checked.
        break;
      }
      LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available");
    }
  }
  return isValid;
}
use of org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage in project hbase by apache.
The class TableBackupClient, method addManifest.
/**
 * Add manifest for the current backup. The manifest is stored within the table backup directory.
 * @param backupInfo The current backup info
 * @param backupManager backup manager used to resolve ancestor images
 * @param type the backup type (FULL or INCREMENTAL)
 * @param conf configuration the manifests are stored with
 * @throws IOException exception
 * @throws BackupException exception
 */
private void addManifest(BackupInfo backupInfo, BackupManager backupManager, BackupType type,
    Configuration conf) throws IOException, BackupException {
  // set the overall backup phase : store manifest
  backupInfo.setPhase(BackupPhase.STORE_MANIFEST);
  BackupManifest manifest;
  // Each table gets its own manifest, stored with the table directory.
  for (TableName table : backupInfo.getTables()) {
    manifest = new BackupManifest(backupInfo, table);
    for (BackupImage ancestor : backupManager.getAncestors(backupInfo, table)) {
      manifest.addDependentImage(ancestor);
    }
    if (type == BackupType.INCREMENTAL) {
      // We'll store the log timestamps for this table only in its manifest.
      HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
          new HashMap<TableName, HashMap<String, Long>>();
      tableTimestampMap.put(table, backupInfo.getIncrTimestampMap().get(table));
      manifest.setIncrTimestampMap(tableTimestampMap);
      // NOTE(review): the all-tables ancestors are added on top of the
      // per-table ancestors above — presumably addDependentImage tolerates
      // duplicates; confirm against BackupManifest.
      for (BackupImage ancestor : backupManager.getAncestors(backupInfo)) {
        manifest.addDependentImage(ancestor);
      }
    }
    manifest.store(conf);
  }
  // This is used when created the next incremental backup
  if (type == BackupType.INCREMENTAL) {
    manifest = new BackupManifest(backupInfo);
    // set the table region server start and end timestamps for incremental backup
    manifest.setIncrTimestampMap(backupInfo.getIncrTimestampMap());
    for (BackupImage ancestor : backupManager.getAncestors(backupInfo)) {
      manifest.addDependentImage(ancestor);
    }
    manifest.store(conf);
  }
}
Aggregations