Use of org.apache.hadoop.hbase.backup.HBackupFileSystem in project hbase by apache.
From the class BackupUtils, the method copyTableRegionInfo:
/**
 * Copy out Table RegionInfo into the incremental backup image. TODO: consider moving this logic
 * into HBackupFileSystem.
 * @param conn connection
 * @param backupInfo backup info
 * @param conf configuration
 * @throws IOException exception
 */
public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
  throws IOException {
  Path rootDir = CommonFSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  // for each table in the table set, copy out the table info and region
  // info files in the correct directory structure
  try (Admin admin = conn.getAdmin()) {
    for (TableName table : backupInfo.getTables()) {
      if (!admin.tableExists(table)) {
        LOG.warn("Table " + table + " does not exist, skipping it.");
        continue;
      }
      TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
      // write a copy of descriptor to the target directory
      Path target = new Path(backupInfo.getTableBackupDir(table));
      FileSystem targetFs = target.getFileSystem(conf);
      FSTableDescriptors descriptors =
        new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf));
      descriptors.createTableDescriptorForTableDirectory(target, orig, false);
      LOG.debug("Attempting to copy table info for: " + table + " target: " + target
        + " descriptor: " + orig);
      LOG.debug("Finished copying tableinfo.");
      List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
      // For each region, write the region info to disk
      LOG.debug("Starting to write region info for table " + table);
      for (RegionInfo regionInfo : regions) {
        Path regionDir =
          FSUtils.getRegionDirFromTableDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo);
        regionDir = new Path(backupInfo.getTableBackupDir(table), regionDir.getName());
        writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
      }
      LOG.debug("Finished writing region info for table " + table);
    }
  }
}
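For context, a minimal sketch of how this helper might be invoked. The driver class CopyTableRegionInfoExample and its copyMetadata method are hypothetical, and the BackupInfo instance is assumed to have been prepared elsewhere (by the backup manager) with its table set and target backup directory already populated; only the connection setup and the call itself are taken from standard HBase client API.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical driver class, not part of HBase.
public class CopyTableRegionInfoExample {
  // backupInfo is assumed to be prepared by the backup manager, with its
  // table set and target backup directory already filled in.
  static void copyMetadata(BackupInfo backupInfo) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Writes each table's descriptor and per-region .regioninfo files
      // under backupInfo.getTableBackupDir(table) for every table in the set.
      BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
    }
  }
}

Note on the region loop: getRegionDirFromTableDir returns a path ending in the region's encoded name, and the subsequent new Path(...) keeps only that last component, so each region directory lands directly under the table's backup directory, mirroring the on-disk layout of the live root directory.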