Use of org.apache.hadoop.hbase.snapshot.SnapshotManifest in project hbase by apache.
The class MasterSnapshotVerifier, method verifySnapshot.
/**
 * Verify that the snapshot in the directory is a valid snapshot
 * @param snapshotDir snapshot directory to check
 * @param snapshotServers {@link org.apache.hadoop.hbase.ServerName} of the servers
 *   that are involved in the snapshot
 * @throws CorruptedSnapshotException if the snapshot is invalid
 * @throws IOException if there is an unexpected connection issue to the filesystem
 */
public void verifySnapshot(Path snapshotDir, Set<String> snapshotServers)
    throws CorruptedSnapshotException, IOException {
  SnapshotManifest manifest =
      SnapshotManifest.open(services.getConfiguration(), fs, snapshotDir, snapshot);
  // verify snapshot info matches
  verifySnapshotDescription(snapshotDir);
  // check that tableinfo is a valid table description
  verifyTableInfo(manifest);
  // check that each region is valid
  verifyRegions(manifest);
}
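As a point of reference, here is a minimal standalone sketch of the same read path (not part of MasterSnapshotVerifier; the snapshot name "mySnapshot" and the local Configuration are illustrative assumptions): locate the completed snapshot directory, read its description, open the manifest, and apply the kind of basic check that verifyTableInfo performs.

// Sketch only: assumes a reachable HBase root dir and an existing snapshot named "mySnapshot".
Configuration conf = HBaseConfiguration.create();
Path rootDir = FSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir("mySnapshot", rootDir);
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
if (manifest.getTableDescriptor() == null) {
  throw new CorruptedSnapshotException("Missing table descriptor in " + snapshotDir);
}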
Use of org.apache.hadoop.hbase.snapshot.SnapshotManifest in project hbase by apache.
The class RestoreSnapshotProcedure, method restoreSnapshot.
/**
 * Execute the on-disk restore
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void restoreSnapshot(final MasterProcedureEnv env) throws IOException {
  MasterFileSystem fileSystemManager = env.getMasterServices().getMasterFileSystem();
  FileSystem fs = fileSystemManager.getFileSystem();
  Path rootDir = fileSystemManager.getRootDir();
  final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
  LOG.info("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
  try {
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
    SnapshotManifest manifest = SnapshotManifest.open(
        env.getMasterServices().getConfiguration(), fs, snapshotDir, snapshot);
    RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
        env.getMasterServices().getConfiguration(), fs, manifest,
        modifiedHTableDescriptor, rootDir, monitorException, getMonitorStatus());
    RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
    regionsToRestore = metaChanges.getRegionsToRestore();
    regionsToRemove = metaChanges.getRegionsToRemove();
    regionsToAdd = metaChanges.getRegionsToAdd();
    parentsToChildrenPairMap = metaChanges.getParentToChildrenPairMap();
  } catch (IOException e) {
    String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
        + " failed in on-disk restore. Try re-running the restore command.";
    LOG.error(msg, e);
    monitorException.receive(
        new ForeignException(env.getMasterServices().getServerName().toString(), e));
    throw new IOException(msg, e);
  }
}
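For orientation, a hedged sketch of what restoreHdfsRegions() hands back: a RestoreMetaChanges describing how hbase:meta must be reconciled with the rewritten on-disk layout. Only the getters already used above appear here; the null checks for empty categories are an assumption, not taken from the HBase source.

// Sketch only: restoreHelper is the RestoreSnapshotHelper constructed above.
RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
List<HRegionInfo> toAdd = metaChanges.getRegionsToAdd();         // in the snapshot but not in meta
List<HRegionInfo> toRestore = metaChanges.getRegionsToRestore(); // in both; restored in place
List<HRegionInfo> toRemove = metaChanges.getRegionsToRemove();   // in meta but not in the snapshot
LOG.info("meta changes: add=" + (toAdd == null ? 0 : toAdd.size())
    + ", restore=" + (toRestore == null ? 0 : toRestore.size())
    + ", remove=" + (toRemove == null ? 0 : toRemove.size()));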
Use of org.apache.hadoop.hbase.snapshot.SnapshotManifest in project hbase by apache.
The class RestoreSnapshotProcedure, method prepareRestore.
/**
 * Pre-checks performed before any real restore action is taken.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void prepareRestore(final MasterProcedureEnv env) throws IOException {
  final TableName tableName = getTableName();
  // Check whether the table exists
  if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
    throw new TableNotFoundException(tableName);
  }
  // Check whether the table is disabled.
  env.getMasterServices().checkTableModifiable(tableName);
  // Check that we have at least 1 CF
  if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) {
    throw new DoNotRetryIOException("Table " + getTableName().toString()
        + " should have at least one column family.");
  }
  if (!getTableName().isSystemTable()) {
    // The table already exists. Check and update the region quota for this table's namespace.
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    SnapshotManifest manifest = SnapshotManifest.open(env.getMasterConfiguration(),
        mfs.getFileSystem(),
        SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()),
        snapshot);
    int snapshotRegionCount = manifest.getRegionManifestsMap().size();
    int tableRegionCount =
        ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);
    if (snapshotRegionCount > 0 && tableRegionCount != snapshotRegionCount) {
      ProcedureSyncWait.getMasterQuotaManager(env)
          .checkAndUpdateNamespaceRegionQuota(tableName, snapshotRegionCount);
    }
  }
}
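Since the quota check above keys off getRegionManifestsMap(), a brief hedged sketch of what that map holds may help: keys are encoded region names, values are per-region SnapshotRegionManifest entries. The loop below is illustrative; getFamilyFilesCount() is the protobuf-generated accessor for the manifest's repeated family_files field.

// Sketch only: manifest is the SnapshotManifest opened above.
for (Map.Entry<String, SnapshotRegionManifest> entry :
    manifest.getRegionManifestsMap().entrySet()) {
  SnapshotRegionManifest regionManifest = entry.getValue();
  LOG.debug("region " + entry.getKey() + " captured with "
      + regionManifest.getFamilyFilesCount() + " column families");
}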
Use of org.apache.hadoop.hbase.snapshot.SnapshotManifest in project hbase by apache.
The class RestoreTool, method getTableDesc.
/**
 * Get the table descriptor
 * @param tableName the table that was backed up
 * @return {@link HTableDescriptor} saved in the backup image of the table
 */
HTableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
  Path tableInfoPath = this.getTableInfoPath(tableName);
  SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
  SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
  HTableDescriptor tableDescriptor = manifest.getTableDescriptor();
  if (!tableDescriptor.getTableName().equals(tableName)) {
    LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
        + tableInfoPath.toString());
    LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString());
    throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName
        + " under tableInfoPath: " + tableInfoPath.toString());
  }
  return tableDescriptor;
}
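The same pattern works against any completed snapshot directory, not just the backup image's tableinfo path. A minimal sketch, assuming an existing snapshot named "backupSnapshot" (a hypothetical name) with fs, rootDir, and conf already in scope:

// Sketch only: recover a table's schema from a completed snapshot.
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir("backupSnapshot", rootDir);
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
HTableDescriptor schema = manifest.getTableDescriptor();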
Use of org.apache.hadoop.hbase.snapshot.SnapshotManifest in project hbase by apache.
The class RestoreTool, method restoreTableAndCreate.
private void restoreTableAndCreate(Connection conn, TableName tableName, TableName newTableName,
    Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
  if (newTableName == null) {
    newTableName = tableName;
  }
  FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
  // get the table descriptor first
  HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
  if (tableDescriptor != null) {
    LOG.debug("Retrieved descriptor: " + tableDescriptor + " thru " + lastIncrBackupId);
  }
  if (tableDescriptor == null) {
    Path tableSnapshotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
    if (fileSys.exists(tableSnapshotPath)) {
      // check whether a snapshot dir was already recorded for the target table
      if (snapshotMap.get(tableName) != null) {
        SnapshotDescription desc =
            SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
        SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc);
        tableDescriptor = manifest.getTableDescriptor();
      } else {
        tableDescriptor = getTableDesc(tableName);
        snapshotMap.put(tableName, getTableInfoPath(tableName));
      }
      if (tableDescriptor == null) {
        LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost");
      }
    } else {
      throw new IOException("Table snapshot directory: " + tableSnapshotPath
          + " does not exist.");
    }
  }
  Path tableArchivePath = getTableArchivePath(tableName);
  if (tableArchivePath == null) {
    if (tableDescriptor != null) {
      // a table descriptor but no archive dir means the table is empty: create the table and exit
      if (LOG.isDebugEnabled()) {
        LOG.debug("Found table descriptor but no archive dir for table " + tableName
            + ", will only create the table");
      }
      tableDescriptor.setName(newTableName);
      checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
          truncateIfExists);
      return;
    } else {
      throw new IllegalStateException(
          "Cannot restore hbase table because tableArchivePath is null.");
    }
  }
  if (tableDescriptor == null) {
    tableDescriptor = new HTableDescriptor(newTableName);
  } else {
    tableDescriptor.setName(newTableName);
  }
  // load all files in the dir
  try {
    ArrayList<Path> regionPathList = getRegionList(tableName);
    // should only try to create the table with all region information, so we can pre-split
    // the regions at fine granularity
    checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, regionPathList,
        tableDescriptor, truncateIfExists);
    if (tableArchivePath != null) {
      // start the real restore through bulk load
      // if the backup target is on the local cluster, special action is needed
      Path tempTableArchivePath = checkLocalAndBackup(tableArchivePath);
      if (tempTableArchivePath.equals(tableArchivePath)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("TableArchivePath for bulkload using existPath: " + tableArchivePath);
        }
      } else {
        // point to the tempDir
        regionPathList = getRegionList(tempTableArchivePath);
        if (LOG.isDebugEnabled()) {
          LOG.debug("TableArchivePath for bulkload using tempPath: " + tempTableArchivePath);
        }
      }
      LoadIncrementalHFiles loader = createLoader(tempTableArchivePath, false);
      for (Path regionPath : regionPathList) {
        String regionName = regionPath.toString();
        if (LOG.isDebugEnabled()) {
          LOG.debug("Restoring HFiles from directory " + regionName);
        }
        String[] args = { regionName, newTableName.getNameAsString() };
        loader.run(args);
      }
    }
    // we do not restore recovered edits
  } catch (Exception e) {
    throw new IllegalStateException("Cannot restore hbase table", e);
  }
}
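The per-region loop above drives HBase's standard bulk-load tool. A minimal sketch of that step in isolation, with a made-up HFile directory path as an illustrative assumption; run() takes the HFile directory and the target table name and returns a process-style exit code.

// Sketch only: the directory path is hypothetical.
LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
String[] args = { "/backup/archive/data/default/mytable", "mytable" };
int exitCode = loader.run(args); // throws Exception; 0 means all HFiles were loaded
if (exitCode != 0) {
  throw new IOException("Bulk load failed with exit code " + exitCode);
}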