Use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.
The class RestoreSnapshotProcedure, method restoreSnapshot:
/**
 * Execute the on-disk restore.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void restoreSnapshot(final MasterProcedureEnv env) throws IOException {
  MasterFileSystem fileSystemManager = env.getMasterServices().getMasterFileSystem();
  FileSystem fs = fileSystemManager.getFileSystem();
  Path rootDir = fileSystemManager.getRootDir();
  final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();

  LOG.info("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
  try {
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
    SnapshotManifest manifest = SnapshotManifest.open(
      env.getMasterServices().getConfiguration(), fs, snapshotDir, snapshot);
    RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
      env.getMasterServices().getConfiguration(), fs, manifest,
      modifiedHTableDescriptor, rootDir, monitorException, getMonitorStatus());
    RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
    regionsToRestore = metaChanges.getRegionsToRestore();
    regionsToRemove = metaChanges.getRegionsToRemove();
    regionsToAdd = metaChanges.getRegionsToAdd();
    parentsToChildrenPairMap = metaChanges.getParentToChildrenPairMap();
  } catch (IOException e) {
    String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
      + " failed in on-disk restore. Try re-running the restore command.";
    LOG.error(msg, e);
    monitorException.receive(
      new ForeignException(env.getMasterServices().getServerName().toString(), e));
    throw new IOException(msg, e);
  }
}
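For orientation, here is a minimal standalone sketch of how the same completed-snapshot directory is resolved outside the procedure, using only public utility classes. The snapshot name "mySnapshot" is a placeholder, and the String-name overload of getCompletedSnapshotDir should be verified against the HBase version in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.FSUtils;

public class SnapshotDirResolver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same root directory the MasterFileSystem reports via getRootDir().
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    // "mySnapshot" is a placeholder snapshot name for this sketch.
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir("mySnapshot", rootDir);
    System.out.println("Completed snapshot dir " + snapshotDir + " exists=" + fs.exists(snapshotDir));
  }
}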
Use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.
The class RestoreSnapshotProcedure, method prepareRestore:
/**
 * Action before any real action of restoring from snapshot.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void prepareRestore(final MasterProcedureEnv env) throws IOException {
  final TableName tableName = getTableName();
  // Check whether the table exists.
  if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
    throw new TableNotFoundException(tableName);
  }

  // Check whether the table is disabled.
  env.getMasterServices().checkTableModifiable(tableName);

  // Check that we have at least one column family.
  if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) {
    throw new DoNotRetryIOException(
      "Table " + getTableName().toString() + " should have at least one column family.");
  }

  if (!getTableName().isSystemTable()) {
    // The table already exists. Check and update the region quota for this table's namespace.
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    SnapshotManifest manifest = SnapshotManifest.open(
      env.getMasterConfiguration(), mfs.getFileSystem(),
      SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()), snapshot);
    int snapshotRegionCount = manifest.getRegionManifestsMap().size();
    int tableRegionCount =
      ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);
    if (snapshotRegionCount > 0 && tableRegionCount != snapshotRegionCount) {
      ProcedureSyncWait.getMasterQuotaManager(env)
        .checkAndUpdateNamespaceRegionQuota(tableName, snapshotRegionCount);
    }
  }
}
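The per-table preconditions above are easy to factor out and test in isolation. A hedged sketch of the column-family guard, mirroring the calls prepareRestore() makes; the helper class and where the descriptor comes from are assumptions for illustration:

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HTableDescriptor;

final class RestorePreChecks {
  // Mirrors the prepareRestore() guard: a restored table must keep at least one column family.
  static void requireAtLeastOneFamily(HTableDescriptor htd) throws DoNotRetryIOException {
    if (htd.getColumnFamilyCount() == 0) {
      throw new DoNotRetryIOException(
        "Table " + htd.getTableName() + " should have at least one column family.");
    }
  }
}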
Use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.
The class DeleteNamespaceProcedure, method deleteDirectory:
/**
 * Delete the namespace directories from the file system.
 * @param env MasterProcedureEnv
 * @param namespaceName name of the namespace in string format
 * @throws IOException
 */
protected static void deleteDirectory(final MasterProcedureEnv env, final String namespaceName)
    throws IOException {
  MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  FileSystem fs = mfs.getFileSystem();
  Path p = FSUtils.getNamespaceDir(mfs.getRootDir(), namespaceName);

  try {
    for (FileStatus status : fs.listStatus(p)) {
      if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) {
        throw new IOException("Namespace directory contains table dir: " + status.getPath());
      }
    }
    if (!fs.delete(p, true)) {
      throw new IOException("Failed to remove namespace: " + namespaceName);
    }
  } catch (FileNotFoundException e) {
    // File already deleted, continue.
    LOG.debug("deleteDirectory throws exception: " + e);
  }
}
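The same verify-then-delete pattern, tolerant of a concurrent deletion, works against any Hadoop FileSystem. A minimal sketch, not the procedure itself; the helper name and the caller-supplied whitelist are assumptions:

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class GuardedDelete {
  // Fail if dir holds anything outside the allowed names, then delete it
  // recursively; a concurrent deletion (FileNotFoundException) is not an error.
  static void deleteIfOnlyContains(FileSystem fs, Path dir, Collection<String> allowed)
      throws IOException {
    try {
      for (FileStatus status : fs.listStatus(dir)) {
        if (!allowed.contains(status.getPath().getName())) {
          throw new IOException("Directory contains unexpected entry: " + status.getPath());
        }
      }
      if (!fs.delete(dir, true)) {
        throw new IOException("Failed to remove: " + dir);
      }
    } catch (FileNotFoundException e) {
      // Already gone; mirrors the tolerant catch in deleteDirectory().
    }
  }
}

Calling deleteIfOnlyContains(fs, namespaceDir, HConstants.HBASE_NON_TABLE_DIRS) reproduces the guard above, since HBASE_NON_TABLE_DIRS lists the system entries a namespace directory may legitimately contain.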
Use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.
The class DeleteTableProcedure, method deleteFromFs:
protected static void deleteFromFs(final MasterProcedureEnv env, final TableName tableName,
    final List<HRegionInfo> regions, final boolean archive) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final FileSystem fs = mfs.getFileSystem();
  final Path tempdir = mfs.getTempDir();

  final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  final Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);

  if (fs.exists(tableDir)) {
    // Ensure the temp directory exists.
    if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
      throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
    }

    // Ensure the parent of the temp table directory exists.
    if (!fs.exists(tempTableDir.getParent()) && !fs.mkdirs(tempTableDir.getParent())) {
      throw new IOException(
        "HBase temp directory '" + tempTableDir.getParent() + "' creation failure.");
    }

    // Move the table into /hbase/.tmp
    if (!fs.rename(tableDir, tempTableDir)) {
      if (fs.exists(tempTableDir)) {
        // TODO
        // what's in this dir? something old? probably something manual from the user...
        // let's get rid of this stuff...
        FileStatus[] files = fs.listStatus(tempTableDir);
        if (files != null && files.length > 0) {
          for (int i = 0; i < files.length; ++i) {
            if (!files[i].isDir()) continue;
            HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir, files[i].getPath());
          }
        }
        fs.delete(tempTableDir, true);
      }
      throw new IOException("Unable to move '" + tableDir + "' to temp '" + tempTableDir + "'");
    }
  }

  // Archive regions from the FS (temp directory).
  if (archive) {
    for (HRegionInfo hri : regions) {
      LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
      HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir,
        HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
    }
    LOG.debug("Table '" + tableName + "' archived!");
  }

  // Archive mob data.
  Path mobTableDir =
    FSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME), tableName);
  Path regionDir = new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName());
  if (fs.exists(regionDir)) {
    HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir);
  }

  // Delete the table directory from the FS (temp directory).
  if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) {
    throw new IOException("Couldn't delete " + tempTableDir);
  }

  // Delete the table directory where the mob files are saved.
  if (mobTableDir != null && fs.exists(mobTableDir)) {
    if (!fs.delete(mobTableDir, true)) {
      throw new IOException("Couldn't delete mob dir " + mobTableDir);
    }
  }
}
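Stripped of procedure state, quota handling, and mob/archive bookkeeping, the core idiom in deleteFromFs() is rename-into-temp, then delete from temp, so a half-finished delete never leaves a partially removed table in the live root. A minimal hedged sketch of just that idiom; the helper name and paths are placeholders, not HBase layout constants:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class MoveThenDelete {
  // Move src under tempDir, then delete it there; callers retry on failure,
  // mirroring the rename-then-delete flow of deleteFromFs().
  static void moveAside(FileSystem fs, Path src, Path tempDir) throws IOException {
    if (!fs.exists(tempDir) && !fs.mkdirs(tempDir)) {
      throw new IOException("Temp directory '" + tempDir + "' creation failure.");
    }
    Path dst = new Path(tempDir, src.getName());
    if (!fs.rename(src, dst)) {
      throw new IOException("Unable to move '" + src + "' to temp '" + dst + "'");
    }
    if (!fs.delete(dst, true) && fs.exists(dst)) {
      throw new IOException("Couldn't delete " + dst);
    }
  }
}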
Use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.
The class CloneSnapshotProcedure, method preCloneSnapshot:
/**
 * Action before cloning from snapshot.
 * @param env MasterProcedureEnv
 * @throws IOException
 * @throws InterruptedException
 */
private void preCloneSnapshot(final MasterProcedureEnv env)
    throws IOException, InterruptedException {
  if (!getTableName().isSystemTable()) {
    // Check and update the namespace quota.
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    SnapshotManifest manifest = SnapshotManifest.open(
      env.getMasterConfiguration(), mfs.getFileSystem(),
      SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()), snapshot);
    ProcedureSyncWait.getMasterQuotaManager(env)
      .checkNamespaceTableAndRegionQuota(getTableName(), manifest.getRegionManifestsMap().size());
  }

  final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
  if (cpHost != null) {
    cpHost.preCreateTableAction(hTableDescriptor, null, getUser());
  }
}
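The region count fed into the quota check comes straight from the snapshot manifest. A hedged sketch of that step in isolation; the helper class is an assumption, and the SnapshotDescription protobuf package (HBaseProtos here, a shaded SnapshotProtos in later HBase lines) must match the version in use:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;

final class SnapshotRegionCount {
  // "snapshot" is the same description object the procedure already holds;
  // open() and getRegionManifestsMap() are the calls preCloneSnapshot() uses.
  static int count(Configuration conf, FileSystem fs, Path rootDir,
      SnapshotDescription snapshot) throws IOException {
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
    return manifest.getRegionManifestsMap().size();
  }
}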