Search in sources :

Example 1 with MasterFileSystem

use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

In class RestoreSnapshotProcedure: the restoreSnapshot method.

/**
 * Performs the on-disk portion of the snapshot restore and records the
 * resulting meta-level changes (regions to add, remove, and restore, plus the
 * parent-to-children split mapping) in this procedure's state fields.
 * @param env the master procedure environment
 * @throws IOException if the on-disk restore fails
 **/
private void restoreSnapshot(final MasterProcedureEnv env) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Path rootDir = mfs.getRootDir();
    final FileSystem fs = mfs.getFileSystem();
    final ForeignExceptionDispatcher dispatcher = new ForeignExceptionDispatcher();
    LOG.info("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
    try {
        final Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
        final SnapshotManifest manifest = SnapshotManifest.open(
            env.getMasterServices().getConfiguration(), fs, snapshotDir, snapshot);
        final RestoreSnapshotHelper helper = new RestoreSnapshotHelper(
            env.getMasterServices().getConfiguration(), fs, manifest,
            modifiedHTableDescriptor, rootDir, dispatcher, getMonitorStatus());
        // Apply the snapshot layout on HDFS and capture the region-level changes.
        final RestoreSnapshotHelper.RestoreMetaChanges changes = helper.restoreHdfsRegions();
        regionsToAdd = changes.getRegionsToAdd();
        regionsToRemove = changes.getRegionsToRemove();
        regionsToRestore = changes.getRegionsToRestore();
        parentsToChildrenPairMap = changes.getParentToChildrenPairMap();
    } catch (IOException e) {
        String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + " failed in on-disk restore. Try re-running the restore command.";
        LOG.error(msg, e);
        // Notify any snapshot monitor of the failure before propagating it.
        dispatcher.receive(new ForeignException(env.getMasterServices().getServerName().toString(), e));
        throw new IOException(msg, e);
    }
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) FileSystem(org.apache.hadoop.fs.FileSystem) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) ForeignException(org.apache.hadoop.hbase.errorhandling.ForeignException) RestoreSnapshotHelper(org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) ForeignExceptionDispatcher(org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher)

Example 2 with MasterFileSystem

use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

In class RestoreSnapshotProcedure: the prepareRestore method.

/**
 * Validation performed before any real restore work starts: the target table
 * must exist, be modifiable (disabled), and have at least one column family.
 * For non-system tables the namespace region quota is reconciled against the
 * snapshot's region count.
 * @param env the master procedure environment
 * @throws IOException if any precondition fails
 */
private void prepareRestore(final MasterProcedureEnv env) throws IOException {
    final TableName tableName = getTableName();
    // A restore targets an existing table; fail fast if it is missing.
    if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
        throw new TableNotFoundException(tableName);
    }
    // The table must be in a state that allows modification (i.e. disabled).
    env.getMasterServices().checkTableModifiable(tableName);
    // A table without any column family cannot be restored.
    if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) {
        throw new DoNotRetryIOException("Table " + getTableName().toString() + " should have at least one column family.");
    }
    // System tables are exempt from namespace quota accounting.
    if (getTableName().isSystemTable()) {
        return;
    }
    // Table already exists: check and update the region quota for its namespace.
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final SnapshotManifest manifest = SnapshotManifest.open(env.getMasterConfiguration(), mfs.getFileSystem(), SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()), snapshot);
    final int snapshotRegionCount = manifest.getRegionManifestsMap().size();
    final int tableRegionCount = ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);
    if (snapshotRegionCount > 0 && tableRegionCount != snapshotRegionCount) {
        ProcedureSyncWait.getMasterQuotaManager(env).checkAndUpdateNamespaceRegionQuota(tableName, snapshotRegionCount);
    }
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) TableName(org.apache.hadoop.hbase.TableName) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException)

Example 3 with MasterFileSystem

use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

In class DeleteNamespaceProcedure: the deleteDirectory method.

/**
 * Delete the namespace directory from the file system. The directory may only
 * contain the well-known non-table entries; if any table directory is still
 * present the delete is refused. A directory that is already gone is treated
 * as success.
 * @param env MasterProcedureEnv
 * @param namespaceName name of the namespace in string format
 * @throws IOException if the directory still contains a table dir, or the delete fails
 */
protected static void deleteDirectory(final MasterProcedureEnv env, final String namespaceName) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final FileSystem fs = mfs.getFileSystem();
    final Path p = FSUtils.getNamespaceDir(mfs.getRootDir(), namespaceName);
    try {
        // Refuse to delete a namespace that still holds table data.
        for (FileStatus status : fs.listStatus(p)) {
            if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) {
                throw new IOException("Namespace directory contains table dir: " + status.getPath());
            }
        }
        // Reuse the already-computed namespace path instead of rebuilding it.
        if (!fs.delete(p, true)) {
            throw new IOException("Failed to remove namespace: " + namespaceName);
        }
    } catch (FileNotFoundException e) {
        // File already deleted, continue
        LOG.debug("deleteDirectory throws exception: " + e);
    }
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException)

Example 4 with MasterFileSystem

use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

In class DeleteTableProcedure: the deleteFromFs method.

/**
 * Removes a table's data from the file system: the table directory is first
 * moved into the HBase temp directory, regions are optionally archived from
 * there, mob data is archived, and finally the temp copy and the mob table
 * directory are deleted.
 * @param env MasterProcedureEnv
 * @param tableName table whose on-disk data is being removed
 * @param regions regions of the table (archived only when {@code archive} is true)
 * @param archive whether region data should be archived before deletion
 * @throws IOException on any file-system failure along the way
 */
protected static void deleteFromFs(final MasterProcedureEnv env, final TableName tableName, final List<HRegionInfo> regions, final boolean archive) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final FileSystem fs = mfs.getFileSystem();
    final Path tempdir = mfs.getTempDir();
    final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
    final Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
    if (fs.exists(tableDir)) {
        // Ensure temp exists
        if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
            throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
        }
        // Ensure parent exists
        if (!fs.exists(tempTableDir.getParent()) && !fs.mkdirs(tempTableDir.getParent())) {
            throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
        }
        // Move the table in /hbase/.tmp
        if (!fs.rename(tableDir, tempTableDir)) {
            if (fs.exists(tempTableDir)) {
                // TODO
                // what's in this dir? something old? probably something manual from the user...
                // let's get rid of this stuff...
                // NOTE(review): the comment above refers to tempTableDir, yet the code
                // below lists and deletes tempdir (the whole temp root) — confirm this
                // is intentional before relying on it.
                FileStatus[] files = fs.listStatus(tempdir);
                if (files != null && files.length > 0) {
                    for (int i = 0; i < files.length; ++i) {
                        if (!files[i].isDir())
                            continue;
                        HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir, files[i].getPath());
                    }
                }
                fs.delete(tempdir, true);
            }
            // Rename failed and could not be remediated: surface the failure.
            throw new IOException("Unable to move '" + tableDir + "' to temp '" + tempTableDir + "'");
        }
    }
    // Archive regions from FS (temp directory)
    if (archive) {
        for (HRegionInfo hri : regions) {
            LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
            HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
        }
        LOG.debug("Table '" + tableName + "' archived!");
    }
    // Archive mob data
    Path mobTableDir = FSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME), tableName);
    Path regionDir = new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName());
    if (fs.exists(regionDir)) {
        HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir);
    }
    // Delete table directory from FS (temp directory)
    if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) {
        throw new IOException("Couldn't delete " + tempTableDir);
    }
    // Delete the table directory where the mob files are saved
    // (mobTableDir is assigned unconditionally above, so the null check is purely defensive)
    if (mobTableDir != null && fs.exists(mobTableDir)) {
        if (!fs.delete(mobTableDir, true)) {
            throw new IOException("Couldn't delete mob dir " + mobTableDir);
        }
    }
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) FileStatus(org.apache.hadoop.fs.FileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) IOException(java.io.IOException)

Example 5 with MasterFileSystem

use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

In class CloneSnapshotProcedure: the preCloneSnapshot method.

/**
 * Pre-flight work before cloning from a snapshot: reserves namespace/table
 * region quota for the regions the clone will create (non-system tables
 * only), then invokes the pre-create-table coprocessor hook if a coprocessor
 * host is present.
 * @param env the master procedure environment
 * @throws IOException if quota reservation or the coprocessor hook fails
 * @throws InterruptedException if interrupted while waiting
 */
private void preCloneSnapshot(final MasterProcedureEnv env) throws IOException, InterruptedException {
    if (!getTableName().isSystemTable()) {
        // System tables are exempt from namespace quota accounting.
        final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
        final SnapshotManifest manifest = SnapshotManifest.open(
            env.getMasterConfiguration(),
            mfs.getFileSystem(),
            SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()),
            snapshot);
        final int regionCount = manifest.getRegionManifestsMap().size();
        ProcedureSyncWait.getMasterQuotaManager(env)
            .checkNamespaceTableAndRegionQuota(getTableName(), regionCount);
    }
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        cpHost.preCreateTableAction(hTableDescriptor, null, getUser());
    }
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost)

Aggregations

MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem)24 Path (org.apache.hadoop.fs.Path)19 FileSystem (org.apache.hadoop.fs.FileSystem)10 IOException (java.io.IOException)7 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)5 Configuration (org.apache.hadoop.conf.Configuration)4 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)4 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)4 HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem)4 SnapshotManifest (org.apache.hadoop.hbase.snapshot.SnapshotManifest)4 ArrayList (java.util.ArrayList)3 FileStatus (org.apache.hadoop.fs.FileStatus)3 TableName (org.apache.hadoop.hbase.TableName)3 HashSet (java.util.HashSet)2 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)2 Admin (org.apache.hadoop.hbase.client.Admin)2 ForeignException (org.apache.hadoop.hbase.errorhandling.ForeignException)2 ForeignExceptionDispatcher (org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher)2 CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig)2 StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile)2