Example 21 with MasterFileSystem

use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

the class CloneSnapshotProcedure method createFilesystemLayout.

/**
   * Create regions in file system.
   * @param env MasterProcedureEnv
   * @throws IOException
   */
private List<HRegionInfo> createFilesystemLayout(final MasterProcedureEnv env, final HTableDescriptor hTableDescriptor, final List<HRegionInfo> newRegions) throws IOException {
    return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {

        @Override
        public List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env, final Path tableRootDir, final TableName tableName, final List<HRegionInfo> newRegions) throws IOException {
            final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
            final FileSystem fs = mfs.getFileSystem();
            final Path rootDir = mfs.getRootDir();
            final Configuration conf = env.getMasterConfiguration();
            final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
            getMonitorStatus().setStatus("Clone snapshot - creating regions for table: " + tableName);
            try {
                // 1. Execute the on-disk Clone
                Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
                SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
                RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs, manifest, hTableDescriptor, tableRootDir, monitorException, monitorStatus);
                RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
                // Clone operation should not have stuff to restore or remove
                Preconditions.checkArgument(!metaChanges.hasRegionsToRestore(), "A clone should not have regions to restore");
                Preconditions.checkArgument(!metaChanges.hasRegionsToRemove(), "A clone should not have regions to remove");
                // At this point the clone is complete. Next step is enabling the table.
                String msg = "Clone snapshot=" + snapshot.getName() + " on table=" + tableName + " completed!";
                LOG.info(msg);
                monitorStatus.setStatus(msg + " Waiting for table to be enabled...");
                // 2. Let the next step add the regions to meta
                return metaChanges.getRegionsToAdd();
            } catch (Exception e) {
                String msg = "clone snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + " failed because " + e.getMessage();
                LOG.error(msg, e);
                IOException rse = new RestoreSnapshotException(msg, e, ProtobufUtil.createSnapshotDesc(snapshot));
                // these handlers aren't futures so we need to register the error here.
                monitorException.receive(new ForeignException("Master CloneSnapshotProcedure", rse));
                throw rse;
            }
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Configuration(org.apache.hadoop.conf.Configuration) CreateHdfsRegions(org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.CreateHdfsRegions) IOException(java.io.IOException) ForeignExceptionDispatcher(org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher) RestoreSnapshotException(org.apache.hadoop.hbase.snapshot.RestoreSnapshotException) ForeignException(org.apache.hadoop.hbase.errorhandling.ForeignException) TableExistsException(org.apache.hadoop.hbase.TableExistsException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) SnapshotManifest(org.apache.hadoop.hbase.snapshot.SnapshotManifest) FileSystem(org.apache.hadoop.fs.FileSystem) ArrayList(java.util.ArrayList) List(java.util.List) RestoreSnapshotHelper(org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper)
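
The pattern above reduces to a short sequence of MasterFileSystem lookups: the procedure environment hands out the MasterFileSystem, which in turn provides the underlying FileSystem and the cluster root directory, and the completed snapshot layout is resolved relative to that root. The following is a minimal, illustrative sketch of just those lookups; the SnapshotDescription parameter stands in for the snapshot field held by the procedure in the example, and error handling is omitted.

// Minimal sketch (illustrative only): resolve a completed snapshot's manifest
// through MasterFileSystem, mirroring step 1 of the example above.
// SnapshotDescription here is the same snapshot descriptor type used by the example.
private SnapshotManifest openSnapshotManifest(final MasterProcedureEnv env, final SnapshotDescription snapshot) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final FileSystem fs = mfs.getFileSystem();
    final Path rootDir = mfs.getRootDir();
    final Configuration conf = env.getMasterConfiguration();
    // Completed snapshots are stored under the cluster root directory.
    final Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
    // The manifest lists the regions and store files captured by the snapshot.
    return SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
}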

Example 22 with MasterFileSystem

use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

the class CloneSnapshotProcedure method createFsLayout.

/**
   * Create region layout in file system.
   * @param env MasterProcedureEnv
   * @throws IOException
   */
private List<HRegionInfo> createFsLayout(final MasterProcedureEnv env, final HTableDescriptor hTableDescriptor, List<HRegionInfo> newRegions, final CreateHdfsRegions hdfsRegionHandler) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Path tempdir = mfs.getTempDir();
    // 1. Create Table Descriptor
    // using a copy of descriptor, table will be created enabling first
    HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
    final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
    ((FSTableDescriptors) (env.getMasterServices().getTableDescriptors())).createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false);
    // 2. Create Regions
    newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir, hTableDescriptor.getTableName(), newRegions);
    // 3. Move Table temp directory to the hbase root location
    CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
    return newRegions;
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
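
CloneSnapshotProcedure.createFsLayout and CreateTableProcedure.createFsLayout (Example 23 below) share the same three-step pattern: write the table descriptor into a staging directory obtained from MasterFileSystem.getTempDir(), let the supplied CreateHdfsRegions handler materialize the region directories there, and only then move the finished table directory under the HBase root. Staging under the temp directory keeps a half-built table layout from appearing under the root directory if region creation fails partway through.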

Example 23 with MasterFileSystem

use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

the class CreateTableProcedure method createFsLayout.

protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env, final HTableDescriptor hTableDescriptor, List<HRegionInfo> newRegions, final CreateHdfsRegions hdfsRegionHandler) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Path tempdir = mfs.getTempDir();
    // 1. Create Table Descriptor
    // using a copy of descriptor, table will be created enabling first
    final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
    ((FSTableDescriptors) (env.getMasterServices().getTableDescriptors())).createTableDescriptorForTableDirectory(tempTableDir, hTableDescriptor, false);
    // 2. Create Regions
    newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir, hTableDescriptor.getTableName(), newRegions);
    // 3. Move Table temp directory to the hbase root location
    moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
    return newRegions;
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors)
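
The hdfsRegionHandler parameter is the extension point that distinguishes the callers: CloneSnapshotProcedure passes the snapshot-restoring handler shown in Example 21, while other callers can supply handlers that create fresh, empty regions. A deliberately no-op sketch of the callback, useful only to show which arguments the procedures pass in, could look like this (illustrative, not part of the HBase source):

// Illustrative no-op CreateHdfsRegions handler; real handlers create the
// region directories under tableRootDir before returning the region list.
final CreateHdfsRegions noopHandler = new CreateHdfsRegions() {

    @Override
    public List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env, final Path tableRootDir, final TableName tableName, final List<HRegionInfo> newRegions) throws IOException {
        return newRegions;
    }
};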

Example 24 with MasterFileSystem

use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.

the class TestRestoreSnapshotFromClient method getFamiliesFromFS.

private Set<String> getFamiliesFromFS(final TableName tableName) throws IOException {
    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Set<String> families = new HashSet<>();
    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
    for (Path regionDir : FSUtils.getRegionDirs(mfs.getFileSystem(), tableDir)) {
        for (Path familyDir : FSUtils.getFamilyDirs(mfs.getFileSystem(), regionDir)) {
            families.add(familyDir.getName());
        }
    }
    return families;
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) HashSet(java.util.HashSet)
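
A helper like this is typically used to verify that a restore left the expected column family directories on disk. A minimal, hypothetical use inside the same test class might look like the following; the table name and family name are placeholders, not values from the actual test.

// Hypothetical assertion: after a restore, the family directory 'cf' should
// still exist on disk for the (placeholder) table name used here.
Set<String> families = getFamiliesFromFS(TableName.valueOf("testRestoreTable"));
assertTrue("family 'cf' missing after restore", families.contains("cf"));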

Aggregations

MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem) 24
Path (org.apache.hadoop.fs.Path) 19
FileSystem (org.apache.hadoop.fs.FileSystem) 10
IOException (java.io.IOException) 7
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 5
Configuration (org.apache.hadoop.conf.Configuration) 4
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 4
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 4
HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem) 4
SnapshotManifest (org.apache.hadoop.hbase.snapshot.SnapshotManifest) 4
ArrayList (java.util.ArrayList) 3
FileStatus (org.apache.hadoop.fs.FileStatus) 3
TableName (org.apache.hadoop.hbase.TableName) 3
HashSet (java.util.HashSet) 2
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 2
Admin (org.apache.hadoop.hbase.client.Admin) 2
ForeignException (org.apache.hadoop.hbase.errorhandling.ForeignException) 2
ForeignExceptionDispatcher (org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher) 2
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig) 2
StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile) 2