Search in sources :

Example 16 with FSTableDescriptors

Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.

The class MasterFileSystem, method bootstrap.

/**
 * Bootstraps the cluster filesystem by creating the hbase:meta region under the
 * given root directory.
 * @param rd the HBase root directory on the filesystem
 * @param c cluster configuration used to locate the meta table descriptor
 * @throws IOException if region creation fails; RemoteExceptions are unwrapped first
 */
private static void bootstrap(final Path rd, final Configuration c) throws IOException {
    LOG.info("BOOTSTRAP: creating hbase:meta region");
    try {
        // Bootstrapping, make sure blockcache is off.  Else, one will be
        // created here in bootstrap and it'll need to be cleaned up.  Better to
        // not make it in first place.  Turn off block caching for bootstrap.
        // Enable after.
        HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
        HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
        setInfoFamilyCachingForMeta(metaDescriptor, false);
        HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null);
        // Re-enable caching on the descriptor now that the region is materialized.
        setInfoFamilyCachingForMeta(metaDescriptor, true);
        meta.close();
    } catch (IOException e) {
        // Unwrap into a fresh local rather than reassigning the catch parameter,
        // which obscures the original reference and trips static analyzers.
        IOException cause =
            e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
        LOG.error("bootstrap", cause);
        throw cause;
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Example 17 with FSTableDescriptors

Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.

The class CloneSnapshotProcedure, method createFsLayout.

/**
 * Creates the on-disk layout for the cloned table: the table descriptor first,
 * then the region directories on HDFS.
 * @param env the master procedure environment
 * @param tableDescriptor descriptor of the table being cloned
 * @param newRegions regions to materialize on the filesystem
 * @param hdfsRegionHandler callback that performs the actual region-directory creation
 * @return the regions created on HDFS
 * @throws IOException if the descriptor or region directories cannot be written
 */
private List<RegionInfo> createFsLayout(final MasterProcedureEnv env, final TableDescriptor tableDescriptor, List<RegionInfo> newRegions, final CreateHdfsRegions hdfsRegionHandler) throws IOException {
    final MasterFileSystem masterFs = env.getMasterServices().getMasterFileSystem();
    final Path rootDir = masterFs.getRootDir();
    final Path tableDir = CommonFSUtils.getTableDir(rootDir, tableDescriptor.getTableName());
    // A leftover table dir would cause an exception and unlimited retry (see
    // HBASE-24546), so remove it before writing the descriptor.
    if (CommonFSUtils.isExists(masterFs.getFileSystem(), tableDir)) {
        LOG.warn("temp table dir already exists on disk: {}, will be deleted.", tableDir);
        CommonFSUtils.deleteDirectory(masterFs.getFileSystem(), tableDir);
    }
    // 1. Write the table descriptor (a copy; the table is created enabled first).
    final FSTableDescriptors fsDescriptors =
        (FSTableDescriptors) env.getMasterServices().getTableDescriptors();
    fsDescriptors.createTableDescriptorForTableDirectory(
        tableDir, TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false);
    // 2. Create the region directories and return the resulting region list.
    return hdfsRegionHandler.createHdfsRegions(env, rootDir, tableDescriptor.getTableName(), newRegions);
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors)

Example 18 with FSTableDescriptors

Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.

The class CreateTableProcedure, method createFsLayout.

/**
 * Creates the filesystem layout for a new table: writes the table descriptor,
 * then delegates region-directory creation to the supplied handler.
 * @param env the master procedure environment
 * @param tableDescriptor descriptor of the table to create
 * @param newRegions regions to materialize on HDFS
 * @param hdfsRegionHandler callback that creates the region directories
 * @return the regions created on HDFS
 * @throws IOException if the descriptor or region directories cannot be written
 */
protected static List<RegionInfo> createFsLayout(final MasterProcedureEnv env, final TableDescriptor tableDescriptor, List<RegionInfo> newRegions, final CreateHdfsRegions hdfsRegionHandler) throws IOException {
    final MasterFileSystem masterFs = env.getMasterServices().getMasterFileSystem();
    final Path rootDir = masterFs.getRootDir();
    // 1. Persist the table descriptor (using a copy; the table comes up enabled first).
    final Path tableDir = CommonFSUtils.getTableDir(rootDir, tableDescriptor.getTableName());
    ((FSTableDescriptors) env.getMasterServices().getTableDescriptors())
        .createTableDescriptorForTableDirectory(tableDir, tableDescriptor, false);
    // 2. Create the region directories and return the resulting region list.
    return hdfsRegionHandler.createHdfsRegions(env, rootDir, tableDescriptor.getTableName(), newRegions);
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors)

Example 19 with FSTableDescriptors

Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.

The class BackupUtils, method copyTableRegionInfo.

/**
 * Copies each backed-up table's descriptor and region info files into the
 * incremental backup image. NOTE(review): consider moving this logic into
 * HBackupFileSystem.
 * @param conn connection used to list tables and regions
 * @param backupInfo backup info providing the table set and target directories
 * @param conf configuration used to resolve source and target filesystems
 * @throws IOException if any descriptor or region info cannot be written
 */
public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf) throws IOException {
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    // info files in the correct directory structure
    try (Admin admin = conn.getAdmin()) {
        for (TableName table : backupInfo.getTables()) {
            if (!admin.tableExists(table)) {
                // Table may have been dropped since the backup set was defined.
                LOG.warn("Table " + table + " does not exist, skipping it.");
                continue;
            }
            TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
            // Write a copy of the descriptor to the target directory.
            Path target = new Path(backupInfo.getTableBackupDir(table));
            FileSystem targetFs = target.getFileSystem(conf);
            // Log BEFORE performing the copy so a failure is attributable.
            LOG.debug("Attempting to copy table info for:" + table + " target: " + target + " descriptor: " + orig);
            // Reuse the already-computed rootDir instead of re-resolving it.
            FSTableDescriptors descriptors = new FSTableDescriptors(targetFs, rootDir);
            descriptors.createTableDescriptorForTableDirectory(target, orig, false);
            LOG.debug("Finished copying tableinfo.");
            List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
            // For each region, write the region info to disk under the backup dir.
            LOG.debug("Starting to write region info for table " + table);
            for (RegionInfo regionInfo : regions) {
                // Only the region dir NAME is needed; anchor it under the backup target.
                Path regionDir =
                    new Path(target, FSUtils.getRegionDirFromTableDir(target, regionInfo).getName());
                writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
            }
            LOG.debug("Finished writing region info for table " + table);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) FileSystem(org.apache.hadoop.fs.FileSystem) HBackupFileSystem(org.apache.hadoop.hbase.backup.HBackupFileSystem) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Admin(org.apache.hadoop.hbase.client.Admin) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)

Example 20 with FSTableDescriptors

Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.

The class TestRegionInfo, method testReadAndWriteHRegionInfoFile.

@Test
public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
    final HBaseTestingUtil util = new HBaseTestingUtil();
    final RegionInfo metaRegionInfo = RegionInfoBuilder.FIRST_META_REGIONINFO;
    final Path baseDir = util.getDataTestDir();
    // Creating the region writes the .regioninfo file as a side effect.
    final FSTableDescriptors descriptors = new FSTableDescriptors(util.getConfiguration());
    FSTableDescriptors.tryUpdateMetaTableDescriptor(util.getConfiguration());
    HRegion region = HBaseTestingUtil.createRegionAndWAL(metaRegionInfo, baseDir,
        util.getConfiguration(), descriptors.get(TableName.META_TABLE_NAME));
    // Capture the file's modification time before closing.
    final long firstModTime = getModTime(region);
    HBaseTestingUtil.closeRegionAndWAL(region);
    // Sleep past filesystem mtime granularity so a rewrite would be observable.
    Thread.sleep(1001);
    region = HRegion.openHRegion(baseDir, metaRegionInfo,
        descriptors.get(TableName.META_TABLE_NAME), null, util.getConfiguration());
    // Re-opening the region must not rewrite the .regioninfo file.
    final long secondModTime = getModTime(region);
    assertEquals(firstModTime, secondModTime);
    // Load the persisted content and verify it round-trips to the same region info.
    org.apache.hadoop.hbase.client.RegionInfo roundTripped =
        HRegionFileSystem.loadRegionInfoFileContent(
            region.getRegionFileSystem().getFileSystem(),
            region.getRegionFileSystem().getRegionDir());
    assertEquals(0,
        org.apache.hadoop.hbase.client.RegionInfo.COMPARATOR.compare(metaRegionInfo, roundTripped));
    HBaseTestingUtil.closeRegionAndWAL(region);
}
Also used : Path(org.apache.hadoop.fs.Path) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Test(org.junit.Test)

Aggregations

FSTableDescriptors (org.apache.hadoop.hbase.util.FSTableDescriptors)21 Path (org.apache.hadoop.fs.Path)16 Test (org.junit.Test)10 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)6 FileSystem (org.apache.hadoop.fs.FileSystem)5 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)5 MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem)4 Configuration (org.apache.hadoop.conf.Configuration)3 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)3 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)3 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)2 HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil)2 HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility)2 TableDescriptors (org.apache.hadoop.hbase.TableDescriptors)2 WALFactory (org.apache.hadoop.hbase.wal.WALFactory)2 IOException (java.io.IOException)1 ArrayList (java.util.ArrayList)1 Cell (org.apache.hadoop.hbase.Cell)1 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)1 MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster)1