Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.
The class MasterFileSystem, method bootstrap.
private static void bootstrap(final Path rd, final Configuration c) throws IOException {
  LOG.info("BOOTSTRAP: creating hbase:meta region");
  try {
    // Bootstrapping, make sure blockcache is off. Else, one will be
    // created here in bootstrap and it'll need to be cleaned up. Better to
    // not make it in first place. Turn off block caching for bootstrap.
    // Enable after.
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
    setInfoFamilyCachingForMeta(metaDescriptor, false);
    HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null);
    setInfoFamilyCachingForMeta(metaDescriptor, true);
    meta.close();
  } catch (IOException e) {
    e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
    LOG.error("bootstrap", e);
    throw e;
  }
}
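For context, a minimal standalone sketch of the descriptor lookup bootstrap relies on, assuming a Configuration whose hbase.rootdir points at a valid HBase layout (in recent HBase versions get() returns a TableDescriptor rather than an HTableDescriptor):

// Resolve hbase:meta's descriptor from the filesystem via the
// FSTableDescriptors cache; get() returns null if no descriptor exists.
Configuration conf = HBaseConfiguration.create();
TableDescriptors tds = new FSTableDescriptors(conf);
TableDescriptor meta = tds.get(TableName.META_TABLE_NAME);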
Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.
The class CloneSnapshotProcedure, method createFsLayout.
/**
 * Create region layout in file system.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private List<RegionInfo> createFsLayout(final MasterProcedureEnv env,
    final TableDescriptor tableDescriptor, List<RegionInfo> newRegions,
    final CreateHdfsRegions hdfsRegionHandler) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  // 1. Create Table Descriptor
  // using a copy of descriptor, table will be created enabling first
  final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
  if (CommonFSUtils.isExists(mfs.getFileSystem(), tableDir)) {
    // if the region dirs exist, will cause exception and unlimited retry (see HBASE-24546)
    LOG.warn("temp table dir already exists on disk: {}, will be deleted.", tableDir);
    CommonFSUtils.deleteDirectory(mfs.getFileSystem(), tableDir);
  }
  ((FSTableDescriptors) env.getMasterServices().getTableDescriptors())
    .createTableDescriptorForTableDirectory(tableDir,
      TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false);
  // 2. Create Regions
  newRegions = hdfsRegionHandler.createHdfsRegions(env, mfs.getRootDir(),
    tableDescriptor.getTableName(), newRegions);
  return newRegions;
}
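Note that the last argument to createTableDescriptorForTableDirectory is the forceCreation flag, which both createFsLayout variants pass as false. A sketch of its effect, assuming the current API where the method returns whether a new .tabledesc file was actually written:

// Sketch: with forceCreation == false, an identical descriptor already on
// disk is left in place and the call returns false.
FSTableDescriptors fstd = (FSTableDescriptors) env.getMasterServices().getTableDescriptors();
boolean written = fstd.createTableDescriptorForTableDirectory(tableDir, tableDescriptor, false);
if (!written) {
  LOG.debug("Descriptor already present under {}", tableDir);
}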
Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.
The class CreateTableProcedure, method createFsLayout.
protected static List<RegionInfo> createFsLayout(final MasterProcedureEnv env,
    final TableDescriptor tableDescriptor, List<RegionInfo> newRegions,
    final CreateHdfsRegions hdfsRegionHandler) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  // 1. Create Table Descriptor
  // using a copy of descriptor, table will be created enabling first
  final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
  ((FSTableDescriptors) env.getMasterServices().getTableDescriptors())
    .createTableDescriptorForTableDirectory(tableDir, tableDescriptor, false);
  // 2. Create Regions
  newRegions = hdfsRegionHandler.createHdfsRegions(env, mfs.getRootDir(),
    tableDescriptor.getTableName(), newRegions);
  return newRegions;
}
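A quick sanity check of the layout step: the descriptor written above can be read straight back, a sketch assuming the static getTableDescriptorFromFs(FileSystem, Path) overload that takes the table directory directly:

// Read the just-written .tabledesc back from the table directory and
// confirm it round-trips; mfs, tableDir and tableDescriptor mirror the
// variables in the method above.
TableDescriptor onDisk = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
assert onDisk.getTableName().equals(tableDescriptor.getTableName());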
Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.
The class BackupUtils, method copyTableRegionInfo.
/**
 * Copy out Table RegionInfo into the incremental backup image. Need to consider moving this
 * logic into HBackupFileSystem.
 * @param conn connection
 * @param backupInfo backup info
 * @param conf configuration
 * @throws IOException exception
 */
public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo,
    Configuration conf) throws IOException {
  Path rootDir = CommonFSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  // info files in the correct directory structure
  try (Admin admin = conn.getAdmin()) {
    for (TableName table : backupInfo.getTables()) {
      if (!admin.tableExists(table)) {
        LOG.warn("Table " + table + " does not exist, skipping it.");
        continue;
      }
      TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
      // write a copy of descriptor to the target directory
      Path target = new Path(backupInfo.getTableBackupDir(table));
      FileSystem targetFs = target.getFileSystem(conf);
      FSTableDescriptors descriptors = new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf));
      descriptors.createTableDescriptorForTableDirectory(target, orig, false);
      LOG.debug("Attempting to copy table info for: " + table + " target: " + target
        + " descriptor: " + orig);
      LOG.debug("Finished copying tableinfo.");
      List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
      // For each region, write the region info to disk
      LOG.debug("Starting to write region info for table " + table);
      for (RegionInfo regionInfo : regions) {
        Path regionDir = FSUtils.getRegionDirFromTableDir(
          new Path(backupInfo.getTableBackupDir(table)), regionInfo);
        regionDir = new Path(backupInfo.getTableBackupDir(table), regionDir.getName());
        writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
      }
      LOG.debug("Finished writing region info for table " + table);
    }
  }
}
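On the restore side, the copied descriptor can be recovered the same way; a sketch, assuming the getTableDescriptorFromFs overload that takes a table directory rather than a root dir plus table name:

// Read the descriptor previously copied into the backup image; target and
// targetFs mirror the variables in copyTableRegionInfo above.
Path target = new Path(backupInfo.getTableBackupDir(table));
FileSystem targetFs = target.getFileSystem(conf);
TableDescriptor restored = FSTableDescriptors.getTableDescriptorFromFs(targetFs, target);
LOG.debug("Backup image descriptor: " + restored);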
Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.
The class TestRegionInfo, method testReadAndWriteHRegionInfoFile.
@Test
public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
  HBaseTestingUtil htu = new HBaseTestingUtil();
  RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
  Path basedir = htu.getDataTestDir();
  // Create a region. That'll write the .regioninfo file.
  FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
  FSTableDescriptors.tryUpdateMetaTableDescriptor(htu.getConfiguration());
  HRegion r = HBaseTestingUtil.createRegionAndWAL(hri, basedir, htu.getConfiguration(),
    fsTableDescriptors.get(TableName.META_TABLE_NAME));
  // Get modtime on the file.
  long modtime = getModTime(r);
  HBaseTestingUtil.closeRegionAndWAL(r);
  Thread.sleep(1001);
  r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
    null, htu.getConfiguration());
  // Ensure the file is not written for a second time.
  long modtime2 = getModTime(r);
  assertEquals(modtime, modtime2);
  // Now load the file.
  org.apache.hadoop.hbase.client.RegionInfo deserializedHri =
    HRegionFileSystem.loadRegionInfoFileContent(
      r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
  assertEquals(0, org.apache.hadoop.hbase.client.RegionInfo.COMPARATOR.compare(hri, deserializedHri));
  HBaseTestingUtil.closeRegionAndWAL(r);
}
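The test calls a getModTime helper it does not show; a plausible reconstruction, assuming it stats the .regioninfo file (HRegionFileSystem.REGION_INFO_FILE) inside the region directory:

// Hypothetical helper: return the modification time of the region's
// .regioninfo file so the test can detect an unwanted rewrite.
private long getModTime(final HRegion r) throws IOException {
  FileStatus[] statuses = r.getRegionFileSystem().getFileSystem().listStatus(
    new Path(r.getRegionFileSystem().getRegionDir(), HRegionFileSystem.REGION_INFO_FILE));
  assertEquals(1, statuses.length);
  return statuses[0].getModificationTime();
}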