use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
the class RestoreTool method getTableDesc.
/**
 * Get table descriptor
 * @param tableName the table that was backed up
 * @return {@link HTableDescriptor} saved in the backup image of the table
 */
HTableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
  Path tableInfoPath = this.getTableInfoPath(tableName);
  SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
  SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
  HTableDescriptor tableDescriptor = manifest.getTableDescriptor();
  if (!tableDescriptor.getTableName().equals(tableName)) {
    LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
        + tableInfoPath.toString());
    LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString());
    throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName
        + " under tableInfoPath: " + tableInfoPath.toString());
  }
  return tableDescriptor;
}
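A minimal usage sketch, not part of the snippet above: the RestoreTool instance, the caller's package access, and the table name "ns:orders" are assumptions for illustration. It reads the descriptor saved in the backup image and prints its column families.

void printBackedUpSchema(RestoreTool restoreTool) throws IOException {
  // getTableDesc is package-private, so this sketch assumes a caller in the same package.
  HTableDescriptor desc = restoreTool.getTableDesc(TableName.valueOf("ns:orders"));
  for (HColumnDescriptor family : desc.getColumnFamilies()) {
    System.out.println("family=" + family.getNameAsString()
        + " maxVersions=" + family.getMaxVersions());
  }
}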
use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
the class RestoreTool method incrementalRestoreTable.
/**
 * During an incremental backup operation, calls WalPlayer to replay the WALs in the backup
 * image. Currently tableNames and newTableNames only contain a single table; this will be
 * expanded to multiple tables in the future.
 * @param conn HBase connection
 * @param tableBackupPath backup path
 * @param logDirs incremental backup folders, which contain the WALs
 * @param tableNames source table names (the tables that were backed up)
 * @param newTableNames target table names (the tables to restore to)
 * @param incrBackupId incremental backup id
 * @throws IOException exception
 */
public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
    TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
  try (Admin admin = conn.getAdmin()) {
    if (tableNames.length != newTableNames.length) {
      throw new IOException("Number of source tables and target tables does not match!");
    }
    FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
    // Target tables are expected to exist already (e.g. created by restoring a full backup).
    // Here, check that all new tables exist.
    for (TableName tableName : newTableNames) {
      if (!admin.tableExists(tableName)) {
        throw new IOException("HBase table " + tableName
            + " does not exist. Create the table first, e.g. by restoring a full backup.");
      }
    }
    // adjust table schema
    for (int i = 0; i < tableNames.length; i++) {
      TableName tableName = tableNames[i];
      HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
      LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
      TableName newTableName = newTableNames[i];
      HTableDescriptor newTableDescriptor = admin.getTableDescriptor(newTableName);
      List<HColumnDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
      List<HColumnDescriptor> existingFamilies =
          Arrays.asList(newTableDescriptor.getColumnFamilies());
      boolean schemaChangeNeeded = false;
      // add families present in the backup image but missing from the target table
      for (HColumnDescriptor family : families) {
        if (!existingFamilies.contains(family)) {
          newTableDescriptor.addFamily(family);
          schemaChangeNeeded = true;
        }
      }
      // remove families present in the target table but missing from the backup image
      for (HColumnDescriptor family : existingFamilies) {
        if (!families.contains(family)) {
          newTableDescriptor.removeFamily(family.getName());
          schemaChangeNeeded = true;
        }
      }
      if (schemaChangeNeeded) {
        modifyTableSync(conn, newTableDescriptor);
        LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
      }
    }
    RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
    restoreService.run(logDirs, tableNames, newTableNames, false);
  }
}
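A hedged sketch of how this method might be driven: the RestoreTool instance, the HDFS paths, and the backup id below are placeholders, not values from the project.

void replayIncrementalBackup(RestoreTool restoreTool, Configuration conf) throws IOException {
  try (Connection conn = ConnectionFactory.createConnection(conf)) {
    // Source tables (as backed up) and target tables (to restore into) must pair up 1:1.
    TableName[] sourceTables = { TableName.valueOf("orders") };
    TableName[] targetTables = { TableName.valueOf("orders_restored") };
    // Directories holding the WALs of the incremental backup image (placeholder paths).
    Path[] walDirs = { new Path("hdfs:///backup/WALs/backup_1490000000000") };
    Path tableBackupPath = new Path("hdfs:///backup/backup_1490000000000/default/orders");
    restoreTool.incrementalRestoreTable(conn, tableBackupPath, walDirs,
        sourceTables, targetTables, "backup_1490000000000");
  }
}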
use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
the class RestoreTool method restoreTableAndCreate.
private void restoreTableAndCreate(Connection conn, TableName tableName, TableName newTableName,
    Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException {
  if (newTableName == null) {
    newTableName = tableName;
  }
  FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
  // get table descriptor first
  HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
  if (tableDescriptor != null) {
    LOG.debug("Retrieved descriptor: " + tableDescriptor + " thru " + lastIncrBackupId);
  }
  if (tableDescriptor == null) {
    Path tableSnapshotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
    if (fileSys.exists(tableSnapshotPath)) {
      // check whether snapshot dir already recorded for target table
      if (snapshotMap.get(tableName) != null) {
        SnapshotDescription desc =
            SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath);
        SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc);
        tableDescriptor = manifest.getTableDescriptor();
      } else {
        tableDescriptor = getTableDesc(tableName);
        snapshotMap.put(tableName, getTableInfoPath(tableName));
      }
      if (tableDescriptor == null) {
        LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost");
      }
    } else {
      throw new IOException("Table snapshot directory: " + tableSnapshotPath
          + " does not exist.");
    }
  }
  Path tableArchivePath = getTableArchivePath(tableName);
  if (tableArchivePath == null) {
    if (tableDescriptor != null) {
      // A table descriptor was found but there is no archive dir, which means the table is
      // empty: just create the table and exit.
      if (LOG.isDebugEnabled()) {
        LOG.debug("find table descriptor but no archive dir for table " + tableName
            + ", will only create table");
      }
      tableDescriptor.setName(newTableName);
      checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
          truncateIfExists);
      return;
    } else {
      throw new IllegalStateException("Cannot restore hbase table because directory '"
          + " tableArchivePath is null.");
    }
  }
  if (tableDescriptor == null) {
    tableDescriptor = new HTableDescriptor(newTableName);
  } else {
    tableDescriptor.setName(newTableName);
  }
  // load all files in dir
  try {
    ArrayList<Path> regionPathList = getRegionList(tableName);
    // Create the table with all region information available, so that the regions can be
    // pre-split at a fine grain.
    checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, regionPathList,
        tableDescriptor, truncateIfExists);
    if (tableArchivePath != null) {
      // start the real restore through bulkload
      // if the backup target is on the local cluster, special action is needed
      Path tempTableArchivePath = checkLocalAndBackup(tableArchivePath);
      if (tempTableArchivePath.equals(tableArchivePath)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("TableArchivePath for bulkload using existPath: " + tableArchivePath);
        }
      } else {
        // point to the tempDir
        regionPathList = getRegionList(tempTableArchivePath);
        if (LOG.isDebugEnabled()) {
          LOG.debug("TableArchivePath for bulkload using tempPath: " + tempTableArchivePath);
        }
      }
      LoadIncrementalHFiles loader = createLoader(tempTableArchivePath, false);
      for (Path regionPath : regionPathList) {
        String regionName = regionPath.toString();
        if (LOG.isDebugEnabled()) {
          LOG.debug("Restoring HFiles from directory " + regionName);
        }
        String[] args = { regionName, newTableName.getNameAsString() };
        loader.run(args);
      }
    }
    // recovered edits are not restored
  } catch (Exception e) {
    throw new IllegalStateException("Cannot restore hbase table", e);
  }
}
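The bulkload step above boils down to running LoadIncrementalHFiles with a directory of HFiles and a target table name. A hedged, standalone sketch of that step; the HDFS directory and table name are placeholders.

void bulkloadRestoredHFiles(Configuration conf) throws Exception {
  // LoadIncrementalHFiles is a Hadoop Tool; its arguments are: <path to HFiles> <table name>
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
  String[] args = { "hdfs:///backup/archive/data/default/orders", "orders_restored" };
  int exitCode = loader.run(args); // 0 on success
  if (exitCode != 0) {
    throw new IOException("Bulkload failed with exit code " + exitCode);
  }
}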
use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
the class ConstraintProcessor method start.
@Override
public void start(CoprocessorEnvironment environment) {
  // make sure we are on a region server
  if (!(environment instanceof RegionCoprocessorEnvironment)) {
    throw new IllegalArgumentException(
        "Constraints only act on regions - started in an environment that was not a region");
  }
  RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) environment;
  HTableDescriptor desc = env.getRegion().getTableDesc();
  // load all the constraints from the HTD
  try {
    this.constraints = Constraints.getConstraints(desc, classloader);
  } catch (IOException e) {
    throw new IllegalArgumentException(e);
  }
  if (LOG.isInfoEnabled()) {
    LOG.info("Finished loading " + constraints.size() + " user Constraints on table: "
        + desc.getTableName());
  }
}
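For context, a hedged sketch of how a table would be set up so this coprocessor finds constraints at region start: the Constraints utility serializes the constraint classes into the HTableDescriptor and registers ConstraintProcessor on the table. MyConstraint is a hypothetical Constraint implementation and the Admin handle is assumed to be open.

void createConstrainedTable(Admin admin) throws IOException {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
  desc.addFamily(new HColumnDescriptor("f"));
  // Adds the constraint metadata to the HTD and enables ConstraintProcessor on the table,
  // so ConstraintProcessor.start() above can load it via Constraints.getConstraints().
  Constraints.add(desc, MyConstraint.class);
  admin.createTable(desc);
}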
use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
the class MergeTableRegionsProcedure method mergeStoreFiles.
/**
 * Create reference file(s) of merging regions under the merges directory
 * @param env MasterProcedureEnv
 * @param regionFs region file system
 * @param mergedDir the temp directory of merged region
 * @throws IOException
 */
private void mergeStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs,
    final Path mergedDir) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Configuration conf = env.getMasterConfiguration();
  final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
  for (String family : regionFs.getFamilies()) {
    final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
    final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
    if (storeFiles != null && storeFiles.size() > 0) {
      final CacheConfig cacheConf = new CacheConfig(conf, hcd);
      for (StoreFileInfo storeFileInfo : storeFiles) {
        // Create reference file(s) of the region in mergedDir
        regionFs.mergeStoreFile(mergedRegionInfo, family,
            new StoreFile(mfs.getFileSystem(), storeFileInfo, conf, cacheConf,
                hcd.getBloomFilterType()),
            mergedDir);
      }
    }
  }
}
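For context, a hedged sketch of how the merge that eventually runs this procedure is usually triggered: through the Admin API with the names of two adjacent regions. The encoded region names below are placeholders, and mergeRegionsAsync is assumed from the HBase 2.x Admin interface.

void mergeAdjacentRegions(Connection conn) throws IOException {
  try (Admin admin = conn.getAdmin()) {
    // The master turns this request into a MergeTableRegionsProcedure, which in turn
    // calls mergeStoreFiles(...) to write reference files for every column family.
    admin.mergeRegionsAsync(
        Bytes.toBytes("region_a_encoded_name"),
        Bytes.toBytes("region_b_encoded_name"),
        false /* forcible */);
  }
}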