use of org.apache.hadoop.hbase.snapshot.SnapshotManifest in project hbase by apache.
the class CloneSnapshotProcedure method preCloneSnapshot.
/**
 * Action before cloning from snapshot.
 * @param env MasterProcedureEnv
 * @throws IOException
 * @throws InterruptedException
 */
private void preCloneSnapshot(final MasterProcedureEnv env)
    throws IOException, InterruptedException {
  if (!getTableName().isSystemTable()) {
    // Check and update namespace quota
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    SnapshotManifest manifest = SnapshotManifest.open(env.getMasterConfiguration(),
        mfs.getFileSystem(),
        SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()), snapshot);
    ProcedureSyncWait.getMasterQuotaManager(env).checkNamespaceTableAndRegionQuota(
        getTableName(), manifest.getRegionManifestsMap().size());
  }

  final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
  if (cpHost != null) {
    cpHost.preCreateTableAction(hTableDescriptor, null, getUser());
  }
}
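The snippet above opens the manifest only to count the snapshot's regions for the namespace quota check. Below is a minimal standalone sketch of the same open-and-count pattern, assuming the HBase 1.x-style API used throughout these examples; the snapshot name "mySnapshot" and the class name are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.FSUtils;

public class SnapshotRegionCount {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    // Locate the completed snapshot directory by name ("mySnapshot" is a placeholder).
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir("mySnapshot", rootDir);
    // Read the persisted snapshot description and open its manifest.
    HBaseProtos.SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
    // The region manifest map is keyed by encoded region name; its size is the
    // region count that the namespace quota check above consumes.
    System.out.println("regions in snapshot: " + manifest.getRegionManifestsMap().size());
  }
}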
use of org.apache.hadoop.hbase.snapshot.SnapshotManifest in project phoenix by apache.
the class MapReduceParallelScanGrouper method getRegionBoundaries.
@Override
public List<HRegionLocation> getRegionBoundaries(StatementContext context, byte[] tableName)
    throws SQLException {
  String snapshotName;
  Configuration conf = context.getConnection().getQueryServices().getConfiguration();
  if ((snapshotName = getSnapshotName(conf)) != null) {
    try {
      Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
      FileSystem fs = rootDir.getFileSystem(conf);
      Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
      HBaseProtos.SnapshotDescription snapshotDescription =
          SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
      SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDescription);
      return getRegionLocationsFromManifest(manifest);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  } else {
    return context.getConnection().getQueryServices().getAllTableRegions(tableName);
  }
}
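getRegionLocationsFromManifest is a Phoenix helper not shown here. As a hypothetical sketch (not the actual Phoenix code), region boundaries can be recovered from a manifest roughly as below, assuming the HBase 1.x protobuf classes used in these snippets; the class and method names are placeholders.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;

public class ManifestRegions {
  static List<HRegionInfo> regionsFromManifest(SnapshotManifest manifest) {
    List<HRegionInfo> regions = new ArrayList<>();
    Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
    if (regionManifests == null) {
      return regions; // snapshot with no regions
    }
    for (SnapshotRegionManifest regionManifest : regionManifests.values()) {
      // Convert the stored protobuf RegionInfo back into an HRegionInfo,
      // which exposes getStartKey()/getEndKey() for boundary computation.
      regions.add(HRegionInfo.convert(regionManifest.getRegionInfo()));
    }
    return regions;
  }
}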
use of org.apache.hadoop.hbase.snapshot.SnapshotManifest in project hbase by apache.
the class TableSnapshotInputFormatImpl method getSplits.
public static List<InputSplit> getSplits(Configuration conf) throws IOException {
  String snapshotName = getSnapshotName(conf);
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  SnapshotManifest manifest = getSnapshotManifest(conf, snapshotName, rootDir, fs);
  List<HRegionInfo> regionInfos = getRegionInfosFromManifest(manifest);
  // TODO: mapred does not support scan as input API. Work around for now.
  Scan scan = extractScanFromConf(conf);
  // the temp dir where the snapshot is restored
  Path restoreDir = new Path(conf.get(RESTORE_DIR_KEY));
  return getSplits(scan, manifest, regionInfos, restoreDir, conf);
}
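getSplits reads the snapshot name and RESTORE_DIR_KEY from the job configuration. A typical way those values get there is TableMapReduceUtil.initTableSnapshotMapperJob, as in the hedged driver sketch below; the snapshot name, restore path, and mapper are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotScanJob {
  static class MyMapper extends TableMapper<NullWritable, NullWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context) {
      // process one row of the snapshot
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "scan-over-snapshot");
    job.setJarByClass(SnapshotScanJob.class);
    TableMapReduceUtil.initTableSnapshotMapperJob(
        "mySnapshot",                      // snapshot to read
        new Scan(),                        // scan applied to each region
        MyMapper.class,
        NullWritable.class, NullWritable.class,
        job,
        true,                              // add HBase jars to the job classpath
        new Path("/tmp/snapshot-restore")  // temp dir where the snapshot regions are restored
    );
    job.setNumReduceTasks(0);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}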
use of org.apache.hadoop.hbase.snapshot.SnapshotManifest in project hbase by apache.
the class SnapshotManager method restoreOrCloneSnapshot.
/**
 * Restore or Clone the specified snapshot
 * @param reqSnapshot the snapshot to restore or clone
 * @param nonceKey unique identifier to prevent duplicated RPC
 * @throws IOException
 */
public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey nonceKey)
    throws IOException {
  FileSystem fs = master.getMasterFileSystem().getFileSystem();
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);

  // check if the snapshot exists
  if (!fs.exists(snapshotDir)) {
    LOG.error("A Snapshot named '" + reqSnapshot.getName() + "' does not exist.");
    throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(reqSnapshot));
  }

  // Get snapshot info from file system. The reqSnapshot is a "fake" snapshotInfo with
  // just the snapshot "name" and table name to restore. It does not contain the "real" snapshot
  // information.
  SnapshotDescription snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  SnapshotManifest manifest =
      SnapshotManifest.open(master.getConfiguration(), fs, snapshotDir, snapshot);
  HTableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
  TableName tableName = TableName.valueOf(reqSnapshot.getTable());

  // stop tracking "abandoned" handlers
  cleanupSentinels();

  // Verify snapshot validity
  SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);

  // Execute the restore/clone operation
  long procId;
  if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) {
    procId = restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey);
  } else {
    procId = cloneSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey);
  }
  return procId;
}
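On the client side this master method is normally reached through Admin.restoreSnapshot or Admin.cloneSnapshot, mirroring the table-exists branch above. The sketch below assumes "mySnapshot" was taken from table "myTable"; both names are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RestoreOrCloneClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("myTable");
      if (admin.tableExists(table)) {
        // An existing table must be disabled before it can be restored in place.
        admin.disableTable(table);
        admin.restoreSnapshot("mySnapshot");
        admin.enableTable(table);
      } else {
        // Cloning creates a brand new table from the snapshot contents.
        admin.cloneSnapshot("mySnapshot", table);
      }
    }
  }
}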
use of org.apache.hadoop.hbase.snapshot.SnapshotManifest in project hbase by apache.
the class CloneSnapshotProcedure method createFilesystemLayout.
/**
 * Create regions in file system.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private List<HRegionInfo> createFilesystemLayout(final MasterProcedureEnv env,
    final HTableDescriptor hTableDescriptor, final List<HRegionInfo> newRegions)
    throws IOException {
  return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
    @Override
    public List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env,
        final Path tableRootDir, final TableName tableName,
        final List<HRegionInfo> newRegions) throws IOException {
      final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
      final FileSystem fs = mfs.getFileSystem();
      final Path rootDir = mfs.getRootDir();
      final Configuration conf = env.getMasterConfiguration();
      final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();

      getMonitorStatus().setStatus("Clone snapshot - creating regions for table: " + tableName);

      try {
        // 1. Execute the on-disk Clone
        Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
        SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
        RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs, manifest,
            hTableDescriptor, tableRootDir, monitorException, monitorStatus);
        RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();

        // Clone operation should not have stuff to restore or remove
        Preconditions.checkArgument(!metaChanges.hasRegionsToRestore(),
            "A clone should not have regions to restore");
        Preconditions.checkArgument(!metaChanges.hasRegionsToRemove(),
            "A clone should not have regions to remove");

        // At this point the clone is complete. Next step is enabling the table.
        String msg = "Clone snapshot=" + snapshot.getName() + " on table=" + tableName
            + " completed!";
        LOG.info(msg);
        monitorStatus.setStatus(msg + " Waiting for table to be enabled...");

        // 2. Let the next step add the regions to meta
        return metaChanges.getRegionsToAdd();
      } catch (Exception e) {
        String msg = "clone snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
            + " failed because " + e.getMessage();
        LOG.error(msg, e);
        IOException rse = new RestoreSnapshotException(msg, e,
            ProtobufUtil.createSnapshotDesc(snapshot));
        // these handlers aren't futures so we need to register the error here.
        monitorException.receive(new ForeignException("Master CloneSnapshotProcedure", rse));
        throw rse;
      }
    }
  });
}
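The clone above materializes a full, permanent table from the snapshot. For read-only access (for example the TableSnapshotInputFormat path shown earlier), RestoreSnapshotHelper.copySnapshotForScanner re-creates the region layout under a temporary directory using file links rather than data copies; in the hedged sketch below, the restore path and snapshot name are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.util.FSUtils;

public class RestoreForScan {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path restoreDir = new Path("/tmp/snapshot-restore");
    // Materialize the snapshot's regions under restoreDir for read-only scanning.
    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, "mySnapshot");
  }
}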