use of org.apache.hadoop.fs.Path in project hbase by apache.
the class SnapshotDescriptionUtils method createInProgressTag.
/**
 * Creates the in-progress marker file under the .tmp directory of an in-progress snapshot.
 *
 * @param workingDir working directory of the in-progress snapshot
 * @param fs {@link FileSystem} where the marker file is created
 * @throws IOException if the marker file cannot be created
 */
public static void createInProgressTag(Path workingDir, FileSystem fs) throws IOException {
  // The marker inherits the configured data-file umask so its permissions match other files.
  FsPermission perms =
      FSUtils.getFilePermissions(fs, fs.getConf(), HConstants.DATA_FILE_UMASK_KEY);
  Path inProgressTag = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOT_IN_PROGRESS);
  FSUtils.create(fs, inProgressTag, perms, true);
}
use of org.apache.hadoop.fs.Path in project hbase by apache.
the class SnapshotDescriptionUtils method completeSnapshot.
/**
 * Move the finished snapshot to its final, publicly visible directory - this marks the snapshot
 * as 'complete'.
 * @param snapshot description of the snapshot being taken
 * @param rootdir root directory of the hbase installation
 * @param workingDir directory where the in progress snapshot was built
 * @param fs {@link FileSystem} where the snapshot was built
 * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if the
 *           snapshot could not be moved
 * @throws IOException the filesystem could not be reached
 */
public static void completeSnapshot(SnapshotDescription snapshot, Path rootdir, Path workingDir,
    FileSystem fs) throws SnapshotCreationException, IOException {
  Path completedDir = getCompletedSnapshotDir(snapshot, rootdir);
  LOG.debug("Snapshot is done, just moving the snapshot from " + workingDir + " to "
      + completedDir);
  // A single atomic rename publishes the snapshot; a false return means it did not move.
  boolean renamed = fs.rename(workingDir, completedDir);
  if (!renamed) {
    throw new SnapshotCreationException(
        "Failed to move working directory(" + workingDir + ") to completed directory("
            + completedDir + ").",
        ProtobufUtil.createSnapshotDesc(snapshot));
  }
}
use of org.apache.hadoop.fs.Path in project hbase by apache.
the class SnapshotManifest method addMobRegion.
public void addMobRegion(HRegionInfo regionInfo) throws IOException {
// 0. Get the ManifestBuilder/RegionVisitor
RegionVisitor visitor = createRegionVisitor(desc);
// 1. dump region meta info into the snapshot directory
LOG.debug("Storing mob region '" + regionInfo + "' region-info for snapshot.");
Object regionData = visitor.regionOpen(regionInfo);
monitor.rethrowException();
// 2. iterate through all the stores in the region
LOG.debug("Creating references for mob files");
Path mobRegionPath = MobUtils.getMobRegionPath(conf, regionInfo.getTable());
for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
// 2.1. build the snapshot reference for the store if it's a mob store
if (!hcd.isMobEnabled()) {
continue;
}
Object familyData = visitor.familyOpen(regionData, hcd.getName());
monitor.rethrowException();
Path storePath = MobUtils.getMobFamilyPath(mobRegionPath, hcd.getNameAsString());
List<StoreFileInfo> storeFiles = getStoreFiles(storePath);
if (storeFiles == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("No mob files under family: " + hcd.getNameAsString());
}
continue;
}
addReferenceFiles(visitor, regionData, familyData, storeFiles, true);
visitor.familyClose(regionData, familyData);
}
visitor.regionClose(regionData);
}
use of org.apache.hadoop.fs.Path in project hbase by apache.
the class SnapshotManifest method consolidate.
/**
 * Finalizes the snapshot manifest. For the V1 descriptor format a copy of the table
 * descriptor is written into the snapshot working directory; otherwise everything is
 * converted into a single V2 manifest file.
 *
 * @throws IOException if writing the descriptor or the V2 manifest fails
 */
public void consolidate() throws IOException {
  boolean v1Format = getSnapshotFormat(desc) == SnapshotManifestV1.DESCRIPTOR_VERSION;
  if (!v1Format) {
    LOG.debug("Convert to Single Snapshot Manifest");
    convertToV2SingleManifest();
    return;
  }
  LOG.info("Using old Snapshot Format");
  // write a copy of descriptor to the snapshot directory
  Path rootDir = FSUtils.getRootDir(conf);
  new FSTableDescriptors(conf, fs, rootDir)
      .createTableDescriptorForTableDirectory(workingDir, htd, false);
}
use of org.apache.hadoop.fs.Path in project hbase by apache.
the class CoprocessorWhitelistMasterObserver method verifyCoprocessors.
/**
 * Perform the validation checks for a coprocessor to determine if the path
 * is white listed or not.
 * @param ctx as passed in from the coprocessor
 * @param htd as passed in from the coprocessor
 * @throws IOException if a coprocessor path is not included in the whitelist or a failure
 *           occurs in processing
 */
private void verifyCoprocessors(ObserverContext<MasterCoprocessorEnvironment> ctx,
    HTableDescriptor htd) throws IOException {
  MasterServices services = ctx.getEnvironment().getMasterServices();
  Configuration conf = services.getConfiguration();
  Collection<String> paths = conf.getStringCollection(CP_COPROCESSOR_WHITELIST_PATHS_KEY);
  // Coprocessor specs are stored as table attributes "coprocessor$1", "coprocessor$2", ...
  // We only need the count; the spec itself is read from the table value below.
  int coprocCount = htd.getCoprocessors().size();
  for (int i = 0; i < coprocCount; i++) {
    String coprocSpec = Bytes.toString(htd.getValue(Bytes.toBytes("coprocessor$" + (i + 1))));
    if (coprocSpec == null) {
      continue;
    }
    // File path is the 1st field of the coprocessor spec.
    // Pattern.matcher() never returns null, so no null check is needed.
    Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(coprocSpec);
    if (!matcher.matches()) {
      continue;
    }
    String coprocPathStr = matcher.group(1).trim();
    // Check if coprocessor is being loaded via the classpath (i.e. no file path)
    if (coprocPathStr.isEmpty()) {
      // NOTE(review): 'break' stops validating the REMAINING coprocessors as well —
      // confirm whether a 'continue' (skip only this classpath-loaded one) was intended.
      break;
    }
    Path coprocPath = new Path(coprocPathStr);
    String coprocessorClass = matcher.group(2).trim();
    boolean foundPathMatch = false;
    for (String pathStr : paths) {
      Path wlPath = new Path(pathStr);
      try {
        foundPathMatch = validatePath(coprocPath, wlPath, conf);
        if (foundPathMatch) {
          LOG.debug(String.format("Coprocessor %s found in directory %s",
              coprocessorClass, pathStr));
          break;
        }
      } catch (IOException e) {
        // Best-effort: keep trying other whitelist entries, but preserve the cause in
        // the log instead of silently dropping it.
        LOG.warn(String.format("Failed to validate white list path %s for coprocessor path %s",
            pathStr, coprocPathStr), e);
      }
    }
    if (!foundPathMatch) {
      throw new IOException(String.format("Loading %s DENIED in %s",
          coprocessorClass, CP_COPROCESSOR_WHITELIST_PATHS_KEY));
    }
  }
}
Aggregations