Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
Class MergeTableRegionsProcedure, method cleanupMergedRegion.
/**
 * Clean up a merged region on rollback after failure.
 */
private void cleanupMergedRegion(final MasterProcedureEnv env) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  TableName tn = this.regionsToMerge[0].getTable();
  final Path tabledir = CommonFSUtils.getTableDir(mfs.getRootDir(), tn);
  final FileSystem fs = mfs.getFileSystem();
  // See createMergedRegion above where we specify the merge dir as being in the
  // FIRST merge parent region.
  HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
    env.getMasterConfiguration(), fs, tabledir, regionsToMerge[0], false);
  regionFs.cleanupMergedRegion(mergedRegion);
}
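For context, the same open-then-clean pattern can be exercised outside the procedure. Below is a minimal sketch that reuses only the calls shown above (same imports as the snippet); the helper name and its arguments are assumptions for illustration, not code from MergeTableRegionsProcedure.

// Sketch of a hypothetical rollback helper: the merge dir lives under the
// FIRST parent, so open that parent's region filesystem and clean from there.
static void rollbackMergedRegion(Configuration conf, MasterFileSystem mfs,
    RegionInfo firstParent, RegionInfo mergedChild) throws IOException {
  Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), firstParent.getTable());
  HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
    conf, mfs.getFileSystem(), tableDir, firstParent, false); // read-write
  regionFs.cleanupMergedRegion(mergedChild); // remove the partially written merged region dir
}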
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
Class CatalogJanitor, method cleanMergeRegion.
/**
 * If the merged region no longer holds references to its parent (merge) regions, archive the
 * parent regions on HDFS and delete their references from hbase:meta.
 * @return true if we delete references in the merged region from hbase:meta and archive the
 *         files on the file system
 */
private boolean cleanMergeRegion(final RegionInfo mergedRegion, List<RegionInfo> parents)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Cleaning merged region {}", mergedRegion);
  }
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = CommonFSUtils.getTableDir(rootdir, mergedRegion.getTable());
  TableDescriptor htd = getDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(this.services.getConfiguration(), fs,
      tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Deleting parents ({}) from fs; merged child {} no longer holds references",
        parents.stream().map(r -> RegionInfo.getShortNameToLog(r))
          .collect(Collectors.joining(", ")), mergedRegion);
    }
    ProcedureExecutor<MasterProcedureEnv> pe = this.services.getMasterProcedureExecutor();
    GCMultipleMergedRegionsProcedure mergeRegionProcedure =
      new GCMultipleMergedRegionsProcedure(pe.getEnvironment(), mergedRegion, parents);
    pe.submitProcedure(mergeRegionProcedure);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Submitted procedure {} for merged region {}", mergeRegionProcedure, mergedRegion);
    }
    return true;
  }
  return false;
}
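The decision above hinges on HRegionFileSystem.hasReferences(htd): the parents are only garbage-collected once the merged child no longer references their files. A minimal sketch of that check in isolation, reusing the calls from the snippet above (the helper name is an assumption for illustration):

// Sketch only: does the merged child still reference its parents' files?
static boolean mergedChildHasReferences(Configuration conf, FileSystem fs, Path rootDir,
    RegionInfo mergedRegion, TableDescriptor htd) throws IOException {
  Path tableDir = CommonFSUtils.getTableDir(rootDir, mergedRegion.getTable());
  try {
    // Open read-only (last argument true); we only inspect the on-disk layout.
    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
      conf, fs, tableDir, mergedRegion, true);
    return regionFs.hasReferences(htd);
  } catch (IOException e) {
    return false; // region directory already gone, nothing left to reference
  }
}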
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
Class MajorCompactionTTLRequest, method getStoresRequiringCompaction.
Map<String, Long> getStoresRequiringCompaction(TableDescriptor htd) throws IOException {
  HRegionFileSystem fileSystem = getFileSystem();
  Map<String, Long> familyTTLMap = Maps.newHashMap();
  for (ColumnFamilyDescriptor descriptor : htd.getColumnFamilies()) {
    long ts = getColFamilyCutoffTime(descriptor);
    // If the table's TTL is forever, let's not compact any of the regions.
    if (ts > 0 && shouldCFBeCompacted(fileSystem, descriptor.getNameAsString(), ts)) {
      familyTTLMap.put(descriptor.getNameAsString(), ts);
    }
  }
  return familyTTLMap;
}
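getColFamilyCutoffTime is not shown here; conceptually it converts the family's TTL into a wall-clock cutoff, returning a non-positive value when the TTL is FOREVER so the family is skipped. A rough sketch of that idea, assuming the standard ColumnFamilyDescriptor.getTimeToLive() and HConstants.FOREVER semantics (an illustration, not the actual implementation):

// Illustrative only: derive a cutoff timestamp (ms) from a family's TTL (seconds).
static long ttlCutoffMillis(ColumnFamilyDescriptor descriptor) {
  int ttlSeconds = descriptor.getTimeToLive();
  if (ttlSeconds == HConstants.FOREVER) {
    return -1; // never expires, so never TTL-compact this family
  }
  return System.currentTimeMillis() - ttlSeconds * 1000L;
}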
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
Class MajorCompactionRequest, method getStoresRequiringCompaction.
Set<String> getStoresRequiringCompaction(Set<String> requestedStores, long timestamp)
    throws IOException {
  HRegionFileSystem fileSystem = getFileSystem();
  Set<String> familiesToCompact = Sets.newHashSet();
  for (String family : requestedStores) {
    if (shouldCFBeCompacted(fileSystem, family, timestamp)) {
      familiesToCompact.add(family);
    }
  }
  return familiesToCompact;
}
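shouldCFBeCompacted (not shown) inspects the family's store files through the HRegionFileSystem handle. The sketch below illustrates one plausible check of that kind, assuming HRegionFileSystem.getStoreFiles and StoreFileInfo.getModificationTime; it is not the actual predicate used by MajorCompactionRequest.

// Sketch only: does any store file in this family predate the given timestamp?
static boolean familyHasOldStoreFile(HRegionFileSystem regionFs, String family, long timestamp)
    throws IOException {
  Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
  if (storeFiles == null) {
    return false; // family directory does not exist
  }
  for (StoreFileInfo storeFile : storeFiles) {
    if (storeFile.getModificationTime() < timestamp) {
      return true; // at least one file is older than the cutoff
    }
  }
  return false;
}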
Use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
Class RestoreSnapshotHelper, method cloneRegion.
/**
 * Clone region directory content from the snapshot info.
 *
 * Each region is encoded with the table name, so the cloned region will have
 * a different region name.
 *
 * Instead of copying the hfiles, an HFileLink is created.
 *
 * @param regionDir {@link Path} to the cloned region directory
 * @param snapshotRegionInfo {@link RegionInfo} of the region in the snapshot
 */
private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir,
    final RegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest)
    throws IOException {
  final String tableName = tableDesc.getTableName().getNameAsString();
  final String snapshotName = snapshotDesc.getName();
  for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) {
    Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8());
    List<StoreFileInfo> clonedFiles = new ArrayList<>();
    for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) {
      LOG.info("Adding HFileLink " + storeFile.getName() + " from cloned region "
        + "in snapshot " + snapshotName + " to table=" + tableName);
      if (MobUtils.isMobRegionInfo(newRegionInfo)) {
        String mobFileName = HFileLink.createHFileLinkName(snapshotRegionInfo,
          storeFile.getName());
        Path mobPath = new Path(familyDir, mobFileName);
        if (fs.exists(mobPath)) {
          fs.delete(mobPath, true);
        }
        restoreStoreFile(familyDir, snapshotRegionInfo, storeFile, createBackRefs);
      } else {
        String file = restoreStoreFile(familyDir, snapshotRegionInfo, storeFile, createBackRefs);
        clonedFiles.add(new StoreFileInfo(conf, fs, new Path(familyDir, file), true));
      }
    }
    // we don't need to track files under mobdir
    if (!MobUtils.isMobRegionInfo(newRegionInfo)) {
      Path regionPath = new Path(tableDir, newRegionInfo.getEncodedName());
      HRegionFileSystem regionFS = fs.exists(regionPath)
        ? HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false)
        : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo);
      Configuration sftConf = StoreUtils.createStoreConfiguration(conf, tableDesc,
        tableDesc.getColumnFamily(familyFiles.getFamilyName().toByteArray()));
      StoreFileTracker tracker = StoreFileTrackerFactory.create(sftConf, true,
        StoreContext.getBuilder()
          .withFamilyStoreDirectoryPath(familyDir)
          .withRegionFileSystem(regionFS)
          .build());
      tracker.set(clonedFiles);
    }
  }
}
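The open-or-create step above is the standard way to obtain an HRegionFileSystem for a region directory that may or may not exist yet. Pulled out on its own it looks like the sketch below; the helper name is an assumption, while the calls are the same ones used in cloneRegion.

// Sketch: open the region's on-disk layout if present, otherwise create it.
static HRegionFileSystem openOrCreateRegionFs(Configuration conf, FileSystem fs,
    Path tableDir, RegionInfo regionInfo) throws IOException {
  Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
  return fs.exists(regionDir)
    ? HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, false)
    : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo);
}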