use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
the class CatalogJanitor method cleanMergeRegion.
/**
 * If the merged region no longer holds references to its parent regions, archive the parent
 * regions on HDFS and delete the merge references from hbase:meta.
 * @param mergedRegion the region produced by the merge
 * @param regionA first parent region of the merge
 * @param regionB second parent region of the merge
 * @return true if the references in hbase:meta were deleted and the parent regions' files were
 *         archived on the file system
 * @throws IOException if the file system or hbase:meta cannot be accessed
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion, final HRegionInfo regionA,
    final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(this.services.getConfiguration(), fs,
        tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaTableAccessor.deleteMergeQualifiers(services.getConnection(), mergedRegion);
    services.getServerManager().removeRegion(regionA);
    services.getServerManager().removeRegion(regionB);
    FavoredNodesManager fnm = this.services.getFavoredNodesManager();
    if (fnm != null) {
      fnm.deleteFavoredNodesForRegions(Lists.newArrayList(regionA, regionB));
    }
    return true;
  }
  return false;
}
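For context, a caller would normally first recover the two parent regions recorded under the merge qualifiers in hbase:meta, and only then attempt the cleanup. The snippet below is a hypothetical caller sketch, loosely modeled on CatalogJanitor's merge-qualifier handling; it assumes `mergedRegion` and `services` are in scope and that `MetaTableAccessor.getRegionsFromMergeQualifier` is available in this HBase version.

// Hypothetical caller sketch (not copied from the project): look up the parent regions
// recorded in hbase:meta for this merged region, then attempt the cleanup.
Pair<HRegionInfo, HRegionInfo> parents = MetaTableAccessor.getRegionsFromMergeQualifier(
    services.getConnection(), mergedRegion.getRegionName());
if (parents != null && parents.getFirst() != null && parents.getSecond() != null) {
  boolean cleaned = cleanMergeRegion(mergedRegion, parents.getFirst(), parents.getSecond());
  LOG.debug("Merge cleanup for " + mergedRegion.getEncodedName() + " done: " + cleaned);
}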
use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
the class RestoreTool method checkLocalAndBackup.
/**
 * Copy the backup image to a temporary location if it resides on the local (restore target)
 * cluster, so the original image is not modified during restore.
 * @see HStore#bulkLoadHFile(String, long)
 * @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum)
 * @param tableArchivePath path to the table archive
 * @return the path to use for the restore: the original tableArchivePath, or the temporary copy
 *         if one was made
 * @throws IOException if copying the archive or cleaning the temporary path fails
 */
Path checkLocalAndBackup(Path tableArchivePath) throws IOException {
  // Copy the files if the archive is on the local (restore target) cluster
  boolean isCopyNeeded = false;
  FileSystem srcFs = tableArchivePath.getFileSystem(conf);
  FileSystem desFs = FileSystem.get(conf);
  if (tableArchivePath.getName().startsWith("/")) {
    isCopyNeeded = true;
  } else {
    if (srcFs.getUri().equals(desFs.getUri())) {
      LOG.debug("Cluster holds the backup image: " + srcFs.getUri() + "; local cluster node: "
          + desFs.getUri());
      isCopyNeeded = true;
    }
  }
  if (isCopyNeeded) {
    LOG.debug("File " + tableArchivePath + " is on the local cluster, back it up before restore");
    if (desFs.exists(restoreTmpPath)) {
      try {
        desFs.delete(restoreTmpPath, true);
      } catch (IOException e) {
        LOG.debug("Failed to delete path: " + restoreTmpPath
            + ", need to check whether restore target DFS cluster is healthy");
      }
    }
    FileUtil.copy(srcFs, tableArchivePath, desFs, restoreTmpPath, false, conf);
    LOG.debug("Copied to temporary path on local cluster: " + restoreTmpPath);
    tableArchivePath = restoreTmpPath;
  }
  return tableArchivePath;
}
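As a usage illustration, a restore flow would resolve the archive location for a table and pass it through checkLocalAndBackup before bulk loading, so HFiles are never moved out of the original backup image. The lines below are a hedged sketch; `getTableArchivePath` is assumed to be a helper on this class that resolves the archive directory, and `tableName` is a hypothetical TableName in scope.

// Hypothetical usage sketch (getTableArchivePath and tableName are assumptions):
Path tableArchivePath = getTableArchivePath(tableName);
Path safeArchivePath = checkLocalAndBackup(tableArchivePath);
// safeArchivePath now points either at the original archive or at restoreTmpPath,
// so the subsequent bulk load cannot move files out of the backup image.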
use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
the class MergeTableRegionsProcedure method cleanupMergedRegion.
/**
 * Clean up the merged region directory on the file system.
 * @param env the master procedure environment
 * @throws IOException if the cleanup fails
 */
private void cleanupMergedRegion(final MasterProcedureEnv env) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Path tabledir = FSUtils.getTableDir(mfs.getRootDir(), regionsToMerge[0].getTable());
  final FileSystem fs = mfs.getFileSystem();
  HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
      env.getMasterConfiguration(), fs, tabledir, regionsToMerge[0], false);
  regionFs.cleanupMergedRegion(mergedRegionInfo);
}
use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project hbase by apache.
the class MergeTableRegionsProcedure method createMergedRegion.
/**
 * Create the merged region on the file system by writing references to the store files of both
 * regions to merge, then committing the merged region directory.
 * @param env the master procedure environment
 * @throws IOException if creating or committing the merged region fails
 */
private void createMergedRegion(final MasterProcedureEnv env) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Path tabledir = FSUtils.getTableDir(mfs.getRootDir(), regionsToMerge[0].getTable());
  final FileSystem fs = mfs.getFileSystem();
  HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
      env.getMasterConfiguration(), fs, tabledir, regionsToMerge[0], false);
  regionFs.createMergesDir();
  mergeStoreFiles(env, regionFs, regionFs.getMergesDir());
  HRegionFileSystem regionFs2 = HRegionFileSystem.openRegionFromFileSystem(
      env.getMasterConfiguration(), fs, tabledir, regionsToMerge[1], false);
  mergeStoreFiles(env, regionFs2, regionFs.getMergesDir());
  regionFs.commitMergedRegion(mergedRegionInfo);
}
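The `mergeStoreFiles` helper called above is not shown in this snippet. A hedged sketch of what such a helper might look like follows: it walks a parent region's column families and store files and asks the HRegionFileSystem to write a merge reference for each into the merges directory. The `StoreFile` and `CacheConfig` constructor signatures are assumptions for this era of HBase and may differ between versions; this is not code copied from the project.

// Hypothetical sketch of a mergeStoreFiles helper (constructor signatures are assumptions):
private void mergeStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs,
    final Path mergedDir) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Configuration conf = env.getMasterConfiguration();
  final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
  for (String family : regionFs.getFamilies()) {
    final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
    final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
    if (storeFiles != null && storeFiles.size() > 0) {
      final CacheConfig cacheConf = new CacheConfig(conf, hcd);
      for (StoreFileInfo storeFileInfo : storeFiles) {
        // Write a reference to this store file into the merges directory of the merged region.
        regionFs.mergeStoreFile(mergedRegionInfo, family,
            new StoreFile(mfs.getFileSystem(), storeFileInfo, conf, cacheConf,
                hcd.getBloomFilterType()),
            mergedDir);
      }
    }
  }
}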
use of org.apache.hadoop.hbase.regionserver.HRegionFileSystem in project cdap by caskdata.
the class IncrementSummingScannerTest method createRegion.
static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId,
    HColumnDescriptor cfd) throws Exception {
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
  // Keep all versions and deleted cells so the IncrementHandler coprocessor can see every delta.
  cfd.setMaxVersions(Integer.MAX_VALUE);
  cfd.setKeepDeletedCells(true);
  htd.addFamily(cfd);
  htd.addCoprocessor(IncrementHandler.class.getName());
  HTableDescriptor desc = htd.build();
  String tableName = desc.getNameAsString();
  Path tablePath = new Path("/tmp/" + tableName);
  Path hlogPath = new Path("/tmp/hlog-" + tableName);
  FileSystem fs = FileSystem.get(hConf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, hConf);
  HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
  // Create the region directory layout on the file system and wrap it in an HRegion.
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath,
      regionInfo);
  return new HRegion(regionFS, hLog, hConf, desc, new MockRegionServerServices(hConf, null));
}
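For illustration, a test might use this helper roughly as below. This is a hedged sketch against the HBase 0.98-era API used above; `region.initialize()`, `Put.add`, and `RegionScanner.next` are assumed to be the applicable calls for that version, and the column family, row, and values are hypothetical.

// Hypothetical test usage (assumes the HBase 0.98-era API used by createRegion above):
HRegion region = createRegion(hConf, cConf, tableId, new HColumnDescriptor("i"));
try {
  region.initialize();
  Put p = new Put(Bytes.toBytes("row1"));
  p.add(Bytes.toBytes("i"), Bytes.toBytes("c"), Bytes.toBytes(1L));
  region.put(p);
  // Scan the region back through the coprocessor-aware scanner.
  RegionScanner scanner = region.getScanner(new Scan());
  List<Cell> results = new ArrayList<>();
  scanner.next(results);
} finally {
  region.close();
}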