use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.
the class SnapshotManager method checkSnapshotSupport.
/**
* Called at startup to verify whether snapshot operations are supported, and to avoid
* starting the master if snapshots are present but the cleaners they need are missing.
* Otherwise we can end up with snapshot data loss.
* @param conf The {@link Configuration} object to use
* @param mfs The MasterFileSystem to use
* @throws IOException in case of file-system operation failure
* @throws UnsupportedOperationException in case cleaners are missing and
* there are snapshots in the system
*/
private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs) throws IOException, UnsupportedOperationException {
// Verify if snapshot is disabled by the user
String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
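// userDisabled is true only when the property was explicitly set to a value that does not parse as true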
boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);
// Extract cleaners from conf
Set<String> hfileCleaners = new HashSet<>();
String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
if (cleaners != null) {
Collections.addAll(hfileCleaners, cleaners);
}
Set<String> logCleaners = new HashSet<>();
cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
if (cleaners != null) {
Collections.addAll(logCleaners, cleaners);
}
// check if an older version of snapshot directory was present
Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
FileSystem fs = mfs.getFileSystem();
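// Note: rootDir is a SnapshotManager field (the HBase root directory); oldSnapshotDir
// is already absolute, so resolving it against rootDir leaves it unchanged.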
List<SnapshotDescription> ss = getCompletedSnapshots(new Path(rootDir, oldSnapshotDir));
if (ss != null && !ss.isEmpty()) {
LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
LOG.error("Please rename the directory as " + HConstants.SNAPSHOT_DIR_NAME);
}
// If the user has enabled snapshots, we force the cleaners to be present;
// otherwise we still need to check whether cleaners are enabled and verify
// that there are no snapshots in the .snapshot folder.
if (snapshotEnabled) {
// Inject snapshot cleaners, if snapshot.enable is true
hfileCleaners.add(SnapshotHFileCleaner.class.getName());
hfileCleaners.add(HFileLinkCleaner.class.getName());
// Set cleaners conf
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, hfileCleaners.toArray(new String[hfileCleaners.size()]));
conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, logCleaners.toArray(new String[logCleaners.size()]));
} else {
// Verify if cleaners are present
snapshotEnabled = hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) && hfileCleaners.contains(HFileLinkCleaner.class.getName());
// Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
if (snapshotEnabled) {
LOG.warn("Snapshot log and hfile cleaners are present in the configuration, " + "but the '" + HBASE_SNAPSHOT_ENABLED + "' property " + (userDisabled ? "is set to 'false'." : "is not set."));
}
}
// Mark snapshot feature as enabled if cleaners are present and user has not disabled it.
this.isSnapshotSupported = snapshotEnabled && !userDisabled;
// If cleaners are not enabled, verify that there are no snapshots in the .snapshot folder,
// otherwise we end up with snapshot data loss.
if (!snapshotEnabled) {
LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
if (fs.exists(snapshotDir)) {
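// (CompletedSnaphotDirectoriesFilter is spelled this way in the actual HBase class name.)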
FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir, new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
if (snapshots != null) {
LOG.error("Snapshots are present, but cleaners are not enabled.");
checkSnapshotSupport();
}
}
}
}
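For context, here is a minimal sketch of the configuration this check inspects. The string keys below mirror the constants used above (HBASE_SNAPSHOT_ENABLED, HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS); the snippet is illustrative, not part of SnapshotManager.

// Illustrative fragment; assumes the usual org.apache.hadoop.hbase imports.
Configuration conf = HBaseConfiguration.create();
// Option 1: enable snapshots explicitly; checkSnapshotSupport() then injects
// SnapshotHFileCleaner and HFileLinkCleaner into the cleaner plugin lists itself.
conf.setBoolean("hbase.snapshot.enabled", true);
// Option 2: leave the flag unset but list both cleaners yourself; the method
// then treats snapshots as supported (and logs a warning about the unset flag).
conf.setStrings("hbase.master.hfilecleaner.plugins",
    SnapshotHFileCleaner.class.getName(), HFileLinkCleaner.class.getName());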
use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.
the class SplitTableRegionProcedure method splitStoreFiles.
/**
* Split each of the parent region's store files, creating reference files for
* the two daughter regions.
* @param env MasterProcedureEnv
* @param regionFs the file system of the parent region
* @return a pair with the number of store files created for daughter A and daughter B
* @throws IOException
*/
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Configuration conf = env.getMasterConfiguration();
// The following code sets up a thread pool executor with as many slots as
// there are files to split. It then fires up everything, waits for
// completion and finally checks for any exceptions.
//
// Note: splitStoreFiles creates daughter region dirs under the parent splits dir.
// Nothing to unroll here on failure -- re-running createSplitsDir will
// clean this up.
int nbFiles = 0;
for (String family : regionFs.getFamilies()) {
Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
if (storeFiles != null) {
nbFiles += storeFiles.size();
}
}
if (nbFiles == 0) {
// no files need to be split.
return new Pair<>(0, 0);
}
// Default max #threads to use is the smaller of table's configured number of blocking store
// files or the available number of logical cores.
int defMaxThreads = Math.min(conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT), Runtime.getRuntime().availableProcessors());
// Max #threads is the smaller of the number of storefiles or the default max determined above.
int maxThreads = Math.min(conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
LOG.info("Preparing to split " + nbFiles + " storefiles for region " + parentHRI + " using " + maxThreads + " threads");
ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
List<Future<Pair<Path, Path>>> futures = new ArrayList<>(nbFiles);
// Split each store file.
final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
for (String family : regionFs.getFamilies()) {
final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
if (storeFiles != null && storeFiles.size() > 0) {
final CacheConfig cacheConf = new CacheConfig(conf, hcd);
for (StoreFileInfo storeFileInfo : storeFiles) {
StoreFileSplitter sfs = new StoreFileSplitter(regionFs, family.getBytes(), new StoreFile(mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()));
futures.add(threadPool.submit(sfs));
}
}
}
// Shutdown the pool
threadPool.shutdown();
// Wait for all the tasks to finish
long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000);
try {
boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
if (stillRunning) {
threadPool.shutdownNow();
// wait for the thread to shutdown completely.
while (!threadPool.isTerminated()) {
Thread.sleep(50);
}
throw new IOException("Took too long to split the" + " files and create the references, aborting split");
}
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
}
int daughterA = 0;
int daughterB = 0;
// Look for any exception
for (Future<Pair<Path, Path>> future : futures) {
try {
Pair<Path, Path> p = future.get();
daughterA += p.getFirst() != null ? 1 : 0;
daughterB += p.getSecond() != null ? 1 : 0;
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
} catch (ExecutionException e) {
throw new IOException(e);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("Split storefiles for region " + parentHRI + " Daughter A: " + daughterA + " storefiles, Daughter B: " + daughterB + " storefiles.");
}
return new Pair<>(daughterA, daughterB);
}
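The fan-out logic above (bounded pool, shutdown, timed await, drain the futures) is a reusable pattern. Below is a self-contained sketch of that pattern, not HBase code; generic Callable tasks stand in for StoreFileSplitter, and names such as BoundedSplitPattern, runAll and timeoutMs are illustrative.

// A sketch of the pattern splitStoreFiles uses: size a fixed pool by the number
// of work items, submit everything, shut the pool down, bound the wait with a
// timeout, then drain futures and translate task failures into IOException.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class BoundedSplitPattern {
  static <T> List<T> runAll(List<Callable<T>> tasks, int maxThreads, long timeoutMs)
      throws IOException, InterruptedException {
    if (tasks.isEmpty()) {
      // mirrors the nbFiles == 0 early return above
      return new ArrayList<>();
    }
    ExecutorService pool = Executors.newFixedThreadPool(Math.min(maxThreads, tasks.size()));
    List<Future<T>> futures = new ArrayList<>(tasks.size());
    for (Callable<T> task : tasks) {
      futures.add(pool.submit(task));
    }
    pool.shutdown(); // accept no new tasks; already-submitted ones keep running
    if (!pool.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS)) {
      pool.shutdownNow();
      throw new IOException("Tasks did not finish within " + timeoutMs + " ms");
    }
    List<T> results = new ArrayList<>(futures.size());
    for (Future<T> f : futures) {
      try {
        results.add(f.get()); // tasks are done; get() only surfaces task exceptions
      } catch (ExecutionException e) {
        throw new IOException(e); // same error translation as splitStoreFiles
      }
    }
    return results;
  }
}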
use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.
the class MasterDDLOperationHelper method deleteColumnFamilyFromFileSystem.
/**
* Remove the column family from the file system.
*/
public static void deleteColumnFamilyFromFileSystem(final MasterProcedureEnv env, final TableName tableName, List<HRegionInfo> regionInfoList, final byte[] familyName, boolean hasMob) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
if (LOG.isDebugEnabled()) {
LOG.debug("Removing family=" + Bytes.toString(familyName) + " from table=" + tableName);
}
if (regionInfoList == null) {
regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, tableName);
}
for (HRegionInfo hri : regionInfoList) {
// Delete the family directory in FS for all the regions one by one
mfs.deleteFamilyFromFS(hri, familyName);
}
if (hasMob) {
// Delete the mob region
Path mobRootDir = new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME);
HRegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
mfs.deleteFamilyFromFS(mobRootDir, mobRegionInfo, familyName);
}
}
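A hypothetical call site, for example from a delete-column-family procedure step; the table and family names below are made up, and env is the MasterProcedureEnv handed to the running procedure.

// Hypothetical invocation; only the signature shown above is assumed.
MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(
    env, // the MasterProcedureEnv of the running procedure
    TableName.valueOf("my_table"), // hypothetical table
    null, // null => regions are fetched from meta via ProcedureSyncWait
    Bytes.toBytes("cf_to_drop"), // hypothetical family
    false); // set true only if the family stored MOB data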
use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.
the class SnapshotTestingUtils method deleteArchiveDirectory.
public static void deleteArchiveDirectory(final HBaseTestingUtility util) throws IOException {
// Ensure the archive directory is empty
MasterFileSystem mfs = util.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path archiveDir = new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
mfs.getFileSystem().delete(archiveDir, true);
}
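Typical usage is a one-liner in test setup or teardown, assuming util is the test's HBaseTestingUtility:

SnapshotTestingUtils.deleteArchiveDirectory(util);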
use of org.apache.hadoop.hbase.master.MasterFileSystem in project hbase by apache.
the class SnapshotTestingUtils method corruptSnapshot.
/**
* Corrupt the specified snapshot by deleting some of its store files.
*
* @param util {@link HBaseTestingUtility}
* @param snapshotName name of the snapshot to corrupt
* @return list of the names of the corrupted HFiles
* @throws IOException on unexpected error reading the FS
*/
public static ArrayList<String> corruptSnapshot(final HBaseTestingUtility util, final String snapshotName) throws IOException {
final MasterFileSystem mfs = util.getHBaseCluster().getMaster().getMasterFileSystem();
final FileSystem fs = mfs.getFileSystem();
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, mfs.getRootDir());
HBaseProtos.SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
final TableName table = TableName.valueOf(snapshotDesc.getTable());
final ArrayList<String> corruptedFiles = new ArrayList<>();
final Configuration conf = util.getConfiguration();
SnapshotReferenceUtil.visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, new SnapshotReferenceUtil.StoreFileVisitor() {
@Override
public void storeFile(final HRegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
String region = regionInfo.getEncodedName();
String hfile = storeFile.getName();
HFileLink link = HFileLink.build(conf, table, region, family, hfile);
if (corruptedFiles.size() % 2 == 0) {
fs.delete(link.getAvailablePath(fs), true);
corruptedFiles.add(hfile);
}
}
});
assertTrue(corruptedFiles.size() > 0);
return corruptedFiles;
}
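A sketch of how a test might use this, with a hypothetical snapshot name; after the call, at least one store file referenced by the snapshot is gone, so a clone or restore of it is expected to fail.

// Hypothetical test fragment; "util" is the test's HBaseTestingUtility.
ArrayList<String> corrupted = SnapshotTestingUtils.corruptSnapshot(util, "snapshot_test");
assertTrue(corrupted.size() > 0); // the helper asserts this internally as well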