Use of java.util.concurrent.ExecutionException in project hbase by apache: class HFileReplicator, method copyHFilesToStagingDir.
private Map<String, Path> copyHFilesToStagingDir() throws IOException {
  Map<String, Path> mapOfCopiedHFiles = new HashMap<>();
  Pair<byte[], List<String>> familyHFilePathsPair;
  List<String> hfilePaths;
  byte[] family;
  Path familyStagingDir;
  int familyHFilePathsPairsListSize;
  int totalNoOfHFiles;
  List<Pair<byte[], List<String>>> familyHFilePathsPairsList;
  FileSystem sourceFs = null;
  try {
    Path sourceClusterPath = new Path(sourceBaseNamespaceDirPath);
    /*
     * Path#getFileSystem will by default get the FS from the cache. If the source and sink
     * clusters use the same FS name service, it would return the peer cluster's FS. To avoid
     * this we explicitly disable loading the FS from the cache, so that a new FS is created
     * with the source cluster configuration.
     */
    String sourceScheme = sourceClusterPath.toUri().getScheme();
    String disableCacheName =
        String.format("fs.%s.impl.disable.cache", new Object[] { sourceScheme });
    sourceClusterConf.setBoolean(disableCacheName, true);
    sourceFs = sourceClusterPath.getFileSystem(sourceClusterConf);
    User user = userProvider.getCurrent();
    // For each table name in the map
    for (Entry<String, List<Pair<byte[], List<String>>>> tableEntry : bulkLoadHFileMap.entrySet()) {
      String tableName = tableEntry.getKey();
      // Create a staging directory for each table
      Path stagingDir = createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName));
      familyHFilePathsPairsList = tableEntry.getValue();
      familyHFilePathsPairsListSize = familyHFilePathsPairsList.size();
      // For each (family, hfile paths) pair in the table
      for (int i = 0; i < familyHFilePathsPairsListSize; i++) {
        familyHFilePathsPair = familyHFilePathsPairsList.get(i);
        family = familyHFilePathsPair.getFirst();
        hfilePaths = familyHFilePathsPair.getSecond();
        familyStagingDir = new Path(stagingDir, Bytes.toString(family));
        totalNoOfHFiles = hfilePaths.size();
        // For each list of hfile paths for the family
        List<Future<Void>> futures = new ArrayList<>();
        Callable<Void> c;
        Future<Void> future;
        int currentCopied = 0;
        // Copy the hfiles in parallel
        while (totalNoOfHFiles > currentCopied + this.copiesPerThread) {
          c = new Copier(sourceFs, familyStagingDir,
              hfilePaths.subList(currentCopied, currentCopied + this.copiesPerThread));
          future = exec.submit(c);
          futures.add(future);
          currentCopied += this.copiesPerThread;
        }
        int remaining = totalNoOfHFiles - currentCopied;
        if (remaining > 0) {
          c = new Copier(sourceFs, familyStagingDir,
              hfilePaths.subList(currentCopied, currentCopied + remaining));
          future = exec.submit(c);
          futures.add(future);
        }
        for (Future<Void> f : futures) {
          try {
            f.get();
          } catch (InterruptedException e) {
            InterruptedIOException iioe = new InterruptedIOException(
                "Failed to copy HFiles to local file system. This will be retried again by the source cluster.");
            iioe.initCause(e);
            throw iioe;
          } catch (ExecutionException e) {
            throw new IOException(
                "Failed to copy HFiles to local file system. This will be retried again by the source cluster.", e);
          }
        }
      }
      // Add the staging directory to this table. The staging directory contains all the hfiles
      // belonging to this table.
      mapOfCopiedHFiles.put(tableName, stagingDir);
    }
    return mapOfCopiedHFiles;
  } finally {
    if (sourceFs != null) {
      sourceFs.close();
    }
    if (exec != null) {
      exec.shutdown();
    }
  }
}
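The loop above hands the executor fixed-size sublists of HFile paths plus a final task for the remainder, then blocks on every future, converting InterruptedException into InterruptedIOException and ExecutionException into IOException. Below is a minimal, standalone sketch of that batching-and-wait pattern; the class name, task factory, and messages are illustrative stand-ins, not HBase's Copier API.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.function.Function;

public class BatchedCopySketch {

  // Split "paths" into batches of at most "batchSize" and submit one task per batch.
  static List<Future<Void>> submitInBatches(ExecutorService exec, List<String> paths,
      int batchSize, Function<List<String>, Callable<Void>> taskFactory) {
    List<Future<Void>> futures = new ArrayList<>();
    for (int start = 0; start < paths.size(); start += batchSize) {
      int end = Math.min(start + batchSize, paths.size());
      futures.add(exec.submit(taskFactory.apply(paths.subList(start, end))));
    }
    return futures;
  }

  // Wait for all batches, translating failures the same way the method above does.
  static void awaitAll(List<Future<Void>> futures) throws IOException {
    for (Future<Void> f : futures) {
      try {
        f.get();
      } catch (InterruptedException e) {
        InterruptedIOException iioe = new InterruptedIOException("Copy interrupted");
        iioe.initCause(e);
        throw iioe;
      } catch (ExecutionException e) {
        // The task's real failure is the cause; wrap it so callers see an IOException.
        throw new IOException("Copy failed", e);
      }
    }
  }
}

Using a single start/end loop with Math.min covers the remainder batch without the separate trailing branch, while producing the same sublists.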
Use of java.util.concurrent.ExecutionException in project hbase by apache: class HFileCorruptionChecker, method checkTableDir.
/**
 * Check all the regiondirs in the specified tableDir
 *
 * @param tableDir path to a table
 * @throws IOException
 */
void checkTableDir(Path tableDir) throws IOException {
  List<FileStatus> rds = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
  if (rds == null) {
    if (!fs.exists(tableDir)) {
      LOG.warn("Table Directory " + tableDir
          + " does not exist. Likely due to concurrent delete. Skipping.");
      missing.add(tableDir);
    }
    return;
  }
  // Parallelize check at the region dir level
  List<RegionDirChecker> rdcs = new ArrayList<>(rds.size() + 1);
  List<Future<Void>> rdFutures;
  for (FileStatus rdFs : rds) {
    Path rdDir = rdFs.getPath();
    RegionDirChecker work = new RegionDirChecker(rdDir);
    rdcs.add(work);
  }
  // add mob region
  rdcs.add(createMobRegionDirChecker(tableDir));
  // Submit and wait for completion
  try {
    rdFutures = executor.invokeAll(rdcs);
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
    LOG.warn("Region dirs checking interrupted!", ie);
    return;
  }
  for (int i = 0; i < rdFutures.size(); i++) {
    Future<Void> f = rdFutures.get(i);
    try {
      f.get();
    } catch (ExecutionException e) {
      LOG.warn("Failed to quarantine an HFile in regiondir " + rdcs.get(i).regionDir, e.getCause());
      // rethrow IOExceptions
      if (e.getCause() instanceof IOException) {
        throw (IOException) e.getCause();
      }
      // rethrow RuntimeExceptions
      if (e.getCause() instanceof RuntimeException) {
        throw (RuntimeException) e.getCause();
      }
      // this should never happen
      LOG.error("Unexpected exception encountered", e);
      // bailing out.
      return;
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
      LOG.warn("Region dirs check interrupted!", ie);
      // bailing out
      return;
    }
  }
}
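The catch block above shows a common way to surface a worker's real failure: inspect ExecutionException#getCause and rethrow it under its own type. Here is a hedged sketch of that unwrapping as a reusable helper; unlike the method above, the unexpected case is wrapped in an IOException rather than logged and skipped.

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class FutureUnwrapSketch {

  // Get a future's result, rethrowing the underlying IOException or RuntimeException
  // instead of the ExecutionException wrapper.
  static <V> V getUnwrapped(Future<V> future) throws IOException, InterruptedException {
    try {
      return future.get();
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;        // checked I/O failures keep their type
      }
      if (cause instanceof RuntimeException) {
        throw (RuntimeException) cause;   // unchecked failures propagate as-is
      }
      throw new IOException("Unexpected task failure", cause);
    }
  }
}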
Use of java.util.concurrent.ExecutionException in project hbase by apache: class PerformanceEvaluation, method doLocalClients.
/*
 * Run all clients in this vm, each in its own thread.
 */
static RunResult[] doLocalClients(final TestOptions opts, final Configuration conf)
    throws IOException, InterruptedException {
  final Class<? extends Test> cmd = determineCommandClass(opts.cmdName);
  assert cmd != null;
  @SuppressWarnings("unchecked")
  Future<RunResult>[] threads = new Future[opts.numClientThreads];
  RunResult[] results = new RunResult[opts.numClientThreads];
  ExecutorService pool = Executors.newFixedThreadPool(opts.numClientThreads,
      new ThreadFactoryBuilder().setNameFormat("TestClient-%s").build());
  final Connection con = ConnectionFactory.createConnection(conf);
  for (int i = 0; i < threads.length; i++) {
    final int index = i;
    threads[i] = pool.submit(new Callable<RunResult>() {
      @Override
      public RunResult call() throws Exception {
        TestOptions threadOpts = new TestOptions(opts);
        if (threadOpts.startRow == 0) {
          threadOpts.startRow = index * threadOpts.perClientRunRows;
        }
        RunResult run = runOneClient(cmd, conf, con, threadOpts, new Status() {
          @Override
          public void setStatus(final String msg) throws IOException {
            LOG.info(msg);
          }
        });
        LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration
            + "ms over " + threadOpts.perClientRunRows + " rows");
        return run;
      }
    });
  }
  pool.shutdown();
  for (int i = 0; i < threads.length; i++) {
    try {
      results[i] = threads[i].get();
    } catch (ExecutionException e) {
      throw new IOException(e.getCause());
    }
  }
  final String test = cmd.getSimpleName();
  LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(results));
  Arrays.sort(results);
  long total = 0;
  for (RunResult result : results) {
    total += result.duration;
  }
  LOG.info("[" + test + "]" + "\tMin: " + results[0] + "ms" + "\tMax: "
      + results[results.length - 1] + "ms" + "\tAvg: " + (total / results.length) + "ms");
  con.close();
  return results;
}
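The structure here is a fixed pool with one Callable per client, pool.shutdown() once everything is submitted, and then a blocking get() per future with the cause rethrown as an IOException. The following is a self-contained sketch of that fan-out shape with a placeholder workload; nothing in it is PerformanceEvaluation's API.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelClientsSketch {

  // Fan out numThreads placeholder workloads, then collect their durations.
  static long[] runClients(int numThreads) throws IOException, InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(numThreads);
    @SuppressWarnings("unchecked")
    Future<Long>[] futures = new Future[numThreads];
    for (int i = 0; i < numThreads; i++) {
      Callable<Long> client = () -> {
        long start = System.currentTimeMillis();
        // ... per-client work would run here ...
        return System.currentTimeMillis() - start;
      };
      futures[i] = pool.submit(client);
    }
    pool.shutdown();                         // no new tasks; submitted ones keep running
    long[] durations = new long[numThreads];
    for (int i = 0; i < numThreads; i++) {
      try {
        durations[i] = futures[i].get();
      } catch (ExecutionException e) {
        throw new IOException(e.getCause()); // surface the client's real failure
      }
    }
    return durations;
  }
}

Calling shutdown() before get() is safe: already-submitted tasks still run to completion, the pool simply stops accepting new work.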
Use of java.util.concurrent.ExecutionException in project hbase by apache: class TwoConcurrentActionPolicy, method runOneIteration.
@Override
protected void runOneIteration() {
  Action actionOne = PolicyBasedChaosMonkey.selectRandomItem(actionsOne);
  Action actionTwo = PolicyBasedChaosMonkey.selectRandomItem(actionsTwo);
  Future fOne = executor.submit(new ActionRunner(actionOne));
  Future fTwo = executor.submit(new ActionRunner(actionTwo));
  try {
    fOne.get();
    fTwo.get();
  } catch (InterruptedException e) {
    LOG.warn("Exception occurred during performing action: " + StringUtils.stringifyException(e));
  } catch (ExecutionException ex) {
    LOG.warn("Exception occurred during performing action: " + StringUtils.stringifyException(ex));
  }
}
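Both futures are awaited and any failure is only logged, so a failed action does not abort the policy's next iteration. A small sketch of that "run two tasks, wait for both, log and continue" shape is shown below, with the interrupt flag explicitly restored; generic Callables stand in for the chaos monkey's ActionRunner, and the log calls are plain stderr placeholders.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class TwoConcurrentTasksSketch {
  private static final ExecutorService executor = Executors.newFixedThreadPool(2);

  // Run two tasks concurrently; failures are logged, never propagated.
  static void runBoth(Callable<Void> one, Callable<Void> two) {
    Future<Void> fOne = executor.submit(one);
    Future<Void> fTwo = executor.submit(two);
    try {
      fOne.get();
      fTwo.get();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();    // restore the interrupt flag
      System.err.println("Interrupted while waiting for actions: " + e);
    } catch (ExecutionException e) {
      System.err.println("Action failed: " + e.getCause());
    }
  }
}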
Use of java.util.concurrent.ExecutionException in project hbase by apache: class SnapshotManifestV1, method loadRegionManifests.
static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf,
    final Executor executor, final FileSystem fs, final Path snapshotDir,
    final SnapshotDescription desc) throws IOException {
  FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
  if (regions == null) {
    LOG.debug("No regions under directory:" + snapshotDir);
    return null;
  }
  final ExecutorCompletionService<SnapshotRegionManifest> completionService =
      new ExecutorCompletionService<>(executor);
  for (final FileStatus region : regions) {
    completionService.submit(new Callable<SnapshotRegionManifest>() {
      @Override
      public SnapshotRegionManifest call() throws IOException {
        HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, region.getPath());
        return buildManifestFromDisk(conf, fs, snapshotDir, hri);
      }
    });
  }
  ArrayList<SnapshotRegionManifest> regionsManifest = new ArrayList<>(regions.length);
  try {
    for (int i = 0; i < regions.length; ++i) {
      regionsManifest.add(completionService.take().get());
    }
  } catch (InterruptedException e) {
    throw new InterruptedIOException(e.getMessage());
  } catch (ExecutionException e) {
    IOException ex = new IOException();
    ex.initCause(e.getCause());
    throw ex;
  }
  return regionsManifest;
}
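ExecutorCompletionService decouples submission order from completion order: take() hands back futures as their tasks finish, so the loop above consumes each region manifest as soon as it is ready. A minimal sketch of that pattern with placeholder work (not the snapshot manifest API) follows.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorCompletionService;

public class CompletionServiceSketch {

  // Submit one task per input and drain the results in completion order.
  static List<String> processAll(Executor executor, List<String> inputs) throws IOException {
    ExecutorCompletionService<String> cs = new ExecutorCompletionService<>(executor);
    for (final String input : inputs) {
      cs.submit(() -> "processed:" + input);   // placeholder for the per-region work
    }
    List<String> results = new ArrayList<>(inputs.size());
    try {
      for (int i = 0; i < inputs.size(); i++) {
        results.add(cs.take().get());          // take() blocks until some task completes
      }
    } catch (InterruptedException e) {
      throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
      // Mirror the method above: rethrow the task's cause wrapped in an IOException.
      throw new IOException("Task failed", e.getCause());
    }
    return results;
  }
}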