Use of java.util.concurrent.ExecutionException in project Hadoop by Apache.
The class DFSClientCache, method getDfsInputStream.
FSDataInputStream getDfsInputStream(String userName, String inodePath) {
  DFSInputStreamCaheKey k = new DFSInputStreamCaheKey(userName, inodePath);
  FSDataInputStream s = null;
  try {
    s = inputstreamCache.get(k);
  } catch (ExecutionException e) {
    LOG.warn("Failed to create DFSInputStream for user:" + userName
        + " Cause:" + e);
  }
  return s;
}
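The catch block above reflects the contract of a Guava loading cache: if inputstreamCache is a LoadingCache whose loader throws a checked exception while opening the stream, get() reports that failure wrapped in an ExecutionException. A minimal, self-contained sketch of the same behaviour (the class name, key/value types, and size limit are illustrative, not the actual DFSClientCache configuration):

import java.io.IOException;
import java.util.concurrent.ExecutionException;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class LoadingCacheSketch {
  public static void main(String[] args) {
    // A loader that can fail with a checked exception, much like opening a
    // DFS input stream can; get() surfaces that failure as ExecutionException.
    LoadingCache<String, String> cache = CacheBuilder.newBuilder()
        .maximumSize(1024)  // illustrative bound, not the real configuration
        .build(new CacheLoader<String, String>() {
          @Override
          public String load(String path) throws IOException {
            throw new IOException("cannot open " + path);
          }
        });
    try {
      cache.get("/tmp/does-not-exist");
    } catch (ExecutionException e) {
      // The loader's IOException is preserved as the cause.
      System.out.println("load failed: " + e.getCause());
    }
  }
}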
Use of java.util.concurrent.ExecutionException in project Hadoop by Apache.
The class StorageLocationChecker, method check.
/**
 * Initiate a check of the supplied storage volumes and return
 * the list of healthy volumes.
 *
 * StorageLocations are returned in the same order as the input
 * for compatibility with existing unit tests.
 *
 * @param conf HDFS configuration.
 * @param dataDirs list of volumes to check.
 * @return the list of healthy volumes; volumes that failed the check
 *         are excluded.
 *
 * @throws InterruptedException if the check was interrupted.
 * @throws IOException if the number of failed volumes exceeds the
 *                     maximum allowed or if there are no good
 *                     volumes.
 */
public List<StorageLocation> check(final Configuration conf,
    final Collection<StorageLocation> dataDirs)
    throws InterruptedException, IOException {
  final HashMap<StorageLocation, Boolean> goodLocations = new LinkedHashMap<>();
  final Set<StorageLocation> failedLocations = new HashSet<>();
  final Map<StorageLocation, ListenableFuture<VolumeCheckResult>> futures =
      Maps.newHashMap();
  final LocalFileSystem localFS = FileSystem.getLocal(conf);
  final CheckContext context = new CheckContext(localFS, expectedPermission);
  // Start parallel disk check operations on all StorageLocations.
  for (StorageLocation location : dataDirs) {
    goodLocations.put(location, true);
    Optional<ListenableFuture<VolumeCheckResult>> olf =
        delegateChecker.schedule(location, context);
    if (olf.isPresent()) {
      futures.put(location, olf.get());
    }
  }
  if (maxVolumeFailuresTolerated >= dataDirs.size()) {
    throw new DiskErrorException("Invalid value configured for "
        + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
        + maxVolumeFailuresTolerated + ". Value configured is >= "
        + "to the number of configured volumes (" + dataDirs.size() + ").");
  }
  final long checkStartTimeMs = timer.monotonicNow();
  // Retrieve the results of the disk checks.
  for (Map.Entry<StorageLocation, ListenableFuture<VolumeCheckResult>> entry
      : futures.entrySet()) {
    // Determine how much time we can allow for this check to complete.
    // The cumulative wait time cannot exceed maxAllowedTimeForCheck.
    final long waitSoFarMs = (timer.monotonicNow() - checkStartTimeMs);
    final long timeLeftMs = Math.max(0, maxAllowedTimeForCheckMs - waitSoFarMs);
    final StorageLocation location = entry.getKey();
    try {
      final VolumeCheckResult result =
          entry.getValue().get(timeLeftMs, TimeUnit.MILLISECONDS);
      switch (result) {
        case HEALTHY:
          break;
        case DEGRADED:
          LOG.warn("StorageLocation {} appears to be degraded.", location);
          break;
        case FAILED:
          LOG.warn("StorageLocation {} detected as failed.", location);
          failedLocations.add(location);
          goodLocations.remove(location);
          break;
        default:
          LOG.error("Unexpected health check result {} for StorageLocation {}",
              result, location);
      }
    } catch (ExecutionException | TimeoutException e) {
      LOG.warn("Exception checking StorageLocation " + location, e.getCause());
      failedLocations.add(location);
      goodLocations.remove(location);
    }
  }
  if (failedLocations.size() > maxVolumeFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + goodLocations.size()
        + ", volumes configured: " + dataDirs.size()
        + ", volumes failed: " + failedLocations.size()
        + ", volume failures tolerated: " + maxVolumeFailuresTolerated);
  }
  if (goodLocations.size() == 0) {
    throw new DiskErrorException("All directories in "
        + DFS_DATANODE_DATA_DIR_KEY + " are invalid: " + failedLocations);
  }
  return new ArrayList<>(goodLocations.keySet());
}
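The try/catch around entry.getValue().get(...) handles two distinct outcomes: ExecutionException when the scheduled check itself failed (its own exception becomes the cause), and TimeoutException when the allotted time runs out. A small standalone sketch of that pattern with a Guava ListenableFuture (the class name, executor, and timeout value are invented for illustration):

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class VolumeCheckFutureSketch {
  public static void main(String[] args) throws InterruptedException {
    ListeningExecutorService executor =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    // Stand-in for a scheduled volume check that fails part-way through.
    ListenableFuture<String> check = executor.submit((Callable<String>) () -> {
      throw new IOException("disk check failed");
    });
    try {
      // A bounded wait, like get(timeLeftMs, TimeUnit.MILLISECONDS) above.
      check.get(500, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
      // The check's own exception is available via getCause().
      System.out.println("check failed: " + e.getCause());
    } catch (TimeoutException e) {
      System.out.println("check did not finish within the allowed time");
    } finally {
      executor.shutdown();
    }
  }
}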
Use of java.util.concurrent.ExecutionException in project Hadoop by Apache.
The class TestRPC, method testExternalCall.
@Test(timeout = 30000)
public void testExternalCall() throws Exception {
  final UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting("user123", new String[0]);
  final IOException expectedIOE = new IOException("boom");
  // use 1 handler so the callq can be plugged
  final Server server = setupTestServer(conf, 1);
  try {
    final AtomicBoolean result = new AtomicBoolean();
    ExternalCall<String> remoteUserCall =
        newExtCall(ugi, new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
    ExternalCall<String> exceptionCall =
        newExtCall(ugi, new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            throw expectedIOE;
          }
        });
    final CountDownLatch latch = new CountDownLatch(1);
    final CyclicBarrier barrier = new CyclicBarrier(2);
    ExternalCall<Void> barrierCall =
        newExtCall(ugi, new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            // notify we are in a handler and then wait to keep the callq
            // plugged up
            latch.countDown();
            barrier.await();
            return null;
          }
        });
    server.queueCall(barrierCall);
    server.queueCall(exceptionCall);
    server.queueCall(remoteUserCall);
    // wait for barrier call to enter the handler, check that the other 2
    // calls are actually queued
    latch.await();
    assertEquals(2, server.getCallQueueLen());
    // unplug the callq
    barrier.await();
    barrierCall.get();
    // verify correct ugi is used
    String answer = remoteUserCall.get();
    assertEquals(ugi.getUserName(), answer);
    try {
      exceptionCall.get();
      fail("didn't throw");
    } catch (ExecutionException ee) {
      assertTrue(ee.getCause() instanceof IOException);
      assertEquals(expectedIOE.getMessage(), ee.getCause().getMessage());
    }
  } finally {
    server.stop();
  }
}
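The assertion at the end of the test, unwrapping getCause() from the ExecutionException, is the standard way to verify that a queued call failed with the expected exception. The same pattern can be shown without an RPC server; a hypothetical JUnit sketch using a plain FutureTask (class and method names are invented):

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;

import org.junit.Test;

public class WrappedCauseAssertionSketch {
  @Test
  public void wrappedCauseIsTheOriginalException() throws Exception {
    final IOException expected = new IOException("boom");
    FutureTask<String> task = new FutureTask<>(() -> {
      throw expected;
    });
    // Complete the task exceptionally on the current thread.
    task.run();
    try {
      task.get();
      fail("didn't throw");
    } catch (ExecutionException ee) {
      // The original exception is preserved as the cause.
      assertTrue(ee.getCause() instanceof IOException);
      assertEquals(expected.getMessage(), ee.getCause().getMessage());
    }
  }
}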
Use of java.util.concurrent.ExecutionException in project Hadoop by Apache.
The class TestRPCServerShutdown, method testRPCServerShutdown.
/**
 * Verify that the RPC server can shut down properly when the callQueue is full.
 */
@Test(timeout = 30000)
public void testRPCServerShutdown() throws Exception {
  final int numClients = 3;
  final List<Future<Void>> res = new ArrayList<Future<Void>>();
  final ExecutorService executorService =
      Executors.newFixedThreadPool(numClients);
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  RPC.Builder builder = newServerBuilder(conf)
      .setQueueSizePerHandler(1).setNumHandlers(1).setVerbose(true);
  final Server server = setupTestServer(builder);
  final TestRpcService proxy = getClient(addr, conf);
  try {
    // Start another sleep RPC call to make reader thread block on CallQueue.
    for (int i = 0; i < numClients; i++) {
      res.add(executorService.submit(new Callable<Void>() {
        @Override
        public Void call() throws ServiceException, InterruptedException {
          proxy.sleep(null, newSleepRequest(100000));
          return null;
        }
      }));
    }
    while (server.getCallQueueLen() != 1
        || countThreads(CallQueueManager.class.getName()) != 1
        || countThreads(PBServerImpl.class.getName()) != 1) {
      Thread.sleep(100);
    }
  } finally {
    try {
      stop(server, proxy);
      assertEquals("Not enough clients", numClients, res.size());
      for (Future<Void> f : res) {
        try {
          f.get();
          fail("Future get should not return");
        } catch (ExecutionException e) {
          ServiceException se = (ServiceException) e.getCause();
          assertTrue("Unexpected exception: " + se,
              se.getCause() instanceof IOException);
          LOG.info("Expected exception", e.getCause());
        }
      }
    } finally {
      executorService.shutdown();
    }
  }
}
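Here the cause chain is two levels deep: Future.get() wraps whatever the Callable threw in an ExecutionException, and the Callable itself threw a ServiceException wrapping the underlying IOException. A rough standalone sketch of unwrapping such a nested chain (a plain RuntimeException stands in for ServiceException, and all names are illustrative):

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class NestedCauseSketch {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    // Stand-in for the failing RPC: the client-side stub wraps the transport
    // failure in a service-layer exception, and Future.get() wraps that again.
    Future<Void> f = executorService.submit((Callable<Void>) () -> {
      throw new RuntimeException("service layer failure",
          new IOException("connection reset"));
    });
    try {
      f.get();
    } catch (ExecutionException e) {
      Throwable serviceLayer = e.getCause();    // what the task threw
      Throwable root = serviceLayer.getCause(); // the underlying IOException
      System.out.println("root cause: " + root);
    } finally {
      executorService.shutdown();
    }
  }
}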
Use of java.util.concurrent.ExecutionException in project Hadoop by Apache.
The class KMSAudit, method op.
/**
 * Logs to the audit service a single operation on the KMS or on a key.
 *
 * @param opStatus
 *          The outcome of the audited event
 * @param op
 *          The operation being audited (either {@link KMS.KMSOp} or
 *          {@link Type}). N.B. this is passed as an {@link Object} to allow
 *          either enum to be passed in.
 * @param ugi
 *          The user's security context
 * @param key
 *          The String name of the key if applicable
 * @param remoteHost
 *          The hostname of the requesting service
 * @param extraMsg
 *          Any extra details for auditing
 */
private void op(final OpStatus opStatus, final Object op,
    final UserGroupInformation ugi, final String key, final String remoteHost,
    final String extraMsg) {
  final String user = ugi == null ? null : ugi.getShortUserName();
  if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key)
      && (op != null) && AGGREGATE_OPS_WHITELIST.contains(op)) {
    String cacheKey = createCacheKey(user, key, op);
    if (opStatus == OpStatus.UNAUTHORIZED) {
      cache.invalidate(cacheKey);
      logEvent(opStatus, new AuditEvent(op, ugi, key, remoteHost, extraMsg));
    } else {
      try {
        AuditEvent event = cache.get(cacheKey, new Callable<AuditEvent>() {
          @Override
          public AuditEvent call() throws Exception {
            return new AuditEvent(op, ugi, key, remoteHost, extraMsg);
          }
        });
        // Log first access (the access count starts at -1, so
        // incrementAndGet() == 0 implies first access)
        if (event.getAccessCount().incrementAndGet() == 0) {
          event.getAccessCount().incrementAndGet();
          logEvent(opStatus, event);
        }
      } catch (ExecutionException ex) {
        throw new RuntimeException(ex);
      }
    }
  } else {
    logEvent(opStatus, new AuditEvent(op, ugi, key, remoteHost, extraMsg));
  }
}
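With the two-argument form cache.get(key, valueLoader), Guava runs the Callable only on a cache miss and wraps any exception it throws in an ExecutionException; since this loader merely constructs an AuditEvent and cannot realistically fail, KMSAudit converts the unexpected wrapper into a RuntimeException. A minimal sketch of that API (class name, key format, and values are illustrative):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class CallableCacheGetSketch {
  public static void main(String[] args) {
    Cache<String, String> cache =
        CacheBuilder.newBuilder().maximumSize(100).build();
    try {
      // get(key, valueLoader): the loader runs only on a cache miss, and any
      // exception it throws is reported through ExecutionException.
      String value = cache.get("user#key#op", new Callable<String>() {
        @Override
        public String call() {
          return "new aggregated event";
        }
      });
      System.out.println(value);
    } catch (ExecutionException ex) {
      // As in KMSAudit.op(), a loader that cannot realistically fail makes
      // this an unexpected state, so rethrowing unchecked is reasonable.
      throw new RuntimeException(ex);
    }
  }
}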