Example 21 with ExecutionException

Use of java.util.concurrent.ExecutionException in project hadoop by apache.

The class DFSClientCache, method getDfsInputStream:

FSDataInputStream getDfsInputStream(String userName, String inodePath) {
    DFSInputStreamCaheKey k = new DFSInputStreamCaheKey(userName, inodePath);
    FSDataInputStream s = null;
    try {
        s = inputstreamCache.get(k);
    } catch (ExecutionException e) {
        // The cache wraps the loader's failure in ExecutionException;
        // log it and fall through to return null to the caller.
        LOG.warn("Failed to create DFSInputStream for user:" + userName + " Cause:" + e);
    }
    return s;
}
Also used: FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) ExecutionException(java.util.concurrent.ExecutionException)
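The catch above is dictated by the cache API: Guava's LoadingCache.get() wraps any checked exception thrown by the CacheLoader in an ExecutionException. A minimal, self-contained sketch of that contract follows; the string key and failing loader are illustrative, not Hadoop code.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.io.IOException;
import java.util.concurrent.ExecutionException;

public class LoadingCacheSketch {
    public static void main(String[] args) {
        // A loader that fails with a checked exception, the way opening
        // a DFS input stream might.
        LoadingCache<String, String> cache = CacheBuilder.newBuilder().build(
                new CacheLoader<String, String>() {
                    @Override
                    public String load(String path) throws IOException {
                        throw new IOException("cannot open " + path);
                    }
                });
        try {
            cache.get("/some/path");
        } catch (ExecutionException e) {
            // The loader's IOException arrives as the cause.
            System.out.println("load failed, cause: " + e.getCause());
        }
    }
}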

Example 22 with ExecutionException

Use of java.util.concurrent.ExecutionException in project hadoop by apache.

The class StorageLocationChecker, method check:

/**
   * Initiate a check of the supplied storage volumes and return
   * a list of failed volumes.
   *
   * StorageLocations are returned in the same order as the input
   * for compatibility with existing unit tests.
   *
   * @param conf HDFS configuration.
   * @param dataDirs list of volumes to check.
   * @return a list of failed volumes, or the empty list if there
   *         are no failed volumes.
   *
   * @throws InterruptedException if the check was interrupted.
   * @throws IOException if the number of failed volumes exceeds the
   *                     maximum allowed or if there are no good
   *                     volumes.
   */
public List<StorageLocation> check(final Configuration conf, final Collection<StorageLocation> dataDirs) throws InterruptedException, IOException {
    final HashMap<StorageLocation, Boolean> goodLocations = new LinkedHashMap<>();
    final Set<StorageLocation> failedLocations = new HashSet<>();
    final Map<StorageLocation, ListenableFuture<VolumeCheckResult>> futures = Maps.newHashMap();
    final LocalFileSystem localFS = FileSystem.getLocal(conf);
    final CheckContext context = new CheckContext(localFS, expectedPermission);
    // Start parallel disk check operations on all StorageLocations.
    for (StorageLocation location : dataDirs) {
        goodLocations.put(location, true);
        Optional<ListenableFuture<VolumeCheckResult>> olf = delegateChecker.schedule(location, context);
        if (olf.isPresent()) {
            futures.put(location, olf.get());
        }
    }
    if (maxVolumeFailuresTolerated >= dataDirs.size()) {
        throw new DiskErrorException("Invalid value configured for " + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - " + maxVolumeFailuresTolerated + ". Value configured is >= " + "to the number of configured volumes (" + dataDirs.size() + ").");
    }
    final long checkStartTimeMs = timer.monotonicNow();
    // Retrieve the results of the disk checks.
    for (Map.Entry<StorageLocation, ListenableFuture<VolumeCheckResult>> entry : futures.entrySet()) {
        // Determine how much time we can allow for this check to complete.
        // The cumulative wait time cannot exceed maxAllowedTimeForCheck.
        final long waitSoFarMs = (timer.monotonicNow() - checkStartTimeMs);
        final long timeLeftMs = Math.max(0, maxAllowedTimeForCheckMs - waitSoFarMs);
        final StorageLocation location = entry.getKey();
        try {
            final VolumeCheckResult result = entry.getValue().get(timeLeftMs, TimeUnit.MILLISECONDS);
            switch(result) {
                case HEALTHY:
                    break;
                case DEGRADED:
                    LOG.warn("StorageLocation {} appears to be degraded.", location);
                    break;
                case FAILED:
                    LOG.warn("StorageLocation {} detected as failed.", location);
                    failedLocations.add(location);
                    goodLocations.remove(location);
                    break;
                default:
                    LOG.error("Unexpected health check result {} for StorageLocation {}", result, location);
            }
        } catch (ExecutionException | TimeoutException e) {
            LOG.warn("Exception checking StorageLocation " + location, e.getCause());
            failedLocations.add(location);
            goodLocations.remove(location);
        }
    }
    if (failedLocations.size() > maxVolumeFailuresTolerated) {
        throw new DiskErrorException("Too many failed volumes - " + "current valid volumes: " + goodLocations.size() + ", volumes configured: " + dataDirs.size() + ", volumes failed: " + failedLocations.size() + ", volume failures tolerated: " + maxVolumeFailuresTolerated);
    }
    if (goodLocations.size() == 0) {
        throw new DiskErrorException("All directories in " + DFS_DATANODE_DATA_DIR_KEY + " are invalid: " + failedLocations);
    }
    return new ArrayList<>(goodLocations.keySet());
}
Also used: CheckContext(org.apache.hadoop.hdfs.server.datanode.StorageLocation.CheckContext) DiskErrorException(org.apache.hadoop.util.DiskChecker.DiskErrorException) ArrayList(java.util.ArrayList) LinkedHashMap(java.util.LinkedHashMap) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) ExecutionException(java.util.concurrent.ExecutionException) HashMap(java.util.HashMap) Map(java.util.Map) HashSet(java.util.HashSet) TimeoutException(java.util.concurrent.TimeoutException)
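The retrieval loop above spreads one overall deadline across every pending future: each get() waits only for whatever remains of maxAllowedTimeForCheckMs, so a single slow disk cannot stall the whole check. A minimal sketch of the same cumulative-timeout pattern with plain java.util.concurrent futures; the pool size, task durations, and 500 ms budget are made up for illustration.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class DeadlineSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<String>> futures = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            final int id = i;
            futures.add(pool.submit(() -> {
                Thread.sleep(id * 200L); // simulate checks of varying cost
                return "volume-" + id;
            }));
        }
        final long budgetMs = 500; // analogous to maxAllowedTimeForCheckMs
        final long startNs = System.nanoTime();
        for (Future<String> f : futures) {
            long waitedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
            long leftMs = Math.max(0, budgetMs - waitedMs);
            try {
                // Each wait is capped by what remains of the shared budget.
                System.out.println("healthy: " + f.get(leftMs, TimeUnit.MILLISECONDS));
            } catch (ExecutionException | TimeoutException e) {
                System.out.println("failed: " + e); // treat as a failed volume
            }
        }
        pool.shutdownNow();
    }
}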

Example 23 with ExecutionException

Use of java.util.concurrent.ExecutionException in project hadoop by apache.

The class TestRPC, method testExternalCall:

@Test(timeout = 30000)
public void testExternalCall() throws Exception {
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting("user123", new String[0]);
    final IOException expectedIOE = new IOException("boom");
    // use 1 handler so the callq can be plugged
    final Server server = setupTestServer(conf, 1);
    try {
        final AtomicBoolean result = new AtomicBoolean();
        ExternalCall<String> remoteUserCall = newExtCall(ugi, new PrivilegedExceptionAction<String>() {

            @Override
            public String run() throws Exception {
                return UserGroupInformation.getCurrentUser().getUserName();
            }
        });
        ExternalCall<String> exceptionCall = newExtCall(ugi, new PrivilegedExceptionAction<String>() {

            @Override
            public String run() throws Exception {
                throw expectedIOE;
            }
        });
        final CountDownLatch latch = new CountDownLatch(1);
        final CyclicBarrier barrier = new CyclicBarrier(2);
        ExternalCall<Void> barrierCall = newExtCall(ugi, new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                // notify we are in a handler and then wait to keep the callq
                // plugged up
                latch.countDown();
                barrier.await();
                return null;
            }
        });
        server.queueCall(barrierCall);
        server.queueCall(exceptionCall);
        server.queueCall(remoteUserCall);
        // wait for barrier call to enter the handler, check that the other 2
        // calls are actually queued
        latch.await();
        assertEquals(2, server.getCallQueueLen());
        // unplug the callq
        barrier.await();
        barrierCall.get();
        // verify correct ugi is used
        String answer = remoteUserCall.get();
        assertEquals(ugi.getUserName(), answer);
        try {
            exceptionCall.get();
            fail("didn't throw");
        } catch (ExecutionException ee) {
            assertTrue((ee.getCause()) instanceof IOException);
            assertEquals(expectedIOE.getMessage(), ee.getCause().getMessage());
        }
    } finally {
        server.stop();
    }
}
Also used: InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) ServiceException(com.google.protobuf.ServiceException) AuthorizationException(org.apache.hadoop.security.authorize.AuthorizationException) SocketTimeoutException(java.net.SocketTimeoutException) ConnectException(java.net.ConnectException) HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) ExecutionException(java.util.concurrent.ExecutionException) AccessControlException(org.apache.hadoop.security.AccessControlException) CyclicBarrier(java.util.concurrent.CyclicBarrier) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
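The try/fail/catch at the end is the standard way to assert that a deferred computation failed for the expected reason: get() rethrows the failure as an ExecutionException, and the original exception is preserved as its cause. A standalone sketch of that unwrap-and-assert pattern using a plain FutureTask (ExternalCall is Hadoop-specific, so it is not used here):

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;

public class CauseAssertSketch {
    public static void main(String[] args) throws InterruptedException {
        final IOException expected = new IOException("boom");
        FutureTask<String> task = new FutureTask<>(() -> {
            throw expected; // the task fails with a checked exception
        });
        task.run(); // completes the task exceptionally
        try {
            task.get();
            throw new AssertionError("didn't throw");
        } catch (ExecutionException ee) {
            // get() wraps the task's exception; the original is the cause.
            if (!(ee.getCause() instanceof IOException)
                    || !expected.getMessage().equals(ee.getCause().getMessage())) {
                throw new AssertionError("unexpected cause: " + ee.getCause());
            }
            System.out.println("cause verified: " + ee.getCause());
        }
    }
}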

Example 24 with ExecutionException

Use of java.util.concurrent.ExecutionException in project hadoop by apache.

The class TestRPCServerShutdown, method testRPCServerShutdown:

/**
   *  Verify the RPC server can shutdown properly when callQueue is full.
   */
@Test(timeout = 30000)
public void testRPCServerShutdown() throws Exception {
    final int numClients = 3;
    final List<Future<Void>> res = new ArrayList<Future<Void>>();
    final ExecutorService executorService = Executors.newFixedThreadPool(numClients);
    conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
    RPC.Builder builder = newServerBuilder(conf).setQueueSizePerHandler(1).setNumHandlers(1).setVerbose(true);
    final Server server = setupTestServer(builder);
    final TestRpcService proxy = getClient(addr, conf);
    try {
        // Start another sleep RPC call to make reader thread block on CallQueue.
        for (int i = 0; i < numClients; i++) {
            res.add(executorService.submit(new Callable<Void>() {

                @Override
                public Void call() throws ServiceException, InterruptedException {
                    proxy.sleep(null, newSleepRequest(100000));
                    return null;
                }
            }));
        }
        while (server.getCallQueueLen() != 1 || countThreads(CallQueueManager.class.getName()) != 1 || countThreads(PBServerImpl.class.getName()) != 1) {
            Thread.sleep(100);
        }
    } finally {
        try {
            stop(server, proxy);
            assertEquals("Not enough clients", numClients, res.size());
            for (Future<Void> f : res) {
                try {
                    f.get();
                    fail("Future get should not return");
                } catch (ExecutionException e) {
                    ServiceException se = (ServiceException) e.getCause();
                    assertTrue("Unexpected exception: " + se, se.getCause() instanceof IOException);
                    LOG.info("Expected exception", e.getCause());
                }
            }
        } finally {
            executorService.shutdown();
        }
    }
}
Also used: ArrayList(java.util.ArrayList) IOException(java.io.IOException) Callable(java.util.concurrent.Callable) ServiceException(com.google.protobuf.ServiceException) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) ExecutionException(java.util.concurrent.ExecutionException) Test(org.junit.Test)
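Each pending client future fails once the server stops, and the test unwraps two layers: Future.get() wraps the RPC failure in an ExecutionException, whose cause is a ServiceException, whose cause in turn is the underlying IOException. A standalone sketch of walking such a cause chain; the RuntimeException below stands in for the protobuf ServiceException, which is not on every classpath.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CauseChainSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Callable<Void> rpcCall = () -> {
            // Simulate a stub-layer wrapper around a transport failure.
            throw new RuntimeException("stub layer",
                    new IOException("connection reset by server shutdown"));
        };
        Future<Void> f = pool.submit(rpcCall);
        try {
            f.get();
            throw new AssertionError("Future get should not return");
        } catch (ExecutionException e) {
            Throwable wrapper = e.getCause();   // the stub-layer exception
            Throwable root = wrapper.getCause(); // the transport IOException
            if (!(root instanceof IOException)) {
                throw new AssertionError("unexpected root cause: " + root);
            }
            System.out.println("expected failure: " + wrapper);
        } finally {
            pool.shutdown();
        }
    }
}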

Example 25 with ExecutionException

Use of java.util.concurrent.ExecutionException in project hadoop by apache.

The class KMSAudit, method op:

/**
   * Logs to the audit service a single operation on the KMS or on a key.
   *
   * @param opStatus
   *          The outcome of the audited event
   * @param op
   *          The operation being audited (either {@link KMS.KMSOp} or
   *          {@link Type} N.B this is passed as an {@link Object} to allow
   *          either enum to be passed in.
   * @param ugi
   *          The user's security context
   * @param key
   *          The String name of the key if applicable
   * @param remoteHost
   *          The hostname of the requesting service
   * @param extraMsg
   *          Any extra details for auditing
   */
private void op(final OpStatus opStatus, final Object op, final UserGroupInformation ugi, final String key, final String remoteHost, final String extraMsg) {
    final String user = ugi == null ? null : ugi.getShortUserName();
    if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key) && (op != null) && AGGREGATE_OPS_WHITELIST.contains(op)) {
        String cacheKey = createCacheKey(user, key, op);
        if (opStatus == OpStatus.UNAUTHORIZED) {
            cache.invalidate(cacheKey);
            logEvent(opStatus, new AuditEvent(op, ugi, key, remoteHost, extraMsg));
        } else {
            try {
                AuditEvent event = cache.get(cacheKey, new Callable<AuditEvent>() {

                    @Override
                    public AuditEvent call() throws Exception {
                        return new AuditEvent(op, ugi, key, remoteHost, extraMsg);
                    }
                });
                // The access count starts at -1, so incrementAndGet() == 0 implies first access.
                if (event.getAccessCount().incrementAndGet() == 0) {
                    event.getAccessCount().incrementAndGet();
                    logEvent(opStatus, event);
                }
            } catch (ExecutionException ex) {
                throw new RuntimeException(ex);
            }
        }
    } else {
        logEvent(opStatus, new AuditEvent(op, ugi, key, remoteHost, extraMsg));
    }
}
Also used: AuditEvent(org.apache.hadoop.crypto.key.kms.server.KMSAuditLogger.AuditEvent) ExecutionException(java.util.concurrent.ExecutionException)
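cache.get(key, valueLoader) is the Guava Cache idiom for compute-if-absent: the Callable runs only when the key is missing, and any checked exception it throws comes back wrapped in an ExecutionException. Because the loader above merely constructs an AuditEvent and cannot fail, KMSAudit treats that path as a programming error and rethrows unchecked. A minimal sketch of the idiom; the key and value strings are illustrative.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.ExecutionException;

public class CacheGetSketch {
    public static void main(String[] args) {
        Cache<String, String> cache = CacheBuilder.newBuilder().build();
        try {
            // The Callable runs once for a missing key; later calls with
            // the same key return the cached value without invoking it.
            String event = cache.get("user#key#OP", () -> "new audit event");
            System.out.println(event);
        } catch (ExecutionException ex) {
            // Unreachable with a loader that cannot fail; mirror KMSAudit
            // and surface it as an unchecked error.
            throw new RuntimeException(ex);
        }
    }
}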

Aggregations

ExecutionException (java.util.concurrent.ExecutionException) 1341
IOException (java.io.IOException) 367
Test (org.junit.Test) 335
TimeoutException (java.util.concurrent.TimeoutException) 258
ArrayList (java.util.ArrayList) 237
Future (java.util.concurrent.Future) 218
ExecutorService (java.util.concurrent.ExecutorService) 152
CountDownLatch (java.util.concurrent.CountDownLatch) 103
List (java.util.List) 98
CancellationException (java.util.concurrent.CancellationException) 98
Callable (java.util.concurrent.Callable) 97
Test (org.testng.annotations.Test) 78
HashMap (java.util.HashMap) 69
Map (java.util.Map) 65
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 64
RejectedExecutionException (java.util.concurrent.RejectedExecutionException) 63
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 56
ParallelTest (com.hazelcast.test.annotation.ParallelTest) 47
QuickTest (com.hazelcast.test.annotation.QuickTest) 47
UncheckedExecutionException (com.google.common.util.concurrent.UncheckedExecutionException) 46