Example 31 with ExecutionException

use of java.util.concurrent.ExecutionException in project hadoop by apache.

the class TestFSDownload method testDirDownload.

@Test(timeout = 10000)
public void testDirDownload() throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    FileContext files = FileContext.getLocalFSFileContext(conf);
    final Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));
    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
    Map<LocalResource, LocalResourceVisibility> rsrcVis = new HashMap<LocalResource, LocalResourceVisibility>();
    Random rand = new Random();
    long sharedSeed = rand.nextLong();
    rand.setSeed(sharedSeed);
    System.out.println("SEED: " + sharedSeed);
    Map<LocalResource, Future<Path>> pending = new HashMap<LocalResource, Future<Path>>();
    ExecutorService exec = HadoopExecutors.newSingleThreadExecutor();
    LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName());
    for (int i = 0; i < 5; ++i) {
        LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
        if (i % 2 == 1) {
            vis = LocalResourceVisibility.APPLICATION;
        }
        Path p = new Path(basedir, "dir" + i + ".jar");
        LocalResource rsrc = createJar(files, p, vis);
        rsrcVis.put(rsrc, vis);
        Path destPath = dirs.getLocalPathForWrite(basedir.toString(), conf);
        destPath = new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet()));
        FSDownload fsd = new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
        pending.put(rsrc, exec.submit(fsd));
    }
    exec.shutdown();
    // Spin until the executor terminates; every future is complete after this.
    while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS)) ;
    for (Future<Path> path : pending.values()) {
        Assert.assertTrue(path.isDone());
    }
    try {
        for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
            Path localized = p.getValue().get();
            FileStatus status = files.getFileStatus(localized);
            System.out.println("Testing path " + localized);
            // Use JUnit assertions rather than the assert keyword so the
            // checks also run when the JVM is started without -ea.
            Assert.assertTrue(status.isDirectory());
            Assert.assertTrue(rsrcVis.containsKey(p.getKey()));
            verifyPermsRecursively(localized.getFileSystem(conf), files, localized, rsrcVis.get(p.getKey()));
        }
    } catch (ExecutionException e) {
        throw new IOException("Failed exec", e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) IOException(java.io.IOException) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) LocalResourceVisibility(org.apache.hadoop.yarn.api.records.LocalResourceVisibility) Random(java.util.Random) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) LocalDirAllocator(org.apache.hadoop.fs.LocalDirAllocator) ExecutionException(java.util.concurrent.ExecutionException) Map(java.util.Map) ConcurrentMap(java.util.concurrent.ConcurrentMap) FileContext(org.apache.hadoop.fs.FileContext) Test(org.junit.Test)
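
Distilled, the handling above follows a fixed shape: submit the work, wait for the executor to drain, then call get() on each future and translate any ExecutionException into the checked type the caller expects. A minimal standalone sketch of that shape, assuming nothing from Hadoop (the names downloadAll and tasks are illustrative):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class DownloadAllSketch {

    static List<String> downloadAll(List<Callable<String>> tasks)
            throws IOException, InterruptedException {
        ExecutorService exec = Executors.newSingleThreadExecutor();
        List<Future<String>> pending = new ArrayList<>();
        for (Callable<String> task : tasks) {
            pending.add(exec.submit(task));
        }
        exec.shutdown();
        // Once the executor has terminated, every future is complete.
        while (!exec.awaitTermination(1, TimeUnit.SECONDS)) ;
        List<String> results = new ArrayList<>();
        try {
            for (Future<String> f : pending) {
                results.add(f.get()); // never blocks here
            }
        } catch (ExecutionException e) {
            // Unwrap into the checked type declared by this method.
            throw new IOException("Failed exec", e.getCause());
        }
        return results;
    }
}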

Example 32 with ExecutionException

use of java.util.concurrent.ExecutionException in project hadoop by apache.

the class ContainerLocalizer method createStatus.

/**
   * Create the payload for the heartbeat: mainly the list of
   * {@link LocalResourceStatus}es for the pending resources.
   *
   * @return a {@link LocalizerStatus} that can be sent via heartbeat.
   * @throws InterruptedException if the thread is interrupted.
   */
private LocalizerStatus createStatus() throws InterruptedException {
    final List<LocalResourceStatus> currentResources = new ArrayList<LocalResourceStatus>();
    // TODO: Synchronization??
    for (Iterator<LocalResource> i = pendingResources.keySet().iterator(); i.hasNext(); ) {
        LocalResource rsrc = i.next();
        LocalResourceStatus stat = recordFactory.newRecordInstance(LocalResourceStatus.class);
        stat.setResource(rsrc);
        Future<Path> fPath = pendingResources.get(rsrc);
        if (fPath.isDone()) {
            try {
                Path localPath = fPath.get();
                stat.setLocalPath(URL.fromPath(localPath));
                stat.setLocalSize(FileUtil.getDU(new File(localPath.getParent().toUri())));
                stat.setStatus(ResourceStatusType.FETCH_SUCCESS);
            } catch (ExecutionException e) {
                stat.setStatus(ResourceStatusType.FETCH_FAILURE);
                stat.setException(SerializedException.newInstance(e.getCause()));
            } catch (CancellationException e) {
                stat.setStatus(ResourceStatusType.FETCH_FAILURE);
                stat.setException(SerializedException.newInstance(e));
            }
            // TODO shouldn't remove until ACK
            i.remove();
        } else {
            stat.setStatus(ResourceStatusType.FETCH_PENDING);
        }
        currentResources.add(stat);
    }
    LocalizerStatus status = recordFactory.newRecordInstance(LocalizerStatus.class);
    status.setLocalizerId(localizerId);
    status.addAllResources(currentResources);
    return status;
}
Also used : Path(org.apache.hadoop.fs.Path) CancellationException(java.util.concurrent.CancellationException) ArrayList(java.util.ArrayList) LocalizerStatus(org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus) ExecutionException(java.util.concurrent.ExecutionException) LocalResourceStatus(org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus) File(java.io.File) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource)
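
This heartbeat code shows the non-blocking variant of the pattern: isDone() guards get(), so the three terminal outcomes can be mapped to a status without ever waiting. A minimal sketch of that mapping, with an illustrative Status enum standing in for ResourceStatusType:

import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

class FetchPoller {

    enum Status { PENDING, SUCCESS, FAILURE }

    static Status poll(Future<?> future) throws InterruptedException {
        if (!future.isDone()) {
            return Status.PENDING; // report progress, check again next beat
        }
        try {
            future.get(); // non-blocking: the future is already done
            return Status.SUCCESS;
        } catch (ExecutionException e) {
            // The task itself threw; the real failure is e.getCause().
            return Status.FAILURE;
        } catch (CancellationException e) {
            // The task was cancelled; there is no wrapped cause to unwrap.
            return Status.FAILURE;
        }
    }
}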

Example 33 with ExecutionException

use of java.util.concurrent.ExecutionException in project hadoop by apache.

the class S3ABlockOutputStream method putObject.

/**
   * Upload the current block as a single PUT request; if the buffer
   * is empty a 0-byte PUT will be invoked, as it is needed to create an
   * entry at the far end.
   * @throws IOException any problem.
   */
private void putObject() throws IOException {
    LOG.debug("Executing regular upload for {}", writeOperationHelper);
    final S3ADataBlocks.DataBlock block = getActiveBlock();
    int size = block.dataSize();
    final S3ADataBlocks.BlockUploadData uploadData = block.startUpload();
    final PutObjectRequest putObjectRequest = uploadData.hasFile()
        ? writeOperationHelper.newPutRequest(uploadData.getFile())
        : writeOperationHelper.newPutRequest(uploadData.getUploadStream(), size);
    fs.setOptionalPutRequestParameters(putObjectRequest);
    long transferQueueTime = now();
    BlockUploadProgress callback = new BlockUploadProgress(block, progressListener, transferQueueTime);
    putObjectRequest.setGeneralProgressListener(callback);
    statistics.blockUploadQueued(size);
    ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() {

        @Override
        public PutObjectResult call() throws Exception {
            PutObjectResult result;
            try {
                // the putObject call automatically closes the input
                // stream afterwards.
                result = writeOperationHelper.putObject(putObjectRequest);
            } finally {
                closeAll(LOG, uploadData, block);
            }
            return result;
        }
    });
    clearActiveBlock();
    // wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload", ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw extractException("regular upload", key, ee);
    }
}
Also used : PutObjectResult(com.amazonaws.services.s3.model.PutObjectResult) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) AmazonClientException(com.amazonaws.AmazonClientException) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest)
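
Here the wait is blocking, and the two checked failure modes are treated differently: InterruptedException restores the interrupt flag, while ExecutionException is unwrapped into an IOException. A simplified stand-in for that logic (Hadoop's actual extractException helper does more, and unlike putObject() above this sketch also rethrows on interruption so the caller always sees a failure):

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

class UploadWaiter {

    static <T> T waitForUpload(Future<T> future) throws IOException {
        try {
            return future.get();
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // preserve interrupt status
            throw new IOException("Interrupted waiting for upload", ie);
        } catch (ExecutionException ee) {
            Throwable cause = ee.getCause();
            if (cause instanceof IOException) {
                throw (IOException) cause; // rethrow the original failure
            }
            throw new IOException("Upload failed", cause);
        }
    }
}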

Example 34 with ExecutionException

use of java.util.concurrent.ExecutionException in project hadoop by apache.

the class ITestS3ADeleteManyFiles method testBulkRenameAndDelete.

/**
   * CAUTION: If this test starts failing, please make sure that the
   * {@link org.apache.hadoop.fs.s3a.Constants#MAX_THREADS} configuration is not
   * set too low. Alternatively, consider reducing the
   * <code>scale.test.operation.count</code> parameter in
   * <code>getOperationCount()</code>.
   *
   * @see #getOperationCount()
   */
@Test
public void testBulkRenameAndDelete() throws Throwable {
    final Path scaleTestDir = path("testBulkRenameAndDelete");
    final Path srcDir = new Path(scaleTestDir, "src");
    final Path finalDir = new Path(scaleTestDir, "final");
    final long count = getOperationCount();
    final S3AFileSystem fs = getFileSystem();
    ContractTestUtils.rm(fs, scaleTestDir, true, false);
    fs.mkdirs(srcDir);
    fs.mkdirs(finalDir);
    int testBufferSize = fs.getConf().getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE, ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
    // use Executor to speed up file creation
    ExecutorService exec = Executors.newFixedThreadPool(16);
    final ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(exec);
    try {
        final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');
        for (int i = 0; i < count; ++i) {
            final String fileName = "foo-" + i;
            completionService.submit(new Callable<Boolean>() {

                @Override
                public Boolean call() throws IOException {
                    ContractTestUtils.createFile(fs, new Path(srcDir, fileName), false, data);
                    return fs.exists(new Path(srcDir, fileName));
                }
            });
        }
        for (int i = 0; i < count; ++i) {
            final Future<Boolean> future = completionService.take();
            try {
                if (!future.get()) {
                    LOG.warn("cannot create file");
                }
            } catch (ExecutionException e) {
                LOG.warn("Error while uploading file", e.getCause());
                throw e;
            }
        }
    } finally {
        exec.shutdown();
    }
    int nSrcFiles = fs.listStatus(srcDir).length;
    fs.rename(srcDir, finalDir);
    assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
    ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename", new Path(srcDir, "foo-" + 0));
    ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename", new Path(srcDir, "foo-" + count / 2));
    ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename", new Path(srcDir, "foo-" + (count - 1)));
    ContractTestUtils.assertPathExists(fs, "not renamed to dest dir", new Path(finalDir, "foo-" + 0));
    ContractTestUtils.assertPathExists(fs, "not renamed to dest dir", new Path(finalDir, "foo-" + count / 2));
    ContractTestUtils.assertPathExists(fs, "not renamed to dest dir", new Path(finalDir, "foo-" + (count - 1)));
    ContractTestUtils.assertDeleted(fs, finalDir, true, false);
}
Also used : Path(org.apache.hadoop.fs.Path) S3AFileSystem(org.apache.hadoop.fs.s3a.S3AFileSystem) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) IOException(java.io.IOException) ExecutorService(java.util.concurrent.ExecutorService) ExecutionException(java.util.concurrent.ExecutionException) Test(org.junit.Test)
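
The key move in this test is ExecutorCompletionService: submit n tasks, then call take() exactly n times, so results are consumed in completion order and ExecutionException is handled at the single point where get() is called. A minimal sketch of that loop (the even/odd "work" is a placeholder):

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class CompletionLoop {

    static int countSuccesses(int n) throws InterruptedException, ExecutionException {
        ExecutorService exec = Executors.newFixedThreadPool(4);
        CompletionService<Boolean> cs = new ExecutorCompletionService<>(exec);
        try {
            for (int i = 0; i < n; i++) {
                final int id = i;
                cs.submit(() -> id % 2 == 0); // placeholder for real work
            }
            int ok = 0;
            for (int i = 0; i < n; i++) {
                Future<Boolean> f = cs.take(); // blocks for the next finished task
                try {
                    if (f.get()) {
                        ok++;
                    }
                } catch (ExecutionException e) {
                    System.err.println("task failed: " + e.getCause());
                    throw e; // propagate after logging the real cause
                }
            }
            return ok;
        } finally {
            exec.shutdown();
        }
    }
}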

Example 35 with ExecutionException

use of java.util.concurrent.ExecutionException in project hbase by apache.

the class HBaseInterClusterReplicationEndpoint method replicate.

/**
   * Do the shipping logic: ship this context's WAL entries to the peer
   * cluster, retrying until they are all replicated or the endpoint stops.
   */
@Override
public boolean replicate(ReplicateContext replicateContext) {
    CompletionService<Integer> pool = new ExecutorCompletionService<>(this.exec);
    List<Entry> entries = replicateContext.getEntries();
    String walGroupId = replicateContext.getWalGroupId();
    int sleepMultiplier = 1;
    int numReplicated = 0;
    if (!peersSelected && this.isRunning()) {
        connectToPeers();
        peersSelected = true;
    }
    int numSinks = replicationSinkMgr.getNumSinks();
    if (numSinks == 0) {
        LOG.warn("No replication sinks found, returning without replicating. The source should retry" + " with the same set of edits.");
        return false;
    }
    // minimum of: configured threads, number of 100-waledit batches,
    //  and number of current sinks
    int n = Math.min(Math.min(this.maxThreads, entries.size() / 100 + 1), numSinks);
    List<List<Entry>> entryLists = new ArrayList<>(n);
    if (n == 1) {
        entryLists.add(entries);
    } else {
        for (int i = 0; i < n; i++) {
            entryLists.add(new ArrayList<>(entries.size() / n + 1));
        }
        // now group by region
        for (Entry e : entries) {
            entryLists.get(Math.abs(Bytes.hashCode(e.getKey().getEncodedRegionName()) % n)).add(e);
        }
    }
    while (this.isRunning() && !exec.isShutdown()) {
        if (!isPeerEnabled()) {
            if (sleepForRetries("Replication is disabled", sleepMultiplier)) {
                sleepMultiplier++;
            }
            continue;
        }
        try {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Replicating " + entries.size() + " entries of total size " + replicateContext.getSize());
            }
            int futures = 0;
            for (int i = 0; i < entryLists.size(); i++) {
                if (!entryLists.get(i).isEmpty()) {
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Submitting " + entryLists.get(i).size() + " entries of total size " + replicateContext.getSize());
                    }
                    // RuntimeExceptions encountered here bubble up and are handled in ReplicationSource
                    pool.submit(createReplicator(entryLists.get(i), i));
                    futures++;
                }
            }
            IOException iox = null;
            for (int i = 0; i < futures; i++) {
                try {
                    // wait for all futures, remove successful parts
                    // (only the remaining parts will be retried)
                    Future<Integer> f = pool.take();
                    int index = f.get().intValue();
                    int batchSize = entryLists.get(index).size();
                    entryLists.set(index, Collections.<Entry>emptyList());
                    // Now, we have marked the batch as done replicating, record its size
                    numReplicated += batchSize;
                } catch (InterruptedException ie) {
                    iox = new IOException(ie);
                } catch (ExecutionException ee) {
                    // cause must be an IOException
                    iox = (IOException) ee.getCause();
                }
            }
            if (iox != null) {
                // if we had any exceptions, try again
                throw iox;
            }
            if (numReplicated != entries.size()) {
                // Something went wrong here and we don't know what, let's just fail and retry.
                LOG.warn("The number of edits replicated is different from the number received," + " failing for now.");
                return false;
            }
            // update metrics
            this.metrics.setAgeOfLastShippedOp(entries.get(entries.size() - 1).getKey().getWriteTime(), walGroupId);
            return true;
        } catch (IOException ioe) {
            // Didn't ship anything, but must still age the last time we did
            this.metrics.refreshAgeOfLastShippedOp(walGroupId);
            if (ioe instanceof RemoteException) {
                ioe = ((RemoteException) ioe).unwrapRemoteException();
                LOG.warn("Can't replicate because of an error on the remote cluster: ", ioe);
                if (ioe instanceof TableNotFoundException) {
                    if (sleepForRetries("A table is missing in the peer cluster. " + "Replication cannot proceed without losing data.", sleepMultiplier)) {
                        sleepMultiplier++;
                    }
                } else if (ioe instanceof SaslException) {
                    LOG.warn("Peer encountered SaslException, rechecking all sinks: ", ioe);
                    replicationSinkMgr.chooseSinks();
                }
            } else {
                if (ioe instanceof SocketTimeoutException) {
                    // This exception means we waited for more than 60s and nothing
                    // happened, the cluster is alive and calling it right away
                    // even for a test just makes things worse.
                    sleepForRetries("Encountered a SocketTimeoutException. Since the " + "call to the remote cluster timed out, which is usually " + "caused by a machine failure or a massive slowdown", this.socketTimeoutMultiplier);
                } else if (ioe instanceof ConnectException) {
                    LOG.warn("Peer is unavailable, rechecking all sinks: ", ioe);
                    replicationSinkMgr.chooseSinks();
                } else {
                    LOG.warn("Can't replicate because of a local or network error: ", ioe);
                }
            }
            if (sleepForRetries("Since we are unable to replicate", sleepMultiplier)) {
                sleepMultiplier++;
            }
        }
    }
    // in case we exited before replicating
    return false;
}
Also used : ArrayList(java.util.ArrayList) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) IOException(java.io.IOException) SaslException(javax.security.sasl.SaslException) HBaseReplicationEndpoint(org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) Entry(org.apache.hadoop.hbase.wal.WAL.Entry) SocketTimeoutException(java.net.SocketTimeoutException) List(java.util.List) ExecutionException(java.util.concurrent.ExecutionException) RemoteException(org.apache.hadoop.ipc.RemoteException) ConnectException(java.net.ConnectException)
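
The bookkeeping worth copying from this example: succeeded batches are blanked out as their futures complete, one failure is remembered as an IOException, and rethrowing it sends only the remaining batches around the retry loop. A standalone sketch of that pattern (batch contents and the submitted task are placeholders, and interruption is simply propagated rather than wrapped as in the original):

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;

class PartialRetryShipper {

    // On success every batch replicated; on failure, succeeded batches are
    // replaced with empty lists so a retry resubmits only the remainder.
    static void shipOnce(CompletionService<Integer> pool, List<List<String>> batches)
            throws IOException, InterruptedException {
        int submitted = 0;
        for (int i = 0; i < batches.size(); i++) {
            if (!batches.get(i).isEmpty()) {
                final int index = i;
                pool.submit(() -> index); // placeholder for a real replicator task
                submitted++;
            }
        }
        IOException iox = null;
        for (int i = 0; i < submitted; i++) {
            try {
                int index = pool.take().get(); // which batch finished
                batches.set(index, Collections.emptyList()); // mark it done
            } catch (ExecutionException ee) {
                iox = new IOException(ee.getCause()); // keep the real cause
            }
        }
        if (iox != null) {
            throw iox; // caller catches, sleeps, and retries what is left
        }
    }
}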

Aggregations

ExecutionException (java.util.concurrent.ExecutionException)1341 IOException (java.io.IOException)367 Test (org.junit.Test)335 TimeoutException (java.util.concurrent.TimeoutException)258 ArrayList (java.util.ArrayList)237 Future (java.util.concurrent.Future)218 ExecutorService (java.util.concurrent.ExecutorService)152 CountDownLatch (java.util.concurrent.CountDownLatch)103 List (java.util.List)98 CancellationException (java.util.concurrent.CancellationException)98 Callable (java.util.concurrent.Callable)97 Test (org.testng.annotations.Test)78 HashMap (java.util.HashMap)69 Map (java.util.Map)65 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)64 RejectedExecutionException (java.util.concurrent.RejectedExecutionException)63 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)56 ParallelTest (com.hazelcast.test.annotation.ParallelTest)47 QuickTest (com.hazelcast.test.annotation.QuickTest)47 UncheckedExecutionException (com.google.common.util.concurrent.UncheckedExecutionException)46