
Example 46 with Callable

use of java.util.concurrent.Callable in project druid by druid-io.

the class CassandraDataSegmentPuller method getSegmentFiles.

public io.druid.java.util.common.FileUtils.FileCopyResult getSegmentFiles(final String key, final File outDir) throws SegmentLoadingException {
    log.info("Pulling index from C* at path[%s] to outDir[%s]", key, outDir);
    if (!outDir.exists()) {
        outDir.mkdirs();
    }
    if (!outDir.isDirectory()) {
        throw new ISE("outDir[%s] must be a directory.", outDir);
    }
    long startTime = System.currentTimeMillis();
    final File tmpFile = new File(outDir, "index.zip");
    log.info("Pulling to temporary local cache [%s]", tmpFile.getAbsolutePath());
    final io.druid.java.util.common.FileUtils.FileCopyResult localResult;
    try {
        localResult = RetryUtils.retry(new Callable<io.druid.java.util.common.FileUtils.FileCopyResult>() {

            @Override
            public io.druid.java.util.common.FileUtils.FileCopyResult call() throws Exception {
                try (OutputStream os = new FileOutputStream(tmpFile)) {
                    final ObjectMetadata meta = ChunkedStorage.newReader(indexStorage, key, os).withBatchSize(BATCH_SIZE).withConcurrencyLevel(CONCURRENCY).call();
                }
                return new io.druid.java.util.common.FileUtils.FileCopyResult(tmpFile);
            }
        }, Predicates.<Throwable>alwaysTrue(), 10);
    } catch (Exception e) {
        throw new SegmentLoadingException(e, "Unable to copy key [%s] to file [%s]", key, tmpFile.getAbsolutePath());
    }
    try {
        final io.druid.java.util.common.FileUtils.FileCopyResult result = CompressionUtils.unzip(tmpFile, outDir);
        log.info("Pull of file[%s] completed in %,d millis (%s bytes)", key, System.currentTimeMillis() - startTime, result.size());
        return result;
    } catch (Exception e) {
        try {
            FileUtils.deleteDirectory(outDir);
        } catch (IOException e1) {
            log.error(e1, "Error clearing segment directory [%s]", outDir.getAbsolutePath());
            e.addSuppressed(e1);
        }
        throw new SegmentLoadingException(e, e.getMessage());
    } finally {
        if (!tmpFile.delete()) {
            log.warn("Could not delete cache file at [%s]", tmpFile.getAbsolutePath());
        }
    }
}
Also used : FileUtils(org.apache.commons.io.FileUtils) SegmentLoadingException(io.druid.segment.loading.SegmentLoadingException) OutputStream(java.io.OutputStream) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) Callable(java.util.concurrent.Callable) ISE(io.druid.java.util.common.ISE) File(java.io.File) ObjectMetadata(com.netflix.astyanax.recipes.storage.ObjectMetadata)
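
The core pattern here is wrapping the flaky copy in a Callable and handing it to a retry helper. For comparison, a self-contained, JDK-only sketch of that pattern follows; the class name RetryCallableSketch and the retry helper are assumptions made for illustration and are not Druid's RetryUtils (whose real overloads also take a retry predicate, as shown above).

import java.util.concurrent.Callable;

// Standalone sketch of the retry-around-a-Callable pattern, assuming a plain
// "try up to maxTries times" policy. Not Druid's RetryUtils.
public class RetryCallableSketch {

    static <T> T retry(Callable<T> task, int maxTries) throws Exception {
        Exception last = null;
        for (int attempt = 1; attempt <= maxTries; attempt++) {
            try {
                return task.call();
            } catch (Exception e) {
                last = e; // real code would log and back off between attempts
            }
        }
        throw last; // maxTries is assumed to be >= 1
    }

    public static void main(String[] args) throws Exception {
        // The Callable wraps the single unit of work that may fail transiently,
        // e.g. copying the segment file in getSegmentFiles above.
        String result = retry(new Callable<String>() {
            @Override
            public String call() throws Exception {
                return "copied";
            }
        }, 10);
        System.out.println(result);
    }
}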

Example 47 with Callable

use of java.util.concurrent.Callable in project hbase by apache.

the class TestHBaseFsckOneRS method testParallelWithRetriesHbck.

/**
   * This test makes sure that with enough retries both parallel instances
   * of hbck will be completed successfully.
   *
   * @throws Exception
   */
@Test(timeout = 180000)
public void testParallelWithRetriesHbck() throws Exception {
    final ExecutorService service;
    final Future<HBaseFsck> hbck1, hbck2;
    // With the ExponentialBackoffPolicyWithLimit (starting with 200 milliseconds sleep time, and
    // max sleep time of 5 seconds), we can retry around 15 times within 80 seconds before bail out.
    //
    // Note: the reason to use 80 seconds is that in HADOOP-2.6 and later, the create file would
    // retry up to HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).  See HBASE-13574 for more
    // details.
    final int timeoutInSeconds = 80;
    final int sleepIntervalInMilliseconds = 200;
    final int maxSleepTimeInMilliseconds = 6000;
    final int maxRetryAttempts = 15;
    class RunHbck implements Callable<HBaseFsck> {

        @Override
        public HBaseFsck call() throws Exception {
            // Increase retry attempts to make sure the non-active hbck doesn't get starved
            Configuration c = new Configuration(conf);
            c.setInt("hbase.hbck.lockfile.maxwaittime", timeoutInSeconds);
            c.setInt("hbase.hbck.lockfile.attempt.sleep.interval", sleepIntervalInMilliseconds);
            c.setInt("hbase.hbck.lockfile.attempt.maxsleeptime", maxSleepTimeInMilliseconds);
            c.setInt("hbase.hbck.lockfile.attempts", maxRetryAttempts);
            return doFsck(c, false);
        }
    }
    service = Executors.newFixedThreadPool(2);
    hbck1 = service.submit(new RunHbck());
    hbck2 = service.submit(new RunHbck());
    service.shutdown();
    // wait for both hbck calls to finish
    service.awaitTermination(timeoutInSeconds * 2, TimeUnit.SECONDS);
    HBaseFsck h1 = hbck1.get();
    HBaseFsck h2 = hbck2.get();
    // Both should be successful
    assertNotNull(h1);
    assertNotNull(h2);
    assert (h1.getRetCode() >= 0);
    assert (h2.getRetCode() >= 0);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ExecutorService(java.util.concurrent.ExecutorService) Callable(java.util.concurrent.Callable) Test(org.junit.Test)
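
The same submit-two-Callables-and-wait pattern can also be expressed with ExecutorService.invokeAll, which blocks until every task completes or the timeout expires. Below is a minimal, self-contained sketch; the class name ParallelCallablesSketch, the return value 0, and the 160-second timeout are placeholders, not values taken from the test.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

// Sketch of running the same Callable twice in parallel and waiting for both.
public class ParallelCallablesSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService service = Executors.newFixedThreadPool(2);
        Callable<Integer> task = () -> 0; // placeholder for doFsck(c, false).getRetCode()
        try {
            // invokeAll blocks until every task completes or the timeout expires.
            List<Future<Integer>> results =
                    service.invokeAll(Arrays.asList(task, task), 160, TimeUnit.SECONDS);
            for (Future<Integer> f : results) {
                System.out.println("return code: " + f.get());
            }
        } finally {
            service.shutdown();
        }
    }
}

One design difference from the explicit submit/get approach in the test: if the invokeAll timeout elapses, tasks that have not completed are cancelled rather than left running.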

Example 48 with Callable

use of java.util.concurrent.Callable in project druid by druid-io.

the class BlockingPoolTest method testConcurrentBatchClose.

@Test(timeout = 1000)
public void testConcurrentBatchClose() throws ExecutionException, InterruptedException {
    final int batch1 = POOL.maxSize() / 2;
    final Callable<ReferenceCountingResourceHolder<List<Integer>>> c1 = new Callable<ReferenceCountingResourceHolder<List<Integer>>>() {

        @Override
        public ReferenceCountingResourceHolder<List<Integer>> call() throws Exception {
            return POOL.takeBatch(batch1, 10);
        }
    };
    final int batch2 = POOL.maxSize() - batch1;
    final Callable<ReferenceCountingResourceHolder<List<Integer>>> c2 = new Callable<ReferenceCountingResourceHolder<List<Integer>>>() {

        @Override
        public ReferenceCountingResourceHolder<List<Integer>> call() throws Exception {
            return POOL.takeBatch(batch2, 10);
        }
    };
    final Future<ReferenceCountingResourceHolder<List<Integer>>> f1 = SERVICE.submit(c1);
    final Future<ReferenceCountingResourceHolder<List<Integer>>> f2 = SERVICE.submit(c2);
    final ReferenceCountingResourceHolder<List<Integer>> r1 = f1.get();
    final ReferenceCountingResourceHolder<List<Integer>> r2 = f2.get();
    assertNotNull(r1);
    assertNotNull(r2);
    assertEquals(batch1, r1.get().size());
    assertEquals(batch2, r2.get().size());
    assertEquals(0, POOL.getPoolSize());
    final Future future1 = SERVICE.submit(new Runnable() {

        @Override
        public void run() {
            r1.close();
        }
    });
    final Future future2 = SERVICE.submit(new Runnable() {

        @Override
        public void run() {
            r2.close();
        }
    });
    future1.get();
    future2.get();
    assertEquals(POOL.maxSize(), POOL.getPoolSize());
}
Also used : Future(java.util.concurrent.Future) List(java.util.List) Callable(java.util.concurrent.Callable) Test(org.junit.Test)
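
Stripped of Druid's BlockingPool, the behaviour under test is two Callables draining a bounded resource concurrently, with the pool refilling once the holders are closed. The following is a rough JDK-only analogue; ArrayBlockingQueue and the class name ConcurrentTakeSketch are stand-ins chosen for illustration, not the real pool implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Two Callables each take half of a bounded pool concurrently, then the
// resources are returned and the pool is full again.
public class ConcurrentTakeSketch {
    public static void main(String[] args) throws Exception {
        final int maxSize = 10;
        final BlockingQueue<Integer> pool = new ArrayBlockingQueue<>(maxSize);
        for (int i = 0; i < maxSize; i++) {
            pool.add(i);
        }
        ExecutorService service = Executors.newFixedThreadPool(2);

        // Each Callable takes half of the pool's capacity, blocking if needed.
        Callable<List<Integer>> takeHalf = () -> {
            List<Integer> batch = new ArrayList<>();
            for (int i = 0; i < maxSize / 2; i++) {
                batch.add(pool.take());
            }
            return batch;
        };

        Future<List<Integer>> f1 = service.submit(takeHalf);
        Future<List<Integer>> f2 = service.submit(takeHalf);
        List<Integer> batch1 = f1.get();
        List<Integer> batch2 = f2.get();
        System.out.println("pool drained: " + pool.isEmpty());

        // "Closing" the holders corresponds to returning the resources.
        pool.addAll(batch1);
        pool.addAll(batch2);
        System.out.println("pool size restored: " + (pool.size() == maxSize));
        service.shutdown();
    }
}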

Example 49 with Callable

use of java.util.concurrent.Callable in project druid by druid-io.

the class BlockingPoolTest method testConcurrentTakeBatchClose.

@Test(timeout = 1000)
public void testConcurrentTakeBatchClose() throws ExecutionException, InterruptedException {
    final ReferenceCountingResourceHolder<List<Integer>> r1 = POOL.takeBatch(1, 10);
    final Callable<ReferenceCountingResourceHolder<List<Integer>>> c2 = new Callable<ReferenceCountingResourceHolder<List<Integer>>>() {

        @Override
        public ReferenceCountingResourceHolder<List<Integer>> call() throws Exception {
            return POOL.takeBatch(10, 100);
        }
    };
    final Future<ReferenceCountingResourceHolder<List<Integer>>> f2 = SERVICE.submit(c2);
    final Future f1 = SERVICE.submit(new Runnable() {

        @Override
        public void run() {
            try {
                Thread.sleep(50);
            } catch (InterruptedException e) {
                // ignore
            }
            r1.close();
        }
    });
    final ReferenceCountingResourceHolder<List<Integer>> r2 = f2.get();
    f1.get();
    assertNotNull(r2);
    assertEquals(10, r2.get().size());
    assertEquals(0, POOL.getPoolSize());
    r2.close();
    assertEquals(POOL.maxSize(), POOL.getPoolSize());
}
Also used : Future(java.util.concurrent.Future) List(java.util.List) Callable(java.util.concurrent.Callable) Test(org.junit.Test)
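
The distinguishing point of this test is that the Callable's takeBatch call blocks until the first holder is closed. Here is a minimal sketch of that blocking hand-off; SynchronousQueue and the class name BlockedTakeSketch are stand-ins chosen for illustration, and the real pool's takeBatch semantics are richer than this.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;

// A Callable waits on a resource that only becomes available once another
// thread releases it, mirroring r1.close() unblocking takeBatch above.
public class BlockedTakeSketch {
    public static void main(String[] args) throws Exception {
        final SynchronousQueue<Integer> handoff = new SynchronousQueue<>();
        ExecutorService service = Executors.newFixedThreadPool(2);

        // This Callable blocks until the "resource" is handed over.
        Callable<Integer> blockedTake = handoff::take;
        Future<Integer> taker = service.submit(blockedTake);

        // The releasing side sleeps briefly, then makes the resource available.
        service.submit(() -> {
            try {
                Thread.sleep(50);
                handoff.put(42);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        // The blocked Callable completes only after the release.
        System.out.println("took: " + taker.get(1, TimeUnit.SECONDS));
        service.shutdown();
    }
}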

Example 50 with Callable

use of java.util.concurrent.Callable in project druid by druid-io.

the class HdfsClasspathSetupTest method testConcurrentUpload.

@Test
public void testConcurrentUpload() throws IOException, InterruptedException, ExecutionException, TimeoutException {
    final int concurrency = 10;
    ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrency));
    // barrier ensures that all jobs try to add files to classpath at same time.
    final CyclicBarrier barrier = new CyclicBarrier(concurrency);
    final DistributedFileSystem fs = miniCluster.getFileSystem();
    final Path expectedJarPath = new Path(finalClasspath, dummyJarFile.getName());
    List<ListenableFuture<Boolean>> futures = new ArrayList<>();
    for (int i = 0; i < concurrency; i++) {
        futures.add(pool.submit(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                int id = barrier.await();
                Job job = Job.getInstance(conf, "test-job-" + id);
                Path intermediatePathForJob = new Path(intermediatePath, "job-" + id);
                JobHelper.addJarToClassPath(dummyJarFile, finalClasspath, intermediatePathForJob, fs, job);
                // check file gets uploaded to final HDFS path
                Assert.assertTrue(fs.exists(expectedJarPath));
                // check that the intermediate file is not present
                Assert.assertFalse(fs.exists(new Path(intermediatePathForJob, dummyJarFile.getName())));
                // check file gets added to the classpath
                Assert.assertEquals(expectedJarPath.toString(), job.getConfiguration().get(MRJobConfig.CLASSPATH_FILES));
                return true;
            }
        }));
    }
    Futures.allAsList(futures).get(30, TimeUnit.SECONDS);
    pool.shutdownNow();
}
Also used : Path(org.apache.hadoop.fs.Path) ArrayList(java.util.ArrayList) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Job(org.apache.hadoop.mapreduce.Job) Callable(java.util.concurrent.Callable) CyclicBarrier(java.util.concurrent.CyclicBarrier) Test(org.junit.Test)
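
The CyclicBarrier is what makes the upload genuinely concurrent: every Callable parks on barrier.await() and all of them proceed together. Below is a JDK-only sketch of that pattern without the Guava and Hadoop pieces; the class name BarrierCallableSketch and the placeholder body are assumptions for illustration, standing in for JobHelper.addJarToClassPath and the assertions above.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

// All Callables wait on the barrier so the contended operation starts at the
// same moment in every thread.
public class BarrierCallableSketch {
    public static void main(String[] args) throws Exception {
        final int concurrency = 10;
        final CyclicBarrier barrier = new CyclicBarrier(concurrency);
        ExecutorService pool = Executors.newFixedThreadPool(concurrency);

        List<Future<Boolean>> futures = new ArrayList<>();
        for (int i = 0; i < concurrency; i++) {
            futures.add(pool.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    // await() returns a distinct arrival index per thread, which the
                    // test above uses to build per-job intermediate paths.
                    int id = barrier.await();
                    // Placeholder for the concurrent upload and its assertions.
                    return id >= 0;
                }
            }));
        }
        for (Future<Boolean> f : futures) {
            System.out.println(f.get(30, TimeUnit.SECONDS));
        }
        pool.shutdown();
    }
}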

Aggregations

Callable (java.util.concurrent.Callable) 1946
ArrayList (java.util.ArrayList) 664
ExecutorService (java.util.concurrent.ExecutorService) 630
Test (org.junit.Test) 598
Future (java.util.concurrent.Future) 482
IOException (java.io.IOException) 255
ExecutionException (java.util.concurrent.ExecutionException) 247
List (java.util.List) 167
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 158
CountDownLatch (java.util.concurrent.CountDownLatch) 157
HashMap (java.util.HashMap) 120
Map (java.util.Map) 117
File (java.io.File) 112
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 105
Ignite (org.apache.ignite.Ignite) 87
HashSet (java.util.HashSet) 80
Set (java.util.Set) 55
TimeoutException (java.util.concurrent.TimeoutException) 54
Collectors (java.util.stream.Collectors) 53
Transaction (org.apache.ignite.transactions.Transaction) 52