Use of java.util.concurrent.FutureTask in project camel by apache.
The class PojoCallable, method testJavaSpaceConcurrentRequestReply:
@Test
public void testJavaSpaceConcurrentRequestReply() throws Exception {
    Vector<FutureTask<Reply>> tasks = new Vector<FutureTask<Reply>>();
    Endpoint endpoint = context.getEndpoint("direct:input");
    ExecutorService es = Executors.newFixedThreadPool(10);
    long start = System.currentTimeMillis();
    for (int i = 0; i < 100; ++i) {
        Request req = new Request();
        req.setPayload("REQUEST " + i);
        ITestPojo proxy = ProxyHelper.createProxy(endpoint, ITestPojo.class);
        FutureTask<Reply> task = new FutureTask<Reply>(new PojoCallable(req, proxy));
        tasks.add(task);
        es.submit(task);
    }
    int i = 0;
    for (FutureTask<Reply> futureTask : tasks) {
        assertTrue(futureTask.get().getPayload().equals("REPLY for REQUEST " + i++));
    }
    long stop = System.currentTimeMillis();
    log.info("{} took {} milliseconds", getTestMethodName(), stop - start);
    es.shutdownNow();
}
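Distilled, the Camel test fans out 100 proxied request/reply calls as FutureTasks and then joins them in submission order. A minimal self-contained sketch of the same fan-out/join idiom, with a hypothetical string-echo callable standing in for PojoCallable and the endpoint proxy:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;

public class FanOutJoinSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService es = Executors.newFixedThreadPool(10);
        List<FutureTask<String>> tasks = new ArrayList<>();
        for (int i = 0; i < 100; ++i) {
            final int n = i;
            // Hypothetical stand-in for new PojoCallable(req, proxy).
            FutureTask<String> task = new FutureTask<>(() -> "REPLY for REQUEST " + n);
            tasks.add(task);
            es.submit(task);
        }
        int i = 0;
        for (FutureTask<String> task : tasks) {
            // get() blocks until each task completes; iterating the list (not the
            // completion order) keeps replies paired with their requests.
            if (!task.get().equals("REPLY for REQUEST " + i++)) {
                throw new AssertionError("unexpected reply");
            }
        }
        es.shutdownNow();
    }
}

Note the design choice shared with the Camel test: because the FutureTask itself is submitted (rather than the Callable), the result is read from the task, not from the Future returned by submit, which for a Runnable always yields null.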
Use of java.util.concurrent.FutureTask in project hadoop by apache.
The class TestBlockManager, method testBlockReportQueueing:
@Test
public void testBlockReportQueueing() throws Exception {
    Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        final FSNamesystem fsn = cluster.getNamesystem();
        final BlockManager bm = fsn.getBlockManager();
        final ExecutorService executor = Executors.newCachedThreadPool();
        final CyclicBarrier startBarrier = new CyclicBarrier(2);
        final CountDownLatch endLatch = new CountDownLatch(3);
        final CountDownLatch doneLatch = new CountDownLatch(1);
        // create a task intended to block while processing, thus causing
        // the queue to back up. simulates how a full BR is processed.
        FutureTask<?> blockingOp = new FutureTask<Void>(new Callable<Void>() {
            @Override
            public Void call() throws IOException {
                bm.runBlockOp(new Callable<Void>() {
                    @Override
                    public Void call() throws InterruptedException, BrokenBarrierException {
                        // use a barrier to control the blocking.
                        startBarrier.await();
                        endLatch.countDown();
                        return null;
                    }
                });
                // signal that runBlockOp returned
                doneLatch.countDown();
                return null;
            }
        });
        // create an async task. simulates how an IBR is processed.
        Callable<?> asyncOp = new Callable<Void>() {
            @Override
            public Void call() throws IOException {
                bm.enqueueBlockOp(new Runnable() {
                    @Override
                    public void run() {
                        // use the latch to signal if the op has run.
                        endLatch.countDown();
                    }
                });
                return null;
            }
        };
        // calling get forces its execution so we can test if it's blocked.
        Future<?> blockedFuture = executor.submit(blockingOp);
        boolean isBlocked = false;
        try {
            // wait 1s for the future to block. it should run instantaneously.
            blockedFuture.get(1, TimeUnit.SECONDS);
        } catch (TimeoutException te) {
            isBlocked = true;
        }
        assertTrue(isBlocked);
        // should effectively return immediately since calls are queued.
        // however they should be backed up in the queue behind the blocking
        // operation.
        executor.submit(asyncOp).get(1, TimeUnit.SECONDS);
        executor.submit(asyncOp).get(1, TimeUnit.SECONDS);
        // check the async calls are queued, and the first is still blocked.
        assertEquals(2, bm.getBlockOpQueueLength());
        assertFalse(blockedFuture.isDone());
        // unblock the queue, wait for the last op to complete, check the blocked
        // call has returned
        startBarrier.await(1, TimeUnit.SECONDS);
        assertTrue(endLatch.await(1, TimeUnit.SECONDS));
        assertEquals(0, bm.getBlockOpQueueLength());
        assertTrue(doneLatch.await(1, TimeUnit.SECONDS));
    } finally {
        cluster.shutdown();
    }
}
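The load-bearing trick in this test is proving that a submitted operation is blocked by letting Future.get time out. That idiom works independently of HDFS; a minimal sketch, with a plain latch standing in for the barrier-gated block op:

import java.util.concurrent.*;

public class BlockedFutureSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newCachedThreadPool();
        CountDownLatch release = new CountDownLatch(1);
        Future<?> blocked = executor.submit(() -> {
            // stands in for the barrier-gated runBlockOp call
            release.await();
            return null;
        });
        boolean isBlocked = false;
        try {
            // the task is parked on the latch, so this get should time out
            blocked.get(1, TimeUnit.SECONDS);
        } catch (TimeoutException te) {
            isBlocked = true;
        }
        System.out.println("blocked: " + isBlocked); // prints: blocked: true
        release.countDown(); // unblock; now get() returns promptly
        blocked.get(1, TimeUnit.SECONDS);
        executor.shutdownNow();
    }
}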
Use of java.util.concurrent.FutureTask in project hbase by apache.
The class HRegion, method doProcessRowWithTimeout:
private void doProcessRowWithTimeout(final RowProcessor<?, ?> processor, final long now,
        final HRegion region, final List<Mutation> mutations, final WALEdit walEdit,
        final long timeout) throws IOException {
    // Short circuit the no time bound case.
    if (timeout < 0) {
        try {
            processor.process(now, region, mutations, walEdit);
        } catch (IOException e) {
            LOG.warn("RowProcessor:" + processor.getClass().getName() + " throws Exception on row(s):" +
                Bytes.toStringBinary(processor.getRowsToLock().iterator().next()) + "...", e);
            throw e;
        }
        return;
    }
    // Case with time bound
    FutureTask<Void> task = new FutureTask<>(new Callable<Void>() {
        @Override
        public Void call() throws IOException {
            try {
                processor.process(now, region, mutations, walEdit);
                return null;
            } catch (IOException e) {
                LOG.warn("RowProcessor:" + processor.getClass().getName() + " throws Exception on row(s):" +
                    Bytes.toStringBinary(processor.getRowsToLock().iterator().next()) + "...", e);
                throw e;
            }
        }
    });
    rowProcessorExecutor.execute(task);
    try {
        task.get(timeout, TimeUnit.MILLISECONDS);
    } catch (TimeoutException te) {
        LOG.error("RowProcessor timeout:" + timeout + " ms on row(s):" +
            Bytes.toStringBinary(processor.getRowsToLock().iterator().next()) + "...");
        throw new IOException(te);
    } catch (Exception e) {
        throw new IOException(e);
    }
}
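Stripped of the HBase specifics, doProcessRowWithTimeout is the standard run-with-deadline idiom: wrap the work in a FutureTask, hand it to an executor, and bound the wait with get(timeout). A minimal sketch under assumed names (the executor field and helper are placeholders, not HRegion's API); unlike the HBase method above, this sketch also cancels the task on timeout:

import java.io.IOException;
import java.util.concurrent.*;

public class RunWithDeadline {
    // placeholder for something like the rowProcessorExecutor field
    private static final ExecutorService EXECUTOR = Executors.newCachedThreadPool();

    static void runWithTimeout(Callable<Void> work, long timeoutMs) throws IOException {
        FutureTask<Void> task = new FutureTask<>(work);
        EXECUTOR.execute(task);
        try {
            task.get(timeoutMs, TimeUnit.MILLISECONDS);
        } catch (TimeoutException te) {
            // Without cancel(true), a timed-out task keeps running in the pool;
            // the HBase method above leaves it running and only throws.
            task.cancel(true);
            throw new IOException("timed out after " + timeoutMs + " ms", te);
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
}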
Use of java.util.concurrent.FutureTask in project hive by apache.
The class TestIOContextMap, method testSparkThreadLocal:
@Test
public void testSparkThreadLocal() throws Exception {
    // Test that input name does not change IOContext returned, and that each thread gets its own.
    final Configuration conf1 = new Configuration();
    conf1.set(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, "spark");
    final Configuration conf2 = new Configuration(conf1);
    conf2.set(Utilities.INPUT_NAME, "Other input");
    final int THREAD_COUNT = 2;
    ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
    final CountDownLatch cdlIn = new CountDownLatch(THREAD_COUNT), cdlOut = new CountDownLatch(1);
    @SuppressWarnings("unchecked")
    FutureTask<IOContext>[] tasks = new FutureTask[THREAD_COUNT];
    for (int i = 0; i < tasks.length; ++i) {
        tasks[i] = new FutureTask<IOContext>(new Callable<IOContext>() {
            public IOContext call() throws Exception {
                syncThreadStart(cdlIn, cdlOut);
                IOContext c1 = IOContextMap.get(conf1), c2 = IOContextMap.get(conf2);
                assertSame(c1, c2);
                return c1;
            }
        });
        executor.execute(tasks[i]);
    }
    // Wait for all threads to be ready.
    cdlIn.await();
    // Release them at the same time.
    cdlOut.countDown();
    Set<IOContext> results = Sets.newIdentityHashSet();
    for (int i = 0; i < tasks.length; ++i) {
        // All the objects must be different.
        assertTrue(results.add(tasks[i].get()));
    }
}
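Both Hive tests call a syncThreadStart(cdlIn, cdlOut) helper that the snippets do not show. From the way the latches are declared and used, a plausible reconstruction (an assumption, not the verbatim Hive source) is:

// Assumed shape: each worker announces readiness on cdlIn, then parks
// until the coordinating thread opens cdlOut.
private void syncThreadStart(final CountDownLatch cdlIn, final CountDownLatch cdlOut) {
    cdlIn.countDown();
    try {
        cdlOut.await();
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}

This gives the test a start line: the main thread knows every worker is parked (cdlIn.await()) before releasing them all at once (cdlOut.countDown()).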
Use of java.util.concurrent.FutureTask in project hive by apache.
The class TestIOContextMap, method testMRTezGlobalMap:
@Test
public void testMRTezGlobalMap() throws Exception {
    // Tests concurrent modification, and that results are the same per input across threads
    // but different between inputs.
    final int THREAD_COUNT = 2, ITER_COUNT = 1000;
    final AtomicInteger countdown = new AtomicInteger(ITER_COUNT);
    final CountDownLatch phase1End = new CountDownLatch(THREAD_COUNT);
    final IOContext[] results = new IOContext[ITER_COUNT];
    ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
    final CountDownLatch cdlIn = new CountDownLatch(THREAD_COUNT), cdlOut = new CountDownLatch(1);
    @SuppressWarnings("unchecked")
    FutureTask<Void>[] tasks = new FutureTask[THREAD_COUNT];
    for (int i = 0; i < tasks.length; ++i) {
        tasks[i] = new FutureTask<Void>(new Callable<Void>() {
            public Void call() throws Exception {
                Configuration conf = new Configuration();
                syncThreadStart(cdlIn, cdlOut);
                // Phase 1 - create objects.
                while (true) {
                    int nextIx = countdown.decrementAndGet();
                    if (nextIx < 0) break;
                    conf.set(Utilities.INPUT_NAME, "Input " + nextIx);
                    results[nextIx] = IOContextMap.get(conf);
                    if (nextIx == 0) break;
                }
                phase1End.countDown();
                phase1End.await();
                // Phase 2 - verify we get the expected objects created by all threads.
                for (int i = 0; i < ITER_COUNT; ++i) {
                    conf.set(Utilities.INPUT_NAME, "Input " + i);
                    IOContext ctx = IOContextMap.get(conf);
                    assertSame(results[i], ctx);
                }
                return null;
            }
        });
        executor.execute(tasks[i]);
    }
    // Wait for all threads to be ready.
    cdlIn.await();
    // Release them at the same time.
    cdlOut.countDown();
    for (int i = 0; i < tasks.length; ++i) {
        tasks[i].get();
    }
    Set<IOContext> resultSet = Sets.newIdentityHashSet();
    for (int i = 0; i < results.length; ++i) {
        // All the objects must be different.
        assertTrue(resultSet.add(results[i]));
    }
}
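The phase-1 loop above is a lock-free work-claiming idiom: threads race on a shared AtomicInteger, and each decrementAndGet hands out a unique index exactly once. A minimal self-contained sketch of just that idiom (names are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import java.util.concurrent.atomic.AtomicInteger;

public class WorkClaimSketch {
    public static void main(String[] args) throws Exception {
        final int ITEMS = 1000;
        final AtomicInteger countdown = new AtomicInteger(ITEMS);
        final String[] results = new String[ITEMS];
        ExecutorService executor = Executors.newFixedThreadPool(2);
        @SuppressWarnings("unchecked")
        FutureTask<Void>[] tasks = new FutureTask[2];
        for (int t = 0; t < tasks.length; ++t) {
            tasks[t] = new FutureTask<>(() -> {
                while (true) {
                    int ix = countdown.decrementAndGet();
                    if (ix < 0) break; // every index already claimed
                    // each index is claimed by exactly one thread, so no two
                    // threads ever write the same slot
                    results[ix] = "item " + ix;
                }
                return null;
            });
            executor.execute(tasks[t]);
        }
        for (FutureTask<Void> task : tasks) {
            task.get(); // join, and propagate any worker exception
        }
        executor.shutdown();
    }
}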