use of java.util.concurrent.ExecutorCompletionService in project otter by alibaba.
the class DefaultCommunicationClientImpl method call.
public Object call(final String[] addrs, final Event event) {
    Assert.notNull(this.factory, "No factory specified");
    if (addrs == null || addrs.length == 0) {
        throw new IllegalArgumentException("addrs example: 127.0.0.1:1099");
    }
    ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<Object>(executor);
    List<Future<Object>> futures = new ArrayList<Future<Object>>(addrs.length);
    List<Object> result = new ArrayList<Object>(10);
    for (final String addr : addrs) {
        futures.add(completionService.submit(new Callable<Object>() {

            @Override
            public Object call() throws Exception {
                return DefaultCommunicationClientImpl.this.call(addr, event);
            }
        }));
    }
    Exception ex = null;
    int errorIndex = 0;
    while (errorIndex < futures.size()) {
        try {
            // this wait may also be interrupted
            Future<Object> future = completionService.take();
            future.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            ex = e;
            break;
        } catch (ExecutionException e) {
            ex = e;
            break;
        }
        errorIndex++;
    }
    if (errorIndex < futures.size()) {
        // a call failed or we were interrupted: cancel everything still running
        for (int index = 0; index < futures.size(); index++) {
            Future<Object> future = futures.get(index);
            if (!future.isDone()) {
                future.cancel(true);
            }
        }
    } else {
        // all calls completed: collect the results in submission order
        for (int index = 0; index < futures.size(); index++) {
            Future<Object> future = futures.get(index);
            try {
                result.add(future.get());
            } catch (InterruptedException e) {
                // ignore
                Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
                // ignore
            }
        }
    }
    if (ex != null) {
        throw new CommunicationException(String.format("call addr[%s] error by %s", addrs[errorIndex], ex.getMessage()), ex);
    } else {
        return result;
    }
}
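The method above fans one call out to every address, consumes completions in whatever order they finish, and cancels everything still running as soon as a call fails or the waiting thread is interrupted. Note that take() hands back futures in completion order, so errorIndex counts completed tasks rather than pointing at the failing address. A minimal, self-contained sketch of the same fan-out/fail-fast idea (the names are illustrative, not otter's API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

public class FanOutCall {

    // Fan the tasks out, consume completions as they arrive, and cancel
    // everything still running on the first failure. Illustrative sketch only.
    static <T> List<T> callAll(ExecutorService executor, List<Callable<T>> tasks) throws Exception {
        CompletionService<T> completionService = new ExecutorCompletionService<T>(executor);
        List<Future<T>> futures = new ArrayList<Future<T>>(tasks.size());
        for (Callable<T> task : tasks) {
            futures.add(completionService.submit(task));
        }
        List<T> results = new ArrayList<T>(tasks.size());
        try {
            for (int i = 0; i < tasks.size(); i++) {
                // take() blocks for the next finished task, in completion order
                results.add(completionService.take().get());
            }
            return results;
        } catch (Exception e) {
            // first failure (or interrupt): stop all outstanding calls
            for (Future<T> future : futures) {
                future.cancel(true);
            }
            throw e;
        }
    }
}

Unlike the otter method, this sketch collects results in completion order; the snippet above re-reads the futures list on success precisely to preserve submission order.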
use of java.util.concurrent.ExecutorCompletionService in project ats-framework by Axway.
the class TestWorker method positiveTest.
@Test
public void positiveTest() {

    // create the threads manager
    this.threadsManager = new ThreadsManager();

    // create all the threads
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setKeepAliveTime(0, TimeUnit.SECONDS);
    ExecutorCompletionService<Object> executionService = new ExecutorCompletionService<Object>(executor);
    IterationListener listener = new IterationListener();
    msg(log, "Ask Java to create all threads");
    for (int j = 0; j < N_THREADS; j++) {
        executionService.submit(new TestWorker(N_ITERATIONS, threadsManager, listener), null);
    }

    // run all iterations
    for (int i = 0; i < N_ITERATIONS; i++) {
        msg(log, "ITERATION " + i + "\n\n");
        runThisIteration();
        waitForIterationCompletion();
    }

    // it may take a little while until all threads are gone
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        // ignore: we are only giving the pool threads time to exit
    }

    // check if there are any remaining threads started by this test
    checkAllRemainingThreads();
}
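The final Thread.sleep(1000) simply gives the pool threads (keep-alive zero) time to die before checkAllRemainingThreads() runs. Where the workers' own completion is what matters, the completion service itself can serve as a deterministic barrier: one take() per submitted task. A sketch under assumed names (N_THREADS and the empty worker body are stand-ins for the test's own fields):

import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CompletionBarrier {

    static final int N_THREADS = 4; // stand-in for the test's constant

    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
        executor.setKeepAliveTime(0, TimeUnit.SECONDS);
        ExecutorCompletionService<Object> service = new ExecutorCompletionService<Object>(executor);
        for (int i = 0; i < N_THREADS; i++) {
            // Runnable plus fixed result: the same submit overload the test uses
            service.submit(() -> { /* worker body */ }, null);
        }
        for (int i = 0; i < N_THREADS; i++) {
            service.take(); // blocks until one more worker has finished
        }
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS); // pool threads are gone
    }
}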
use of java.util.concurrent.ExecutorCompletionService in project ats-framework by Axway.
the class RampUpQueueLoader method scheduleThreads.
@Override
public synchronized void scheduleThreads(String caller, boolean isUseSynchronizedIterations)
        throws ActionExecutionException, ActionTaskLoaderException, NoSuchComponentException,
        NoSuchActionException, NoCompatibleMethodFoundException, ThreadingPatternNotSupportedException {

    // check the state first
    if (state != ActionTaskLoaderState.NOT_STARTED) {
        throw new ActionTaskLoaderException("Cannot schedule load queue " + queueName + " - it has already been scheduled");
    }

    // create the executor - terminate threads when finished
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setKeepAliveTime(0, TimeUnit.SECONDS);
    ExecutorCompletionService<Object> executionService = new ExecutorCompletionService<Object>(executor);

    // synchronization aids
    threadsManagers = new ArrayList<ThreadsManager>();

    // create the thread for managing max iteration length
    int iterationTimeout = startPattern.getIterationTimeout();
    if (iterationTimeout > 0) {
        itManager = new IterationTimeoutManager(iterationTimeout);
    }
    taskFutures = new ArrayList<Future<Object>>();
    for (int i = 0; i < numThreadGroups; i++) {

        // create the thread iterations manager for this group
        ThreadsManager threadsManager = new ThreadsManager();
        int numThreadsInGroup = (i != numThreadGroups - 1) ? numThreadsPerStep : numThreadsInLastGroup;

        // distribute executions per time frame according to the number of threads
        int executionsPerTimeFrame = executionPattern.getExecutionsPerTimeFrame();
        int[] executionsPerTimeFramePerThread = new EvenLoadDistributingUtils().getEvenLoad(executionsPerTimeFrame, numThreads);
        if (numThreads > executionsPerTimeFrame && executionsPerTimeFrame > 0) {
            throw new ActionTaskLoaderException("We cannot evenly distribute " + executionsPerTimeFrame
                    + " iterations per time frame to " + numThreads
                    + " threads. Iterations per time frame must be at least as many as threads!");
        }
        for (int j = 0; j < numThreadsInGroup; j++) {
            Future<Object> taskFuture = executionService.submit(
                    ActionTaskFactory.createTask(caller, queueName, executionPattern,
                            executionsPerTimeFramePerThread[j], threadsManager, itManager,
                            actionRequests, parameterDataProviders, defaultTaskListeners,
                            isUseSynchronizedIterations), null);
            taskFutures.add(taskFuture);
        }
        threadsManagers.add(threadsManager);
    }
    state = ActionTaskLoaderState.SCHEDULED;
}
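getEvenLoad(...) presumably splits executionsPerTimeFrame across numThreads so the per-thread counts differ by at most one, which is why the subsequent check rejects patterns with more threads than executions per time frame. A sketch of that kind of split (illustrative only, not the actual EvenLoadDistributingUtils implementation):

public class EvenLoad {

    // Split 'total' units across 'buckets' so counts differ by at most one,
    // e.g. 10 across 4 -> [3, 3, 2, 2]. Sketch only, not the ATS class.
    static int[] evenLoad(int total, int buckets) {
        int[] load = new int[buckets];
        int base = total / buckets;
        int remainder = total % buckets;
        for (int i = 0; i < buckets; i++) {
            load[i] = base + (i < remainder ? 1 : 0);
        }
        return load;
    }
}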
use of java.util.concurrent.ExecutorCompletionService in project hadoop by apache.
the class TestProtoBufRpcServerHandoff method test.
@Test(timeout = 20000)
public void test() throws Exception {
    Configuration conf = new Configuration();
    TestProtoBufRpcServerHandoffServer serverImpl = new TestProtoBufRpcServerHandoffServer();
    BlockingService blockingService = TestProtobufRpcHandoffProto.newReflectiveBlockingService(serverImpl);
    RPC.setProtocolEngine(conf, TestProtoBufRpcServerHandoffProtocol.class, ProtobufRpcEngine.class);
    // Num Handlers explicitly set to 1 for test.
    RPC.Server server = new RPC.Builder(conf)
            .setProtocol(TestProtoBufRpcServerHandoffProtocol.class)
            .setInstance(blockingService)
            .setVerbose(true)
            .setNumHandlers(1)
            .build();
    server.start();
    InetSocketAddress address = server.getListenerAddress();
    long serverStartTime = System.currentTimeMillis();
    LOG.info("Server started at: " + address + " at time: " + serverStartTime);
    final TestProtoBufRpcServerHandoffProtocol client = RPC.getProxy(TestProtoBufRpcServerHandoffProtocol.class, 1, address, conf);
    ExecutorService executorService = Executors.newFixedThreadPool(2);
    CompletionService<ClientInvocationCallable> completionService = new ExecutorCompletionService<ClientInvocationCallable>(executorService);
    completionService.submit(new ClientInvocationCallable(client, 5000l));
    completionService.submit(new ClientInvocationCallable(client, 5000l));
    long submitTime = System.currentTimeMillis();
    Future<ClientInvocationCallable> future1 = completionService.take();
    Future<ClientInvocationCallable> future2 = completionService.take();
    ClientInvocationCallable callable1 = future1.get();
    ClientInvocationCallable callable2 = future2.get();
    LOG.info(callable1);
    LOG.info(callable2);
    // Ensure the 5 second sleep responses are within a reasonable time of each other.
    Assert.assertTrue(Math.abs(callable1.endTime - callable2.endTime) < 2000l);
    Assert.assertTrue(System.currentTimeMillis() - submitTime < 7000l);
}
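The two take() calls block until the two in-flight RPCs finish; because a CompletionService hands futures back in completion order, the assertions can compare end times without knowing which call returned first. A tiny standalone demonstration of that ordering (nothing Hadoop-specific):

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionOrderDemo {

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletionService<String> service = new ExecutorCompletionService<String>(pool);
        service.submit(() -> { Thread.sleep(2000); return "slow"; }); // submitted first
        service.submit(() -> { Thread.sleep(200); return "fast"; });  // submitted second
        System.out.println(service.take().get()); // prints "fast"
        System.out.println(service.take().get()); // prints "slow"
        pool.shutdown();
    }
}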
use of java.util.concurrent.ExecutorCompletionService in project hadoop by apache.
the class DFSInputStream method hedgedFetchBlockByteRange.
/**
 * Like {@link #fetchBlockByteRange} except we start up a second, parallel,
 * 'hedged' read if the first read is taking longer than the configured amount
 * of time. We then wait on whichever read returns first.
 */
private void hedgedFetchBlockByteRange(LocatedBlock block, long start, long end,
        ByteBuffer buf, CorruptedBlocks corruptedBlocks) throws IOException {
    final DfsClientConf conf = dfsClient.getConf();
    ArrayList<Future<ByteBuffer>> futures = new ArrayList<>();
    CompletionService<ByteBuffer> hedgedService = new ExecutorCompletionService<>(dfsClient.getHedgedReadsThreadPool());
    ArrayList<DatanodeInfo> ignored = new ArrayList<>();
    ByteBuffer bb;
    int len = (int) (end - start + 1);
    int hedgedReadId = 0;
    block = refreshLocatedBlock(block);
    while (true) {
        // see HDFS-6591, this metric is used to verify/catch unnecessary loops
        hedgedReadOpsLoopNumForTesting++;
        DNAddrPair chosenNode = null;
        // there is no request already executing.
        if (futures.isEmpty()) {
            // chooseDataNode is a commitment. If no node, we go to
            // the NN to reget block locations. Only go here on first read.
            chosenNode = chooseDataNode(block, ignored);
            bb = ByteBuffer.allocate(len);
            Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end, bb, corruptedBlocks, hedgedReadId++);
            Future<ByteBuffer> firstRequest = hedgedService.submit(getFromDataNodeCallable);
            futures.add(firstRequest);
            try {
                Future<ByteBuffer> future = hedgedService.poll(conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
                if (future != null) {
                    ByteBuffer result = future.get();
                    result.flip();
                    buf.put(result);
                    return;
                }
                DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged read",
                        conf.getHedgedReadThresholdMillis(), chosenNode.info);
                // Ignore this node on next go around.
                ignored.add(chosenNode.info);
                dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                // continue; no need to refresh block locations
            } catch (ExecutionException e) {
                // Ignore
            } catch (InterruptedException e) {
                throw new InterruptedIOException("Interrupted while waiting for reading task");
            }
        } else {
            // We are starting up a 'hedged' read: one read is already ongoing.
            // If no nodes to do hedged reads against, pass.
            try {
                chosenNode = getBestNodeDNAddrPair(block, ignored);
                if (chosenNode == null) {
                    chosenNode = chooseDataNode(block, ignored);
                }
                bb = ByteBuffer.allocate(len);
                Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end, bb, corruptedBlocks, hedgedReadId++);
                Future<ByteBuffer> oneMoreRequest = hedgedService.submit(getFromDataNodeCallable);
                futures.add(oneMoreRequest);
            } catch (IOException ioe) {
                DFSClient.LOG.debug("Failed getting node for hedged read: {}", ioe.getMessage());
            }
            // Wait for one of the outstanding requests to complete
            // for a fixed interval and get the result from the fastest one.
            try {
                ByteBuffer result = getFirstToComplete(hedgedService, futures);
                // cancel the rest.
                cancelAll(futures);
                dfsClient.getHedgedReadMetrics().incHedgedReadWins();
                result.flip();
                buf.put(result);
                return;
            } catch (InterruptedException ie) {
                // Ignore and retry
            }
            // We got here on exception. Ignore this node on the next go-around if
            // we found a chosenNode to hedge read against.
            if (chosenNode != null && chosenNode.info != null) {
                ignored.add(chosenNode.info);
            }
        }
    }
}
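Stripped of the HDFS plumbing, the hedging loop reduces to: submit the primary request, poll with a threshold, submit a backup against a different resource if the threshold expires, then take whichever finishes first and cancel the rest. A generic sketch with made-up names (not the HDFS implementation):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class HedgedRequest {

    // Run 'primary'; if it has not finished within 'thresholdMillis',
    // also run 'backup' and return whichever completes first.
    static <T> T hedged(ExecutorService pool, Callable<T> primary, Callable<T> backup,
                        long thresholdMillis) throws InterruptedException, ExecutionException {
        CompletionService<T> service = new ExecutorCompletionService<T>(pool);
        List<Future<T>> futures = new ArrayList<Future<T>>();
        futures.add(service.submit(primary));
        // give the primary request a head start
        Future<T> done = service.poll(thresholdMillis, TimeUnit.MILLISECONDS);
        if (done == null) {
            futures.add(service.submit(backup)); // primary is slow: hedge
            done = service.take();               // first of the two to finish
        }
        try {
            return done.get();
        } finally {
            for (Future<T> future : futures) {
                future.cancel(true); // stop the loser; a no-op on the winner
            }
        }
    }
}

Unlike DFSInputStream, this sketch propagates the first failure instead of looping to retry against further nodes.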