
Example 46 with ExecutorCompletionService

use of java.util.concurrent.ExecutorCompletionService in project otter by alibaba.

the class DefaultCommunicationClientImpl method call.

public Object call(final String[] addrs, final Event event) {
    Assert.notNull(this.factory, "No factory specified");
    if (addrs == null || addrs.length == 0) {
        throw new IllegalArgumentException("addrs example: 127.0.0.1:1099");
    }
    ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<Object>(executor);
    List<Future<Object>> futures = new ArrayList<Future<Object>>(addrs.length);
    List<Object> result = new ArrayList<Object>(10);
    for (final String addr : addrs) {
        futures.add(completionService.submit(new Callable<Object>() {

            @Override
            public Object call() throws Exception {
                return DefaultCommunicationClientImpl.this.call(addr, event);
            }
        }));
    }
    }
    Exception ex = null;
    int errorIndex = 0;
    while (errorIndex < futures.size()) {
        try {
            // the take() call may also be interrupted while waiting
            Future<Object> future = completionService.take();
            future.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            ex = e;
            break;
        } catch (ExecutionException e) {
            ex = e;
            break;
        }
        errorIndex++;
    }
    if (errorIndex < futures.size()) {
        for (int index = 0; index < futures.size(); index++) {
            Future<Object> future = futures.get(index);
            if (!future.isDone()) {
                future.cancel(true);
            }
        }
    } else {
        for (int index = 0; index < futures.size(); index++) {
            Future<Object> future = futures.get(index);
            try {
                result.add(future.get());
            } catch (InterruptedException e) {
                // ignore
                Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
                // ignore
            }
        }
    }
    if (ex != null) {
        throw new CommunicationException(String.format("call addr[%s] error by %s", addrs[errorIndex], ex.getMessage()), ex);
    } else {
        return result;
    }
}
Also used : CommunicationException(com.alibaba.otter.shared.communication.core.exception.CommunicationException) ArrayList(java.util.ArrayList) List(java.util.List) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) Future(java.util.concurrent.Future) ExecutionException(java.util.concurrent.ExecutionException) UnknownHostException(java.net.UnknownHostException)
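
The otter snippet boils down to a general fan-out idiom: submit one Callable per target, take() the completed futures one by one, and cancel whatever is still pending as soon as a failure appears. Below is a minimal self-contained sketch of that idiom; the class name, addresses and the fake "remote call" are invented for illustration, only the java.util.concurrent calls are real.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FailFastFanOut {

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        ExecutorCompletionService<String> completionService = new ExecutorCompletionService<String>(executor);

        // hypothetical targets; in the otter example these are "host:port" addresses
        String[] addrs = { "10.0.0.1:1099", "10.0.0.2:1099", "10.0.0.3:1099" };

        List<Future<String>> futures = new ArrayList<Future<String>>(addrs.length);
        for (final String addr : addrs) {
            futures.add(completionService.submit(new Callable<String>() {

                @Override
                public String call() throws Exception {
                    // stand-in for the real remote call, e.g. call(addr, event)
                    return "response from " + addr;
                }
            }));
        }

        List<String> results = new ArrayList<String>(addrs.length);
        boolean failed = false;
        for (int i = 0; i < futures.size(); i++) {
            try {
                // take() blocks until the next task finishes, in completion order
                results.add(completionService.take().get());
            } catch (ExecutionException e) {
                failed = true;
                break;
            }
        }
        if (failed) {
            // fail fast: interrupt everything that has not finished yet
            for (Future<String> future : futures) {
                if (!future.isDone()) {
                    future.cancel(true);
                }
            }
        }
        executor.shutdown();
        System.out.println(results);
    }
}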

Example 47 with ExecutorCompletionService

use of java.util.concurrent.ExecutorCompletionService in project ats-framework by Axway.

the class TestWorker method positiveTest.

@Test
public void positiveTest() {
    // create the threads manager
    this.threadsManager = new ThreadsManager();
    // create all the threads
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setKeepAliveTime(0, TimeUnit.SECONDS);
    ExecutorCompletionService<Object> executionService = new ExecutorCompletionService<Object>(executor);
    IterationListener listener = new IterationListener();
    msg(log, "Ask Java to create all threads");
    for (int j = 0; j < N_THREADS; j++) {
        executionService.submit(new TestWorker(N_ITERATIONS, threadsManager, listener), null);
    }
    // run all iterations
    for (int i = 0; i < N_ITERATIONS; i++) {
        msg(log, "ITERATION " + i + "\n\n");
        runThisIteration();
        waitForIterationCompletion();
    }
    // it may take a little while for all the threads to finish
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        // ignore, but restore the interrupt status
        Thread.currentThread().interrupt();
    }
    // check if there are any remaining threads started by this test
    checkAllRemainingThreads();
}
Also used : ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) Test(org.junit.Test) BaseTest(com.axway.ats.agent.core.BaseTest)
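
Unlike the previous examples, this test feeds the completion service Runnables rather than Callables: submit(Runnable, V) wraps the task so that its Future completes with the supplied value (here null) once run() returns. A minimal sketch of that usage, with invented worker names, could look like this:

import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class RunnableCompletionDemo {

    public static void main(String[] args) throws InterruptedException {
        // cached pool whose idle threads die immediately, as in the ATS test above
        ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
        executor.setKeepAliveTime(0, TimeUnit.SECONDS);
        ExecutorCompletionService<Object> executionService = new ExecutorCompletionService<Object>(executor);

        final int nThreads = 4;
        for (int i = 0; i < nThreads; i++) {
            final int workerId = i;
            // submit(Runnable, result): the Future will yield the given result (null here)
            executionService.submit(new Runnable() {

                @Override
                public void run() {
                    System.out.println("worker " + workerId + " running on " + Thread.currentThread().getName());
                }
            }, null);
        }

        // draining the completion queue is one simple way to know all workers are done
        for (int i = 0; i < nThreads; i++) {
            executionService.take();
        }
        executor.shutdown();
    }
}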

Example 48 with ExecutorCompletionService

use of java.util.concurrent.ExecutorCompletionService in project ats-framework by Axway.

the class RampUpQueueLoader method scheduleThreads.

@Override
public synchronized void scheduleThreads(String caller, boolean isUseSynchronizedIterations) throws ActionExecutionException, ActionTaskLoaderException, NoSuchComponentException, NoSuchActionException, NoCompatibleMethodFoundException, ThreadingPatternNotSupportedException {
    //check the state first
    if (state != ActionTaskLoaderState.NOT_STARTED) {
        throw new ActionTaskLoaderException("Cannot schedule load queue " + queueName + " - it has already been scheduled");
    }
    //create the executor - terminate threads when finished
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setKeepAliveTime(0, TimeUnit.SECONDS);
    ExecutorCompletionService<Object> executionService = new ExecutorCompletionService<Object>(executor);
    //synchronization aids
    threadsManagers = new ArrayList<ThreadsManager>();
    // create the thread for managing max iteration length
    int iterationTimeout = startPattern.getIterationTimeout();
    if (iterationTimeout > 0) {
        itManager = new IterationTimeoutManager(iterationTimeout);
    }
    taskFutures = new ArrayList<Future<Object>>();
    for (int i = 0; i < numThreadGroups; i++) {
        //create the thread iterations manager for this group
        ThreadsManager threadsManager = new ThreadsManager();
        int numThreadsInGroup = (i != numThreadGroups - 1) ? numThreadsPerStep : numThreadsInLastGroup;
        //distribute executions per timeFrame according to the number of threads
        int executionsPerTimeFrame = executionPattern.getExecutionsPerTimeFrame();
        int[] executionsPerTimeFramePerThread = new EvenLoadDistributingUtils().getEvenLoad(executionsPerTimeFrame, numThreads);
        if (numThreads > executionsPerTimeFrame && executionsPerTimeFrame > 0) {
            throw new ActionTaskLoaderException("We cannot evenly distribute " + executionsPerTimeFrame + " iterations per time frame to " + numThreads + " threads. Iterations per time frame must be at least as many as threads!");
        }
        for (int j = 0; j < numThreadsInGroup; j++) {
            Future<Object> taskFuture = executionService.submit(ActionTaskFactory.createTask(caller, queueName, executionPattern, executionsPerTimeFramePerThread[j], threadsManager, itManager, actionRequests, parameterDataProviders, defaultTaskListeners, isUseSynchronizedIterations), null);
            taskFutures.add(taskFuture);
        }
        threadsManagers.add(threadsManager);
    }
    state = ActionTaskLoaderState.SCHEDULED;
}
Also used : ActionTaskLoaderException(com.axway.ats.agent.core.threading.exceptions.ActionTaskLoaderException) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) Future(java.util.concurrent.Future) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) EvenLoadDistributingUtils(com.axway.ats.agent.core.model.EvenLoadDistributingUtils)
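
The distribution step above leans on EvenLoadDistributingUtils.getEvenLoad to split the executions allowed per time frame across the worker threads. As a rough illustration only (this is an assumption about the kind of split performed, not the actual ATS implementation), an even split could be computed like this:

public class EvenSplitDemo {

    /**
     * Splits 'total' executions as evenly as possible across 'buckets' threads.
     * Hypothetical stand-in for EvenLoadDistributingUtils.getEvenLoad; the real
     * ATS code may behave differently, e.g. when total is smaller than buckets.
     */
    static int[] evenSplit(int total, int buckets) {
        int[] load = new int[buckets];
        int base = total / buckets;
        int remainder = total % buckets;
        for (int i = 0; i < buckets; i++) {
            // the first 'remainder' buckets receive one extra execution
            load[i] = base + (i < remainder ? 1 : 0);
        }
        return load;
    }

    public static void main(String[] args) {
        // 10 executions over 4 threads -> [3, 3, 2, 2]
        System.out.println(java.util.Arrays.toString(evenSplit(10, 4)));
    }
}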

Example 49 with ExecutorCompletionService

use of java.util.concurrent.ExecutorCompletionService in project hadoop by apache.

the class TestProtoBufRpcServerHandoff method test.

@Test(timeout = 20000)
public void test() throws Exception {
    Configuration conf = new Configuration();
    TestProtoBufRpcServerHandoffServer serverImpl = new TestProtoBufRpcServerHandoffServer();
    BlockingService blockingService = TestProtobufRpcHandoffProto.newReflectiveBlockingService(serverImpl);
    RPC.setProtocolEngine(conf, TestProtoBufRpcServerHandoffProtocol.class, ProtobufRpcEngine.class);
    // Num Handlers explicitly set to 1 for test.
    RPC.Server server = new RPC.Builder(conf).setProtocol(TestProtoBufRpcServerHandoffProtocol.class).setInstance(blockingService).setVerbose(true).setNumHandlers(1).build();
    server.start();
    InetSocketAddress address = server.getListenerAddress();
    long serverStartTime = System.currentTimeMillis();
    LOG.info("Server started at: " + address + " at time: " + serverStartTime);
    final TestProtoBufRpcServerHandoffProtocol client = RPC.getProxy(TestProtoBufRpcServerHandoffProtocol.class, 1, address, conf);
    ExecutorService executorService = Executors.newFixedThreadPool(2);
    CompletionService<ClientInvocationCallable> completionService = new ExecutorCompletionService<ClientInvocationCallable>(executorService);
    completionService.submit(new ClientInvocationCallable(client, 5000L));
    completionService.submit(new ClientInvocationCallable(client, 5000L));
    long submitTime = System.currentTimeMillis();
    Future<ClientInvocationCallable> future1 = completionService.take();
    Future<ClientInvocationCallable> future2 = completionService.take();
    ClientInvocationCallable callable1 = future1.get();
    ClientInvocationCallable callable2 = future2.get();
    LOG.info(callable1);
    LOG.info(callable2);
    // Ensure the 5 second sleep responses are within a reasonable time of each
    // other.
    Assert.assertTrue(Math.abs(callable1.endTime - callable2.endTime) < 2000L);
    Assert.assertTrue(System.currentTimeMillis() - submitTime < 7000L);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) BlockingService(com.google.protobuf.BlockingService) ExecutorService(java.util.concurrent.ExecutorService) Test(org.junit.Test)
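
The test uses the completion service simply to collect two concurrent calls as they finish; take() hands back futures in completion order rather than submission order, which the following small sketch demonstrates with invented task names and timings:

import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionOrderDemo {

    // simple task that sleeps for the given time and then reports its name
    static Callable<String> sleeper(final String name, final long millis) {
        return new Callable<String>() {

            @Override
            public String call() throws Exception {
                Thread.sleep(millis);
                return name + " finished after " + millis + " ms";
            }
        };
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executorService = Executors.newFixedThreadPool(2);
        CompletionService<String> completionService = new ExecutorCompletionService<String>(executorService);

        // submission order: slow first, fast second
        completionService.submit(sleeper("slow", 2000L));
        completionService.submit(sleeper("fast", 500L));

        // take() returns futures in completion order, so "fast" comes out first
        System.out.println(completionService.take().get());
        System.out.println(completionService.take().get());

        executorService.shutdown();
    }
}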

Example 50 with ExecutorCompletionService

use of java.util.concurrent.ExecutorCompletionService in project hadoop by apache.

the class DFSInputStream method hedgedFetchBlockByteRange.

/**
   * Like {@link #fetchBlockByteRange} except we start up a second, parallel,
   * 'hedged' read if the first read is taking longer than the configured amount
   * of time. We then wait on whichever read returns first.
   */
private void hedgedFetchBlockByteRange(LocatedBlock block, long start, long end, ByteBuffer buf, CorruptedBlocks corruptedBlocks) throws IOException {
    final DfsClientConf conf = dfsClient.getConf();
    ArrayList<Future<ByteBuffer>> futures = new ArrayList<>();
    CompletionService<ByteBuffer> hedgedService = new ExecutorCompletionService<>(dfsClient.getHedgedReadsThreadPool());
    ArrayList<DatanodeInfo> ignored = new ArrayList<>();
    ByteBuffer bb;
    int len = (int) (end - start + 1);
    int hedgedReadId = 0;
    block = refreshLocatedBlock(block);
    while (true) {
        // see HDFS-6591, this metric is used to verify/catch unnecessary loops
        hedgedReadOpsLoopNumForTesting++;
        DNAddrPair chosenNode = null;
        // there is no request already executing.
        if (futures.isEmpty()) {
            // chooseDataNode is a commitment. If no node, we go to
            // the NN to reget block locations. Only go here on first read.
            chosenNode = chooseDataNode(block, ignored);
            bb = ByteBuffer.allocate(len);
            Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end, bb, corruptedBlocks, hedgedReadId++);
            Future<ByteBuffer> firstRequest = hedgedService.submit(getFromDataNodeCallable);
            futures.add(firstRequest);
            try {
                Future<ByteBuffer> future = hedgedService.poll(conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
                if (future != null) {
                    ByteBuffer result = future.get();
                    result.flip();
                    buf.put(result);
                    return;
                }
                DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged " + "read", conf.getHedgedReadThresholdMillis(), chosenNode.info);
                // Ignore this node on next go around.
                ignored.add(chosenNode.info);
                dfsClient.getHedgedReadMetrics().incHedgedReadOps();
            // continue; no need to refresh block locations
            } catch (ExecutionException e) {
            // Ignore
            } catch (InterruptedException e) {
                throw new InterruptedIOException("Interrupted while waiting for reading task");
            }
        } else {
            // If no nodes to do hedged reads against, pass.
            try {
                chosenNode = getBestNodeDNAddrPair(block, ignored);
                if (chosenNode == null) {
                    chosenNode = chooseDataNode(block, ignored);
                }
                bb = ByteBuffer.allocate(len);
                Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end, bb, corruptedBlocks, hedgedReadId++);
                Future<ByteBuffer> oneMoreRequest = hedgedService.submit(getFromDataNodeCallable);
                futures.add(oneMoreRequest);
            } catch (IOException ioe) {
                DFSClient.LOG.debug("Failed getting node for hedged read: {}", ioe.getMessage());
            }
            // wait for a fixed interval and take the result from the fastest
            // of the outstanding reads.
            try {
                ByteBuffer result = getFirstToComplete(hedgedService, futures);
                // cancel the rest.
                cancelAll(futures);
                dfsClient.getHedgedReadMetrics().incHedgedReadWins();
                result.flip();
                buf.put(result);
                return;
            } catch (InterruptedException ie) {
            // Ignore and retry
            }
            // we found a chosenNode to hedge read against.
            if (chosenNode != null && chosenNode.info != null) {
                ignored.add(chosenNode.info);
            }
        }
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) ArrayList(java.util.ArrayList) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) DfsClientConf(org.apache.hadoop.hdfs.client.impl.DfsClientConf) Future(java.util.concurrent.Future) ExecutionException(java.util.concurrent.ExecutionException)
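
The hedging idiom itself is independent of HDFS: poll with a timeout on the first request, spawn a second request only when the threshold expires, then take whichever finishes first and cancel the rest. A compact self-contained sketch of that pattern follows; the node names, latencies and the readFrom helper are invented, only the CompletionService calls mirror the code above.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class HedgedRequestDemo {

    // hypothetical stand-in for a read against one replica
    static Callable<String> readFrom(final String node, final long latencyMillis) {
        return new Callable<String>() {

            @Override
            public String call() throws Exception {
                Thread.sleep(latencyMillis);
                return "data from " + node;
            }
        };
    }

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletionService<String> hedgedService = new ExecutorCompletionService<String>(pool);
        List<Future<String>> futures = new ArrayList<Future<String>>();
        long hedgedReadThresholdMillis = 100L;

        // first request goes to a (pretend) slow replica
        futures.add(hedgedService.submit(readFrom("node-A", 1000L)));

        // wait only up to the threshold for the first request to finish
        Future<String> first = hedgedService.poll(hedgedReadThresholdMillis, TimeUnit.MILLISECONDS);
        String result;
        if (first != null) {
            result = first.get();
        } else {
            // threshold expired: spawn a hedged read against another replica
            futures.add(hedgedService.submit(readFrom("node-B", 50L)));
            // take() returns whichever of the outstanding reads completes first
            result = hedgedService.take().get();
            // cancel the loser so it stops consuming resources
            for (Future<String> future : futures) {
                if (!future.isDone()) {
                    future.cancel(true);
                }
            }
        }
        System.out.println(result);
        pool.shutdown();
    }
}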

Aggregations

ExecutorCompletionService (java.util.concurrent.ExecutorCompletionService)69 ArrayList (java.util.ArrayList)31 ExecutorService (java.util.concurrent.ExecutorService)30 ExecutionException (java.util.concurrent.ExecutionException)27 IOException (java.io.IOException)24 Test (org.junit.Test)21 Future (java.util.concurrent.Future)18 List (java.util.List)10 InterruptedIOException (java.io.InterruptedIOException)9 Path (org.apache.hadoop.fs.Path)8 KieSession (org.kie.api.runtime.KieSession)8 Callable (java.util.concurrent.Callable)7 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)7 ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor)6 EntryPoint (org.kie.api.runtime.rule.EntryPoint)6 HashMap (java.util.HashMap)4 Executor (java.util.concurrent.Executor)4 TimeoutException (java.util.concurrent.TimeoutException)4 KieBase (org.kie.api.KieBase)4 Random (java.util.Random)3