
Example 16 with ExecutorCompletionService

use of java.util.concurrent.ExecutorCompletionService in project GNS by MobilityFirst.

the class LookupWorker method generateReply.

/**
   * Queries DNS and/or GNS servers for DNS records.
   *
   * Note: a null return value means that the caller doesn't need to do
   * anything. Currently this only happens if this is an AXFR request over TCP.
   */
private Message generateReply(Message query) {
    long startTime = System.currentTimeMillis();
    NameResolution.getLogger().log(Level.FINE, "Incoming request:\n {0}", query.toString());
    // If it's not a query we just ignore it.
    if (query.getHeader().getFlag(Flags.QR)) {
        return null;
    }
    long checkStart = System.currentTimeMillis();
    // Check for weird queries we can't handle.
    Message errorMessage;
    if ((errorMessage = NameResolution.checkForErroneousQueries(query)) != null) {
        return errorMessage;
    }
    DelayProfiler.updateDelay("checkForErroneousQueries", checkStart);
    // If we're not consulting the DNS server as well, just send the query to GNS.
    if (dnsServer == null) {
        Message result = NameResolution.lookupGnsServer(incomingPacket.getAddress(), query, handler);
        DelayProfiler.updateDelay("generateReply", startTime);
        return result;
    }
    // Otherwise, as a first step before performing the GNS/DNS lookup, we check our own local cache.
    if (dnsCache != null) {
        Message tempQuery = (Message) query.clone();
        Message result = NameResolution.lookupDnsCache(tempQuery, dnsCache);
        if (result.getHeader().getRcode() == Rcode.NOERROR) {
            NameResolution.getLogger().log(Level.FINE, "Responding the request from cache {0}", NameResolution.queryAndResponseToString(query, result));
            return result;
        }
    }
    // Create a clone of the query for duplicating the request to GNS and DNS.
    Message dnsQuery = (Message) query.clone();
    List<LookupTask> tasks;
    if (gnsServer == null) {
        // We make two tasks to check the DNS and GNS in parallel.
        tasks = Arrays.asList(
                // Create GNS lookup task
                new LookupTask(query, handler),
                // Create DNS lookup task
                new LookupTask(dnsQuery, dnsServer, handler));
    } else {
        tasks = Arrays.asList(
                // Create GNS lookup task
                new LookupTask(query, gnsServer, true /* isGNS */, handler),
                // Create DNS lookup task
                new LookupTask(dnsQuery, dnsServer, false /* isGNS */, handler));
    }
    // A little bit of overkill for two tasks, but it's really not that much longer (if any) than
    // the alternative. Plus it's cool and trendy to use futures.
    ExecutorService executor = Executors.newFixedThreadPool(2);
    ExecutorCompletionService<Message> completionService = new ExecutorCompletionService<>(executor);
    List<Future<Message>> futures = new ArrayList<>(2);
    for (Callable<Message> task : tasks) {
        futures.add(completionService.submit(task));
    }
    Message successResponse = null;
    Message errorResponse = null;
    // Loop through the tasks, getting results as they complete.
    for (LookupTask task : tasks) {
        // The loop variable isn't used; the loop is just a way of doing the take() once per submitted task.
        try {
            Message result = completionService.take().get();
            if (result.getHeader().getRcode() == Rcode.NOERROR) {
                successResponse = result;
                break;
            } else {
                // squirrel this away for later in case we get no successes
                errorResponse = result;
            }
        } catch (ExecutionException e) {
            NameResolution.getLogger().log(Level.WARNING, "Problem handling lookup task: {0}", e);
        } catch (InterruptedException e) {
            NameResolution.getLogger().log(Level.WARNING, "Lookup task interrupted: {0}", e);
        }
    }
    // Shut down the executor thread pool.
    executor.shutdown();
    if (successResponse != null) {
        // Cache the successful response
        try {
            SetResponse addMsgResponse = dnsCache.addMessage(successResponse);
            if (!addMsgResponse.isSuccessful()) {
                RRset[] answers = successResponse.getSectionRRsets(Section.ANSWER);
                boolean isAuth = successResponse.getHeader().getFlag(Flags.AA);
                int qClass = successResponse.getQuestion().getDClass();
                for (int i = 0; i < answers.length; i++) {
                    if (answers[i].getDClass() != qClass) {
                        continue;
                    }
                    int cred = getCred(Section.ANSWER, isAuth);
                    dnsCache.addRRset(answers[i], cred);
                    NameResolution.getLogger().log(Level.FINE, "Records added to cache {0}", answers[i].toString());
                }
            }
        } catch (NullPointerException e) {
            NameResolution.getLogger().log(Level.WARNING, "Failed to add a dns response to cache {0}", e);
        }
        return successResponse;
    } else if (errorResponse != null) {
        // currently this is returning the second error response... do we care?
        return errorResponse;
    } else {
        return NameResolution.errorMessage(query, Rcode.NXDOMAIN);
    }
}
Also used : Message(org.xbill.DNS.Message) ArrayList(java.util.ArrayList) RRset(org.xbill.DNS.RRset) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) SetResponse(org.xbill.DNS.SetResponse) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) ExecutionException(java.util.concurrent.ExecutionException)
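
Distilled from the example above, a minimal self-contained sketch of the same pattern: submit both lookups to an ExecutorCompletionService, take() results in completion order, and stop at the first success. The sleeping Callables and the "SUCCESS"/"ERROR" strings are placeholders of mine, standing in for the real LookupTask and the Rcode check on Message; this is not GNS code.

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class FirstSuccessSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        CompletionService<String> lookups = new ExecutorCompletionService<>(executor);
        // Stand-ins for the GNS and DNS lookup tasks.
        lookups.submit(() -> { Thread.sleep(50); return "SUCCESS:gns"; });
        lookups.submit(() -> { Thread.sleep(10); return "ERROR:dns"; });
        String success = null;
        String error = null;
        for (int i = 0; i < 2; i++) {
            try {
                // take() hands back whichever lookup finishes next, not submission order.
                String result = lookups.take().get();
                if (result.startsWith("SUCCESS")) {
                    success = result;  // first success wins; no need to wait for the other lookup
                    break;
                }
                error = result;        // remember a failure in case neither lookup succeeds
            } catch (ExecutionException e) {
                // a lookup threw; log it and keep waiting for the remaining one
            }
        }
        executor.shutdown();
        System.out.println(success != null ? success : error);
    }
}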

Example 17 with ExecutorCompletionService

use of java.util.concurrent.ExecutorCompletionService in project lucene-solr by apache.

the class IndexSortedFacetCollector method getFacetCounts.

NamedList<Integer> getFacetCounts(Executor executor) throws IOException {
    CompletionService<SegFacet> completionService = new ExecutorCompletionService<>(executor);
    // reuse the translation logic to go from top level set to per-segment set
    baseSet = docs.getTopFilter();
    final List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();
    // The list of pending tasks that aren't immediately submitted
    // TODO: Is there a completion service, or a delegating executor that can
    // limit the number of concurrent tasks submitted to a bigger executor?
    LinkedList<Callable<SegFacet>> pending = new LinkedList<>();
    int threads = nThreads <= 0 ? Integer.MAX_VALUE : nThreads;
    for (final LeafReaderContext leave : leaves) {
        final SegFacet segFacet = new SegFacet(leave);
        Callable<SegFacet> task = () -> {
            segFacet.countTerms();
            return segFacet;
        };
        if (--threads >= 0) {
            completionService.submit(task);
        } else {
            pending.add(task);
        }
    }
    // now merge the per-segment results
    PriorityQueue<SegFacet> queue = new PriorityQueue<SegFacet>(leaves.size()) {

        @Override
        protected boolean lessThan(SegFacet a, SegFacet b) {
            return a.tempBR.compareTo(b.tempBR) < 0;
        }
    };
    boolean hasMissingCount = false;
    int missingCount = 0;
    for (int i = 0, c = leaves.size(); i < c; i++) {
        SegFacet seg = null;
        try {
            Future<SegFacet> future = completionService.take();
            seg = future.get();
            if (!pending.isEmpty()) {
                completionService.submit(pending.removeFirst());
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof RuntimeException) {
                throw (RuntimeException) cause;
            } else {
                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error in per-segment faceting on field: " + fieldName, cause);
            }
        }
        if (seg.startTermIndex < seg.endTermIndex) {
            if (seg.startTermIndex == -1) {
                hasMissingCount = true;
                missingCount += seg.counts[0];
                seg.pos = 0;
            } else {
                seg.pos = seg.startTermIndex;
            }
            if (seg.pos < seg.endTermIndex && (mincount < 1 || seg.hasAnyCount)) {
                seg.tenum = seg.si.termsEnum();
                seg.tenum.seekExact(seg.pos);
                seg.tempBR = seg.tenum.term();
                queue.add(seg);
            }
        }
    }
    FacetCollector collector;
    if (sort.equals(FacetParams.FACET_SORT_COUNT) || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY)) {
        collector = new CountSortedFacetCollector(offset, limit, mincount);
    } else {
        collector = new IndexSortedFacetCollector(offset, limit, mincount);
    }
    BytesRefBuilder val = new BytesRefBuilder();
    while (queue.size() > 0) {
        SegFacet seg = queue.top();
        boolean collect = termFilter == null || termFilter.test(seg.tempBR);
        // copy the bytes below, since the BytesRef returned by the term enum may be shared across calls.
        if (collect) {
            val.copyBytes(seg.tempBR);
        }
        int count = 0;
        do {
            if (collect) {
                count += seg.counts[seg.pos - seg.startTermIndex];
            }
            // if mincount>0 then seg.pos++ can skip ahead to the next non-zero entry.
            do {
                ++seg.pos;
            } while (
                    // stop incrementing before we run off the end
                    (seg.pos < seg.endTermIndex) &&
                    // move the term enum forward in step with the position -- we don't care about the value
                    (seg.tenum.next() != null || true) &&
                    // only skip ahead if mincount > 0
                    (mincount > 0) &&
                    // check for a zero count
                    (seg.counts[seg.pos - seg.startTermIndex] == 0));
            if (seg.pos >= seg.endTermIndex) {
                queue.pop();
                seg = queue.top();
            } else {
                seg.tempBR = seg.tenum.term();
                seg = queue.updateTop();
            }
        } while (seg != null && val.get().compareTo(seg.tempBR) == 0);
        if (collect) {
            boolean stop = collector.collect(val.get(), count);
            if (stop)
                break;
        }
    }
    NamedList<Integer> res = collector.getFacetCounts();
    // convert labels to readable form    
    FieldType ft = searcher.getSchema().getFieldType(fieldName);
    int sz = res.size();
    for (int i = 0; i < sz; i++) {
        res.setName(i, ft.indexedToReadable(res.getName(i)));
    }
    if (missing) {
        if (!hasMissingCount) {
            missingCount = SimpleFacets.getFieldMissingCount(searcher, docs, fieldName);
        }
        res.add(null, missingCount);
    }
    return res;
}
Also used : ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) Callable(java.util.concurrent.Callable) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) ExecutionException(java.util.concurrent.ExecutionException) SolrException(org.apache.solr.common.SolrException) BytesRefBuilder(org.apache.lucene.util.BytesRefBuilder) PriorityQueue(org.apache.lucene.util.PriorityQueue) LinkedList(java.util.LinkedList) FieldType(org.apache.solr.schema.FieldType)
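
The TODO in the method above asks for a completion service or delegating executor that caps concurrent submissions to a bigger executor; the method solves it by hand with the pending list. Here is a generic sketch of that throttling idiom on its own, with placeholder Integer tasks and names of my choosing (this is not Solr API):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorCompletionService;

public class ThrottledCompletionSketch {
    // Runs all tasks on the shared executor but keeps at most maxInFlight submitted at once:
    // the overflow waits in a local queue and one queued task is submitted per completed task.
    static List<Integer> runThrottled(Executor executor, List<Callable<Integer>> tasks, int maxInFlight)
            throws InterruptedException, ExecutionException {
        CompletionService<Integer> completionService = new ExecutorCompletionService<>(executor);
        Deque<Callable<Integer>> pending = new ArrayDeque<>();
        int budget = maxInFlight <= 0 ? Integer.MAX_VALUE : maxInFlight;
        for (Callable<Integer> task : tasks) {
            if (--budget >= 0) {
                completionService.submit(task);   // fill the initial in-flight window
            } else {
                pending.add(task);                // park the rest locally
            }
        }
        List<Integer> results = new ArrayList<>();
        for (int i = 0; i < tasks.size(); i++) {
            results.add(completionService.take().get());      // one take() per task, in completion order
            if (!pending.isEmpty()) {
                completionService.submit(pending.removeFirst());  // refill the slot just freed
            }
        }
        return results;
    }
}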

Example 18 with ExecutorCompletionService

use of java.util.concurrent.ExecutorCompletionService in project robovm by robovm.

the class SecureRandomTest method testSecureRandomThreadSafety.

public void testSecureRandomThreadSafety() throws Exception {
    final SecureRandom secureRandom = SecureRandom.getInstance(algorithmName);
    int threads = 2;
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ExecutorCompletionService<Void> ecs = new ExecutorCompletionService<Void>(executor);
    for (int t = 0; t < threads; t++) {
        ecs.submit(new Callable<Void>() {

            public Void call() {
                for (int i = 0; i < 1000; i++) {
                    secureRandom.generateSeed(1024);
                }
                return null;
            }
        });
    }
    executor.shutdown();
    for (int i = 0; i < threads; i++) {
        ecs.take().get();
    }
}
Also used : ExecutorService(java.util.concurrent.ExecutorService) SecureRandom(java.security.SecureRandom) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService)
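
The reason take().get() is enough of an assertion here is that any exception a worker throws (for example from generateSeed) resurfaces on the test thread wrapped in an ExecutionException, which fails the test. A small sketch of that propagation with placeholder task bodies (not the robovm test itself):

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServicePropagationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        ExecutorCompletionService<Void> ecs = new ExecutorCompletionService<>(executor);
        ecs.submit(() -> {
            // exercise the shared object here; completes normally
            return null;
        });
        ecs.submit(() -> {
            // simulate a worker failing mid-test
            throw new IllegalStateException("worker failed");
        });
        executor.shutdown();
        for (int i = 0; i < 2; i++) {
            try {
                ecs.take().get();  // rethrows the worker's exception, wrapped in ExecutionException
            } catch (ExecutionException e) {
                // in a test method declared "throws Exception" this would simply propagate and fail the test
                System.out.println("propagated: " + e.getCause());
            }
        }
    }
}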

Example 19 with ExecutorCompletionService

use of java.util.concurrent.ExecutorCompletionService in project cdap by caskdata.

the class SparkTransactionHandlerTest method verifyStagesTransactions.

/**
   * Verifies the result of get stage transaction for the given set of stages.
   * The get transaction will be called concurrently for all stages.
   *
   * @param stages set of stages to verify
   * @param verifier a {@link ClientTransactionVerifier} to verify the http call result.
   */
private void verifyStagesTransactions(Set<Integer> stages, final ClientTransactionVerifier verifier) throws Exception {
    final CyclicBarrier barrier = new CyclicBarrier(stages.size());
    final ExecutorService executor = Executors.newFixedThreadPool(stages.size());
    try {
        CompletionService<Boolean> completionService = new ExecutorCompletionService<>(executor);
        for (final int stageId : stages) {
            completionService.submit(new Callable<Boolean>() {

                @Override
                public Boolean call() throws Exception {
                    barrier.await();
                    try {
                        return verifier.verify(sparkTxClient.getTransaction(stageId, 0, TimeUnit.SECONDS), null);
                    } catch (Throwable t) {
                        return verifier.verify(null, t);
                    }
                }
            });
        }
        boolean result = true;
        for (int i = 0; i < stages.size(); i++) {
            result = result && completionService.poll(10, TimeUnit.SECONDS).get();
        }
        // All verifications must be true
        Assert.assertTrue(result);
    } finally {
        executor.shutdown();
    }
}
Also used : ExecutorService(java.util.concurrent.ExecutorService) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) TransactionFailureException(org.apache.tephra.TransactionFailureException) TimeoutException(java.util.concurrent.TimeoutException) UnknownHostException(java.net.UnknownHostException) CyclicBarrier(java.util.concurrent.CyclicBarrier)
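
Unlike the earlier examples, this test drains the service with poll(10, TimeUnit.SECONDS) rather than take(). poll() returns null when nothing completes within the timeout, so the chained .get() above would report a timeout as a NullPointerException. A slightly more defensive drain loop, as a sketch under that observation (names are mine, not CDAP's):

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class DrainWithTimeoutSketch {
    static boolean allVerified(CompletionService<Boolean> completionService, int taskCount)
            throws InterruptedException, ExecutionException, TimeoutException {
        boolean result = true;
        for (int i = 0; i < taskCount; i++) {
            Future<Boolean> future = completionService.poll(10, TimeUnit.SECONDS);
            if (future == null) {
                // nothing finished within the window; fail with a clear timeout instead of an NPE
                throw new TimeoutException("verification task did not complete within 10 seconds");
            }
            result = result && future.get();
        }
        return result;
    }
}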

Example 20 with ExecutorCompletionService

use of java.util.concurrent.ExecutorCompletionService in project indy by Commonjava.

the class ConcurrencyTest method deadlockOnGroupContains.

@BMRules(rules = { @BMRule(name = "init rendezvous", targetClass = "MemoryStoreDataManager", targetMethod = "<init>", targetLocation = "ENTRY", action = "createRendezvous($0, 2, true)"), @BMRule(name = "getGroupsContaining call", targetClass = "MemoryStoreDataManager", targetMethod = "getGroupsContaining", targetLocation = "ENTRY", action = "rendezvous($0); debug(Thread.currentThread().getName() + \": thread proceeding.\")") })
@Test
public void deadlockOnGroupContains() throws IndyDataException, InterruptedException, ExecutionException {
    ExecutorService executor = Executors.newFixedThreadPool(2);
    ExecutorCompletionService<String> completionService = new ExecutorCompletionService<>(executor);
    AtomicInteger count = new AtomicInteger(0);
    RemoteRepository repo = new RemoteRepository(MAVEN_PKG_KEY, "central", "http://repo.maven.apache.org/maven2");
    TestUpdatingEventDispatcher dispatcher = new TestUpdatingEventDispatcher(repo, completionService, count);
    MemoryStoreDataManager dataManager = new MemoryStoreDataManager(dispatcher, new DefaultIndyConfiguration());
    dispatcher.setDataManager(dataManager);
    ChangeSummary summary = new ChangeSummary(ChangeSummary.SYSTEM_USER, "Test init");
    dataManager.storeArtifactStore(repo, summary, false, false, new EventMetadata());
    for (int i = 0; i < 2; i++) {
        Group group = new Group(MAVEN_PKG_KEY, "group" + i);
        if (i % 2 == 0) {
            group.addConstituent(repo);
        }
        dataManager.storeArtifactStore(group, summary, false, false, new EventMetadata());
    }
    for (int i = 0; i < count.get(); i++) {
        Future<String> future = completionService.take();
        assertThat(future.get(), nullValue());
    }
}
Also used : Group(org.commonjava.indy.model.core.Group) ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) RemoteRepository(org.commonjava.indy.model.core.RemoteRepository) EventMetadata(org.commonjava.maven.galley.event.EventMetadata) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) ChangeSummary(org.commonjava.indy.audit.ChangeSummary) DefaultIndyConfiguration(org.commonjava.indy.conf.DefaultIndyConfiguration) Test(org.junit.Test) BMRules(org.jboss.byteman.contrib.bmunit.BMRules)

Aggregations

ExecutorCompletionService (java.util.concurrent.ExecutorCompletionService): 69
ArrayList (java.util.ArrayList): 31
ExecutorService (java.util.concurrent.ExecutorService): 30
ExecutionException (java.util.concurrent.ExecutionException): 27
IOException (java.io.IOException): 24
Test (org.junit.Test): 21
Future (java.util.concurrent.Future): 18
List (java.util.List): 10
InterruptedIOException (java.io.InterruptedIOException): 9
Path (org.apache.hadoop.fs.Path): 8
KieSession (org.kie.api.runtime.KieSession): 8
Callable (java.util.concurrent.Callable): 7
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 7
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 6
EntryPoint (org.kie.api.runtime.rule.EntryPoint): 6
HashMap (java.util.HashMap): 4
Executor (java.util.concurrent.Executor): 4
TimeoutException (java.util.concurrent.TimeoutException): 4
KieBase (org.kie.api.KieBase): 4
Random (java.util.Random): 3