Example 26 with AbstractIterator

Use of com.google.common.collect.AbstractIterator in project xtext-core by eclipse.

In class AbstractTrace, method toLocations:

protected Iterable<? extends ILocationInResource> toLocations(final Iterable<AbstractTraceRegion> allTraceRegions) {
    return new Iterable<ILocationInResource>() {

        @Override
        public Iterator<ILocationInResource> iterator() {
            return new AbstractIterator<ILocationInResource>() {

                private Iterator<AbstractTraceRegion> delegate = allTraceRegions.iterator();

                private AbstractTraceRegion region;

                private Iterator<ILocationData> locationDelegate;

                @Override
                protected ILocationInResource computeNext() {
                    while (true) {
                        // when the current location iterator is exhausted, advance to the next region
                        if (locationDelegate == null || !locationDelegate.hasNext()) {
                            if (delegate.hasNext()) {
                                region = delegate.next();
                                locationDelegate = region.getAssociatedLocations().iterator();
                                if (!locationDelegate.hasNext()) {
                                    continue;
                                }
                            }
                        }
                        if (locationDelegate != null && locationDelegate.hasNext()) {
                            ILocationData locationData = locationDelegate.next();
                            ILocationInResource result = createLocationInResourceFor(locationData, region);
                            if (result != null) {
                                return result;
                            }
                            continue;
                        }
                        return endOfData();
                    }
                }
            };
        }
    };
}
Also used : Iterator(java.util.Iterator) AbstractIterator(com.google.common.collect.AbstractIterator) AbstractTraceRegion(org.eclipse.xtext.generator.trace.AbstractTraceRegion) ILocationInResource(org.eclipse.xtext.generator.trace.ILocationInResource) ILocationData(org.eclipse.xtext.generator.trace.ILocationData)
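
This example flattens a two-level structure, trace regions that each carry associated locations, into a single flat iterator, advancing the outer iterator whenever the inner one is exhausted. Below is a minimal, self-contained sketch of the same flattening idiom; the names FlattenExample and flatten are illustrative, not part of the xtext-core source.

import com.google.common.collect.AbstractIterator;

import java.util.Iterator;

public final class FlattenExample {

    // Flatten an Iterable of Iterables into one Iterator, skipping empty inner collections.
    public static <T> Iterator<T> flatten(final Iterable<? extends Iterable<T>> outer) {
        return new AbstractIterator<T>() {

            private final Iterator<? extends Iterable<T>> outerIterator = outer.iterator();

            private Iterator<T> inner;

            @Override
            protected T computeNext() {
                // advance past empty inner iterators until an element is available
                while (inner == null || !inner.hasNext()) {
                    if (!outerIterator.hasNext()) {
                        return endOfData();
                    }
                    inner = outerIterator.next().iterator();
                }
                return inner.next();
            }
        };
    }
}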

Example 27 with AbstractIterator

Use of com.google.common.collect.AbstractIterator in project airlift by airlift.

In class BodyGeneratorContentProvider, method iterator:

@Override
public Iterator<ByteBuffer> iterator() {
    final BlockingQueue<ByteBuffer> chunks = new ArrayBlockingQueue<>(16);
    final AtomicReference<Exception> exception = new AtomicReference<>();
    executor.execute(() -> {
        BodyGeneratorOutputStream out = new BodyGeneratorOutputStream(chunks);
        try {
            bodyGenerator.write(out);
            out.close();
        } catch (Exception e) {
            exception.set(e);
            chunks.add(EXCEPTION);
        }
    });
    return new AbstractIterator<ByteBuffer>() {

        @Override
        protected ByteBuffer computeNext() {
            ByteBuffer chunk;
            try {
                chunk = chunks.take();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException("Interrupted", e);
            }
            // identity comparison against the sentinel buffers enqueued by the producer thread
            if (chunk == EXCEPTION) {
                throwIfUnchecked(exception.get());
                throw new RuntimeException(exception.get());
            }
            if (chunk == DONE) {
                return endOfData();
            }
            return chunk;
        }
    };
}
Also used : ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) AtomicReference(java.util.concurrent.atomic.AtomicReference) AbstractIterator(com.google.common.collect.AbstractIterator) ByteBuffer(java.nio.ByteBuffer) IOException(java.io.IOException) InterruptedIOException(java.io.InterruptedIOException)
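
The core trick here is bridging a push-style producer (the body generator writing on an executor thread) to a pull-style iterator through a BlockingQueue, with identity-distinct sentinel values (EXCEPTION, DONE) marking failure and completion. A stripped-down sketch of the sentinel idiom, assuming a String queue and with all names (QueueIteratorExample, drain, DONE) hypothetical:

import com.google.common.collect.AbstractIterator;

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public final class QueueIteratorExample {

    // identity-distinct sentinel: only this exact instance ends iteration,
    // so a regular "DONE" payload cannot be mistaken for it
    private static final String DONE = new String("DONE");

    public static AbstractIterator<String> drain(BlockingQueue<String> queue) {
        return new AbstractIterator<String>() {
            @Override
            protected String computeNext() {
                String item;
                try {
                    item = queue.take(); // blocks until the producer adds something
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException("Interrupted", e);
                }
                // compare by identity, not equals()
                return item == DONE ? endOfData() : item;
            }
        };
    }

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new LinkedBlockingQueue<>();
        queue.put("a");
        queue.put("b");
        queue.put(DONE);
        drain(queue).forEachRemaining(System.out::println); // prints a, then b
    }
}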

Example 28 with AbstractIterator

Use of com.google.common.collect.AbstractIterator in project presto by prestodb.

In class BackgroundHiveSplitLoader, method createHiveSplitIterator:

private Iterator<HiveSplit> createHiveSplitIterator(String partitionName, String path, BlockLocation[] blockLocations, long start, long length, Properties schema, List<HivePartitionKey> partitionKeys, boolean splittable, ConnectorSession session, OptionalInt bucketNumber, TupleDomain<HiveColumnHandle> effectivePredicate, Map<Integer, HiveType> columnCoercions) throws IOException {
    boolean forceLocalScheduling = HiveSessionProperties.isForceLocalScheduling(session);
    if (splittable) {
        PeekingIterator<BlockLocation> blockLocationIterator = Iterators.peekingIterator(Arrays.stream(blockLocations).iterator());
        return new AbstractIterator<HiveSplit>() {

            private long chunkOffset = 0;

            @Override
            protected HiveSplit computeNext() {
                if (!blockLocationIterator.hasNext()) {
                    return endOfData();
                }
                BlockLocation blockLocation = blockLocationIterator.peek();
                List<HostAddress> addresses;
                try {
                    addresses = toHostAddress(blockLocation.getHosts());
                } catch (IOException e) {
                    throw Throwables.propagate(e);
                }
                long targetChunkSize;
                if (remainingInitialSplits.decrementAndGet() >= 0) {
                    targetChunkSize = maxInitialSplitSize.toBytes();
                } else {
                    long maxBytes = maxSplitSize.toBytes();
                    int chunks = toIntExact((long) Math.ceil((blockLocation.getLength() - chunkOffset) * 1.0 / maxBytes));
                    targetChunkSize = (long) Math.ceil((blockLocation.getLength() - chunkOffset) * 1.0 / chunks);
                }
                // adjust the actual chunk size to account for the overrun when chunks are slightly bigger than necessary (see above)
                long chunkLength = Math.min(targetChunkSize, blockLocation.getLength() - chunkOffset);
                HiveSplit result = new HiveSplit(connectorId, table.getDatabaseName(), table.getTableName(), partitionName, path, blockLocation.getOffset() + chunkOffset, chunkLength, schema, partitionKeys, addresses, bucketNumber, forceLocalScheduling && hasRealAddress(addresses), effectivePredicate, columnCoercions);
                chunkOffset += chunkLength;
                if (chunkOffset >= blockLocation.getLength()) {
                    checkState(chunkOffset == blockLocation.getLength(), "Error splitting blocks");
                    blockLocationIterator.next();
                    chunkOffset = 0;
                }
                return result;
            }
        };
    } else {
        // not splittable, use the hosts from the first block if it exists
        List<HostAddress> addresses = ImmutableList.of();
        if (blockLocations.length > 0) {
            addresses = toHostAddress(blockLocations[0].getHosts());
        }
        return Iterators.singletonIterator(new HiveSplit(connectorId, table.getDatabaseName(), table.getTableName(), partitionName, path, start, length, schema, partitionKeys, addresses, bucketNumber, forceLocalScheduling && hasRealAddress(addresses), effectivePredicate, columnCoercions));
    }
}
Also used : AbstractIterator(com.google.common.collect.AbstractIterator) IOException(java.io.IOException) BlockLocation(org.apache.hadoop.fs.BlockLocation) HostAddress(com.facebook.presto.spi.HostAddress)
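
The subtle part of this example is the chunk-size arithmetic: the remaining block bytes are divided by the maximum split size (rounded up) to get a chunk count, then divided by that count (rounded up again), so the splits come out nearly equal instead of leaving a short tail. A worked sketch of just that calculation, with illustrative values:

public final class ChunkingExample {

    public static void main(String[] args) {
        long blockLength = 1_000L; // bytes remaining in the current block
        long maxBytes = 300L;      // configured maximum split size

        // ceil(1000 / 300) = 4 chunks, then ceil(1000 / 4) = 250 bytes per chunk:
        // four even 250-byte splits instead of three 300-byte splits plus a 100-byte tail
        int chunks = Math.toIntExact((long) Math.ceil(blockLength * 1.0 / maxBytes));
        long targetChunkSize = (long) Math.ceil(blockLength * 1.0 / chunks);

        for (long offset = 0; offset < blockLength; offset += targetChunkSize) {
            // clamp the last chunk to whatever is actually left
            long chunkLength = Math.min(targetChunkSize, blockLength - offset);
            System.out.printf("split at offset %d, length %d%n", offset, chunkLength);
        }
    }
}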

Example 29 with AbstractIterator

Use of com.google.common.collect.AbstractIterator in project jackrabbit-oak by apache.

In class SolrQueryIndex, method getIterator:

private AbstractIterator<SolrResultRow> getIterator(final Filter filter, final IndexPlan plan, final String parent, final int parentDepth, final OakSolrConfiguration configuration, final SolrServer solrServer, final LMSEstimator estimator) {
    return new AbstractIterator<SolrResultRow>() {

        public Collection<FacetField> facetFields = new LinkedList<FacetField>();

        private final Set<String> seenPaths = Sets.newHashSet();

        private final Deque<SolrResultRow> queue = Queues.newArrayDeque();

        private int offset = 0;

        private boolean noDocs = false;

        private long numFound = 0;

        @Override
        protected SolrResultRow computeNext() {
            if (!queue.isEmpty() || loadDocs()) {
                return queue.remove();
            }
            return endOfData();
        }

        private SolrResultRow convertToRow(SolrDocument doc) {
            String path = String.valueOf(doc.getFieldValue(configuration.getPathField()));
            if ("".equals(path)) {
                path = "/";
            }
            if (!parent.isEmpty()) {
                path = getAncestorPath(path, parentDepth);
                // avoid duplicate entries
                if (seenPaths.contains(path)) {
                    return null;
                }
                seenPaths.add(path);
            }
            float score = 0f;
            Object scoreObj = doc.get("score");
            if (scoreObj != null) {
                score = (Float) scoreObj;
            }
            return new SolrResultRow(path, score, doc, facetFields);
        }

        /**
         * Loads the Solr documents in batches.
         * @return true if any document is loaded
         */
        private boolean loadDocs() {
            if (noDocs) {
                return false;
            }
            try {
                if (log.isDebugEnabled()) {
                    log.debug("converting filter {}", filter);
                }
                SolrQuery query = FilterQueryParser.getQuery(filter, plan, configuration);
                if (numFound > 0) {
                    long rows = configuration.getRows();
                    long maxQueries = numFound / 2;
                    if (maxQueries > configuration.getRows()) {
                        // adjust the rows to avoid making more than 3 Solr requests for this particular query
                        rows = maxQueries;
                        query.setParam("rows", String.valueOf(rows));
                    }
                    long newOffset = configuration.getRows() + offset * rows;
                    if (newOffset >= numFound) {
                        return false;
                    }
                    query.setParam("start", String.valueOf(newOffset));
                    offset++;
                }
                if (log.isDebugEnabled()) {
                    log.debug("sending query {}", query);
                }
                QueryResponse queryResponse = solrServer.query(query);
                if (log.isDebugEnabled()) {
                    log.debug("getting response {}", queryResponse.getHeader());
                }
                SolrDocumentList docs = queryResponse.getResults();
                if (docs != null) {
                    numFound = docs.getNumFound();
                    estimator.update(filter, docs);
                    Map<String, Map<String, List<String>>> highlighting = queryResponse.getHighlighting();
                    for (SolrDocument doc : docs) {
                        // handle highlight
                        if (highlighting != null) {
                            Object pathObject = doc.getFieldValue(configuration.getPathField());
                            if (pathObject != null && highlighting.get(String.valueOf(pathObject)) != null) {
                                Map<String, List<String>> value = highlighting.get(String.valueOf(pathObject));
                                for (Map.Entry<String, List<String>> entry : value.entrySet()) {
                                    // all highlighted values end up in 'rep:excerpt', regardless of field match
                                    for (String v : entry.getValue()) {
                                        doc.addField(QueryImpl.REP_EXCERPT, v);
                                    }
                                }
                            }
                        }
                        SolrResultRow row = convertToRow(doc);
                        if (row != null) {
                            queue.add(row);
                        }
                    }
                }
                // get facets
                List<FacetField> returnedFieldFacet = queryResponse.getFacetFields();
                if (returnedFieldFacet != null) {
                    facetFields.addAll(returnedFieldFacet);
                }
                // filter facets on doc paths
                if (!facetFields.isEmpty() && docs != null) {
                    for (SolrDocument doc : docs) {
                        String path = String.valueOf(doc.getFieldValue(configuration.getPathField()));
                        // if facet path doesn't exist for the calling user, filter the facet for this doc
                        for (FacetField ff : facetFields) {
                            if (!filter.isAccessible(path + "/" + ff.getName())) {
                                filterFacet(doc, ff);
                            }
                        }
                    }
                }
                // handle spellcheck
                SpellCheckResponse spellCheckResponse = queryResponse.getSpellCheckResponse();
                if (spellCheckResponse != null && spellCheckResponse.getSuggestions() != null && spellCheckResponse.getSuggestions().size() > 0) {
                    putSpellChecks(spellCheckResponse, queue, filter, configuration, solrServer);
                    noDocs = true;
                }
                // handle suggest
                NamedList<Object> response = queryResponse.getResponse();
                Map suggest = (Map) response.get("suggest");
                if (suggest != null) {
                    Set<Map.Entry<String, Object>> suggestEntries = suggest.entrySet();
                    if (!suggestEntries.isEmpty()) {
                        putSuggestions(suggestEntries, queue, filter, configuration, solrServer);
                        noDocs = true;
                    }
                }
            } catch (Exception e) {
                if (log.isWarnEnabled()) {
                    log.warn("query via {} failed.", solrServer, e);
                }
            }
            return !queue.isEmpty();
        }
    };
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) FacetField(org.apache.solr.client.solrj.response.FacetField) SolrQuery(org.apache.solr.client.solrj.SolrQuery) SpellCheckResponse(org.apache.solr.client.solrj.response.SpellCheckResponse) SolrDocument(org.apache.solr.common.SolrDocument) SolrDocumentList(org.apache.solr.common.SolrDocumentList) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) NamedList(org.apache.solr.common.util.NamedList) AbstractIterator(com.google.common.collect.AbstractIterator) Deque(java.util.Deque) SolrServerException(org.apache.solr.client.solrj.SolrServerException) QueryResponse(org.apache.solr.client.solrj.response.QueryResponse) Collection(java.util.Collection) SimpleOrderedMap(org.apache.solr.common.util.SimpleOrderedMap) Map(java.util.Map) WeakHashMap(java.util.WeakHashMap)
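
This iterator buffers one page of Solr results in a Deque and refills it lazily: computeNext drains the queue and only fetches another page when it runs empty, so paging is driven entirely by consumption. A minimal sketch of that buffer-and-refill idiom, where fetchPage is a hypothetical stand-in for the Solr round trip:

import com.google.common.collect.AbstractIterator;

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public abstract class PagedIteratorExample<T> extends AbstractIterator<T> {

    private final Deque<T> queue = new ArrayDeque<>();

    private int offset;

    /** Fetches the next page of results, or an empty list when exhausted. */
    protected abstract List<T> fetchPage(int offset);

    @Override
    protected T computeNext() {
        if (queue.isEmpty()) {
            // refill the buffer with the next page, driven by consumption
            List<T> page = fetchPage(offset);
            offset += page.size();
            queue.addAll(page);
        }
        // an empty queue after a refill attempt means the source is exhausted
        return queue.isEmpty() ? endOfData() : queue.remove();
    }
}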

Example 30 with AbstractIterator

Use of com.google.common.collect.AbstractIterator in project GeoGig by boundlessgeo.

In class MongoObjectDatabase, method getAll:

@Override
public Iterator<RevObject> getAll(final Iterable<ObjectId> ids, final BulkOpListener listener) {
    return new AbstractIterator<RevObject>() {

        final Iterator<ObjectId> queryIds = ids.iterator();

        @Override
        protected RevObject computeNext() {
            RevObject obj = null;
            while (obj == null) {
                if (!queryIds.hasNext()) {
                    return endOfData();
                }
                ObjectId id = queryIds.next();
                obj = getIfPresent(id);
                if (obj == null) {
                    listener.notFound(id);
                } else {
                    listener.found(obj.getId(), null);
                }
            }
            // the while loop above only exits once obj is non-null
            return obj;
        }
    };
}
Also used : RevObject(org.locationtech.geogig.api.RevObject) ObjectId(org.locationtech.geogig.api.ObjectId) Iterator(java.util.Iterator) AbstractIterator(com.google.common.collect.AbstractIterator)
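
This example resolves ids one at a time, skipping missing objects after notifying the listener, and ends the iteration once the id stream is exhausted. A minimal sketch of the same look-up-and-skip idiom, using a plain Map as a hypothetical stand-in for the object database:

import com.google.common.collect.AbstractIterator;

import java.util.Iterator;
import java.util.Map;

public final class LookupIteratorExample {

    public static <K, V> Iterator<V> lookupAll(Iterable<K> ids, Map<K, V> store) {
        final Iterator<K> queryIds = ids.iterator();
        return new AbstractIterator<V>() {
            @Override
            protected V computeNext() {
                // keep consuming ids until one resolves or the input runs out
                while (queryIds.hasNext()) {
                    V value = store.get(queryIds.next());
                    if (value != null) {
                        return value;
                    }
                }
                return endOfData();
            }
        };
    }
}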

Aggregations

AbstractIterator (com.google.common.collect.AbstractIterator): 55
IOException (java.io.IOException): 15
Iterator (java.util.Iterator): 14
Map (java.util.Map): 8
ArrayList (java.util.ArrayList): 7
List (java.util.List): 6
File (java.io.File): 5
EOFException (java.io.EOFException): 4
Collection (java.util.Collection): 4
HashSet (java.util.HashSet): 4
BIGINT (com.facebook.presto.common.type.BigintType.BIGINT): 3
SMALLINT (com.facebook.presto.common.type.SmallintType.SMALLINT): 3
ImmutableMap (com.google.common.collect.ImmutableMap): 3
Deque (java.util.Deque): 3
Set (java.util.Set): 3
CountDownLatch (java.util.concurrent.CountDownLatch): 3
CyclicBarrier (java.util.concurrent.CyclicBarrier): 3
Test (org.junit.Test): 3
TopicMetadata (co.cask.cdap.messaging.TopicMetadata): 2
TopicId (co.cask.cdap.proto.id.TopicId): 2