
Example 56 with IntArrayList

Use of com.carrotsearch.hppc.IntArrayList in project graphhopper by graphhopper.

Class PrepareRoutingSubnetworksTest, method test481.

@Test
public void test481() {
    // 0->1->3->4->5->6
    // 2        7<--/
    GraphHopperStorage g = createStorage(em);
    g.edge(0, 1, 1, false);
    g.edge(1, 2, 1, false);
    g.edge(2, 0, 1, false);
    g.edge(1, 3, 1, false);
    g.edge(3, 4, 1, false);
    g.edge(4, 5, 1, false);
    g.edge(5, 6, 1, false);
    g.edge(6, 7, 1, false);
    g.edge(7, 4, 1, false);
    PrepareRoutingSubnetworks instance = new PrepareRoutingSubnetworks(g, Collections.singletonList(carFlagEncoder)).setMinOneWayNetworkSize(2).setMinNetworkSize(4);
    instance.doWork();
    // only one remaining network
    List<IntArrayList> components = instance.findSubnetworks(new PrepEdgeFilter(carFlagEncoder));
    assertEquals(1, components.size());
}
Also used: IntArrayList(com.carrotsearch.hppc.IntArrayList), PrepEdgeFilter(com.graphhopper.routing.subnetwork.PrepareRoutingSubnetworks.PrepEdgeFilter), GraphHopperStorage(com.graphhopper.storage.GraphHopperStorage), Test(org.junit.Test)
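
findSubnetworks above returns a List<IntArrayList>, one list per detected subnetwork. As a minimal, self-contained sketch (the component contents below are invented and not taken from the test), this is how such a result can be inspected with plain HPPC calls:

import com.carrotsearch.hppc.IntArrayList;

import java.util.Arrays;
import java.util.List;

public class ComponentInspectionSketch {
    public static void main(String[] args) {
        // Hypothetical stand-in for the List<IntArrayList> returned by findSubnetworks(...)
        List<IntArrayList> components = Arrays.asList(
                IntArrayList.from(0, 1, 2),         // a small subnetwork
                IntArrayList.from(3, 4, 5, 6, 7));  // the main network

        for (int i = 0; i < components.size(); i++) {
            IntArrayList component = components.get(i);
            // size() and get(index) operate on primitive ints, so no boxing occurs
            System.out.println("component " + i + ": " + component.size()
                    + " nodes, first node = " + component.get(0));
        }
    }
}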

Example 57 with IntArrayList

Use of com.carrotsearch.hppc.IntArrayList in project graphhopper by graphhopper.

Class LocationIndexTree, method prepareAlgo.

void prepareAlgo() {
    // 0.1 meter should count as 'equal'
    equalNormedDelta = distCalc.calcNormalizedDist(0.1);
    // now calculate the necessary maxDepth d for our current bounds
    // if we assume a minimum resolution like 0.5km for a leaf-tile
    // n^(depth/2) = toMeter(dLon) / minResolution
    BBox bounds = graph.getBounds();
    if (graph.getNodes() == 0)
        throw new IllegalStateException("Cannot create location index of empty graph!");
    if (!bounds.isValid())
        throw new IllegalStateException("Cannot create location index when graph has invalid bounds: " + bounds);
    double lat = Math.min(Math.abs(bounds.maxLat), Math.abs(bounds.minLat));
    double maxDistInMeter = Math.max((bounds.maxLat - bounds.minLat) / 360 * DistanceCalcEarth.C, (bounds.maxLon - bounds.minLon) / 360 * preciseDistCalc.calcCircumference(lat));
    double tmp = maxDistInMeter / minResolutionInMeter;
    tmp = tmp * tmp;
    IntArrayList tmpEntries = new IntArrayList();
    // the last one is always 4 to reduce costs if only a single entry
    tmp /= 4;
    while (tmp > 1) {
        int tmpNo;
        if (tmp >= 64) {
            tmpNo = 64;
        } else if (tmp >= 16) {
            tmpNo = 16;
        } else if (tmp >= 4) {
            tmpNo = 4;
        } else {
            break;
        }
        tmpEntries.add(tmpNo);
        tmp /= tmpNo;
    }
    tmpEntries.add(4);
    initEntries(tmpEntries.toArray());
    int shiftSum = 0;
    long parts = 1;
    for (int i = 0; i < shifts.length; i++) {
        shiftSum += shifts[i];
        parts *= entries[i];
    }
    if (shiftSum > 64)
        throw new IllegalStateException("sum of all shifts does not fit into a long variable");
    keyAlgo = new SpatialKeyAlgo(shiftSum).bounds(bounds);
    parts = Math.round(Math.sqrt(parts));
    deltaLat = (bounds.maxLat - bounds.minLat) / parts;
    deltaLon = (bounds.maxLon - bounds.minLon) / parts;
}
Also used: SpatialKeyAlgo(com.graphhopper.geohash.SpatialKeyAlgo), BBox(com.graphhopper.util.shapes.BBox), IntArrayList(com.carrotsearch.hppc.IntArrayList), GHPoint(com.graphhopper.util.shapes.GHPoint)
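
To make the subdivision loop in prepareAlgo easier to follow in isolation, here is a self-contained sketch of just that logic with invented distance values (a 500 km extent and a 500 m leaf resolution); everything else mirrors the method body above:

import com.carrotsearch.hppc.IntArrayList;

public class TileSubdivisionSketch {
    public static void main(String[] args) {
        double maxDistInMeter = 500_000;     // invented bounding-box extent
        double minResolutionInMeter = 500;   // invented leaf-tile resolution

        double tmp = maxDistInMeter / minResolutionInMeter;
        // squared because the area is subdivided in two dimensions
        tmp = tmp * tmp;
        // the last level is always 4, so account for it up front
        tmp /= 4;

        IntArrayList tmpEntries = new IntArrayList();
        while (tmp > 1) {
            int tmpNo;
            if (tmp >= 64) {
                tmpNo = 64;
            } else if (tmp >= 16) {
                tmpNo = 16;
            } else if (tmp >= 4) {
                tmpNo = 4;
            } else {
                break;
            }
            tmpEntries.add(tmpNo);
            tmp /= tmpNo;
        }
        tmpEntries.add(4);
        // prints the per-level branching factors, [64, 64, 16, 4] for these inputs
        System.out.println(tmpEntries);
    }
}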

Example 58 with IntArrayList

Use of com.carrotsearch.hppc.IntArrayList in project SearchServices by Alfresco.

Class SolrInformationServer, method getCascadeNodes.

public List<NodeMetaData> getCascadeNodes(List<Long> txnIds) throws AuthenticationException, IOException, JSONException {
    List<FieldInstance> list = AlfrescoSolrDataModel.getInstance().getIndexedFieldNamesForProperty(ContentModel.PROP_CASCADE_TX).getFields();
    FieldInstance fieldInstance = list.get(0);
    RefCounted<SolrIndexSearcher> refCounted = null;
    IntArrayList docList = null;
    HashSet<Long> childIds = new HashSet<>();
    try {
        refCounted = core.getSearcher();
        SolrIndexSearcher searcher = refCounted.get();
        String field = fieldInstance.getField();
        SchemaField schemaField = searcher.getSchema().getField(field);
        FieldType fieldType = schemaField.getType();
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        BooleanQuery booleanQuery = null;
        for (Long l : txnIds) {
            BytesRefBuilder bytesRefBuilder = new BytesRefBuilder();
            fieldType.readableToIndexed(l.toString(), bytesRefBuilder);
            TermQuery termQuery = new TermQuery(new Term(field, bytesRefBuilder.toBytesRef()));
            BooleanClause booleanClause = new BooleanClause(termQuery, BooleanClause.Occur.SHOULD);
            builder.add(booleanClause);
        }
        booleanQuery = builder.build();
        DocListCollector collector = new DocListCollector();
        searcher.search(booleanQuery, collector);
        docList = collector.getDocs();
        // System.out.println("################ CASCASDE Parent Nodes:"+docList.size());
        int size = docList.size();
        Set<String> set = new HashSet<>();
        set.add(FIELD_SOLR4_ID);
        for (int i = 0; i < size; i++) {
            int docId = docList.get(i);
            Document document = searcher.doc(docId, set);
            IndexableField indexableField = document.getField(FIELD_SOLR4_ID);
            String id = indexableField.stringValue();
            TenantAclIdDbId ids = AlfrescoSolrDataModel.decodeNodeDocumentId(id);
            // System.out.println("################## Cascade Parent:"+ ids.dbId);
            childIds.add(ids.dbId);
        }
    } finally {
        refCounted.decref();
    }
    List<NodeMetaData> allNodeMetaDatas = new ArrayList<>();
    for (Long childId : childIds) {
        NodeMetaDataParameters nmdp = new NodeMetaDataParameters();
        nmdp.setFromNodeId(childId);
        nmdp.setToNodeId(childId);
        nmdp.setIncludeAclId(false);
        nmdp.setIncludeAspects(false);
        nmdp.setIncludeChildAssociations(false);
        nmdp.setIncludeChildIds(true);
        nmdp.setIncludeNodeRef(false);
        nmdp.setIncludeOwner(false);
        nmdp.setIncludeParentAssociations(false);
        // We only care about the path and ancestors (which is included) for this case
        nmdp.setIncludePaths(true);
        nmdp.setIncludeProperties(false);
        nmdp.setIncludeType(false);
        nmdp.setIncludeTxnId(true);
        // Gets only one
        List<NodeMetaData> nodeMetaDatas = repositoryClient.getNodesMetaData(nmdp, 1);
        allNodeMetaDatas.addAll(nodeMetaDatas);
    }
    return allNodeMetaDatas;
}
Also used: BooleanQuery(org.apache.lucene.search.BooleanQuery), Set(java.util.Set), AclChangeSet(org.alfresco.solr.client.AclChangeSet), LinkedHashSet(java.util.LinkedHashSet), IOpenBitSet(org.alfresco.solr.adapters.IOpenBitSet), HashSet(java.util.HashSet), TenantAclIdDbId(org.alfresco.solr.AlfrescoSolrDataModel.TenantAclIdDbId), BytesRefBuilder(org.apache.lucene.util.BytesRefBuilder), IntArrayList(com.carrotsearch.hppc.IntArrayList), ArrayList(java.util.ArrayList), Document(org.apache.lucene.document.Document), SolrInputDocument(org.apache.solr.common.SolrInputDocument), SolrDocument(org.apache.solr.common.SolrDocument), NodeMetaDataParameters(org.alfresco.solr.client.NodeMetaDataParameters), FieldInstance(org.alfresco.solr.AlfrescoSolrDataModel.FieldInstance), TermQuery(org.apache.lucene.search.TermQuery), NodeMetaData(org.alfresco.solr.client.NodeMetaData), SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher), Term(org.apache.lucene.index.Term), FieldType(org.apache.solr.schema.FieldType), SchemaField(org.apache.solr.schema.SchemaField), BooleanClause(org.apache.lucene.search.BooleanClause), IndexableField(org.apache.lucene.index.IndexableField)
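
A short aside on why DocListCollector hands back an IntArrayList rather than a java.util.List<Integer>: HPPC keeps the collected document IDs in a plain int[], so adding and reading them avoids boxing. A minimal sketch contrasting the two (none of this is Alfresco or Solr API):

import com.carrotsearch.hppc.IntArrayList;

import java.util.ArrayList;
import java.util.List;

public class DocIdListSketch {
    public static void main(String[] args) {
        // Boxed variant: each add(int) goes through an Integer object
        List<Integer> boxed = new ArrayList<>();
        for (int docId = 0; docId < 5; docId++) {
            boxed.add(docId);
        }

        // HPPC variant: backed by an int[], add/get stay primitive
        IntArrayList primitive = new IntArrayList();
        for (int docId = 0; docId < 5; docId++) {
            primitive.add(docId);
        }

        int sum = 0;
        for (int i = 0; i < primitive.size(); i++) {
            sum += primitive.get(i);
        }
        System.out.println("boxed=" + boxed + ", primitive=" + primitive + ", sum=" + sum);
    }
}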

Example 59 with IntArrayList

Use of com.carrotsearch.hppc.IntArrayList in project SearchServices by Alfresco.

Class SolrInformationServer, method getDocsWithUncleanContent.

@Override
public List<TenantAclIdDbId> getDocsWithUncleanContent(int start, int rows) throws IOException {
    RefCounted<SolrIndexSearcher> refCounted = null;
    try {
        List<TenantAclIdDbId> docIds = new ArrayList<>();
        refCounted = this.core.getSearcher();
        SolrIndexSearcher searcher = refCounted.get();
        /*
            *  Below is the code for purging the cleanContentCache.
            *  The cleanContentCache is an in-memory LRU cache of the transactions that have already
            *  had their content fetched. This is needed because the ContentTracker does not have an up-to-date
            *  snapshot of the index to determine which nodes are marked as dirty/new. The cleanContentCache is used
            *  to filter out nodes that belong to transactions that have already been processed, which stops them from
            *  being re-processed.
            *
            *  The cleanContentCache needs to be purged periodically to support retrying of failed content fetches.
            *  This is because fetches for individual nodes within the transaction may have failed, but the transaction will still be in the
            *  cleanContentCache, which prevents it from being retried.
            *
            *  Once a transaction is purged from the cleanContentCache it will be retried automatically if it is marked dirty/new
            *  in the current snapshot of the index.
            *
            *  The code below runs every two minutes and purges transactions from the
            *  cleanContentCache that are more than 20 minutes old.
            *
            */
        long purgeTime = System.currentTimeMillis();
        if (purgeTime - cleanContentLastPurged > 120000) {
            Iterator<Entry> entries = cleanContentCache.entrySet().iterator();
            while (entries.hasNext()) {
                Entry<Long, Long> entry = entries.next();
                long txnTime = entry.getValue();
                if (purgeTime - txnTime > 1200000) {
                    // Purge the clean content cache of records more than 20 minutes old.
                    entries.remove();
                }
            }
            cleanContentLastPurged = purgeTime;
        }
        long txnFloor = -1;
        // This query gets lowest txnID that has dirty content.
        // System.out.println("############### finding the transaction floor ################");
        TermQuery termQuery1 = new TermQuery(new Term(FIELD_FTSSTATUS, FTSStatus.Dirty.toString()));
        TermQuery termQuery2 = new TermQuery(new Term(FIELD_FTSSTATUS, FTSStatus.New.toString()));
        BooleanClause clause1 = new BooleanClause(termQuery1, BooleanClause.Occur.SHOULD);
        BooleanClause clause2 = new BooleanClause(termQuery2, BooleanClause.Occur.SHOULD);
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.add(clause1);
        builder.add(clause2);
        BooleanQuery orQuery = builder.build();
        Sort sort = new Sort(new SortField(FIELD_INTXID, SortField.Type.LONG));
        sort = sort.rewrite(searcher);
        TopFieldCollector collector = TopFieldCollector.create(sort, 1, null, false, false, false);
        // Filter transactions that have already been processed.
        DelegatingCollector delegatingCollector = new TxnCacheFilter(cleanContentCache);
        delegatingCollector.setLastDelegate(collector);
        searcher.search(orQuery, delegatingCollector);
        if (collector.getTotalHits() == 0) {
            return docIds;
        }
        ScoreDoc[] scoreDocs = collector.topDocs().scoreDocs;
        List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();
        int index = ReaderUtil.subIndex(scoreDocs[0].doc, leaves);
        LeafReaderContext context = leaves.get(index);
        NumericDocValues longs = context.reader().getNumericDocValues(FIELD_INTXID);
        txnFloor = longs.get(scoreDocs[0].doc - context.docBase);
        // System.out.println("################ Transaction floor:"+txnFloor);
        // Find the next N transactions
        collector = TopFieldCollector.create(new Sort(new SortField(FIELD_INTXID, SortField.Type.LONG)), rows, null, false, false, false);
        delegatingCollector = new TxnFloorFilter(txnFloor, cleanContentCache);
        delegatingCollector.setLastDelegate(collector);
        TermQuery txnQuery = new TermQuery(new Term(FIELD_DOC_TYPE, DOC_TYPE_TX));
        searcher.search(txnQuery, delegatingCollector);
        TopDocs docs = collector.topDocs();
        if (collector.getTotalHits() == 0) {
            // No new transactions to consider
            return docIds;
        }
        leaves = searcher.getTopReaderContext().leaves();
        FieldType fieldType = searcher.getSchema().getField(FIELD_INTXID).getType();
        builder = new BooleanQuery.Builder();
        for (ScoreDoc scoreDoc : docs.scoreDocs) {
            index = ReaderUtil.subIndex(scoreDoc.doc, leaves);
            context = leaves.get(index);
            longs = context.reader().getNumericDocValues(FIELD_INTXID);
            long txnID = longs.get(scoreDoc.doc - context.docBase);
            // Build up the query for the filter of transactions we need to pull the dirty content for.
            TermQuery txnIDQuery = new TermQuery(new Term(FIELD_INTXID, fieldType.readableToIndexed(Long.toString(txnID))));
            builder.add(new BooleanClause(txnIDQuery, BooleanClause.Occur.SHOULD));
        }
        BooleanQuery txnFilterQuery = builder.build();
        // Get the docs with dirty content for the transactions gathered above.
        TermQuery statusQuery1 = new TermQuery(new Term(FIELD_FTSSTATUS, FTSStatus.Dirty.toString()));
        TermQuery statusQuery2 = new TermQuery(new Term(FIELD_FTSSTATUS, FTSStatus.New.toString()));
        BooleanClause statusClause1 = new BooleanClause(statusQuery1, BooleanClause.Occur.SHOULD);
        BooleanClause statusClause2 = new BooleanClause(statusQuery2, BooleanClause.Occur.SHOULD);
        BooleanQuery.Builder builder1 = new BooleanQuery.Builder();
        builder1.add(statusClause1);
        builder1.add(statusClause2);
        BooleanQuery statusQuery = builder1.build();
        DocListCollector docListCollector = new DocListCollector();
        BooleanQuery.Builder builder2 = new BooleanQuery.Builder();
        builder2.add(statusQuery, BooleanClause.Occur.MUST);
        builder2.add(new QueryWrapperFilter(txnFilterQuery), BooleanClause.Occur.MUST);
        searcher.search(builder2.build(), docListCollector);
        IntArrayList docList = docListCollector.getDocs();
        int size = docList.size();
        // System.out.println("############### Dirty Doc Count ################:" + size);
        Set<String> fields = new HashSet<String>();
        fields.add(FIELD_SOLR4_ID);
        List<Long> processedTxns = new ArrayList<Long>();
        for (int i = 0; i < size; ++i) {
            int doc = docList.get(i);
            Document document = searcher.doc(doc, fields);
            index = ReaderUtil.subIndex(doc, leaves);
            context = leaves.get(index);
            longs = context.reader().getNumericDocValues(FIELD_INTXID);
            long txnId = longs.get(doc - context.docBase);
            if (!cleanContentCache.containsKey(txnId)) {
                processedTxns.add(txnId);
                IndexableField id = document.getField(FIELD_SOLR4_ID);
                String idString = id.stringValue();
                TenantAclIdDbId tenantAndDbId = AlfrescoSolrDataModel.decodeNodeDocumentId(idString);
                docIds.add(tenantAndDbId);
            }
        }
        long txnTime = System.currentTimeMillis();
        for (Long l : processedTxns) {
            // Save the indexVersion so we know when we can clean out this entry
            cleanContentCache.put(l, txnTime);
        }
        return docIds;
    } finally {
        refCounted.decref();
    }
}
Also used: BooleanQuery(org.apache.lucene.search.BooleanQuery), NumericDocValues(org.apache.lucene.index.NumericDocValues), TenantAclIdDbId(org.alfresco.solr.AlfrescoSolrDataModel.TenantAclIdDbId), BytesRefBuilder(org.apache.lucene.util.BytesRefBuilder), IntArrayList(com.carrotsearch.hppc.IntArrayList), ArrayList(java.util.ArrayList), SortField(org.apache.lucene.search.SortField), Document(org.apache.lucene.document.Document), SolrInputDocument(org.apache.solr.common.SolrInputDocument), SolrDocument(org.apache.solr.common.SolrDocument), ScoreDoc(org.apache.lucene.search.ScoreDoc), TopDocs(org.apache.lucene.search.TopDocs), DelegatingCollector(org.apache.solr.search.DelegatingCollector), Entry(java.util.Map.Entry), Sort(org.apache.lucene.search.Sort), LeafReaderContext(org.apache.lucene.index.LeafReaderContext), TopFieldCollector(org.apache.lucene.search.TopFieldCollector), LinkedHashSet(java.util.LinkedHashSet), HashSet(java.util.HashSet), TermQuery(org.apache.lucene.search.TermQuery), SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher), Term(org.apache.lucene.index.Term), QueryWrapperFilter(org.apache.solr.search.QueryWrapperFilter), FieldType(org.apache.solr.schema.FieldType), BooleanClause(org.apache.lucene.search.BooleanClause), IndexableField(org.apache.lucene.index.IndexableField)
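
The loop over docList above uses indexed access; HPPC also supports cursor-based iteration via IntCursor (listed in the aggregations below). A small standalone sketch of both styles, independent of the Solr code:

import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.cursors.IntCursor;

public class CursorIterationSketch {
    public static void main(String[] args) {
        IntArrayList docList = IntArrayList.from(10, 42, 7);

        // Indexed access, as in getDocsWithUncleanContent above
        for (int i = 0; i < docList.size(); i++) {
            System.out.println("index " + i + " -> doc " + docList.get(i));
        }

        // Cursor-based iteration: c.index is the position, c.value the element
        for (IntCursor c : docList) {
            System.out.println("index " + c.index + " -> doc " + c.value);
        }
    }
}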

Example 60 with IntArrayList

Use of com.carrotsearch.hppc.IntArrayList in project graphhopper by graphhopper.

Class LineIntIndex, method store.

private int store(InMemConstructionIndex.InMemEntry entry, int intPointer) {
    long pointer = (long) intPointer * 4;
    if (entry.isLeaf()) {
        InMemConstructionIndex.InMemLeafEntry leaf = ((InMemConstructionIndex.InMemLeafEntry) entry);
        IntArrayList entries = leaf.getResults();
        int len = entries.size();
        if (len == 0) {
            return intPointer;
        }
        size += len;
        intPointer++;
        leafs++;
        dataAccess.ensureCapacity((long) (intPointer + len + 1) * 4);
        if (len == 1) {
            // less disc space for single entries
            dataAccess.setInt(pointer, -entries.get(0) - 1);
        } else {
            for (int index = 0; index < len; index++, intPointer++) {
                dataAccess.setInt((long) intPointer * 4, entries.get(index));
            }
            dataAccess.setInt(pointer, intPointer);
        }
    } else {
        InMemConstructionIndex.InMemTreeEntry treeEntry = ((InMemConstructionIndex.InMemTreeEntry) entry);
        int len = treeEntry.subEntries.length;
        intPointer += len;
        for (int subCounter = 0; subCounter < len; subCounter++, pointer += 4) {
            InMemConstructionIndex.InMemEntry subEntry = treeEntry.subEntries[subCounter];
            if (subEntry == null) {
                continue;
            }
            dataAccess.ensureCapacity((long) (intPointer + 1) * 4);
            int prevIntPointer = intPointer;
            intPointer = store(subEntry, prevIntPointer);
            if (intPointer == prevIntPointer) {
                dataAccess.setInt(pointer, 0);
            } else {
                dataAccess.setInt(pointer, prevIntPointer);
            }
        }
    }
    return intPointer;
}
Also used: IntArrayList(com.carrotsearch.hppc.IntArrayList)
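
One detail worth spelling out: the single-entry branch stores -entries.get(0) - 1 directly in the pointer slot, so a negative value can signal an inlined entry rather than a pointer to a block of entries. A tiny sketch of that encode/decode convention, assuming only what the method above shows:

public class NegativeMarkerSketch {
    // Inline a single value into the pointer slot (mirrors dataAccess.setInt(pointer, -value - 1))
    static int encodeSingle(int value) {
        return -value - 1;
    }

    // A negative slot content means "inlined value" rather than a pointer to stored entries
    static int decodeSingle(int stored) {
        return -(stored + 1);
    }

    public static void main(String[] args) {
        int value = 123;
        int stored = encodeSingle(value);
        // prints stored=-124 decoded=123
        System.out.println("stored=" + stored + " decoded=" + decodeSingle(stored));
    }
}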

Aggregations

IntArrayList (com.carrotsearch.hppc.IntArrayList): 94
Test (org.junit.jupiter.api.Test): 16
RepeatedTest (org.junit.jupiter.api.RepeatedTest): 13
GHPoint (com.graphhopper.util.shapes.GHPoint): 10
ArrayList (java.util.ArrayList): 9
Test (org.junit.Test): 9
GraphHopperStorage (com.graphhopper.storage.GraphHopperStorage): 7
IntObjectHashMap (com.carrotsearch.hppc.IntObjectHashMap): 6
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 6
IntIndexedContainer (com.carrotsearch.hppc.IntIndexedContainer): 5
HashMap (java.util.HashMap): 5
UUID (java.util.UUID): 5
GHIntArrayList (com.graphhopper.coll.GHIntArrayList): 4
EdgeIteratorState (com.graphhopper.util.EdgeIteratorState): 4
RelationName (io.crate.metadata.RelationName): 4
IOException (java.io.IOException): 4
IntObjectMap (com.carrotsearch.hppc.IntObjectMap): 3
IntCursor (com.carrotsearch.hppc.cursors.IntCursor): 3
PrepEdgeFilter (com.graphhopper.routing.subnetwork.PrepareRoutingSubnetworks.PrepEdgeFilter): 3
FlagEncoder (com.graphhopper.routing.util.FlagEncoder): 3