Search in sources :

Example 1 with NamedList

use of org.apache.solr.common.util.NamedList in project Solbase by Photobucket.

The following example shows the newSearcher method of the SolbaseQuerySenderListener class.

@Override
public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
    // Warm up only shard cores (names containing "~" past position 0); skip the parent core.
    String coreName = core.getName();
    if (coreName.equals("") || coreName.indexOf("~") <= 0) {
        return;
    }
    final SolrIndexSearcher warmingSearcher = newSearcher;
    log.info("QuerySenderListener sending requests to " + core.getName());
    for (NamedList queryArgs : (List<NamedList>) args.get("queries")) {
        try {
            SolrQueryResponse warmResponse = new SolrQueryResponse();
            LocalSolrQueryRequest warmRequest = new LocalSolrQueryRequest(core, queryArgs) {

                @Override
                public SolrIndexSearcher getSearcher() {
                    // Route the warming query at the searcher being brought online,
                    // not the currently registered one.
                    return warmingSearcher;
                }

                @Override
                public void close() {
                    // Nothing to release for a warming request.
                }
            };
            warmRequest.getContext().put("solbase-index", core.getName());
            warmRequest.getContext().put("webapp", "/solbase");
            warmRequest.getContext().put("path", "/select");
            core.execute(core.getRequestHandler(warmRequest.getParams().get(core.getName())), warmRequest, warmResponse);
            warmRequest.close();
        } catch (Exception ignored) {
            // Best-effort warm-up: continue with the remaining queries;
            // the failure should have already been logged downstream.
        }
    }
    log.info("QuerySenderListener done.");
}
Also used : LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryResponse(org.apache.solr.request.SolrQueryResponse) NamedList(org.apache.solr.common.util.NamedList) List(java.util.List) DocList(org.apache.solr.search.DocList) NamedList(org.apache.solr.common.util.NamedList) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher)

Example 2 with NamedList

use of org.apache.solr.common.util.NamedList in project qi4j-sdk by Qi4j.

The following example shows the findEntities method of the SolrEntityQueryMixin class.

/**
 * Finds references of all entities matching the given Solr query specification.
 *
 * @param resultType      expected composite type (not used in the Solr request itself)
 * @param whereClause     must be a {@code QuerySpecification} carrying the raw Solr query string
 * @param orderBySegments optional sort orders, mapped to {@code <property>_for_sort asc|desc}
 * @param firstResult     optional zero-based offset; defaults to 0 when null
 * @param maxResults      optional page size; defaults to 10000 when null or 0
 * @param variables       query variables (not used here)
 * @return references parsed from the "id" field of each matching document
 * @throws EntityFinderException wrapping any SolrServerException from the query
 */
@Override
public Iterable<EntityReference> findEntities(Class<?> resultType, @Optional Specification<Composite> whereClause, @Optional OrderBy[] orderBySegments, @Optional Integer firstResult, @Optional Integer maxResults, Map<String, Object> variables) throws EntityFinderException {
    try {
        QuerySpecification expr = (QuerySpecification) whereClause;
        SolrServer server = solr.solrServer();
        NamedList<Object> list = new NamedList<Object>();
        list.add("q", expr.query());
        // firstResult/maxResults are @Optional and may be null; unboxing them
        // directly (e.g. "maxResults != 0") would throw NullPointerException.
        list.add("rows", maxResults != null && maxResults != 0 ? maxResults : 10000);
        list.add("start", firstResult != null ? firstResult : 0);
        if (orderBySegments != null && orderBySegments.length > 0) {
            for (OrderBy orderBySegment : orderBySegments) {
                // Sorting uses the dedicated "_for_sort" copy of the indexed property.
                String propName = ((Member) orderBySegment.property().accessor()).getName() + "_for_sort";
                String order = orderBySegment.order() == OrderBy.Order.ASCENDING ? "asc" : "desc";
                list.add("sort", propName + " " + order);
            }
        }
        SolrParams solrParams = SolrParams.toSolrParams(list);
        logger.debug("Search:" + list.toString());
        QueryResponse query = server.query(solrParams);
        SolrDocumentList results = query.getResults();
        List<EntityReference> references = new ArrayList<EntityReference>(results.size());
        for (SolrDocument result : results) {
            references.add(EntityReference.parseEntityReference(result.getFirstValue("id").toString()));
        }
        return references;
    } catch (SolrServerException e) {
        throw new EntityFinderException(e);
    }
}
Also used : OrderBy(org.qi4j.api.query.grammar.OrderBy) NamedList(org.apache.solr.common.util.NamedList) SolrServerException(org.apache.solr.client.solrj.SolrServerException) ArrayList(java.util.ArrayList) SolrDocumentList(org.apache.solr.common.SolrDocumentList) SolrServer(org.apache.solr.client.solrj.SolrServer) QuerySpecification(org.qi4j.api.query.grammar.QuerySpecification) SolrDocument(org.apache.solr.common.SolrDocument) QueryResponse(org.apache.solr.client.solrj.response.QueryResponse) EntityFinderException(org.qi4j.spi.query.EntityFinderException) EntityReference(org.qi4j.api.entity.EntityReference) SolrParams(org.apache.solr.common.params.SolrParams)

Example 3 with NamedList

use of org.apache.solr.common.util.NamedList in project spring-boot by spring-projects.

The following example shows the solrIsUp test method of the SolrHealthIndicatorTests class.

@Test
public void solrIsUp() throws Exception {
    // Arrange: a mocked client whose ping() reports a healthy server.
    SolrClient client = mock(SolrClient.class);
    NamedList<Object> pingBody = new NamedList<>();
    pingBody.add("status", "OK");
    SolrPingResponse ping = new SolrPingResponse();
    ping.setResponse(pingBody);
    given(client.ping()).willReturn(ping);
    // Act + assert: the indicator reports UP and surfaces the solr status detail.
    Health health = new SolrHealthIndicator(client).health();
    assertThat(health.getStatus()).isEqualTo(Status.UP);
    assertThat(health.getDetails().get("solrStatus")).isEqualTo("OK");
}
Also used : SolrPingResponse(org.apache.solr.client.solrj.response.SolrPingResponse) SolrClient(org.apache.solr.client.solrj.SolrClient) NamedList(org.apache.solr.common.util.NamedList) Test(org.junit.Test)

Example 4 with NamedList

use of org.apache.solr.common.util.NamedList in project Xponents by OpenSextant.

The following example shows the tagText method of the GazetteerMatcher class.

/**
 * Geotag a document, returning PlaceCandidates for the mentions in the
 * document. Optionally just return the PlaceCandidates with name only and
 * no Place objects attached. Names of continents are passed back as matches,
 * with geo matches. Continents are filtered out by default.
 *
 * @param buffer
 *            text
 * @param docid
 *            identity of the text
 * @param tagOnly
 *            True if you wish to get the matched phrases only. False if you
 *            want the full list of Place Candidates.
 * @param fld
 *            gazetteer field to use for tagging
 * @param langid
 *            ISO lang ID
 * @return place_candidates List of place candidates
 * @throws ExtractionException
 *             on err
 */
public List<PlaceCandidate> tagText(String buffer, String docid, boolean tagOnly, String fld, String langid) throws ExtractionException {
    // Shape of the Solr tagger response this method consumes:
    // "tagsCount":10, "tags":[{ "ids":[35], "endOffset":40,
    // "startOffset":38},
    // { "ids":[750308, 2769912, 2770041, 10413973, 10417546],
    // "endOffset":49,
    // "startOffset":41},
    // ...
    // "matchingDocs":{"numFound":75, "start":0, "docs":[ {
    // "place_id":"USGS1992921", "name":"Monterrey", "cc":"PR"}, {
    // "place_id":"USGS1991763", "name":"Monterrey", "cc":"PR"}, ]
    // Reset per-call filter counts (instance fields — this method is not thread-safe).
    this.defaultFilterCount = 0;
    this.userFilterCount = 0;
    // during post-processing tags we may have to distinguish between tagging/tokenizing
    // general vs. cjk vs. ar. But not yet though.
    // boolean useGeneralMode = DEFAULT_TAG_FIELD.equals(fld);
    long t0 = System.currentTimeMillis();
    log.debug("TEXT SIZE = {}", buffer.length());
    // Document-level case profile drives the acronym/abbreviation heuristics below.
    int[] textMetrics = TextUtils.measureCase(buffer);
    boolean isUpperCase = TextUtils.isUpperCaseDocument(textMetrics);
    boolean isLowerCase = TextUtils.isLowerCaseDocument(textMetrics);
    params.set("field", fld);
    // beanMap: gazetteer record id -> ScoredPlace, populated by the tagger call.
    Map<Integer, Object> beanMap = new HashMap<Integer, Object>(100);
    QueryResponse response = tagTextCallSolrTagger(buffer, docid, beanMap);
    @SuppressWarnings("unchecked") List<NamedList<?>> tags = (List<NamedList<?>>) response.getResponse().get("tags");
    this.tagNamesTime = response.getQTime();
    long t1 = t0 + tagNamesTime;
    long t2 = System.currentTimeMillis();
    boolean geocode = !tagOnly;
    /*
     * Retrieve all offsets into a long list. These offsets will report a
     * text span and all the gazetteer record IDs that are associated to
     * that span. The text could either be a name, a code or some other
     * abbreviation.
     *
     * For practical reasons the default behavior is to filter trivial spans
     * given the gazetteer data that is returned for them.
     *
     * WARNING: lots of optimizations occur here due to the potentially
     * large volume of tags and gazetteer data that is involved. And this is
     * relatively early in the pipeline.
     */
    log.debug("DOC={} TAGS SIZE={}", docid, tags.size());
    // Keyed by start offset; a later candidate at the same offset replaces the earlier one.
    TreeMap<Integer, PlaceCandidate> candidates = new TreeMap<Integer, PlaceCandidate>();
    // names matched is used only for debugging, currently.
    Set<String> namesMatched = new HashSet<>();
    // Labeled loop: the inner gazetteer-record loop can abandon a whole tag via "continue tagLoop".
    tagLoop: for (NamedList<?> tag : tags) {
        int x1 = (Integer) tag.get("startOffset");
        int x2 = (Integer) tag.get("endOffset");
        int len = x2 - x1;
        if (len == 1) {
            // Ignoring place names whose length is less than 2 chars
            ++this.defaultFilterCount;
            continue;
        }
        // endOffset is +1 char after the last matched char.
        // Could have enabled the "matchText" option from the tagger to get
        // this, but since we already have the content as a String then
        // we might as well not make the tagger do any more work.
        String matchText = (String) tag.get("matchText");
        // Get chars immediately before/after the match, for light NLP rules.
        char postChar = 0;
        char preChar = 0;
        if (x2 < buffer.length()) {
            postChar = buffer.charAt(x2);
        }
        if (x1 > 0) {
            preChar = buffer.charAt(x1 - 1);
            // e.g., possessive/apostrophe context invalidates the match.
            if (assessApostrophe(preChar, matchText)) {
                ++this.defaultFilterCount;
                continue;
            }
        }
        // Short ASCII matches (< 3 chars) must be fully uppercase to
        // be allowed. If lowercase abbreviations are allowed, then all matches are passed.
        if (len < 3) {
            if (!allowLowercaseAbbrev) {
                if (TextUtils.isASCII(matchText) && !StringUtils.isAllUpperCase(matchText)) {
                    ++this.defaultFilterCount;
                    continue;
                }
            }
        }
        if (TextUtils.countFormattingSpace(matchText) > 1) {
            // Phrases with words broken across more than one line are not
            // valid matches.
            // Phrase with a single TAB is okay
            ++this.defaultFilterCount;
            continue;
        }
        // Eliminate any newlines and extra whitespace in match
        matchText = TextUtils.squeeze_whitespace(matchText);
        /*
         * Filter out trivial tags. Due to normalization, we tend to get
         * lots of false positives that can be eliminated early.  This is
         * testing matches against the most general set of stop words.
         */
        if (filter.filterOut(matchText)) {
            ++this.defaultFilterCount;
            continue;
        }
        PlaceCandidate pc = new PlaceCandidate();
        pc.start = x1;
        pc.end = x2;
        pc.setText(matchText);
        /*
         * Filter out tags that user determined ahead of time as not-places
         * for their context.
         *
         */
        if (userfilter != null) {
            if (userfilter.filterOut(pc.getTextnorm())) {
                log.debug("User Filter:{}", matchText);
                ++this.userFilterCount;
                continue;
            }
        }
        /*
         * Continent filter is needed, as many mentions of continents confuse
         * real geotagging/geocoding. Continents are kept as candidates but
         * marked filtered-out.
         */
        if (continents.filterOut(pc.getTextnorm())) {
            pc.isContinent = true;
            pc.setFilteredOut(true);
            candidates.put(pc.start, pc);
            continue;
        }
        /*
         * Further testing is done if lang ID is provided AND if we have a stop list
         * for that language.  Otherwise, short terms are filtered out if they appear in any lang stop list.
         * NOTE: internally TagFilter here checks only languages other than English, Spanish and Vietnamese.
         */
        if (filter.filterOut(pc, langid, isUpperCase, isLowerCase)) {
            ++this.defaultFilterCount;
            log.debug("STOPWORD {} {}", langid, pc.getText());
            continue;
        }
        /*
         * Found UPPER CASE text in a mixed-cased document.
         * Conservatively, this is likely an acronym or some heading.
         * But possibly still a valid place name.
         * HEURISTIC: acronyms are relatively short.
         * HEURISTIC: region codes can be acronyms and are valid places
         *
         * using such place candidates you may score short acronym matches lower than fully named ones.
         * when inferring boundaries (states, provinces, etc)
         */
        if (!isUpperCase && pc.isUpper() && len < 5) {
            pc.isAcronym = true;
        }
        pc.hasDiacritics = TextUtils.hasDiacritics(pc.getText());
        pc.setSurroundingTokens(buffer);
        @SuppressWarnings("unchecked") List<Integer> placeRecordIds = (List<Integer>) tag.get("ids");
        /*
         * This assertion is helpful in debugging: assert
         * placeRecordIds.size() == new
         * HashSet<Integer>(placeRecordIds).size() : "ids should be unique";
         */
        // assert!placeRecordIds.isEmpty();
        namesMatched.clear();
        //double maxNameBias = 0.0;
        for (Integer solrId : placeRecordIds) {
            log.debug("{} = {}", pc.getText(), beanMap.get(solrId));
            // Yes, we must cast here.
            // As long as createTag generates the correct type stored in
            // beanMap we are fine.
            ScoredPlace pGeo = (ScoredPlace) beanMap.get(solrId);
            //
            if (!allowLowercaseAbbrev && pGeo.isAbbreviation() && pc.isLower()) {
                log.debug("Ignore lower case term={}", pc.getText());
                // NOTE(review): "continue tagLoop" abandons the ENTIRE candidate on the
                // first lowercase abbreviation hit, not just this gazetteer record.
                // A plain "continue" would skip only this record — confirm which is intended.
                continue tagLoop;
            }
            /*
             * If text match contains "." and it matches any abbreviation,
             * mark the candidate as an abbrev. TODO: Possibly best confirm
             * this by sentence detection, as well. However, this pertains
             * to text spans that contain "." within the bounds, and not
             * likely an ending. E.g., "U.S." or "U.S" are trivial examples;
             * "US" is more ambiguous, as we need to know if document is
             * upperCase.
             *
             * Any place abbreviation will trigger isAbbreviation = true
             *
             * "IF YOU FIND US HERE"  the term 'US' is ambiguous here, so
             * it is not classified as an abbreviation. Otherwise if you have
             * "My organization YAK happens to coincide with a place named Yak.
             * But we first must determine if 'YAK' is a valid abbreviation for an actual place.
             * HEURISTIC: place abbreviations are relatively short, e.g. one word(len=7 or less)
             */
            if (len < 8 && !pc.isAbbreviation) {
                assessAbbreviation(pc, pGeo, postChar, isUpperCase);
            }
            if (log.isDebugEnabled()) {
                namesMatched.add(pGeo.getName());
            }
            /*
             * Country names are the only names you can reasonably set ahead
             * of time. All other names need to be assessed in context.
             * Negate country names, e.g., "Georgia", by exception.
             */
            if (pGeo.isCountry()) {
                pc.isCountry = true;
            }
            if (geocode) {
                pGeo.defaultHierarchicalPath();
                // Default score for geo will be calculated in PlaceCandidate
                pc.addPlace(pGeo);
            }
        }
        // Drop candidates whose every gazetteer entry was rejected above (due
        // to filtering).
        if (geocode && !pc.hasPlaces()) {
            log.debug("Place has no places={}", pc.getText());
            continue;
        } else {
            if (log.isDebugEnabled()) {
                log.debug("Text {} matched {}", pc.getText(), namesMatched);
            }
        }
        candidates.put(pc.start, pc);
    }
    // for tag
    long t3 = System.currentTimeMillis();
    // this.tagNamesTime = (int)(t1 - t0);
    this.getNamesTime = (int) (t2 - t1);
    this.totalTime = (int) (t3 - t0);
    if (log.isDebugEnabled()) {
        summarizeExtraction(candidates.values(), docid);
    }
    this.filteredTotal += this.defaultFilterCount + this.userFilterCount;
    this.matchedTotal += candidates.size();
    return new ArrayList<PlaceCandidate>(candidates.values());
}
Also used : HashMap(java.util.HashMap) NamedList(org.apache.solr.common.util.NamedList) ArrayList(java.util.ArrayList) TreeMap(java.util.TreeMap) QueryResponse(org.apache.solr.client.solrj.response.QueryResponse) ArrayList(java.util.ArrayList) NamedList(org.apache.solr.common.util.NamedList) List(java.util.List) HashSet(java.util.HashSet)

Example 5 with NamedList

use of org.apache.solr.common.util.NamedList in project Xponents by OpenSextant.

The following example shows the extractorImpl method of the TaxonMatcher class.

/**
 * Implementation details -- use with or without the formal ID/buffer
 * pairing.
 *
 * @param id
 *            doc id
 * @param buf
 *            input text
 * @return list of matches
 * @throws ExtractionException
 */
private List<TextMatch> extractorImpl(String id, String buf) throws ExtractionException {
    String documentId = (id != null ? id : NO_DOC_ID);
    String matchIdPrefix = documentId + "#";
    // Tagger side-channel: gazetteer record id -> reference data bean.
    Map<Integer, Object> taggerBeans = new HashMap<Integer, Object>(100);
    QueryResponse response = tagTextCallSolrTagger(buf, documentId, taggerBeans);
    @SuppressWarnings("unchecked")
    List<NamedList<?>> tags = (List<NamedList<?>>) response.getResponse().get("tags");
    log.debug("TAGS SIZE = {}", tags.size());
    /*
     * Convert each tagger offset entry into a TaxonMatch, keeping only
     * matches that retain at least one taxon after filtering.
     */
    List<TextMatch> matches = new ArrayList<TextMatch>();
    int sequence = 0;
    for (NamedList<?> tag : tags) {
        TaxonMatch candidate = new TaxonMatch();
        candidate.start = ((Integer) tag.get("startOffset")).intValue();
        // endOffset is +1 char after the last matched char.
        candidate.end = ((Integer) tag.get("endOffset")).intValue();
        ++sequence;
        candidate.match_id = matchIdPrefix + sequence;
        // The tagger's "matchText" is not reliable (can be null), so recover
        // the span directly from the input buffer.
        candidate.setText(buf.substring(candidate.start, candidate.end));
        if (TextUtils.countFormattingSpace(candidate.getText()) > 1) {
            // A phrase broken across multiple lines is not valid; a single TAB is okay.
            continue;
        }
        @SuppressWarnings("unchecked")
        List<Integer> taxonIds = (List<Integer>) tag.get("ids");
        for (Integer solrId : taxonIds) {
            Object refData = taggerBeans.get(solrId);
            if (refData == null) {
                continue;
            }
            Taxon taxon = (Taxon) refData;
            // Filter out non-acronym text hitting an acronym entry,
            // e.g., 'who' is not a match for 'WHO'.
            if (this.filterNonAcronyms && taxon.isAcronym && !candidate.isUpper()) {
                continue;
            }
            candidate.addTaxon(taxon);
        }
        if (candidate.hasTaxons()) {
            matches.add(candidate);
        }
    }
    log.debug("FOUND LABELS count={}", matches.size());
    return matches;
}
Also used : HashMap(java.util.HashMap) NamedList(org.apache.solr.common.util.NamedList) Taxon(org.opensextant.data.Taxon) ArrayList(java.util.ArrayList) TextMatch(org.opensextant.extraction.TextMatch) QueryResponse(org.apache.solr.client.solrj.response.QueryResponse) SolrDocumentList(org.apache.solr.common.SolrDocumentList) ArrayList(java.util.ArrayList) NamedList(org.apache.solr.common.util.NamedList) List(java.util.List)

Aggregations

NamedList (org.apache.solr.common.util.NamedList)438 Test (org.junit.Test)125 ArrayList (java.util.ArrayList)110 ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams)83 Map (java.util.Map)82 SolrException (org.apache.solr.common.SolrException)80 SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap)78 List (java.util.List)75 HashMap (java.util.HashMap)64 SolrQueryResponse (org.apache.solr.response.SolrQueryResponse)55 IOException (java.io.IOException)53 SolrDocumentList (org.apache.solr.common.SolrDocumentList)45 QueryRequest (org.apache.solr.client.solrj.request.QueryRequest)35 SolrQueryRequest (org.apache.solr.request.SolrQueryRequest)35 SolrParams (org.apache.solr.common.params.SolrParams)31 LocalSolrQueryRequest (org.apache.solr.request.LocalSolrQueryRequest)31 QueryResponse (org.apache.solr.client.solrj.response.QueryResponse)30 SolrCore (org.apache.solr.core.SolrCore)30 HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient)27 SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher)27