Example 46 with SolrParams

Use of org.apache.solr.common.params.SolrParams in project lucene-solr by apache.

Source: class HighlightComponent, method process().

@Override
public void process(ResponseBuilder rb) throws IOException {
    if (rb.doHighlights) {
        SolrQueryRequest req = rb.req;
        SolrParams params = req.getParams();
        SolrHighlighter highlighter = getHighlighter(params);
        //TODO: get from builder by default?
        String[] defaultHighlightFields = rb.getQparser() != null ? rb.getQparser().getDefaultHighlightFields() : null;
        Query highlightQuery = rb.getHighlightQuery();
        if (highlightQuery == null) {
            if (rb.getQparser() != null) {
                try {
                    highlightQuery = rb.getQparser().getHighlightQuery();
                    rb.setHighlightQuery(highlightQuery);
                } catch (Exception e) {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
                }
            } else {
                highlightQuery = rb.getQuery();
                rb.setHighlightQuery(highlightQuery);
            }
        }
        // No highlighting if there is no query -- consider q.alt=*:*
        if (highlightQuery != null) {
            NamedList sumData = highlighter.doHighlighting(rb.getResults().docList, highlightQuery, req, defaultHighlightFields);
            if (sumData != null) {
                // TODO ???? add this directly to the response?
                rb.rsp.add("highlighting", sumData);
            }
        }
    }
}
Also used: UnifiedSolrHighlighter(org.apache.solr.highlight.UnifiedSolrHighlighter) DefaultSolrHighlighter(org.apache.solr.highlight.DefaultSolrHighlighter) SolrHighlighter(org.apache.solr.highlight.SolrHighlighter) PostingsSolrHighlighter(org.apache.solr.highlight.PostingsSolrHighlighter) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) Query(org.apache.lucene.search.Query) NamedList(org.apache.solr.common.util.NamedList) SolrParams(org.apache.solr.common.params.SolrParams) SolrException(org.apache.solr.common.SolrException) IOException(java.io.IOException)
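
To drive this path from a client, highlighting must be enabled in the request parameters so that rb.doHighlights is set during prepare(). A minimal SolrJ sketch, assuming a locally running Solr with a hypothetical core "gettingstarted" and field "title":

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class HighlightQueryExample {
    public static void main(String[] args) throws Exception {
        // Core name and field are hypothetical placeholders.
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/gettingstarted").build()) {
            SolrQuery query = new SolrQuery("title:solr");
            query.setHighlight(true);         // hl=true, so rb.doHighlights becomes true
            query.addHighlightField("title"); // hl.fl=title
            QueryResponse rsp = client.query(query);
            // The "highlighting" section that process() adds to the response:
            System.out.println(rsp.getHighlighting());
        }
    }
}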

Example 47 with SolrParams

Use of org.apache.solr.common.params.SolrParams in project lucene-solr by apache.

Source: class HighlightComponent, method prepare().

@Override
public void prepare(ResponseBuilder rb) throws IOException {
    SolrParams params = rb.req.getParams();
    rb.doHighlights = solrConfigHighlighter.isHighlightingEnabled(params);
    if (rb.doHighlights) {
        rb.setNeedDocList(true);
        String hlq = params.get(HighlightParams.Q);
        // Parser fallback chain: hl.qparser, then defType, then the default "lucene".
        String hlparser = Objects.firstNonNull(params.get(HighlightParams.QPARSER), params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE));
        if (hlq != null) {
            try {
                QParser parser = QParser.getParser(hlq, hlparser, rb.req);
                rb.setHighlightQuery(parser.getHighlightQuery());
            } catch (SyntaxError e) {
                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
            }
        }
    }
}
Also used: SyntaxError(org.apache.solr.search.SyntaxError) QParser(org.apache.solr.search.QParser) SolrParams(org.apache.solr.common.params.SolrParams) SolrException(org.apache.solr.common.SolrException)
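
The hl.q and hl.qparser parameters consulted by prepare() can be supplied like this; a minimal sketch with hypothetical query strings (parameter names come from HighlightParams):

import org.apache.solr.common.params.HighlightParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;

static SolrParams highlightRequestParams() {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("q", "text:apache AND inStock:true"); // main query (hypothetical)
    params.set(HighlightParams.HIGHLIGHT, true);     // hl=true, so isHighlightingEnabled passes
    params.set(HighlightParams.Q, "text:apache");    // hl.q, read by prepare()
    params.set(HighlightParams.QPARSER, "lucene");   // hl.qparser; falls back to defType, then "lucene"
    return params;
}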

Example 48 with SolrParams

Use of org.apache.solr.common.params.SolrParams in project lucene-solr by apache.

Source: class HttpShardHandler, method prepDistributed().

@Override
public void prepDistributed(ResponseBuilder rb) {
    final SolrQueryRequest req = rb.req;
    final SolrParams params = req.getParams();
    final String shards = params.get(ShardParams.SHARDS);
    // since the cost of grabbing cloud state is still up in the air, we grab it only
    // if we need it.
    ClusterState clusterState = null;
    Map<String, Slice> slices = null;
    CoreDescriptor coreDescriptor = req.getCore().getCoreDescriptor();
    CloudDescriptor cloudDescriptor = coreDescriptor.getCloudDescriptor();
    ZkController zkController = req.getCore().getCoreContainer().getZkController();
    final ReplicaListTransformer replicaListTransformer = httpShardHandlerFactory.getReplicaListTransformer(req);
    if (shards != null) {
        List<String> lst = StrUtils.splitSmart(shards, ",", true);
        rb.shards = lst.toArray(new String[lst.size()]);
        rb.slices = new String[rb.shards.length];
        if (zkController != null) {
            // figure out which shards are slices
            for (int i = 0; i < rb.shards.length; i++) {
                if (rb.shards[i].indexOf('/') < 0) {
                    // this is a logical shard
                    rb.slices[i] = rb.shards[i];
                    rb.shards[i] = null;
                }
            }
        }
    } else if (zkController != null) {
        // we weren't provided with an explicit list of slices to query via "shards", so use the cluster state
        clusterState = zkController.getClusterState();
        String shardKeys = params.get(ShardParams._ROUTE_);
        // This will be the complete list of slices we need to query for this request.
        slices = new HashMap<>();
        // we need to find out what collections this request is for.
        // A comma-separated list of specified collections.
        // Eg: "collection1,collection2,collection3"
        String collections = params.get("collection");
        if (collections != null) {
            // If there were one or more collections specified in the query, split
            // each parameter and store as a separate member of a List.
            List<String> collectionList = StrUtils.splitSmart(collections, ",", true);
            // In turn, retrieve the slices that cover each collection from the cloud state and add them to the Map 'slices'.
            for (String collectionName : collectionList) {
                // The original code produced <collection-name>_<shard-name> when the collections
                // parameter was specified (see ClientUtils.appendMap)
                // Is this necessary if only one collection is specified?
                // i.e. should we change multiCollection to collectionList.size() > 1?
                addSlices(slices, clusterState, params, collectionName, shardKeys, true);
            }
        } else {
            // just this collection
            String collectionName = cloudDescriptor.getCollectionName();
            addSlices(slices, clusterState, params, collectionName, shardKeys, false);
        }
        // Store the logical slices in the ResponseBuilder and create a new
        // String array to hold the physical shards (which will be mapped
        // later).
        rb.slices = slices.keySet().toArray(new String[slices.size()]);
        rb.shards = new String[rb.slices.length];
    }
    // Map each logical slice to the '|'-delimited list of physical shard URLs that serve it.
    if (zkController != null) {
        // Are we hosting the shard that this request is for, and are we active? If so, then handle it ourselves
        // and make it a non-distributed request.
        String ourSlice = cloudDescriptor.getShardId();
        String ourCollection = cloudDescriptor.getCollectionName();
        // Some requests may only be fulfilled by replicas of type Replica.Type.NRT
        boolean onlyNrtReplicas = Boolean.TRUE == req.getContext().get(ONLY_NRT_REPLICAS);
        if (rb.slices.length == 1 && rb.slices[0] != null
                // handle the <collection>_<slice> format
                && (rb.slices[0].equals(ourSlice) || rb.slices[0].equals(ourCollection + "_" + ourSlice))
                && cloudDescriptor.getLastPublished() == Replica.State.ACTIVE
                && (!onlyNrtReplicas || cloudDescriptor.getReplicaType() == Replica.Type.NRT)) {
            // currently just a debugging parameter to check distrib search on a single node
            boolean shortCircuit = params.getBool("shortCircuit", true);
            String targetHandler = params.get(ShardParams.SHARDS_QT);
            // if a different handler is specified, don't short-circuit
            shortCircuit = shortCircuit && targetHandler == null;
            if (shortCircuit) {
                rb.isDistrib = false;
                rb.shortCircuitedURL = ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), coreDescriptor.getName());
                return;
            }
        // We shouldn't need to do anything to handle "shard.rows" since it was previously meant to be an optimization?
        }
        for (int i = 0; i < rb.shards.length; i++) {
            final List<String> shardUrls;
            if (rb.shards[i] != null) {
                shardUrls = StrUtils.splitSmart(rb.shards[i], "|", true);
                replicaListTransformer.transform(shardUrls);
            } else {
                if (clusterState == null) {
                    clusterState = zkController.getClusterState();
                    slices = clusterState.getSlicesMap(cloudDescriptor.getCollectionName());
                }
                String sliceName = rb.slices[i];
                Slice slice = slices.get(sliceName);
                if (slice == null) {
                    // Treat this the same as "all servers down" for a slice, and let things continue
                    // if partial results are acceptable
                    rb.shards[i] = "";
                    continue;
                // throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such shard: " + sliceName);
                }
                final Predicate<Replica> isShardLeader = new Predicate<Replica>() {

                    private Replica shardLeader = null;

                    @Override
                    public boolean test(Replica replica) {
                        if (shardLeader == null) {
                            try {
                                shardLeader = zkController.getZkStateReader().getLeaderRetry(cloudDescriptor.getCollectionName(), slice.getName());
                            } catch (InterruptedException e) {
                                throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Exception finding leader for shard " + slice.getName() + " in collection " + cloudDescriptor.getCollectionName(), e);
                            } catch (SolrException e) {
                                if (log.isDebugEnabled()) {
                                    log.debug("Exception finding leader for shard {} in collection {}. Collection State: {}", slice.getName(), cloudDescriptor.getCollectionName(), zkController.getZkStateReader().getClusterState().getCollectionOrNull(cloudDescriptor.getCollectionName()));
                                }
                                throw e;
                            }
                        }
                        return replica.getName().equals(shardLeader.getName());
                    }
                };
                final List<Replica> eligibleSliceReplicas = collectEligibleReplicas(slice, clusterState, onlyNrtReplicas, isShardLeader);
                replicaListTransformer.transform(eligibleSliceReplicas);
                shardUrls = new ArrayList<>(eligibleSliceReplicas.size());
                for (Replica replica : eligibleSliceReplicas) {
                    String url = ZkCoreNodeProps.getCoreUrl(replica);
                    shardUrls.add(url);
                }
                if (shardUrls.isEmpty()) {
                    boolean tolerant = rb.req.getParams().getBool(ShardParams.SHARDS_TOLERANT, false);
                    if (!tolerant) {
                        // stop the check when there are no replicas available for a shard
                        throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "no servers hosting shard: " + rb.slices[i]);
                    }
                }
            }
            // And now recreate the | delimited list of equivalent servers
            rb.shards[i] = createSliceShardsStr(shardUrls);
        }
    }
    String shards_rows = params.get(ShardParams.SHARDS_ROWS);
    if (shards_rows != null) {
        rb.shards_rows = Integer.parseInt(shards_rows);
    }
    String shards_start = params.get(ShardParams.SHARDS_START);
    if (shards_start != null) {
        rb.shards_start = Integer.parseInt(shards_start);
    }
}
Also used: ClusterState(org.apache.solr.common.cloud.ClusterState) HashMap(java.util.HashMap) CoreDescriptor(org.apache.solr.core.CoreDescriptor) Replica(org.apache.solr.common.cloud.Replica) CloudDescriptor(org.apache.solr.cloud.CloudDescriptor) Predicate(java.util.function.Predicate) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) Slice(org.apache.solr.common.cloud.Slice) ZkController(org.apache.solr.cloud.ZkController) SolrParams(org.apache.solr.common.params.SolrParams) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) ArrayList(java.util.ArrayList) NamedList(org.apache.solr.common.util.NamedList) List(java.util.List) SolrException(org.apache.solr.common.SolrException)
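
The parameter shapes this method parses can also be set explicitly; a minimal sketch with hypothetical hosts and core names. Commas separate shards, '|' separates equivalent replicas of one shard (the same format createSliceShardsStr rebuilds), and in SolrCloud an entry containing no '/' is treated as a logical slice name:

import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.params.SolrParams;

static SolrParams distributedRequestParams() {
    ModifiableSolrParams params = new ModifiableSolrParams();
    // Hosts and cores are hypothetical.
    params.set(ShardParams.SHARDS,
        "host1:8983/solr/core1|host2:8983/solr/core1,host3:8983/solr/core2");
    params.set(ShardParams.SHARDS_TOLERANT, true); // tolerate a slice with no live replicas
    params.set(ShardParams.SHARDS_ROWS, 20);       // per-shard rows override
    params.set(ShardParams.SHARDS_START, 0);       // per-shard start override
    return params;
}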

Example 49 with SolrParams

Use of org.apache.solr.common.params.SolrParams in project lucene-solr by apache.

Source: class LukeRequestHandler, method getDetailedFieldInfo().

// Get terribly detailed information about a particular field. This is a very expensive call;
// use it with caution, especially on large indexes!
@SuppressWarnings("unchecked")
private static void getDetailedFieldInfo(SolrQueryRequest req, String field, SimpleOrderedMap<Object> fieldMap) throws IOException {
    SolrParams params = req.getParams();
    final int numTerms = params.getInt(NUMTERMS, DEFAULT_COUNT);
    // Something to collect the top N terms in.
    TopTermQueue tiq = new TopTermQueue(numTerms + 1);
    final CharsRefBuilder spare = new CharsRefBuilder();
    Terms terms = MultiFields.getTerms(req.getSearcher().getIndexReader(), field);
    if (terms == null) {
        // field does not exist
        return;
    }
    TermsEnum termsEnum = terms.iterator();
    BytesRef text;
    int[] buckets = new int[HIST_ARRAY_SIZE];
    while ((text = termsEnum.next()) != null) {
        ++tiq.distinctTerms;
        // This calculation seems odd, but it gives the same results as it used to.
        // slot = ceil(log2(freq)): bucket s counts terms whose docFreq falls in (2^(s-1), 2^s].
        int freq = termsEnum.docFreq();
        int slot = 32 - Integer.numberOfLeadingZeros(Math.max(0, freq - 1));
        buckets[slot] = buckets[slot] + 1;
        if (numTerms > 0 && freq > tiq.minFreq) {
            spare.copyUTF8Bytes(text);
            String t = spare.toString();
            tiq.add(new TopTermQueue.TermInfo(new Term(field, t), termsEnum.docFreq()));
            if (tiq.size() > numTerms) {
                // if tiq full
                // remove lowest in tiq
                tiq.pop();
                tiq.minFreq = tiq.getTopTermInfo().docFreq;
            }
        }
    }
    tiq.histogram.add(buckets);
    fieldMap.add("distinct", tiq.distinctTerms);
    // Include top terms
    fieldMap.add("topTerms", tiq.toNamedList(req.getSearcher().getSchema()));
    // Add a histogram
    fieldMap.add("histogram", tiq.histogram.toNamedList());
}
Also used: Terms(org.apache.lucene.index.Terms) SolrParams(org.apache.solr.common.params.SolrParams) CharsRefBuilder(org.apache.lucene.util.CharsRefBuilder) Term(org.apache.lucene.index.Term) BytesRef(org.apache.lucene.util.BytesRef) TermsEnum(org.apache.lucene.index.TermsEnum)
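
A client reaches this code through the Luke request handler; a hedged SolrJ sketch, assuming a hypothetical core and field and the stock /admin/luke mount point:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

static void printFieldDetail() throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("fl", "title");  // limit to one field; this call is expensive on large indexes
    params.set("numTerms", 25); // the NUMTERMS parameter read above
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/gettingstarted").build()) {
        GenericSolrRequest luke = new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/luke", params);
        System.out.println(luke.process(client).getResponse().get("fields"));
    }
}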

Example 50 with SolrParams

Use of org.apache.solr.common.params.SolrParams in project lucene-solr by apache.

Source: class MergeIndexesOp, method execute().

@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
    SolrParams params = it.req.getParams();
    String cname = params.required().get(CoreAdminParams.CORE);
    SolrCore core = it.handler.coreContainer.getCore(cname);
    SolrQueryRequest wrappedReq = null;
    List<SolrCore> sourceCores = Lists.newArrayList();
    List<RefCounted<SolrIndexSearcher>> searchers = Lists.newArrayList();
    // stores readers created from indexDir param values
    List<DirectoryReader> readersToBeClosed = Lists.newArrayList();
    Map<Directory, Boolean> dirsToBeReleased = new HashMap<>();
    if (core != null) {
        try {
            String[] dirNames = params.getParams(CoreAdminParams.INDEX_DIR);
            if (dirNames == null || dirNames.length == 0) {
                String[] sources = params.getParams("srcCore");
                if (sources == null || sources.length == 0)
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "At least one indexDir or srcCore must be specified");
                for (int i = 0; i < sources.length; i++) {
                    String source = sources[i];
                    SolrCore srcCore = it.handler.coreContainer.getCore(source);
                    if (srcCore == null)
                        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core: " + source + " does not exist");
                    sourceCores.add(srcCore);
                }
            } else {
                DirectoryFactory dirFactory = core.getDirectoryFactory();
                for (int i = 0; i < dirNames.length; i++) {
                    boolean markAsDone = false;
                    if (dirFactory instanceof CachingDirectoryFactory) {
                        if (!((CachingDirectoryFactory) dirFactory).getLivePaths().contains(dirNames[i])) {
                            markAsDone = true;
                        }
                    }
                    Directory dir = dirFactory.get(dirNames[i], DirectoryFactory.DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType);
                    dirsToBeReleased.put(dir, markAsDone);
                    // TODO: why doesn't this use the IR factory? what is going on here?
                    readersToBeClosed.add(DirectoryReader.open(dir));
                }
            }
            List<DirectoryReader> readers = null;
            if (readersToBeClosed.size() > 0) {
                readers = readersToBeClosed;
            } else {
                readers = Lists.newArrayList();
                for (SolrCore solrCore : sourceCores) {
                    // record the searchers so that we can decref
                    RefCounted<SolrIndexSearcher> searcher = solrCore.getSearcher();
                    searchers.add(searcher);
                    readers.add(searcher.get().getIndexReader());
                }
            }
            UpdateRequestProcessorChain processorChain = core.getUpdateProcessingChain(params.get(UpdateParams.UPDATE_CHAIN));
            wrappedReq = new LocalSolrQueryRequest(core, it.req.getParams());
            UpdateRequestProcessor processor = processorChain.createProcessor(wrappedReq, it.rsp);
            processor.processMergeIndexes(new MergeIndexesCommand(readers, it.req));
        } catch (Exception e) {
            // log and rethrow so that if the finally fails we don't lose the original problem
            log.error("ERROR executing merge:", e);
            throw e;
        } finally {
            for (RefCounted<SolrIndexSearcher> searcher : searchers) {
                if (searcher != null)
                    searcher.decref();
            }
            for (SolrCore solrCore : sourceCores) {
                if (solrCore != null)
                    solrCore.close();
            }
            IOUtils.closeWhileHandlingException(readersToBeClosed);
            Set<Map.Entry<Directory, Boolean>> entries = dirsToBeReleased.entrySet();
            for (Map.Entry<Directory, Boolean> entry : entries) {
                DirectoryFactory dirFactory = core.getDirectoryFactory();
                Directory dir = entry.getKey();
                boolean markAsDone = entry.getValue();
                if (markAsDone) {
                    dirFactory.doneWithDirectory(dir);
                }
                dirFactory.release(dir);
            }
            if (wrappedReq != null)
                wrappedReq.close();
            core.close();
        }
    }
}
Also used: HashMap(java.util.HashMap) SolrCore(org.apache.solr.core.SolrCore) UpdateRequestProcessorChain(org.apache.solr.update.processor.UpdateRequestProcessorChain) DirectoryFactory(org.apache.solr.core.DirectoryFactory) CachingDirectoryFactory(org.apache.solr.core.CachingDirectoryFactory) UpdateRequestProcessor(org.apache.solr.update.processor.UpdateRequestProcessor) SolrException(org.apache.solr.common.SolrException) Directory(org.apache.lucene.store.Directory) DirectoryReader(org.apache.lucene.index.DirectoryReader) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) MergeIndexesCommand(org.apache.solr.update.MergeIndexesCommand) RefCounted(org.apache.solr.util.RefCounted) SolrParams(org.apache.solr.common.params.SolrParams) Map(java.util.Map)
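
This operation is normally triggered through the CoreAdmin API; a minimal SolrJ sketch with hypothetical core and directory names, matching the two input branches handled above (indexDir values or srcCore names):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest;

static void mergeIntoCore() throws Exception {
    // Base URL points at the Solr root, not a core; all names are hypothetical.
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
        CoreAdminRequest.mergeIndexes(
            "targetCore",                                  // CoreAdminParams.CORE (required)
            new String[] { "/var/solr/data/other/index" }, // indexDir branch
            new String[0],                                 // srcCore branch (alternative)
            client);
    }
}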

Aggregations

SolrParams (org.apache.solr.common.params.SolrParams): 310
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 179
SolrException (org.apache.solr.common.SolrException): 78
Test (org.junit.Test): 45
Tuple (org.apache.solr.client.solrj.io.Tuple): 43
SolrDocument (org.apache.solr.common.SolrDocument): 42
ArrayList (java.util.ArrayList): 41
NamedList (org.apache.solr.common.util.NamedList): 40
MapSolrParams (org.apache.solr.common.params.MapSolrParams): 37
SolrQueryRequest (org.apache.solr.request.SolrQueryRequest): 37
IOException (java.io.IOException): 35
SolrDocumentList (org.apache.solr.common.SolrDocumentList): 34
HashMap (java.util.HashMap): 33
QueryResponse (org.apache.solr.client.solrj.response.QueryResponse): 30
SolrClientCache (org.apache.solr.client.solrj.io.SolrClientCache): 27
UpdateRequest (org.apache.solr.client.solrj.request.UpdateRequest): 26
SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap): 24
Map (java.util.Map): 22
SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher): 22
SolrCore (org.apache.solr.core.SolrCore): 20