Example 26 with SolrQueryRequest

Use of org.apache.solr.request.SolrQueryRequest in project lucene-solr by apache.

The class ExpandComponent, method process.

@SuppressWarnings("unchecked")
@Override
public void process(ResponseBuilder rb) throws IOException {
    if (!rb.doExpand) {
        return;
    }
    SolrQueryRequest req = rb.req;
    SolrParams params = req.getParams();
    String field = params.get(ExpandParams.EXPAND_FIELD);
    String hint = null;
    if (field == null) {
        List<Query> filters = rb.getFilters();
        if (filters != null) {
            for (Query q : filters) {
                if (q instanceof CollapsingQParserPlugin.CollapsingPostFilter) {
                    CollapsingQParserPlugin.CollapsingPostFilter cp = (CollapsingQParserPlugin.CollapsingPostFilter) q;
                    field = cp.getField();
                    hint = cp.hint;
                }
            }
        }
    }
    if (field == null) {
        throw new IOException("Expand field is null.");
    }
    String sortParam = params.get(ExpandParams.EXPAND_SORT);
    String[] fqs = params.getParams(ExpandParams.EXPAND_FQ);
    String qs = params.get(ExpandParams.EXPAND_Q);
    int limit = params.getInt(ExpandParams.EXPAND_ROWS, 5);
    Sort sort = null;
    if (sortParam != null) {
        sort = SortSpecParsing.parseSortSpec(sortParam, rb.req).getSort();
    }
    Query query;
    if (qs == null) {
        query = rb.getQuery();
    } else {
        try {
            QParser parser = QParser.getParser(qs, req);
            query = parser.getQuery();
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
    List<Query> newFilters = new ArrayList<>();
    if (fqs == null) {
        List<Query> filters = rb.getFilters();
        if (filters != null) {
            for (Query q : filters) {
                if (!(q instanceof CollapsingQParserPlugin.CollapsingPostFilter)) {
                    newFilters.add(q);
                }
            }
        }
    } else {
        try {
            for (String fq : fqs) {
                if (fq != null && fq.trim().length() != 0 && !fq.equals("*:*")) {
                    QParser fqp = QParser.getParser(fq, req);
                    newFilters.add(fqp.getQuery());
                }
            }
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
    SolrIndexSearcher searcher = req.getSearcher();
    LeafReader reader = searcher.getSlowAtomicReader();
    SchemaField schemaField = searcher.getSchema().getField(field);
    FieldType fieldType = schemaField.getType();
    SortedDocValues values = null;
    long nullValue = 0L;
    if (fieldType instanceof StrField) {
        //Get The Top Level SortedDocValues
        if (CollapsingQParserPlugin.HINT_TOP_FC.equals(hint)) {
            Map<String, UninvertingReader.Type> mapping = new HashMap<>();
            mapping.put(field, UninvertingReader.Type.SORTED);
            UninvertingReader uninvertingReader = new UninvertingReader(new ReaderWrapper(searcher.getSlowAtomicReader(), field), mapping);
            values = uninvertingReader.getSortedDocValues(field);
        } else {
            values = DocValues.getSorted(reader, field);
        }
    } else {
        //Get the nullValue for the numeric collapse field
        String defaultValue = searcher.getSchema().getField(field).getDefaultValue();
        final NumberType numType = fieldType.getNumberType();
        // Since the collapse component validates that numeric field types are 32-bit,
        // we don't need to handle invalid 64-bit field types here.
        if (defaultValue != null) {
            if (numType == NumberType.INTEGER) {
                nullValue = Long.parseLong(defaultValue);
            } else if (numType == NumberType.FLOAT) {
                nullValue = Float.floatToIntBits(Float.parseFloat(defaultValue));
            }
        } else if (NumberType.FLOAT.equals(numType)) {
            // Integer case already handled by nullValue defaulting to 0
            nullValue = Float.floatToIntBits(0.0f);
        }
    }
    FixedBitSet groupBits = null;
    LongHashSet groupSet = null;
    DocList docList = rb.getResults().docList;
    IntHashSet collapsedSet = new IntHashSet(docList.size() * 2);
    //Gather the groups for the current page of documents
    DocIterator idit = docList.iterator();
    int[] globalDocs = new int[docList.size()];
    int docsIndex = -1;
    while (idit.hasNext()) {
        globalDocs[++docsIndex] = idit.nextDoc();
    }
    Arrays.sort(globalDocs);
    Query groupQuery = null;
    /*
    * This code gathers the group information for the current page.
    */
    List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
    if (contexts.size() == 0) {
        //When no context is available we can skip the expanding
        return;
    }
    int currentContext = 0;
    int currentDocBase = contexts.get(currentContext).docBase;
    int nextDocBase = (currentContext + 1) < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
    IntObjectHashMap<BytesRef> ordBytes = null;
    if (values != null) {
        groupBits = new FixedBitSet(values.getValueCount());
        MultiDocValues.OrdinalMap ordinalMap = null;
        SortedDocValues[] sortedDocValues = null;
        LongValues segmentOrdinalMap = null;
        SortedDocValues currentValues = null;
        if (values instanceof MultiDocValues.MultiSortedDocValues) {
            ordinalMap = ((MultiDocValues.MultiSortedDocValues) values).mapping;
            sortedDocValues = ((MultiDocValues.MultiSortedDocValues) values).values;
            currentValues = sortedDocValues[currentContext];
            segmentOrdinalMap = ordinalMap.getGlobalOrds(currentContext);
        }
        int count = 0;
        ordBytes = new IntObjectHashMap<>();
        for (int i = 0; i < globalDocs.length; i++) {
            int globalDoc = globalDocs[i];
            while (globalDoc >= nextDocBase) {
                currentContext++;
                currentDocBase = contexts.get(currentContext).docBase;
                nextDocBase = (currentContext + 1) < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
                if (ordinalMap != null) {
                    currentValues = sortedDocValues[currentContext];
                    segmentOrdinalMap = ordinalMap.getGlobalOrds(currentContext);
                }
            }
            int contextDoc = globalDoc - currentDocBase;
            if (ordinalMap != null) {
                if (contextDoc > currentValues.docID()) {
                    currentValues.advance(contextDoc);
                }
                if (contextDoc == currentValues.docID()) {
                    int ord = currentValues.ordValue();
                    ++count;
                    BytesRef ref = currentValues.lookupOrd(ord);
                    ord = (int) segmentOrdinalMap.get(ord);
                    ordBytes.put(ord, BytesRef.deepCopyOf(ref));
                    groupBits.set(ord);
                    collapsedSet.add(globalDoc);
                }
            } else {
                if (globalDoc > values.docID()) {
                    values.advance(globalDoc);
                }
                if (globalDoc == values.docID()) {
                    int ord = values.ordValue();
                    ++count;
                    BytesRef ref = values.lookupOrd(ord);
                    ordBytes.put(ord, BytesRef.deepCopyOf(ref));
                    groupBits.set(ord);
                    collapsedSet.add(globalDoc);
                }
            }
        }
        if (count > 0 && count < 200) {
            try {
                groupQuery = getGroupQuery(field, count, ordBytes);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }
    } else {
        groupSet = new LongHashSet(docList.size());
        NumericDocValues collapseValues = contexts.get(currentContext).reader().getNumericDocValues(field);
        int count = 0;
        for (int i = 0; i < globalDocs.length; i++) {
            int globalDoc = globalDocs[i];
            while (globalDoc >= nextDocBase) {
                currentContext++;
                currentDocBase = contexts.get(currentContext).docBase;
                nextDocBase = currentContext + 1 < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
                collapseValues = contexts.get(currentContext).reader().getNumericDocValues(field);
            }
            int contextDoc = globalDoc - currentDocBase;
            int valueDocID = collapseValues.docID();
            if (valueDocID < contextDoc) {
                valueDocID = collapseValues.advance(contextDoc);
            }
            long value;
            if (valueDocID == contextDoc) {
                value = collapseValues.longValue();
            } else {
                value = 0;
            }
            if (value != nullValue) {
                ++count;
                groupSet.add(value);
                collapsedSet.add(globalDoc);
            }
        }
        if (count > 0 && count < 200) {
            if (fieldType.isPointField()) {
                groupQuery = getPointGroupQuery(schemaField, count, groupSet);
            } else {
                groupQuery = getGroupQuery(field, fieldType, count, groupSet);
            }
        }
    }
    Collector collector;
    if (sort != null)
        sort = sort.rewrite(searcher);
    Collector groupExpandCollector = null;
    if (values != null) {
        //Get The Top Level SortedDocValues again so we can re-iterate:
        if (CollapsingQParserPlugin.HINT_TOP_FC.equals(hint)) {
            Map<String, UninvertingReader.Type> mapping = new HashMap<>();
            mapping.put(field, UninvertingReader.Type.SORTED);
            UninvertingReader uninvertingReader = new UninvertingReader(new ReaderWrapper(searcher.getSlowAtomicReader(), field), mapping);
            values = uninvertingReader.getSortedDocValues(field);
        } else {
            values = DocValues.getSorted(reader, field);
        }
        groupExpandCollector = new GroupExpandCollector(values, groupBits, collapsedSet, limit, sort);
    } else {
        groupExpandCollector = new NumericGroupExpandCollector(field, nullValue, groupSet, collapsedSet, limit, sort);
    }
    if (groupQuery != null) {
        //Limits the results to documents that are in the same group as the documents in the page.
        newFilters.add(groupQuery);
    }
    SolrIndexSearcher.ProcessedFilter pfilter = searcher.getProcessedFilter(null, newFilters);
    if (pfilter.postFilter != null) {
        pfilter.postFilter.setLastDelegate(groupExpandCollector);
        collector = pfilter.postFilter;
    } else {
        collector = groupExpandCollector;
    }
    if (pfilter.filter == null) {
        searcher.search(query, collector);
    } else {
        Query q = new BooleanQuery.Builder().add(query, Occur.MUST).add(pfilter.filter, Occur.FILTER).build();
        searcher.search(q, collector);
    }
    LongObjectMap<Collector> groups = ((GroupCollector) groupExpandCollector).getGroups();
    NamedList outMap = new SimpleOrderedMap();
    CharsRefBuilder charsRef = new CharsRefBuilder();
    for (LongObjectCursor<Collector> cursor : groups) {
        long groupValue = cursor.key;
        TopDocsCollector<?> topDocsCollector = TopDocsCollector.class.cast(cursor.value);
        TopDocs topDocs = topDocsCollector.topDocs();
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        if (scoreDocs.length > 0) {
            int[] docs = new int[scoreDocs.length];
            float[] scores = new float[scoreDocs.length];
            for (int i = 0; i < docs.length; i++) {
                ScoreDoc scoreDoc = scoreDocs[i];
                docs[i] = scoreDoc.doc;
                scores[i] = scoreDoc.score;
            }
            DocSlice slice = new DocSlice(0, docs.length, docs, scores, topDocs.totalHits, topDocs.getMaxScore());
            if (fieldType instanceof StrField) {
                final BytesRef bytesRef = ordBytes.get((int) groupValue);
                fieldType.indexedToReadable(bytesRef, charsRef);
                String group = charsRef.toString();
                outMap.add(group, slice);
            } else {
                outMap.add(numericToString(fieldType, groupValue), slice);
            }
        }
    }
    rb.rsp.add("expanded", outMap);
}
Also used : StrField(org.apache.solr.schema.StrField) BooleanQuery(org.apache.lucene.search.BooleanQuery) Query(org.apache.lucene.search.Query) TermInSetQuery(org.apache.lucene.search.TermInSetQuery) SolrConstantScoreQuery(org.apache.solr.search.SolrConstantScoreQuery) HashMap(java.util.HashMap) LongObjectHashMap(com.carrotsearch.hppc.LongObjectHashMap) IntObjectHashMap(com.carrotsearch.hppc.IntObjectHashMap) ArrayList(java.util.ArrayList) IntHashSet(com.carrotsearch.hppc.IntHashSet) MultiDocValues(org.apache.lucene.index.MultiDocValues) DocSlice(org.apache.solr.search.DocSlice) ScoreDoc(org.apache.lucene.search.ScoreDoc) FixedBitSet(org.apache.lucene.util.FixedBitSet) Sort(org.apache.lucene.search.Sort) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) CharsRefBuilder(org.apache.lucene.util.CharsRefBuilder) SortedDocValues(org.apache.lucene.index.SortedDocValues) LongHashSet(com.carrotsearch.hppc.LongHashSet) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) NumberType(org.apache.solr.schema.NumberType) QParser(org.apache.solr.search.QParser) SolrParams(org.apache.solr.common.params.SolrParams) NumericDocValues(org.apache.lucene.index.NumericDocValues) DocIterator(org.apache.solr.search.DocIterator) SimpleOrderedMap(org.apache.solr.common.util.SimpleOrderedMap) UninvertingReader(org.apache.solr.uninverting.UninvertingReader) TopDocs(org.apache.lucene.search.TopDocs) TopFieldCollector(org.apache.lucene.search.TopFieldCollector) LeafCollector(org.apache.lucene.search.LeafCollector) Collector(org.apache.lucene.search.Collector) TopScoreDocCollector(org.apache.lucene.search.TopScoreDocCollector) TopDocsCollector(org.apache.lucene.search.TopDocsCollector) BytesRef(org.apache.lucene.util.BytesRef) LeafReader(org.apache.lucene.index.LeafReader) FilterLeafReader(org.apache.lucene.index.FilterLeafReader) NamedList(org.apache.solr.common.util.NamedList) IOException(java.io.IOException) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) FieldType(org.apache.solr.schema.FieldType) CollapsingQParserPlugin(org.apache.solr.search.CollapsingQParserPlugin) SchemaField(org.apache.solr.schema.SchemaField) DocValuesType(org.apache.lucene.index.DocValuesType) LongValues(org.apache.lucene.util.LongValues) DocList(org.apache.solr.search.DocList)
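
For context, the sketch below is not taken from the Solr sources; under assumed names (the core handle, the collapse field group_s, and the sort value are placeholders) it shows a request that would drive this component: a SolrQueryRequest built from ModifiableSolrParams carrying the expand parameters that process() reads.

import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;

public class ExpandRequestSketch {
    // 'core' and the field/sort names are illustrative placeholders, not values from the example above.
    static SolrQueryRequest buildExpandRequest(SolrCore core) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("q", "*:*");
        params.set("fq", "{!collapse field=group_s}");  // the CollapsingPostFilter supplies the field when expand.field is absent
        params.set("expand", "true");                   // turns on rb.doExpand
        params.set("expand.rows", "5");                 // ExpandParams.EXPAND_ROWS, defaults to 5 in process()
        params.set("expand.sort", "score desc");        // ExpandParams.EXPAND_SORT
        return new LocalSolrQueryRequest(core, params); // the SolrQueryRequest consumed by process()
    }
}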

Example 27 with SolrQueryRequest

Use of org.apache.solr.request.SolrQueryRequest in project lucene-solr by apache.

The class HighlightComponent, method process.

@Override
public void process(ResponseBuilder rb) throws IOException {
    if (rb.doHighlights) {
        SolrQueryRequest req = rb.req;
        SolrParams params = req.getParams();
        SolrHighlighter highlighter = getHighlighter(params);
        //TODO: get from builder by default?
        String[] defaultHighlightFields = rb.getQparser() != null ? rb.getQparser().getDefaultHighlightFields() : null;
        Query highlightQuery = rb.getHighlightQuery();
        if (highlightQuery == null) {
            if (rb.getQparser() != null) {
                try {
                    highlightQuery = rb.getQparser().getHighlightQuery();
                    rb.setHighlightQuery(highlightQuery);
                } catch (Exception e) {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
                }
            } else {
                highlightQuery = rb.getQuery();
                rb.setHighlightQuery(highlightQuery);
            }
        }
        // No highlighting if there is no query -- consider q.alt=*:*
        if (highlightQuery != null) {
            NamedList sumData = highlighter.doHighlighting(rb.getResults().docList, highlightQuery, req, defaultHighlightFields);
            if (sumData != null) {
                // TODO ???? add this directly to the response?
                rb.rsp.add("highlighting", sumData);
            }
        }
    }
}
Also used : UnifiedSolrHighlighter(org.apache.solr.highlight.UnifiedSolrHighlighter) DefaultSolrHighlighter(org.apache.solr.highlight.DefaultSolrHighlighter) SolrHighlighter(org.apache.solr.highlight.SolrHighlighter) PostingsSolrHighlighter(org.apache.solr.highlight.PostingsSolrHighlighter) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) Query(org.apache.lucene.search.Query) NamedList(org.apache.solr.common.util.NamedList) SolrParams(org.apache.solr.common.params.SolrParams) SolrException(org.apache.solr.common.SolrException) IOException(java.io.IOException)
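
A minimal sketch, not from the Solr sources, of the request side of this component, assuming a core handle and field names (title_t, body_t) chosen only for illustration: the hl flag is what causes rb.doHighlights to be set, and hl.fl names the fields to highlight.

import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;

public class HighlightRequestSketch {
    static SolrQueryRequest buildHighlightRequest(SolrCore core) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("q", "title_t:solr");       // with no explicit highlight query, process() falls back to the main query
        params.set("hl", "true");              // sets rb.doHighlights during prepare()
        params.set("hl.fl", "title_t,body_t"); // fields to highlight
        return new LocalSolrQueryRequest(core, params);
    }
}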

Example 28 with SolrQueryRequest

Use of org.apache.solr.request.SolrQueryRequest in project lucene-solr by apache.

The class HttpShardHandler, method prepDistributed.

@Override
public void prepDistributed(ResponseBuilder rb) {
    final SolrQueryRequest req = rb.req;
    final SolrParams params = req.getParams();
    final String shards = params.get(ShardParams.SHARDS);
    // since the cost of grabbing cloud state is still up in the air, we grab it only
    // if we need it.
    ClusterState clusterState = null;
    Map<String, Slice> slices = null;
    CoreDescriptor coreDescriptor = req.getCore().getCoreDescriptor();
    CloudDescriptor cloudDescriptor = coreDescriptor.getCloudDescriptor();
    ZkController zkController = req.getCore().getCoreContainer().getZkController();
    final ReplicaListTransformer replicaListTransformer = httpShardHandlerFactory.getReplicaListTransformer(req);
    if (shards != null) {
        List<String> lst = StrUtils.splitSmart(shards, ",", true);
        rb.shards = lst.toArray(new String[lst.size()]);
        rb.slices = new String[rb.shards.length];
        if (zkController != null) {
            // figure out which shards are slices
            for (int i = 0; i < rb.shards.length; i++) {
                if (rb.shards[i].indexOf('/') < 0) {
                    // this is a logical shard
                    rb.slices[i] = rb.shards[i];
                    rb.shards[i] = null;
                }
            }
        }
    } else if (zkController != null) {
        // we weren't provided with an explicit list of slices to query via "shards", so use the cluster state
        clusterState = zkController.getClusterState();
        String shardKeys = params.get(ShardParams._ROUTE_);
        // This will be the complete list of slices we need to query for this request.
        slices = new HashMap<>();
        // we need to find out what collections this request is for.
        // A comma-separated list of specified collections.
        // Eg: "collection1,collection2,collection3"
        String collections = params.get("collection");
        if (collections != null) {
            // If there were one or more collections specified in the query, split
            // each parameter and store as a separate member of a List.
            List<String> collectionList = StrUtils.splitSmart(collections, ",", true);
            // In turn, retrieve the slices that cover each collection from the cloud
            // state and add them to the Map 'slices'.
            for (String collectionName : collectionList) {
                // The original code produced <collection-name>_<shard-name> when the collections
                // parameter was specified (see ClientUtils.appendMap)
                // Is this necessary if only one collection is specified?
                // i.e. should we change multiCollection to collectionList.size() > 1?
                addSlices(slices, clusterState, params, collectionName, shardKeys, true);
            }
        } else {
            // just this collection
            String collectionName = cloudDescriptor.getCollectionName();
            addSlices(slices, clusterState, params, collectionName, shardKeys, false);
        }
        // Store the logical slices in the ResponseBuilder and create a new
        // String array to hold the physical shards (which will be mapped
        // later).
        rb.slices = slices.keySet().toArray(new String[slices.size()]);
        rb.shards = new String[rb.slices.length];
    }
    //
    if (zkController != null) {
        // Are we hosting the shard that this request is for, and are we active? If so, then handle it ourselves
        // and make it a non-distributed request.
        String ourSlice = cloudDescriptor.getShardId();
        String ourCollection = cloudDescriptor.getCollectionName();
        // Some requests may only be fulfilled by replicas of type Replica.Type.NRT
        boolean onlyNrtReplicas = Boolean.TRUE == req.getContext().get(ONLY_NRT_REPLICAS);
        if (rb.slices.length == 1 && rb.slices[0] != null && // handle the <collection>_<slice> format
        (rb.slices[0].equals(ourSlice) || rb.slices[0].equals(ourCollection + "_" + ourSlice)) && cloudDescriptor.getLastPublished() == Replica.State.ACTIVE && (!onlyNrtReplicas || cloudDescriptor.getReplicaType() == Replica.Type.NRT)) {
            // currently just a debugging parameter to check distrib search on a single node
            boolean shortCircuit = params.getBool("shortCircuit", true);
            String targetHandler = params.get(ShardParams.SHARDS_QT);
            // if a different handler is specified, don't short-circuit
            shortCircuit = shortCircuit && targetHandler == null;
            if (shortCircuit) {
                rb.isDistrib = false;
                rb.shortCircuitedURL = ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), coreDescriptor.getName());
                return;
            }
        // We shouldn't need to do anything to handle "shard.rows" since it was previously meant to be an optimization?
        }
        for (int i = 0; i < rb.shards.length; i++) {
            final List<String> shardUrls;
            if (rb.shards[i] != null) {
                shardUrls = StrUtils.splitSmart(rb.shards[i], "|", true);
                replicaListTransformer.transform(shardUrls);
            } else {
                if (clusterState == null) {
                    clusterState = zkController.getClusterState();
                    slices = clusterState.getSlicesMap(cloudDescriptor.getCollectionName());
                }
                String sliceName = rb.slices[i];
                Slice slice = slices.get(sliceName);
                if (slice == null) {
                    // Treat this the same as "all servers down" for a slice, and let things continue
                    // if partial results are acceptable
                    rb.shards[i] = "";
                    continue;
                // throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such shard: " + sliceName);
                }
                final Predicate<Replica> isShardLeader = new Predicate<Replica>() {

                    private Replica shardLeader = null;

                    @Override
                    public boolean test(Replica replica) {
                        if (shardLeader == null) {
                            try {
                                shardLeader = zkController.getZkStateReader().getLeaderRetry(cloudDescriptor.getCollectionName(), slice.getName());
                            } catch (InterruptedException e) {
                                throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Exception finding leader for shard " + slice.getName() + " in collection " + cloudDescriptor.getCollectionName(), e);
                            } catch (SolrException e) {
                                if (log.isDebugEnabled()) {
                                    log.debug("Exception finding leader for shard {} in collection {}. Collection State: {}", slice.getName(), cloudDescriptor.getCollectionName(), zkController.getZkStateReader().getClusterState().getCollectionOrNull(cloudDescriptor.getCollectionName()));
                                }
                                throw e;
                            }
                        }
                        return replica.getName().equals(shardLeader.getName());
                    }
                };
                final List<Replica> eligibleSliceReplicas = collectEligibleReplicas(slice, clusterState, onlyNrtReplicas, isShardLeader);
                replicaListTransformer.transform(eligibleSliceReplicas);
                shardUrls = new ArrayList<>(eligibleSliceReplicas.size());
                for (Replica replica : eligibleSliceReplicas) {
                    String url = ZkCoreNodeProps.getCoreUrl(replica);
                    shardUrls.add(url);
                }
                if (shardUrls.isEmpty()) {
                    boolean tolerant = rb.req.getParams().getBool(ShardParams.SHARDS_TOLERANT, false);
                    if (!tolerant) {
                        // stop the check when there are no replicas available for a shard
                        throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "no servers hosting shard: " + rb.slices[i]);
                    }
                }
            }
            // And now recreate the | delimited list of equivalent servers
            rb.shards[i] = createSliceShardsStr(shardUrls);
        }
    }
    String shards_rows = params.get(ShardParams.SHARDS_ROWS);
    if (shards_rows != null) {
        rb.shards_rows = Integer.parseInt(shards_rows);
    }
    String shards_start = params.get(ShardParams.SHARDS_START);
    if (shards_start != null) {
        rb.shards_start = Integer.parseInt(shards_start);
    }
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) HashMap(java.util.HashMap) CoreDescriptor(org.apache.solr.core.CoreDescriptor) Replica(org.apache.solr.common.cloud.Replica) CloudDescriptor(org.apache.solr.cloud.CloudDescriptor) Predicate(java.util.function.Predicate) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) Slice(org.apache.solr.common.cloud.Slice) ZkController(org.apache.solr.cloud.ZkController) SolrParams(org.apache.solr.common.params.SolrParams) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) ArrayList(java.util.ArrayList) NamedList(org.apache.solr.common.util.NamedList) List(java.util.List) SolrException(org.apache.solr.common.SolrException)
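
The shards branching above can be illustrated with a small sketch; the host names and core paths are assumed placeholders, and the ',' / '|' separators mirror the format that createSliceShardsStr rebuilds.

import org.apache.solr.common.params.ModifiableSolrParams;

public class ShardParamsSketch {
    static ModifiableSolrParams explicitShards() {
        ModifiableSolrParams params = new ModifiableSolrParams();
        // ',' separates shards; '|' separates equivalent replicas within one shard
        params.set("shards",
            "host1:8983/solr/col_shard1_replica1|host2:8983/solr/col_shard1_replica2,"
          + "host3:8983/solr/col_shard2_replica1");
        params.set("shards.tolerant", "true"); // tolerate a slice with no live replicas instead of failing
        params.set("shards.rows", "20");       // per-shard rows override parsed at the end of prepDistributed
        return params;
    }
}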

Example 29 with SolrQueryRequest

Use of org.apache.solr.request.SolrQueryRequest in project lucene-solr by apache.

The class MergeIndexesOp, method execute.

@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
    SolrParams params = it.req.getParams();
    String cname = params.required().get(CoreAdminParams.CORE);
    SolrCore core = it.handler.coreContainer.getCore(cname);
    SolrQueryRequest wrappedReq = null;
    List<SolrCore> sourceCores = Lists.newArrayList();
    List<RefCounted<SolrIndexSearcher>> searchers = Lists.newArrayList();
    // stores readers created from indexDir param values
    List<DirectoryReader> readersToBeClosed = Lists.newArrayList();
    Map<Directory, Boolean> dirsToBeReleased = new HashMap<>();
    if (core != null) {
        try {
            String[] dirNames = params.getParams(CoreAdminParams.INDEX_DIR);
            if (dirNames == null || dirNames.length == 0) {
                String[] sources = params.getParams("srcCore");
                if (sources == null || sources.length == 0)
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "At least one indexDir or srcCore must be specified");
                for (int i = 0; i < sources.length; i++) {
                    String source = sources[i];
                    SolrCore srcCore = it.handler.coreContainer.getCore(source);
                    if (srcCore == null)
                        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core: " + source + " does not exist");
                    sourceCores.add(srcCore);
                }
            } else {
                DirectoryFactory dirFactory = core.getDirectoryFactory();
                for (int i = 0; i < dirNames.length; i++) {
                    boolean markAsDone = false;
                    if (dirFactory instanceof CachingDirectoryFactory) {
                        if (!((CachingDirectoryFactory) dirFactory).getLivePaths().contains(dirNames[i])) {
                            markAsDone = true;
                        }
                    }
                    Directory dir = dirFactory.get(dirNames[i], DirectoryFactory.DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType);
                    dirsToBeReleased.put(dir, markAsDone);
                    // TODO: why doesn't this use the IR factory? what is going on here?
                    readersToBeClosed.add(DirectoryReader.open(dir));
                }
            }
            List<DirectoryReader> readers = null;
            if (readersToBeClosed.size() > 0) {
                readers = readersToBeClosed;
            } else {
                readers = Lists.newArrayList();
                for (SolrCore solrCore : sourceCores) {
                    // record the searchers so that we can decref
                    RefCounted<SolrIndexSearcher> searcher = solrCore.getSearcher();
                    searchers.add(searcher);
                    readers.add(searcher.get().getIndexReader());
                }
            }
            UpdateRequestProcessorChain processorChain = core.getUpdateProcessingChain(params.get(UpdateParams.UPDATE_CHAIN));
            wrappedReq = new LocalSolrQueryRequest(core, it.req.getParams());
            UpdateRequestProcessor processor = processorChain.createProcessor(wrappedReq, it.rsp);
            processor.processMergeIndexes(new MergeIndexesCommand(readers, it.req));
        } catch (Exception e) {
            // log and rethrow so that if the finally fails we don't lose the original problem
            log.error("ERROR executing merge:", e);
            throw e;
        } finally {
            for (RefCounted<SolrIndexSearcher> searcher : searchers) {
                if (searcher != null)
                    searcher.decref();
            }
            for (SolrCore solrCore : sourceCores) {
                if (solrCore != null)
                    solrCore.close();
            }
            IOUtils.closeWhileHandlingException(readersToBeClosed);
            Set<Map.Entry<Directory, Boolean>> entries = dirsToBeReleased.entrySet();
            for (Map.Entry<Directory, Boolean> entry : entries) {
                DirectoryFactory dirFactory = core.getDirectoryFactory();
                Directory dir = entry.getKey();
                boolean markAsDone = entry.getValue();
                if (markAsDone) {
                    dirFactory.doneWithDirectory(dir);
                }
                dirFactory.release(dir);
            }
            if (wrappedReq != null)
                wrappedReq.close();
            core.close();
        }
    }
}
Also used : HashMap(java.util.HashMap) SolrCore(org.apache.solr.core.SolrCore) UpdateRequestProcessorChain(org.apache.solr.update.processor.UpdateRequestProcessorChain) DirectoryFactory(org.apache.solr.core.DirectoryFactory) CachingDirectoryFactory(org.apache.solr.core.CachingDirectoryFactory) UpdateRequestProcessor(org.apache.solr.update.processor.UpdateRequestProcessor) SolrException(org.apache.solr.common.SolrException) Directory(org.apache.lucene.store.Directory) DirectoryReader(org.apache.lucene.index.DirectoryReader) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) MergeIndexesCommand(org.apache.solr.update.MergeIndexesCommand) RefCounted(org.apache.solr.util.RefCounted) SolrParams(org.apache.solr.common.params.SolrParams) Map(java.util.Map)
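
A minimal sketch, with assumed core names, of the CoreAdmin parameters this op reads; the merge can name either source cores (srcCore) or index directories (indexDir), but at least one of them is required.

import org.apache.solr.common.params.ModifiableSolrParams;

public class MergeIndexesParamsSketch {
    static ModifiableSolrParams mergeBySrcCore() {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("action", "MERGEINDEXES"); // CoreAdmin action routed to MergeIndexesOp
        params.set("core", "targetCore");     // CoreAdminParams.CORE, required by the op
        params.add("srcCore", "core1");       // one or more source cores...
        params.add("srcCore", "core2");
        // ...or one or more indexDir paths may be supplied instead of srcCore
        return params;
    }
}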

Example 30 with SolrQueryRequest

Use of org.apache.solr.request.SolrQueryRequest in project lucene-solr by apache.

The class SplitOp, method execute.

@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
    SolrParams params = it.req.getParams();
    List<DocRouter.Range> ranges = null;
    String[] pathsArr = params.getParams(PATH);
    // ranges=a-b,c-d,e-f
    String rangesStr = params.get(CoreAdminParams.RANGES);
    if (rangesStr != null) {
        String[] rangesArr = rangesStr.split(",");
        if (rangesArr.length == 0) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least one range specified to split an index");
        } else {
            ranges = new ArrayList<>(rangesArr.length);
            for (String r : rangesArr) {
                try {
                    ranges.add(DocRouter.DEFAULT.fromString(r));
                } catch (Exception e) {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception parsing hexadecimal hash range: " + r, e);
                }
            }
        }
    }
    String splitKey = params.get("split.key");
    String[] newCoreNames = params.getParams("targetCore");
    String cname = params.get(CoreAdminParams.CORE, "");
    if ((pathsArr == null || pathsArr.length == 0) && (newCoreNames == null || newCoreNames.length == 0)) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Either path or targetCore param must be specified");
    }
    log.info("Invoked split action for core: " + cname);
    SolrCore core = it.handler.coreContainer.getCore(cname);
    SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
    List<SolrCore> newCores = null;
    try {
        // TODO: allow use of rangesStr in the future
        List<String> paths = null;
        int partitions = pathsArr != null ? pathsArr.length : newCoreNames.length;
        DocRouter router = null;
        String routeFieldName = null;
        if (it.handler.coreContainer.isZooKeeperAware()) {
            ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
            String collectionName = req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName();
            DocCollection collection = clusterState.getCollection(collectionName);
            String sliceName = req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId();
            Slice slice = collection.getSlice(sliceName);
            router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
            if (ranges == null) {
                DocRouter.Range currentRange = slice.getRange();
                ranges = currentRange != null ? router.partitionRange(partitions, currentRange) : null;
            }
            // for back-compat with Solr 4.4
            Object routerObj = collection.get(DOC_ROUTER);
            if (routerObj != null && routerObj instanceof Map) {
                Map routerProps = (Map) routerObj;
                routeFieldName = (String) routerProps.get("field");
            }
        }
        if (pathsArr == null) {
            newCores = new ArrayList<>(partitions);
            for (String newCoreName : newCoreNames) {
                SolrCore newcore = it.handler.coreContainer.getCore(newCoreName);
                if (newcore != null) {
                    newCores.add(newcore);
                } else {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core with core name " + newCoreName + " expected but doesn't exist.");
                }
            }
        } else {
            paths = Arrays.asList(pathsArr);
        }
        SplitIndexCommand cmd = new SplitIndexCommand(req, paths, newCores, ranges, router, routeFieldName, splitKey);
        core.getUpdateHandler().split(cmd);
    // After the split has completed, someone (here?) should start the process of replaying the buffered updates.
    } catch (Exception e) {
        log.error("ERROR executing split:", e);
        throw new RuntimeException(e);
    } finally {
        if (req != null)
            req.close();
        if (core != null)
            core.close();
        if (newCores != null) {
            for (SolrCore newCore : newCores) {
                newCore.close();
            }
        }
    }
}
Also used : SplitIndexCommand(org.apache.solr.update.SplitIndexCommand) ClusterState(org.apache.solr.common.cloud.ClusterState) SolrCore(org.apache.solr.core.SolrCore) SolrException(org.apache.solr.common.SolrException) LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) Slice(org.apache.solr.common.cloud.Slice) DocRouter(org.apache.solr.common.cloud.DocRouter) SolrParams(org.apache.solr.common.params.SolrParams) DocCollection(org.apache.solr.common.cloud.DocCollection) Map(java.util.Map)
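
A minimal sketch, with assumed core names, of the parameters SplitOp expects: action=SPLIT plus core and either targetCore names or path values, with ranges and split.key optional.

import org.apache.solr.common.params.ModifiableSolrParams;

public class SplitParamsSketch {
    static ModifiableSolrParams splitIntoTargetCores() {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("action", "SPLIT");                      // CoreAdmin action routed to SplitOp
        params.set("core", "collection1_shard1_replica1");  // CoreAdminParams.CORE
        params.add("targetCore", "split_core_a");           // either targetCore names...
        params.add("targetCore", "split_core_b");
        // ...or one or more path values; ranges=<hex>-<hex>,... and split.key are optional
        return params;
    }
}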

Aggregations

SolrQueryRequest (org.apache.solr.request.SolrQueryRequest): 362
LocalSolrQueryRequest (org.apache.solr.request.LocalSolrQueryRequest): 148
Test (org.junit.Test): 143
SolrQueryResponse (org.apache.solr.response.SolrQueryResponse): 129
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 106
SolrCore (org.apache.solr.core.SolrCore): 58
ArrayList (java.util.ArrayList): 49
NamedList (org.apache.solr.common.util.NamedList): 48
SolrInputDocument (org.apache.solr.common.SolrInputDocument): 45
HashMap (java.util.HashMap): 43
AddUpdateCommand (org.apache.solr.update.AddUpdateCommand): 37
SolrParams (org.apache.solr.common.params.SolrParams): 36
SolrException (org.apache.solr.common.SolrException): 34
IOException (java.io.IOException): 24
Query (org.apache.lucene.search.Query): 24
BufferingRequestProcessor (org.apache.solr.update.processor.BufferingRequestProcessor): 24
List (java.util.List): 23
MapSolrParams (org.apache.solr.common.params.MapSolrParams): 23
ContentStreamBase (org.apache.solr.common.util.ContentStreamBase): 23
Map (java.util.Map): 22
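
The counts above pair SolrQueryRequest most often with LocalSolrQueryRequest, ModifiableSolrParams, and SolrQueryResponse, which reflects the usual request/response pattern in Solr tests and handlers. A minimal sketch of that pattern, assuming a core handle and the /select handler name:

import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;

public class RequestResponseSketch {
    static SolrQueryResponse execute(SolrCore core) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("q", "*:*");
        SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
        SolrQueryResponse rsp = new SolrQueryResponse();
        try {
            core.execute(core.getRequestHandler("/select"), req, rsp); // run the select handler
        } finally {
            req.close(); // local requests must be closed to release any resources they acquired
        }
        return rsp;
    }
}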