Example 41 with SchemaField

use of org.apache.solr.schema.SchemaField in project lucene-solr by apache.

From class SortSpecParsing, method parseSortSpecImpl:

private static SortSpec parseSortSpecImpl(String sortSpec, IndexSchema schema, SolrQueryRequest optionalReq) {
    if (sortSpec == null || sortSpec.length() == 0)
        return newEmptySortSpec();
    List<SortField> sorts = new ArrayList<>(4);
    List<SchemaField> fields = new ArrayList<>(4);
    try {
        StrParser sp = new StrParser(sortSpec);
        while (sp.pos < sp.end) {
            sp.eatws();
            final int start = sp.pos;
            // short circuit test for a really simple field name
            String field = sp.getId(null);
            Exception qParserException = null;
            if ((field == null || !Character.isWhitespace(sp.peekChar())) && (optionalReq != null)) {
                // let's try it as a function instead
                field = null;
                String funcStr = sp.val.substring(start);
                QParser parser = QParser.getParser(funcStr, FunctionQParserPlugin.NAME, optionalReq);
                Query q = null;
                try {
                    if (parser instanceof FunctionQParser) {
                        FunctionQParser fparser = (FunctionQParser) parser;
                        fparser.setParseMultipleSources(false);
                        fparser.setParseToEnd(false);
                        q = fparser.getQuery();
                        if (fparser.localParams != null) {
                            if (fparser.valFollowedParams) {
                                // need to find the end of the function query via the string parser
                                int leftOver = fparser.sp.end - fparser.sp.pos;
                                // reset our parser to the same amount of leftover
                                sp.pos = sp.end - leftOver;
                            } else {
                                // the value was via the "v" param in localParams, so we need to find
                                // the end of the local params themselves to pick up where we left off
                                sp.pos = start + fparser.localParamsEnd;
                            }
                        } else {
                            // need to find the end of the function query via the string parser
                            int leftOver = fparser.sp.end - fparser.sp.pos;
                            // reset our parser to the same amount of leftover
                            sp.pos = sp.end - leftOver;
                        }
                    } else {
                        // A QParser that's not for function queries.
                        // It must have been specified via local params.
                        q = parser.getQuery();
                        assert parser.getLocalParams() != null;
                        sp.pos = start + parser.localParamsEnd;
                    }
                    Boolean top = sp.getSortDirection();
                    if (null != top) {
                        // we have a Query and a valid direction
                        if (q instanceof FunctionQuery) {
                            sorts.add(((FunctionQuery) q).getValueSource().getSortField(top));
                        } else {
                            sorts.add((new QueryValueSource(q, 0.0f)).getSortField(top));
                        }
                        fields.add(null);
                        continue;
                    }
                } catch (Exception e) {
                    // hang onto this in case the string isn't a full field name either
                    qParserException = e;
                }
            }
            if (field == null) {
                // try again, simple rules for a field name with no whitespace
                sp.pos = start;
                field = sp.getSimpleString();
            }
            Boolean top = sp.getSortDirection();
            if (null == top) {
                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't determine a Sort Order (asc or desc) in sort spec " + sp);
            }
            if (SCORE.equals(field)) {
                if (top) {
                    sorts.add(SortField.FIELD_SCORE);
                } else {
                    sorts.add(new SortField(null, SortField.Type.SCORE, true));
                }
                fields.add(null);
            } else if (DOCID.equals(field)) {
                sorts.add(new SortField(null, SortField.Type.DOC, top));
                fields.add(null);
            } else {
                // try to find the field
                SchemaField sf = schema.getFieldOrNull(field);
                if (null == sf) {
                    if (null != qParserException) {
                        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "sort param could not be parsed as a query, and is not a " + "field that exists in the index: " + field, qParserException);
                    }
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "sort param field can't be found: " + field);
                }
                sorts.add(sf.getSortField(top));
                fields.add(sf);
            }
        }
    } catch (SyntaxError e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "error in sort: " + sortSpec, e);
    }
    // normalize a sort on score desc to null
    if (sorts.size() == 1 && sorts.get(0) == SortField.FIELD_SCORE) {
        return newEmptySortSpec();
    }
    Sort s = new Sort(sorts.toArray(new SortField[sorts.size()]));
    return new SortSpec(s, fields);
}
Also used : Query(org.apache.lucene.search.Query) FunctionQuery(org.apache.lucene.queries.function.FunctionQuery) ArrayList(java.util.ArrayList) SortField(org.apache.lucene.search.SortField) SolrException(org.apache.solr.common.SolrException) SchemaField(org.apache.solr.schema.SchemaField) Sort(org.apache.lucene.search.Sort) QueryValueSource(org.apache.lucene.queries.function.valuesource.QueryValueSource)
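
In application code this parser is normally reached through the public SortSpecParsing.parseSortSpec helpers rather than called directly. A minimal sketch of resolving a request's sort parameter against the schema, assuming a live SolrQueryRequest named req; the resolveSort helper name is hypothetical:

import org.apache.lucene.search.Sort;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.SortSpec;
import org.apache.solr.search.SortSpecParsing;

Sort resolveSort(SolrQueryRequest req) {
    String sortStr = req.getParams().get(CommonParams.SORT, "score desc");
    // parseSortSpec validates field names against the IndexSchema and falls back
    // to function-query parsing for non-field sorts, as shown above.
    SortSpec spec = SortSpecParsing.parseSortSpec(sortStr, req);
    // Entries in getSchemaFields() are null for score, _docid_ and function sorts.
    for (SchemaField sf : spec.getSchemaFields()) {
        if (sf != null && !sf.indexed() && !sf.hasDocValues()) {
            throw new IllegalArgumentException("cannot sort on " + sf.getName());
        }
    }
    return spec.getSort(); // null when the spec normalizes to the default score desc
}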

Example 42 with SchemaField

use of org.apache.solr.schema.SchemaField in project lucene-solr by apache.

From class SpatialFilterQParser, method parse:

@Override
public Query parse() throws SyntaxError {
    //if more than one, we need to treat them as a point...
    //TODO: Should we accept multiple fields
    String[] fields = localParams.getParams("f");
    if (fields == null || fields.length == 0) {
        String field = getParam(SpatialParams.FIELD);
        if (field == null)
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, " missing sfield for spatial request");
        fields = new String[] { field };
    }
    String pointStr = getParam(SpatialParams.POINT);
    if (pointStr == null) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, SpatialParams.POINT + " missing.");
    }
    double dist = -1;
    String distS = getParam(SpatialParams.DISTANCE);
    if (distS != null)
        dist = Double.parseDouble(distS);
    if (dist < 0) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, SpatialParams.DISTANCE + " must be >= 0");
    }
    String measStr = localParams.get(SpatialParams.MEASURE);
    //TODO: Need to do something with Measures
    Query result = null;
    //fields is valid at this point
    if (fields.length == 1) {
        SchemaField sf = req.getSchema().getField(fields[0]);
        FieldType type = sf.getType();
        if (type instanceof SpatialQueryable) {
            SpatialQueryable queryable = ((SpatialQueryable) type);
            double radius = localParams.getDouble(SpatialParams.SPHERE_RADIUS, queryable.getSphereRadius());
            SpatialOptions opts = new SpatialOptions(pointStr, dist, sf, measStr, radius);
            opts.bbox = bbox;
            result = queryable.createSpatialQuery(this, opts);
        } else {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The field " + fields[0] + " does not support spatial filtering");
        }
    } else {
    // fields.length > 1
    //TODO: Not sure about this just yet, is there a way to delegate, or do we just have a helper class?
    //Seems like we could just use FunctionQuery, but then what about scoring
    /*List<ValueSource> sources = new ArrayList<ValueSource>(fields.length);
      for (String field : fields) {
        SchemaField sf = schema.getField(field);
        sources.add(sf.getType().getValueSource(sf, this));
      }
      MultiValueSource vs = new VectorValueSource(sources);
      ValueSourceRangeFilter rf = new ValueSourceRangeFilter(vs, "0", String.valueOf(dist), true, true);
      result = new SolrConstantScoreQuery(rf);*/
    }
    return result;
}
Also used : SchemaField(org.apache.solr.schema.SchemaField) Query(org.apache.lucene.search.Query) SpatialQueryable(org.apache.solr.schema.SpatialQueryable) SolrException(org.apache.solr.common.SolrException) FieldType(org.apache.solr.schema.FieldType)
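
This parser is registered as the geofilt query parser (the bbox parser is the bounding-box variant), so it is usually exercised through a filter query. A minimal SolrJ sketch, assuming a collection named mycollection whose store field uses a spatial-capable field type:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

void spatialFilterExample() throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/mycollection").build()) {
        SolrQuery q = new SolrQuery("*:*");
        // {!geofilt} hands sfield, pt and d to SpatialFilterQParser.parse above:
        // sfield names the spatial field, pt is the "lat,lon" center, d the distance in km.
        q.addFilterQuery("{!geofilt sfield=store pt=45.15,-93.85 d=5}");
        QueryResponse rsp = client.query(q);
        System.out.println("hits: " + rsp.getResults().getNumFound());
    }
}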

Example 43 with SchemaField

use of org.apache.solr.schema.SchemaField in project lucene-solr by apache.

From class XLSXWriter, method writeResponse:

public void writeResponse(OutputStream out, LinkedHashMap<String, String> colNamesMap, LinkedHashMap<String, Integer> colWidthsMap) throws IOException {
    SolrParams params = req.getParams();
    Collection<String> fields = returnFields.getRequestedFieldNames();
    Object responseObj = rsp.getValues().get("response");
    boolean returnOnlyStored = false;
    if (fields == null || returnFields.hasPatternMatching()) {
        if (responseObj instanceof SolrDocumentList) {
            // get the list of fields from the SolrDocumentList
            if (fields == null) {
                fields = new LinkedHashSet<String>();
            }
            for (SolrDocument sdoc : (SolrDocumentList) responseObj) {
                fields.addAll(sdoc.getFieldNames());
            }
        } else {
            // get the list of fields from the index
            Iterable<String> all = req.getSearcher().getFieldNames();
            if (fields == null) {
                fields = Sets.newHashSet(all);
            } else {
                Iterables.addAll(fields, all);
            }
        }
        if (returnFields.wantsScore()) {
            fields.add("score");
        } else {
            fields.remove("score");
        }
        returnOnlyStored = true;
    }
    for (String field : fields) {
        if (!returnFields.wantsField(field)) {
            continue;
        }
        if (field.equals("score")) {
            XLField xlField = new XLField();
            xlField.name = "score";
            xlFields.put("score", xlField);
            continue;
        }
        SchemaField sf = schema.getFieldOrNull(field);
        if (sf == null) {
            FieldType ft = new StrField();
            sf = new SchemaField(field, ft);
        }
        // Return only stored fields, unless an explicit field list is specified
        if (returnOnlyStored && sf != null && !sf.stored()) {
            continue;
        }
        XLField xlField = new XLField();
        xlField.name = field;
        xlField.sf = sf;
        xlFields.put(field, xlField);
    }
    wb.addRow();
    //write header
    for (XLField xlField : xlFields.values()) {
        String printName = xlField.name;
        int colWidth = 14;
        String niceName = colNamesMap.get(xlField.name);
        if (niceName != null) {
            printName = niceName;
        }
        Integer niceWidth = colWidthsMap.get(xlField.name);
        if (niceWidth != null) {
            colWidth = niceWidth.intValue();
        }
        writeStr(xlField.name, printName, false);
        wb.setColWidth(colWidth);
        wb.setHeaderCell();
    }
    wb.setHeaderRow();
    wb.addRow();
    if (responseObj instanceof ResultContext) {
        writeDocuments(null, (ResultContext) responseObj);
    } else if (responseObj instanceof DocList) {
        ResultContext ctx = new BasicResultContext((DocList) responseObj, returnFields, null, null, req);
        writeDocuments(null, ctx);
    } else if (responseObj instanceof SolrDocumentList) {
        writeSolrDocumentList(null, (SolrDocumentList) responseObj, returnFields);
    }
    wb.flush(out);
    wb = null;
}
Also used : BasicResultContext(org.apache.solr.response.BasicResultContext) ResultContext(org.apache.solr.response.ResultContext) StrField(org.apache.solr.schema.StrField) SolrDocumentList(org.apache.solr.common.SolrDocumentList) FieldType(org.apache.solr.schema.FieldType) SchemaField(org.apache.solr.schema.SchemaField) SolrDocument(org.apache.solr.common.SolrDocument) SolrParams(org.apache.solr.common.params.SolrParams) DocList(org.apache.solr.search.DocList)
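
The writer is selected with wt=xlsx (it lives in the extraction contrib as XLSXResponseWriter and must be registered in solrconfig.xml). A hedged request sketch using only the JDK; the colname.* and colwidth.* parameter names, assumed here to populate colNamesMap and colWidthsMap above, should be checked against the writer registered in your build:

import java.io.InputStream;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

void exportToXlsx() throws Exception {
    // wt=xlsx selects the XLSX writer; the field list controls the columns written.
    String url = "http://localhost:8983/solr/mycollection/select"
            + "?q=*:*&rows=1000&fl=id,name,price&wt=xlsx"
            + "&colname.price=Price&colwidth.price=12";  // assumed header/width params
    try (InputStream in = new URL(url).openStream()) {
        Files.copy(in, Paths.get("results.xlsx"), StandardCopyOption.REPLACE_EXISTING);
    }
}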

Example 44 with SchemaField

use of org.apache.solr.schema.SchemaField in project lucene-solr by apache.

From class ExpandComponent, method process:

@SuppressWarnings("unchecked")
@Override
public void process(ResponseBuilder rb) throws IOException {
    if (!rb.doExpand) {
        return;
    }
    SolrQueryRequest req = rb.req;
    SolrParams params = req.getParams();
    String field = params.get(ExpandParams.EXPAND_FIELD);
    String hint = null;
    if (field == null) {
        List<Query> filters = rb.getFilters();
        if (filters != null) {
            for (Query q : filters) {
                if (q instanceof CollapsingQParserPlugin.CollapsingPostFilter) {
                    CollapsingQParserPlugin.CollapsingPostFilter cp = (CollapsingQParserPlugin.CollapsingPostFilter) q;
                    field = cp.getField();
                    hint = cp.hint;
                }
            }
        }
    }
    if (field == null) {
        throw new IOException("Expand field is null.");
    }
    String sortParam = params.get(ExpandParams.EXPAND_SORT);
    String[] fqs = params.getParams(ExpandParams.EXPAND_FQ);
    String qs = params.get(ExpandParams.EXPAND_Q);
    int limit = params.getInt(ExpandParams.EXPAND_ROWS, 5);
    Sort sort = null;
    if (sortParam != null) {
        sort = SortSpecParsing.parseSortSpec(sortParam, rb.req).getSort();
    }
    Query query;
    if (qs == null) {
        query = rb.getQuery();
    } else {
        try {
            QParser parser = QParser.getParser(qs, req);
            query = parser.getQuery();
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
    List<Query> newFilters = new ArrayList<>();
    if (fqs == null) {
        List<Query> filters = rb.getFilters();
        if (filters != null) {
            for (Query q : filters) {
                if (!(q instanceof CollapsingQParserPlugin.CollapsingPostFilter)) {
                    newFilters.add(q);
                }
            }
        }
    } else {
        try {
            for (String fq : fqs) {
                if (fq != null && fq.trim().length() != 0 && !fq.equals("*:*")) {
                    QParser fqp = QParser.getParser(fq, req);
                    newFilters.add(fqp.getQuery());
                }
            }
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
    SolrIndexSearcher searcher = req.getSearcher();
    LeafReader reader = searcher.getSlowAtomicReader();
    SchemaField schemaField = searcher.getSchema().getField(field);
    FieldType fieldType = schemaField.getType();
    SortedDocValues values = null;
    long nullValue = 0L;
    if (fieldType instanceof StrField) {
        //Get The Top Level SortedDocValues
        if (CollapsingQParserPlugin.HINT_TOP_FC.equals(hint)) {
            Map<String, UninvertingReader.Type> mapping = new HashMap();
            mapping.put(field, UninvertingReader.Type.SORTED);
            UninvertingReader uninvertingReader = new UninvertingReader(new ReaderWrapper(searcher.getSlowAtomicReader(), field), mapping);
            values = uninvertingReader.getSortedDocValues(field);
        } else {
            values = DocValues.getSorted(reader, field);
        }
    } else {
        //Get the nullValue for the numeric collapse field
        String defaultValue = searcher.getSchema().getField(field).getDefaultValue();
        final NumberType numType = fieldType.getNumberType();
        // we don't need to handle invalid 64-bit field types here.
        if (defaultValue != null) {
            if (numType == NumberType.INTEGER) {
                nullValue = Long.parseLong(defaultValue);
            } else if (numType == NumberType.FLOAT) {
                nullValue = Float.floatToIntBits(Float.parseFloat(defaultValue));
            }
        } else if (NumberType.FLOAT.equals(numType)) {
            // Integer case already handled by nullValue defaulting to 0
            nullValue = Float.floatToIntBits(0.0f);
        }
    }
    FixedBitSet groupBits = null;
    LongHashSet groupSet = null;
    DocList docList = rb.getResults().docList;
    IntHashSet collapsedSet = new IntHashSet(docList.size() * 2);
    //Gather the groups for the current page of documents
    DocIterator idit = docList.iterator();
    int[] globalDocs = new int[docList.size()];
    int docsIndex = -1;
    while (idit.hasNext()) {
        globalDocs[++docsIndex] = idit.nextDoc();
    }
    Arrays.sort(globalDocs);
    Query groupQuery = null;
    /*
    * This code gathers the group information for the current page.
    */
    List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
    if (contexts.size() == 0) {
        //When no context is available we can skip the expanding
        return;
    }
    int currentContext = 0;
    int currentDocBase = contexts.get(currentContext).docBase;
    int nextDocBase = (currentContext + 1) < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
    IntObjectHashMap<BytesRef> ordBytes = null;
    if (values != null) {
        groupBits = new FixedBitSet(values.getValueCount());
        MultiDocValues.OrdinalMap ordinalMap = null;
        SortedDocValues[] sortedDocValues = null;
        LongValues segmentOrdinalMap = null;
        SortedDocValues currentValues = null;
        if (values instanceof MultiDocValues.MultiSortedDocValues) {
            ordinalMap = ((MultiDocValues.MultiSortedDocValues) values).mapping;
            sortedDocValues = ((MultiDocValues.MultiSortedDocValues) values).values;
            currentValues = sortedDocValues[currentContext];
            segmentOrdinalMap = ordinalMap.getGlobalOrds(currentContext);
        }
        int count = 0;
        ordBytes = new IntObjectHashMap<>();
        for (int i = 0; i < globalDocs.length; i++) {
            int globalDoc = globalDocs[i];
            while (globalDoc >= nextDocBase) {
                currentContext++;
                currentDocBase = contexts.get(currentContext).docBase;
                nextDocBase = (currentContext + 1) < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
                if (ordinalMap != null) {
                    currentValues = sortedDocValues[currentContext];
                    segmentOrdinalMap = ordinalMap.getGlobalOrds(currentContext);
                }
            }
            int contextDoc = globalDoc - currentDocBase;
            if (ordinalMap != null) {
                if (contextDoc > currentValues.docID()) {
                    currentValues.advance(contextDoc);
                }
                if (contextDoc == currentValues.docID()) {
                    int ord = currentValues.ordValue();
                    ++count;
                    BytesRef ref = currentValues.lookupOrd(ord);
                    ord = (int) segmentOrdinalMap.get(ord);
                    ordBytes.put(ord, BytesRef.deepCopyOf(ref));
                    groupBits.set(ord);
                    collapsedSet.add(globalDoc);
                }
            } else {
                if (globalDoc > values.docID()) {
                    values.advance(globalDoc);
                }
                if (globalDoc == values.docID()) {
                    int ord = values.ordValue();
                    ++count;
                    BytesRef ref = values.lookupOrd(ord);
                    ordBytes.put(ord, BytesRef.deepCopyOf(ref));
                    groupBits.set(ord);
                    collapsedSet.add(globalDoc);
                }
            }
        }
        if (count > 0 && count < 200) {
            try {
                groupQuery = getGroupQuery(field, count, ordBytes);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }
    } else {
        groupSet = new LongHashSet(docList.size());
        NumericDocValues collapseValues = contexts.get(currentContext).reader().getNumericDocValues(field);
        int count = 0;
        for (int i = 0; i < globalDocs.length; i++) {
            int globalDoc = globalDocs[i];
            while (globalDoc >= nextDocBase) {
                currentContext++;
                currentDocBase = contexts.get(currentContext).docBase;
                nextDocBase = currentContext + 1 < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
                collapseValues = contexts.get(currentContext).reader().getNumericDocValues(field);
            }
            int contextDoc = globalDoc - currentDocBase;
            int valueDocID = collapseValues.docID();
            if (valueDocID < contextDoc) {
                valueDocID = collapseValues.advance(contextDoc);
            }
            long value;
            if (valueDocID == contextDoc) {
                value = collapseValues.longValue();
            } else {
                value = 0;
            }
            if (value != nullValue) {
                ++count;
                groupSet.add(value);
                collapsedSet.add(globalDoc);
            }
        }
        if (count > 0 && count < 200) {
            if (fieldType.isPointField()) {
                groupQuery = getPointGroupQuery(schemaField, count, groupSet);
            } else {
                groupQuery = getGroupQuery(field, fieldType, count, groupSet);
            }
        }
    }
    Collector collector;
    if (sort != null)
        sort = sort.rewrite(searcher);
    Collector groupExpandCollector = null;
    if (values != null) {
        //Get The Top Level SortedDocValues again so we can re-iterate:
        if (CollapsingQParserPlugin.HINT_TOP_FC.equals(hint)) {
            Map<String, UninvertingReader.Type> mapping = new HashMap();
            mapping.put(field, UninvertingReader.Type.SORTED);
            UninvertingReader uninvertingReader = new UninvertingReader(new ReaderWrapper(searcher.getSlowAtomicReader(), field), mapping);
            values = uninvertingReader.getSortedDocValues(field);
        } else {
            values = DocValues.getSorted(reader, field);
        }
        groupExpandCollector = new GroupExpandCollector(values, groupBits, collapsedSet, limit, sort);
    } else {
        groupExpandCollector = new NumericGroupExpandCollector(field, nullValue, groupSet, collapsedSet, limit, sort);
    }
    if (groupQuery != null) {
        //Limits the results to documents that are in the same group as the documents in the page.
        newFilters.add(groupQuery);
    }
    SolrIndexSearcher.ProcessedFilter pfilter = searcher.getProcessedFilter(null, newFilters);
    if (pfilter.postFilter != null) {
        pfilter.postFilter.setLastDelegate(groupExpandCollector);
        collector = pfilter.postFilter;
    } else {
        collector = groupExpandCollector;
    }
    if (pfilter.filter == null) {
        searcher.search(query, collector);
    } else {
        Query q = new BooleanQuery.Builder().add(query, Occur.MUST).add(pfilter.filter, Occur.FILTER).build();
        searcher.search(q, collector);
    }
    LongObjectMap<Collector> groups = ((GroupCollector) groupExpandCollector).getGroups();
    NamedList outMap = new SimpleOrderedMap();
    CharsRefBuilder charsRef = new CharsRefBuilder();
    for (LongObjectCursor<Collector> cursor : groups) {
        long groupValue = cursor.key;
        TopDocsCollector<?> topDocsCollector = TopDocsCollector.class.cast(cursor.value);
        TopDocs topDocs = topDocsCollector.topDocs();
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        if (scoreDocs.length > 0) {
            int[] docs = new int[scoreDocs.length];
            float[] scores = new float[scoreDocs.length];
            for (int i = 0; i < docs.length; i++) {
                ScoreDoc scoreDoc = scoreDocs[i];
                docs[i] = scoreDoc.doc;
                scores[i] = scoreDoc.score;
            }
            DocSlice slice = new DocSlice(0, docs.length, docs, scores, topDocs.totalHits, topDocs.getMaxScore());
            if (fieldType instanceof StrField) {
                final BytesRef bytesRef = ordBytes.get((int) groupValue);
                fieldType.indexedToReadable(bytesRef, charsRef);
                String group = charsRef.toString();
                outMap.add(group, slice);
            } else {
                outMap.add(numericToString(fieldType, groupValue), slice);
            }
        }
    }
    rb.rsp.add("expanded", outMap);
}
Also used : StrField(org.apache.solr.schema.StrField) BooleanQuery(org.apache.lucene.search.BooleanQuery) Query(org.apache.lucene.search.Query) TermInSetQuery(org.apache.lucene.search.TermInSetQuery) SolrConstantScoreQuery(org.apache.solr.search.SolrConstantScoreQuery) HashMap(java.util.HashMap) LongObjectHashMap(com.carrotsearch.hppc.LongObjectHashMap) IntObjectHashMap(com.carrotsearch.hppc.IntObjectHashMap) ArrayList(java.util.ArrayList) IntHashSet(com.carrotsearch.hppc.IntHashSet) MultiDocValues(org.apache.lucene.index.MultiDocValues) DocSlice(org.apache.solr.search.DocSlice) ScoreDoc(org.apache.lucene.search.ScoreDoc) FixedBitSet(org.apache.lucene.util.FixedBitSet) Sort(org.apache.lucene.search.Sort) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) CharsRefBuilder(org.apache.lucene.util.CharsRefBuilder) SortedDocValues(org.apache.lucene.index.SortedDocValues) LongHashSet(com.carrotsearch.hppc.LongHashSet) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) NumberType(org.apache.solr.schema.NumberType) QParser(org.apache.solr.search.QParser) SolrParams(org.apache.solr.common.params.SolrParams) NumericDocValues(org.apache.lucene.index.NumericDocValues) DocIterator(org.apache.solr.search.DocIterator) SimpleOrderedMap(org.apache.solr.common.util.SimpleOrderedMap) UninvertingReader(org.apache.solr.uninverting.UninvertingReader) TopDocs(org.apache.lucene.search.TopDocs) TopFieldCollector(org.apache.lucene.search.TopFieldCollector) LeafCollector(org.apache.lucene.search.LeafCollector) Collector(org.apache.lucene.search.Collector) TopScoreDocCollector(org.apache.lucene.search.TopScoreDocCollector) TopDocsCollector(org.apache.lucene.search.TopDocsCollector) BytesRef(org.apache.lucene.util.BytesRef) LeafReader(org.apache.lucene.index.LeafReader) FilterLeafReader(org.apache.lucene.index.FilterLeafReader) NamedList(org.apache.solr.common.util.NamedList) IOException(java.io.IOException) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) FieldType(org.apache.solr.schema.FieldType) CollapsingQParserPlugin(org.apache.solr.search.CollapsingQParserPlugin) SchemaField(org.apache.solr.schema.SchemaField) DocValuesType(org.apache.lucene.index.DocValuesType) LongValues(org.apache.lucene.util.LongValues) DocList(org.apache.solr.search.DocList)
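
ExpandComponent is the companion of CollapsingQParserPlugin: the collapse post filter keeps one representative per group and expand=true brings the remaining group members back in an "expanded" section, which is what the process method above builds. A minimal SolrJ sketch, assuming documents grouped by a group_s field:

import java.util.Map;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocumentList;

void collapseAndExpand() throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/mycollection").build()) {
        SolrQuery q = new SolrQuery("*:*");
        // Collapse to one document per group_s value ...
        q.addFilterQuery("{!collapse field=group_s}");
        // ... then expand the groups on the current page (ExpandParams.EXPAND etc.).
        q.set("expand", true);
        q.set("expand.rows", 5);            // ExpandParams.EXPAND_ROWS, default 5
        q.set("expand.sort", "price asc");  // parsed with SortSpecParsing, see Example 41
        QueryResponse rsp = client.query(q);
        // One document list per group, keyed by the collapse field's readable value.
        Map<String, SolrDocumentList> expanded = rsp.getExpandedResults();
        expanded.forEach((group, docs) -> System.out.println(group + ": " + docs.getNumFound()));
    }
}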

Example 45 with SchemaField

use of org.apache.solr.schema.SchemaField in project lucene-solr by apache.

From class LukeRequestHandler, method getSchemaInfo:

/**
   * Return info from the index
   */
private static SimpleOrderedMap<Object> getSchemaInfo(IndexSchema schema) {
    Map<String, List<String>> typeusemap = new TreeMap<>();
    Map<String, Object> fields = new TreeMap<>();
    SchemaField uniqueField = schema.getUniqueKeyField();
    for (SchemaField f : schema.getFields().values()) {
        populateFieldInfo(schema, typeusemap, fields, uniqueField, f);
    }
    Map<String, Object> dynamicFields = new TreeMap<>();
    for (SchemaField f : schema.getDynamicFieldPrototypes()) {
        populateFieldInfo(schema, typeusemap, dynamicFields, uniqueField, f);
    }
    SimpleOrderedMap<Object> types = new SimpleOrderedMap<>();
    Map<String, FieldType> sortedTypes = new TreeMap<>(schema.getFieldTypes());
    for (FieldType ft : sortedTypes.values()) {
        SimpleOrderedMap<Object> field = new SimpleOrderedMap<>();
        field.add("fields", typeusemap.get(ft.getTypeName()));
        field.add("tokenized", ft.isTokenized());
        field.add("className", ft.getClass().getName());
        field.add("indexAnalyzer", getAnalyzerInfo(ft.getIndexAnalyzer()));
        field.add("queryAnalyzer", getAnalyzerInfo(ft.getQueryAnalyzer()));
        field.add("similarity", getSimilarityInfo(ft.getSimilarity()));
        types.add(ft.getTypeName(), field);
    }
    // Must go through this to maintain binary compatibility. Putting a TreeMap into a resp leads to casting errors
    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();
    SimpleOrderedMap<Object> fieldsSimple = new SimpleOrderedMap<>();
    for (Map.Entry<String, Object> ent : fields.entrySet()) {
        fieldsSimple.add(ent.getKey(), ent.getValue());
    }
    finfo.add("fields", fieldsSimple);
    SimpleOrderedMap<Object> dynamicSimple = new SimpleOrderedMap<>();
    for (Map.Entry<String, Object> ent : dynamicFields.entrySet()) {
        dynamicSimple.add(ent.getKey(), ent.getValue());
    }
    finfo.add("dynamicFields", dynamicSimple);
    finfo.add("uniqueKeyField", null == uniqueField ? null : uniqueField.getName());
    finfo.add("similarity", getSimilarityInfo(schema.getSimilarity()));
    finfo.add("types", types);
    return finfo;
}
Also used : TreeMap(java.util.TreeMap) SimpleOrderedMap(org.apache.solr.common.util.SimpleOrderedMap) FieldType(org.apache.solr.schema.FieldType) SchemaField(org.apache.solr.schema.SchemaField) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) NamedList(org.apache.solr.common.util.NamedList) Map(java.util.Map) HashMap(java.util.HashMap)
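
The map assembled here is what the Luke handler returns under the schema key when queried with show=schema. A hedged SolrJ sketch that fetches it through a generic request; the path assumes the default /admin/luke registration:

import java.util.Collections;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.MapSolrParams;
import org.apache.solr.common.util.NamedList;

void dumpSchemaInfo() throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/mycollection").build()) {
        GenericSolrRequest luke = new GenericSolrRequest(
                SolrRequest.METHOD.GET, "/admin/luke",
                new MapSolrParams(Collections.singletonMap("show", "schema")));
        NamedList<Object> rsp = client.request(luke);
        // "schema" is the SimpleOrderedMap built by getSchemaInfo above.
        NamedList<?> schema = (NamedList<?>) rsp.get("schema");
        System.out.println("uniqueKeyField: " + schema.get("uniqueKeyField"));
        System.out.println("types defined: " + ((NamedList<?>) schema.get("types")).size());
    }
}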

Aggregations

SchemaField (org.apache.solr.schema.SchemaField): 182
SolrException (org.apache.solr.common.SolrException): 48
ArrayList (java.util.ArrayList): 42
FieldType (org.apache.solr.schema.FieldType): 41
IndexSchema (org.apache.solr.schema.IndexSchema): 35
NamedList (org.apache.solr.common.util.NamedList): 29
Query (org.apache.lucene.search.Query): 23
IOException (java.io.IOException): 22
BytesRef (org.apache.lucene.util.BytesRef): 21
SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap): 21
SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher): 21
Document (org.apache.lucene.document.Document): 20
SolrParams (org.apache.solr.common.params.SolrParams): 19
IndexableField (org.apache.lucene.index.IndexableField): 18
HashMap (java.util.HashMap): 17
SolrInputDocument (org.apache.solr.common.SolrInputDocument): 16
SolrDocument (org.apache.solr.common.SolrDocument): 15
SolrQueryRequest (org.apache.solr.request.SolrQueryRequest): 15
Map (java.util.Map): 14
Term (org.apache.lucene.index.Term): 14