Use of org.apache.solr.schema.SchemaField in project lucene-solr by apache.
From the class DocumentAnalysisRequestHandler, method handleAnalysisRequest.
/**
 * Handles the resolved {@link DocumentAnalysisRequest} and returns the analysis response as a named list.
 *
 * @param request The {@link DocumentAnalysisRequest} to be handled.
 * @param schema  The index schema.
 *
 * @return The analysis response as a named list.
 */
NamedList<Object> handleAnalysisRequest(DocumentAnalysisRequest request, IndexSchema schema) {
  SchemaField uniqueKeyField = schema.getUniqueKeyField();
  NamedList<Object> result = new SimpleOrderedMap<>();
  for (SolrInputDocument document : request.getDocuments()) {
    NamedList<NamedList> theTokens = new SimpleOrderedMap<>();
    result.add(document.getFieldValue(uniqueKeyField.getName()).toString(), theTokens);
    for (String name : document.getFieldNames()) {
      // there's no point in providing analysis for unindexed fields
      SchemaField field = schema.getField(name);
      if (!field.indexed()) {
        continue;
      }
      NamedList<Object> fieldTokens = new SimpleOrderedMap<>();
      theTokens.add(name, fieldTokens);
      FieldType fieldType = schema.getFieldType(name);
      final String queryValue = request.getQuery();
      Set<BytesRef> termsToMatch;
      try {
        termsToMatch = (queryValue != null && request.isShowMatch())
            ? getQueryTokenSet(queryValue, fieldType.getQueryAnalyzer())
            : EMPTY_BYTES_SET;
      } catch (Exception e) {
        // ignore analysis exceptions since we are applying arbitrary text to all fields
        termsToMatch = EMPTY_BYTES_SET;
      }
      if (request.getQuery() != null) {
        try {
          AnalysisContext analysisContext = new AnalysisContext(fieldType, fieldType.getQueryAnalyzer(), EMPTY_BYTES_SET);
          fieldTokens.add("query", analyzeValue(request.getQuery(), analysisContext));
        } catch (Exception e) {
          // ignore analysis exceptions since we are applying arbitrary text to all fields
        }
      }
      Analyzer analyzer = fieldType.getIndexAnalyzer();
      AnalysisContext analysisContext = new AnalysisContext(fieldType, analyzer, termsToMatch);
      Collection<Object> fieldValues = document.getFieldValues(name);
      NamedList<NamedList<? extends Object>> indexTokens = new SimpleOrderedMap<>();
      for (Object fieldValue : fieldValues) {
        indexTokens.add(String.valueOf(fieldValue), analyzeValue(fieldValue.toString(), analysisContext));
      }
      fieldTokens.add("index", indexTokens);
    }
  }
  return result;
}
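For context, a minimal sketch of how this handler might be driven. The SolrJ DocumentAnalysisRequest setters used below (setQuery, setShowMatch, addDocument) are fluent, but the handler instance, the schema, and the field names ("id", "title") are assumptions; the method is package-private, so the caller is assumed to live in the same package.

// Hypothetical driver: analyze one document with match highlighting.
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", "doc-1");             // assumed unique key field
doc.addField("title", "Solr analysis");  // assumed indexed text field

DocumentAnalysisRequest analysisRequest = new DocumentAnalysisRequest()
    .setQuery("solr")
    .setShowMatch(true)
    .addDocument(doc);

// 'handler' and 'schema' are assumed to be in scope.
NamedList<Object> analysis = handler.handleAnalysisRequest(analysisRequest, schema);
// The result is keyed by unique-key value, then field name, with "query"
// and "index" token lists under each indexed field.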
Use of org.apache.solr.schema.SchemaField in project lucene-solr by apache.
From the class UnifiedSolrHighlighter, method getUniqueKeys.
/**
 * Retrieves the unique keys for the top docs, used to key the results.
 */
protected String[] getUniqueKeys(SolrIndexSearcher searcher, int[] docIDs) throws IOException {
  IndexSchema schema = searcher.getSchema();
  SchemaField keyField = schema.getUniqueKeyField();
  if (keyField != null) {
    Set<String> selector = Collections.singleton(keyField.getName());
    String[] uniqueKeys = new String[docIDs.length];
    for (int i = 0; i < docIDs.length; i++) {
      int docid = docIDs[i];
      Document doc = searcher.doc(docid, selector);
      String id = schema.printableUniqueKey(doc);
      uniqueKeys[i] = id;
    }
    return uniqueKeys;
  } else {
    return new String[docIDs.length];
  }
}
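A sketch of how a subclass or same-class caller might feed this method; the query object and the choice of ten hits are illustrative, not part of the original.

// Hypothetical caller: map the top hits of a query to their unique keys.
TopDocs topDocs = searcher.search(query, 10);  // 'query' is assumed
int[] docIDs = new int[topDocs.scoreDocs.length];
for (int i = 0; i < docIDs.length; i++) {
  docIDs[i] = topDocs.scoreDocs[i].doc;
}
String[] keys = getUniqueKeys(searcher, docIDs);
// keys[i] is the printable unique key of hit i; if the schema defines no
// unique key field, the method returns an all-null array of the same length.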
Use of org.apache.solr.schema.SchemaField in project lucene-solr by apache.
From the class SimpleFacets, method getListedTermCounts.
/**
 * Computes the term->count counts for the specified term values, relative to the
 * docset contained in the given parsed params.
 *
 * @param field  the name of the field to compute term counts against
 * @param parsed contains the docset to compute term counts relative to
 * @param terms  a list of term values (in the specified field) to compute the counts for
 */
protected NamedList<Integer> getListedTermCounts(String field, final ParsedParams parsed, List<String> terms) throws IOException {
  SchemaField sf = searcher.getSchema().getField(field);
  FieldType ft = sf.getType();
  NamedList<Integer> res = new NamedList<>();
  for (String term : terms) {
    int count = searcher.numDocs(ft.getFieldQuery(null, sf, term), parsed.docs);
    res.add(term, count);
  }
  return res;
}
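A small usage sketch from a hypothetical subclass; the field name and term values are invented for illustration, and 'parsed' is assumed to come from parseParams as in the interval-faceting example that follows.

// Hypothetical call: count three specific terms in the "category" field.
List<String> terms = Arrays.asList("books", "music", "video");
NamedList<Integer> counts = getListedTermCounts("category", parsed, terms);
for (Map.Entry<String, Integer> entry : counts) {  // NamedList is iterable
  System.out.println(entry.getKey() + " -> " + entry.getValue());
}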
Use of org.apache.solr.schema.SchemaField in project lucene-solr by apache.
From the class SimpleFacets, method getFacetIntervalCounts.
/**
 * Returns a <code>NamedList</code> with each entry having the "key" of the interval as name and the count of docs
 * in that interval as value. All intervals added in the request are included in the returned
 * <code>NamedList</code> (including those with a 0 count). The order of the intervals must be
 * deterministic and identical across all shards of a distributed request; otherwise the
 * collation of results will fail.
 */
public NamedList<Object> getFacetIntervalCounts() throws IOException, SyntaxError {
  NamedList<Object> res = new SimpleOrderedMap<Object>();
  String[] fields = global.getParams(FacetParams.FACET_INTERVAL);
  if (fields == null || fields.length == 0) return res;
  for (String field : fields) {
    final ParsedParams parsed = parseParams(FacetParams.FACET_INTERVAL, field);
    String[] intervalStrs = parsed.required.getFieldParams(parsed.facetValue, FacetParams.FACET_INTERVAL_SET);
    SchemaField schemaField = searcher.getCore().getLatestSchema().getField(parsed.facetValue);
    if (parsed.params.getBool(GroupParams.GROUP_FACET, false)) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Interval Faceting can't be used with " + GroupParams.GROUP_FACET);
    }
    if (schemaField.getType().isPointField() && !schemaField.hasDocValues()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't use interval faceting on a PointField without docValues");
    }
    SimpleOrderedMap<Integer> fieldResults = new SimpleOrderedMap<Integer>();
    res.add(parsed.key, fieldResults);
    IntervalFacets intervalFacets = new IntervalFacets(schemaField, searcher, parsed.docs, intervalStrs, parsed.params);
    for (FacetInterval interval : intervalFacets) {
      fieldResults.add(interval.getKey(), interval.getCount());
    }
  }
  return res;
}
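To show the parameters this method reads, a hedged sketch of a request; the parameter names come from FacetParams and Solr's per-field f.&lt;field&gt;. prefix convention, while the "price" field and interval bounds are made up.

// Hypothetical request parameters for interval faceting on a "price" field.
ModifiableSolrParams params = new ModifiableSolrParams();
params.add(FacetParams.FACET_INTERVAL, "price");      // facet.interval=price
params.add("f.price.facet.interval.set", "[0,100)");  // per-field interval set
params.add("f.price.facet.interval.set", "[100,500)");
params.add("f.price.facet.interval.set", "[500,*]");
// getFacetIntervalCounts() would return one entry per interval, in the
// order given above, including intervals whose count is 0.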
Use of org.apache.solr.schema.SchemaField in project lucene-solr by apache.
From the class TextResponseWriter, method writeVal.
public final void writeVal(String name, Object val) throws IOException {
  // go in order of most common to least common
  if (val == null) {
    writeNull(name);
  } else if (val instanceof String) {
    writeStr(name, val.toString(), true);
    // micro-optimization... using toString() avoids a cast first
  } else if (val instanceof IndexableField) {
    IndexableField f = (IndexableField) val;
    SchemaField sf = schema.getFieldOrNull(f.name());
    if (sf != null) {
      sf.getType().write(this, name, f);
    } else {
      writeStr(name, f.stringValue(), true);
    }
  } else if (val instanceof Number) {
    writeNumber(name, (Number) val);
  } else if (val instanceof Boolean) {
    writeBool(name, (Boolean) val);
  } else if (val instanceof Date) {
    writeDate(name, (Date) val);
  } else if (val instanceof Document) {
    SolrDocument doc = DocsStreamer.convertLuceneDocToSolrDoc((Document) val, schema);
    writeSolrDocument(name, doc, returnFields, 0);
  } else if (val instanceof SolrDocument) {
    writeSolrDocument(name, (SolrDocument) val, returnFields, 0);
  } else if (val instanceof ResultContext) {
    // requires access to IndexReader
    writeDocuments(name, (ResultContext) val);
  } else if (val instanceof DocList) {
    // Should not happen normally
    ResultContext ctx = new BasicResultContext((DocList) val, returnFields, null, null, req);
    writeDocuments(name, ctx);
    // }
    // else if (val instanceof DocSet) {
    //   how do we know what fields to read?
    //   todo: have a DocList/DocSet wrapper that
    //   restricts the fields to write...?
  } else if (val instanceof SolrDocumentList) {
    writeSolrDocumentList(name, (SolrDocumentList) val, returnFields);
  } else if (val instanceof Map) {
    writeMap(name, (Map) val, false, true);
  } else if (val instanceof NamedList) {
    writeNamedList(name, (NamedList) val);
  } else if (val instanceof Path) {
    writeStr(name, ((Path) val).toAbsolutePath().toString(), true);
  } else if (val instanceof IteratorWriter) {
    writeIterator((IteratorWriter) val);
  } else if (val instanceof Iterable) {
    writeArray(name, ((Iterable) val).iterator());
  } else if (val instanceof Object[]) {
    writeArray(name, (Object[]) val);
  } else if (val instanceof Iterator) {
    writeArray(name, (Iterator) val);
  } else if (val instanceof byte[]) {
    byte[] arr = (byte[]) val;
    writeByteArr(name, arr, 0, arr.length);
  } else if (val instanceof BytesRef) {
    BytesRef arr = (BytesRef) val;
    writeByteArr(name, arr.bytes, arr.offset, arr.length);
  } else if (val instanceof EnumFieldValue) {
    writeStr(name, val.toString(), true);
  } else if (val instanceof WriteableValue) {
    ((WriteableValue) val).write(name, this);
  } else if (val instanceof MapWriter) {
    writeMap((MapWriter) val);
  } else if (val instanceof MapSerializable) {
    // todo find a better way to reuse the map more efficiently
    writeMap(name, ((MapSerializable) val).toMap(new LinkedHashMap<>()), false, true);
  } else {
    // default... for debugging only
    writeStr(name, val.getClass().getName() + ':' + val.toString(), true);
  }
}
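To make the dispatch concrete, a short sketch of heterogeneous values and the branch each would take; 'writer' stands in for some concrete TextResponseWriter subclass and its construction is assumed.

// Hypothetical usage: writeVal picks a branch per runtime type.
NamedList<Object> response = new SimpleOrderedMap<>();
response.add("numFound", 42);                   // Number    -> writeNumber
response.add("id", "doc-1");                    // String    -> writeStr
response.add("timestamp", new Date());          // Date      -> writeDate
response.add("tags", Arrays.asList("a", "b"));  // Iterable  -> writeArray
writer.writeVal("response", response);          // NamedList -> writeNamedList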