Use of org.apache.solr.schema.SchemaField in the Apache lucene-solr project.
Class RangeFacetProcessor, method getFacetRangeCountsDocValues:
/**
 * Computes range facet counts for the given request using doc values via
 * {@code IntervalFacets}: every requested range (and any BEFORE/BETWEEN/AFTER
 * "other" bucket) is translated into a facet interval, counted in one pass,
 * and the results are copied back into the usual range-facet response shape.
 *
 * @param rfr the range facet request (field, ranges, gap, min count, "other" options)
 * @return a named list containing "counts" (per-range entries meeting minCount),
 *         "gap", "start", "end", and top-level before/between/after entries when requested
 * @throws IOException on index access errors
 * @throws SyntaxError if range values in the request cannot be parsed
 */
private <T extends Comparable<T>> NamedList<Object> getFacetRangeCountsDocValues(RangeFacetRequest rfr) throws IOException, SyntaxError {
SchemaField sf = rfr.getSchemaField();
final NamedList<Object> res = new SimpleOrderedMap<>();
final NamedList<Integer> counts = new NamedList<>();
res.add("counts", counts);
ArrayList<IntervalFacets.FacetInterval> intervals = new ArrayList<>();
// explicitly return the gap. compute this early so we are more
// likely to catch parse errors before attempting math
res.add("gap", rfr.getGapObj());
final int minCount = rfr.getMinCount();
boolean includeBefore = false;
boolean includeBetween = false;
boolean includeAfter = false;
Set<FacetRangeOther> others = rfr.getOthers();
// Reserve placeholder slots for the requested "other" buckets up front so
// they land at fixed positions in the intervals list; we don't add
// anything if "none" is specified.
if (!others.contains(FacetRangeOther.NONE)) {
if (others.contains(FacetRangeOther.ALL) || others.contains(FacetRangeOther.BEFORE)) {
// We'll add an interval later in this position (slot 0 = BEFORE)
intervals.add(null);
includeBefore = true;
}
if (others.contains(FacetRangeOther.ALL) || others.contains(FacetRangeOther.BETWEEN)) {
// We'll add an interval later in this position (slot 0 or 1 = BETWEEN)
intervals.add(null);
includeBetween = true;
}
if (others.contains(FacetRangeOther.ALL) || others.contains(FacetRangeOther.AFTER)) {
// AFTER is appended at the very end, after all regular ranges
includeAfter = true;
}
}
IntervalFacets.FacetInterval after = null;
// Walk the requested ranges: special "other" ranges are recognized by name
// and fill the reserved slots; everything else is a regular gap range.
for (RangeFacetRequest.FacetRange range : rfr.getFacetRanges()) {
try {
FacetRangeOther other = FacetRangeOther.get(range.name);
if (other != null) {
switch(other) {
case BEFORE:
// BEFORE is open on the lower end: [*, upper)
assert range.lower == null;
intervals.set(0, new IntervalFacets.FacetInterval(sf, "*", range.upper, range.includeLower, range.includeUpper, FacetRangeOther.BEFORE.toString()));
break;
case AFTER:
// AFTER is open on the upper end: (lower, *]; appended later
assert range.upper == null;
after = new IntervalFacets.FacetInterval(sf, range.lower, "*", range.includeLower, range.includeUpper, FacetRangeOther.AFTER.toString());
break;
case BETWEEN:
// BETWEEN occupies the slot after BEFORE (if BEFORE was reserved)
intervals.set(includeBefore ? 1 : 0, new IntervalFacets.FacetInterval(sf, range.lower, range.upper, range.includeLower, range.includeUpper, FacetRangeOther.BETWEEN.toString()));
break;
}
}
continue;
} catch (SolrException e) {
// safe to ignore: the range name is not one of the special "other"
// values, so fall through and treat it as a regular range below
}
// Regular range; keyed by its lower bound, matching classic range facets
intervals.add(new IntervalFacets.FacetInterval(sf, range.lower, range.upper, range.includeLower, range.includeUpper, range.lower));
}
if (includeAfter) {
assert after != null;
intervals.add(after);
}
IntervalFacets.FacetInterval[] intervalsArray = intervals.toArray(new IntervalFacets.FacetInterval[intervals.size()]);
// don't use the ArrayList anymore
intervals = null;
// Constructing IntervalFacets performs the actual counting, populating
// the count of each interval in intervalsArray as a side effect.
new IntervalFacets(sf, searcher, computeDocSet(docsOrig, rfr.getExcludeTags()), intervalsArray);
int intervalIndex = 0;
int lastIntervalIndex = intervalsArray.length - 1;
// BEFORE (when requested) sits at index 0 and is added to the
// response named list instead of with the counts
if (includeBefore) {
res.add(intervalsArray[intervalIndex].getKey(), intervalsArray[intervalIndex].getCount());
intervalIndex++;
}
// BETWEEN (when requested) sits at index 0 (or 1 when
// "BEFORE" was also requested). Needs to be added to the response named list instead of with the counts
if (includeBetween) {
res.add(intervalsArray[intervalIndex].getKey(), intervalsArray[intervalIndex].getCount());
intervalIndex++;
}
// AFTER (when requested) is the last interval.
// Needs to be added to the response named list instead of with the counts
if (includeAfter) {
res.add(intervalsArray[lastIntervalIndex].getKey(), intervalsArray[lastIntervalIndex].getCount());
lastIntervalIndex--;
}
// now add all other intervals to the counts NL, filtering by minCount
while (intervalIndex <= lastIntervalIndex) {
IntervalFacets.FacetInterval interval = intervalsArray[intervalIndex];
if (interval.getCount() >= minCount) {
counts.add(interval.getKey(), interval.getCount());
}
intervalIndex++;
}
res.add("start", rfr.getStartObj());
res.add("end", rfr.getEndObj());
return res;
}
Use of org.apache.solr.schema.SchemaField in the Apache lucene-solr project.
Class RangeFacetProcessor, method getFacetRangeCounts:
/**
 * Computes the value constraints and associated facet counts for each facet
 * range in the given {@link RangeFacetRequest} and adds the result to
 * {@code resOuter} under the request's key.
 *
 * <p>Dispatches to the doc-values implementation when the request's method is
 * {@code FacetRangeMethod.DV}; otherwise uses the filter-query implementation.
 *
 * @param rangeFacetRequest describes the field, ranges, and faceting method
 * @param resOuter the response named list to add this facet's results to
 * @throws IOException on index access errors
 * @throws SyntaxError if the request's range values cannot be parsed
 */
public void getFacetRangeCounts(RangeFacetRequest rangeFacetRequest, NamedList<Object> resOuter) throws IOException, SyntaxError {
  final String key = rangeFacetRequest.getKey();
  final FieldType fieldType = searcher.getSchema().getField(rangeFacetRequest.facetOn).getType();
  // Guard clause: anything other than the DV method takes the classic path.
  if (!rangeFacetRequest.getMethod().equals(FacetRangeMethod.DV)) {
    resOuter.add(key, getFacetRangeCounts(rangeFacetRequest));
    return;
  }
  // DV faceting only supports numeric trie/point fields.
  assert fieldType instanceof TrieField || fieldType.isPointField();
  resOuter.add(key, getFacetRangeCountsDocValues(rangeFacetRequest));
}
Use of org.apache.solr.schema.SchemaField in the Apache lucene-solr project.
Class RealTimeGetComponent, method getInputDocument:
/**
 * Obtains the latest document for a given id from the tlog or through the realtime searcher (if not found in the tlog).
 * @param versionReturned If a non-null AtomicLong is passed in, it is set to the version of the update returned from the TLog.
 * @param avoidRetrievingStoredFields Setting this to true avoids fetching stored fields through the realtime searcher,
 *                  however has no effect on documents obtained from the tlog.
 *                  Non-stored docValues fields are populated anyway, and are not affected by this parameter. Note that if
 *                  the id field is a stored field, it will not be populated if this parameter is true and the document is
 *                  obtained from the index.
 * @param onlyTheseNonStoredDVs If not-null, populate only these DV fields in the document fetched through the realtime searcher.
 *                  If this is null, decorate all non-stored DVs (that are not targets of copy fields) from the searcher.
 * @param resolveFullDocument In case the document is fetched from the tlog, it could only be a partial document if the last update
 *                  was an in-place update. In that case, should this partial document be resolved to a full document (by following
 *                  back prevPointer/prevVersion)?
 * @return the document, or null if it does not exist or was deleted
 */
public static SolrInputDocument getInputDocument(SolrCore core, BytesRef idBytes, AtomicLong versionReturned, boolean avoidRetrievingStoredFields, Set<String> onlyTheseNonStoredDVs, boolean resolveFullDocument) throws IOException {
  SolrInputDocument sid = null;
  RefCounted<SolrIndexSearcher> searcherHolder = null;
  try {
    sid = getInputDocumentFromTlog(core, idBytes, versionReturned, onlyTheseNonStoredDVs, resolveFullDocument);
    if (sid == DELETED) {
      // The tlog records the id as deleted: definitively gone.
      return null;
    }
    if (sid == null) {
      // Didn't find it in the update log, so it should be in the newest
      // searcher opened. (Removed a dead "if (searcher == null)" guard here:
      // the local was unconditionally null at this point.)
      searcherHolder = core.getRealtimeSearcher();
      SolrIndexSearcher searcher = searcherHolder.get();
      // SolrCore.verbose("RealTimeGet using searcher ", searcher);
      SchemaField idField = core.getLatestSchema().getUniqueKeyField();
      int docid = searcher.getFirstMatch(new Term(idField.getName(), idBytes));
      if (docid < 0) {
        // Not in the index either.
        return null;
      }
      SolrDocumentFetcher docFetcher = searcher.getDocFetcher();
      if (avoidRetrievingStoredFields) {
        // Caller opted out of stored fields; start from an empty document and
        // rely on the docValues decoration below.
        sid = new SolrInputDocument();
      } else {
        Document luceneDocument = docFetcher.doc(docid);
        sid = toSolrInputDocument(luceneDocument, core.getLatestSchema());
      }
      if (onlyTheseNonStoredDVs != null) {
        docFetcher.decorateDocValueFields(sid, docid, onlyTheseNonStoredDVs);
      } else {
        docFetcher.decorateDocValueFields(sid, docid, docFetcher.getNonStoredDVsWithoutCopyTargets());
      }
    }
  } finally {
    // Always release the realtime searcher reference if we acquired one.
    if (searcherHolder != null) {
      searcherHolder.decref();
    }
  }
  if (versionReturned != null) {
    if (sid.containsKey(VERSION_FIELD)) {
      versionReturned.set((long) sid.getFieldValue(VERSION_FIELD));
    }
  }
  return sid;
}
Use of org.apache.solr.schema.SchemaField in the Apache lucene-solr project.
Class TopGroupsResultTransformer, method serializeTopDocs:
/**
 * Serializes a {@code QueryCommandResult} to a named list suitable for
 * shipping between shards: match/hit counts, optional maxScore, and one
 * entry per top document (id, optional score, optional marshalled sort values).
 *
 * @param result the per-shard query command result to serialize
 * @return the serialized named list representation
 * @throws IOException if retrieving a stored document fails
 */
protected NamedList serializeTopDocs(QueryCommandResult result) throws IOException {
  final NamedList<Object> serialized = new NamedList<>();
  serialized.add("matches", result.getMatches());
  serialized.add("totalHits", result.getTopDocs().totalHits);
  // debug: assert !Float.isNaN(result.getTopDocs().getMaxScore()) == rb.getGroupingSpec().isNeedScore();
  final float maxScore = result.getTopDocs().getMaxScore();
  if (!Float.isNaN(maxScore)) {
    serialized.add("maxScore", maxScore);
  }
  final List<NamedList> docEntries = new ArrayList<>();
  serialized.add("documents", docEntries);
  final IndexSchema schema = rb.req.getSearcher().getSchema();
  final SchemaField idField = schema.getUniqueKeyField();
  for (ScoreDoc scoreDoc : result.getTopDocs().scoreDocs) {
    final NamedList<Object> entry = new NamedList<>();
    docEntries.add(entry);
    final Document storedDoc = retrieveDocument(idField, scoreDoc.doc);
    entry.add(ID, idField.getType().toExternal(storedDoc.getField(idField.getName())));
    if (!Float.isNaN(scoreDoc.score)) {
      entry.add("score", scoreDoc.score);
    }
    // Only FieldDocs carry per-field sort values to marshal.
    if (FieldDoc.class.isInstance(scoreDoc)) {
      final FieldDoc fieldDoc = (FieldDoc) scoreDoc;
      final Object[] marshalled = new Object[fieldDoc.fields.length];
      for (int idx = 0; idx < fieldDoc.fields.length; idx++) {
        final Sort groupSort = rb.getGroupingSpec().getGroupSort();
        final String sortFieldName = groupSort.getSort()[idx].getField();
        // A null field name means the sort criterion is not schema-backed (e.g. score).
        final SchemaField sortSchemaField = sortFieldName != null ? schema.getFieldOrNull(sortFieldName) : null;
        marshalled[idx] = ShardResultTransformerUtils.marshalSortValue(fieldDoc.fields[idx], sortSchemaField);
      }
      entry.add("sortValues", marshalled);
    }
  }
  return serialized;
}
Use of org.apache.solr.schema.SchemaField in the Apache lucene-solr project.
Class TopGroupsResultTransformer, method transformToNativeShardDoc:
/**
 * Converts serialized shard documents (as produced by serializeTopDocs) back
 * into native {@code ShardDoc} instances, unmarshalling sort values against
 * the given schema.
 *
 * @param documents the serialized documents received from a shard
 * @param groupSort the sort that produced the documents' sort values
 * @param shard the shard the documents came from
 * @param schema the index schema used to unmarshal sort values
 * @return an array of ShardDocs in the same order as {@code documents}
 */
protected ScoreDoc[] transformToNativeShardDoc(List<NamedList<Object>> documents, Sort groupSort, String shard, IndexSchema schema) {
  final ScoreDoc[] nativeDocs = new ScoreDoc[documents.size()];
  int pos = 0;
  for (NamedList<Object> serializedDoc : documents) {
    Object docId = serializedDoc.get(ID);
    if (docId == null) {
      // Malformed entry; still emitted below with a null id.
      log.error("doc {} has null 'id'", serializedDoc);
    } else {
      docId = docId.toString();
    }
    Float score = (Float) serializedDoc.get("score");
    if (score == null) {
      score = Float.NaN;
    }
    Object[] sortValues = null;
    final Object rawSortValues = serializedDoc.get("sortValues");
    if (rawSortValues == null) {
      log.debug("doc {} has null 'sortValues'", serializedDoc);
    } else {
      sortValues = ((List) rawSortValues).toArray();
      for (int idx = 0; idx < sortValues.length; idx++) {
        final String sortFieldName = groupSort.getSort()[idx].getField();
        // A null field name means the sort criterion is not schema-backed (e.g. score).
        final SchemaField sortSchemaField = sortFieldName != null ? schema.getFieldOrNull(sortFieldName) : null;
        sortValues[idx] = ShardResultTransformerUtils.unmarshalSortValue(sortValues[idx], sortSchemaField);
      }
    }
    nativeDocs[pos++] = new ShardDoc(score, sortValues, docId, shard);
  }
  return nativeDocs;
}
Aggregations