Usage of org.apache.solr.schema.FieldType in the Apache lucene-solr project — class FacetField, method createFacetProcessor.
/**
 * Selects the FacetProcessor implementation for this facet request based on the
 * field's schema properties (multi-valued, numeric type, indexed, docValues) and
 * the requested options (method, prefix, mincount, sort). Branch order matters:
 * refinement short-circuits everything, then numeric constraints are validated,
 * then the method/field combination picks a processor.
 */
@Override
public FacetProcessor createFacetProcessor(FacetContext fcontext) {
SchemaField sf = fcontext.searcher.getSchema().getField(field);
FieldType ft = sf.getType();
// A field is "multi token" if the schema marks it multiValued OR its type
// produces multiple cached tokens per document (e.g. analyzed text).
boolean multiToken = sf.multiValued() || ft.multiValuedFieldCache();
if (fcontext.facetInfo != null) {
// refinement... we will end up either skipping the entire facet, or calculating only specific facet buckets
return new FacetFieldProcessorByArrayDV(fcontext, this, sf);
}
NumberType ntype = ft.getNumberType();
// ensure we can support the requested options for numeric faceting:
if (ntype != null) {
if (prefix != null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Doesn't make sense to set facet prefix on a numeric field");
}
if (mincount == 0) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Numeric fields do not support facet mincount=0; try indexing as terms");
// TODO if indexed=true then we could add support
}
}
// TODO auto-pick ENUM/STREAM SOLR-9351 when index asc and DocSet cardinality is *not* much smaller than term cardinality
if (method == FacetMethod.ENUM) {
// at the moment these two are the same
method = FacetMethod.STREAM;
}
// Streaming only applies when iterating the term index in ascending order.
if (method == FacetMethod.STREAM && sf.indexed() && "index".equals(sortVariable) && sortDirection == SortDirection.asc) {
return new FacetFieldProcessorByEnumTermsStream(fcontext, this, sf);
}
if (!multiToken) {
if (mincount > 0 && prefix == null && (ntype != null || method == FacetMethod.DVHASH)) {
// or if we don't know cardinality but DocSet size is very small
return new FacetFieldProcessorByHashDV(fcontext, this, sf);
} else if (ntype == null) {
// single valued string...
return new FacetFieldProcessorByArrayDV(fcontext, this, sf);
} else {
// single-valued numeric with mincount==0 or a prefix was rejected above,
// so reaching here indicates an unexpected combination.
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Couldn't pick facet algorithm for field " + sf);
}
}
if (sf.hasDocValues() || method == FacetMethod.DV) {
// single and multi-valued string docValues
return new FacetFieldProcessorByArrayDV(fcontext, this, sf);
}
// Top-level multi-valued field cache (UIF)
return new FacetFieldProcessorByArrayUIF(fcontext, this, sf);
}
Usage of org.apache.solr.schema.FieldType in the Apache lucene-solr project — class FacetFieldProcessor, method refineBucket.
/**
 * Builds a single refined facet bucket for the given bucket value: constructs a
 * query matching documents with that value in this processor's field, then
 * delegates to fillBucket to compute counts/stats (or skip, during refinement).
 *
 * @param bucketVal the bucket's readable value (rendered via toString)
 * @param skip      if true, the bucket contents are skipped rather than computed
 * @param facetInfo refinement instructions forwarded to fillBucket
 * @return the bucket map, starting with a "val" entry for bucketVal
 */
private SimpleOrderedMap<Object> refineBucket(Object bucketVal, boolean skip, Map<String, Object> facetInfo) throws IOException {
SimpleOrderedMap<Object> bucket = new SimpleOrderedMap<>();
FieldType ft = sf.getType();
bucket.add("val", bucketVal);
// String internal = ft.toInternal( tobj.toString() ); // TODO - we need a better way to get from object to query...
// NOTE(review): bucketVal.toString() assumes the readable form round-trips
// through getFieldQuery — the TODO above flags this as a known weakness.
Query domainQ = ft.getFieldQuery(null, sf, bucketVal.toString());
fillBucket(bucket, domainQ, null, skip, facetInfo);
return bucket;
}
Usage of org.apache.solr.schema.FieldType in the Apache lucene-solr project — class FacetRangeProcessor, method getNumericCalc.
/**
 * Returns the Calc implementation matching the numeric type of the given field.
 * Only Trie and Point numeric fields are supported.
 *
 * @param sf the schema field to build a calculator for
 * @return a Calc for the field's NumberType
 * @throws SolrException (BAD_REQUEST) if the field is neither a Trie nor a Point
 *         numeric field, or its NumberType is unrecognized
 */
public static Calc getNumericCalc(SchemaField sf) {
  final FieldType ft = sf.getType();
  // Trie and Point fields expose identical NumberType values, so one switch
  // serves both (the original code duplicated the switch per field class —
  // its own TODO noted "this is the same in Trie and Point now").
  if (!(ft instanceof TrieField) && !(ft instanceof PointField)) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Expected numeric field type :" + sf);
  }
  switch (ft.getNumberType()) {
    case FLOAT:
      return new FloatCalc(sf);
    case DOUBLE:
      return new DoubleCalc(sf);
    case INTEGER:
      return new IntCalc(sf);
    case LONG:
      return new LongCalc(sf);
    case DATE:
      return new DateCalc(sf, null);
    default:
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Expected numeric field type :" + sf);
  }
}
Usage of org.apache.solr.schema.FieldType in the Apache lucene-solr project — class TermsQParserPlugin, method createParser.
/**
 * Creates a QParser that splits the query string on a configurable separator and
 * builds a set-membership query over the resulting terms.
 *
 * Fix: {@code ft} comes from {@code getFieldTypeNoEx} and may be null (the later
 * {@code ft != null} check proves the code expects that), but the original called
 * {@code ft.isPointField()} unguarded, throwing NPE for an unknown field. The
 * point-field branch now runs only when the field type is known.
 */
@Override
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
return new QParser(qstr, localParams, params, req) {
@Override
public Query parse() throws SyntaxError {
String fname = localParams.get(QueryParsing.F);
// May be null for a field not declared in the schema; handled below.
FieldType ft = req.getSchema().getFieldTypeNoEx(fname);
String separator = localParams.get(SEPARATOR, ",");
//never null
String qstr = localParams.get(QueryParsing.V);
Method method = Method.valueOf(localParams.get(METHOD, Method.termsFilter.name()));
//TODO pick the default method based on various heuristics from benchmarks
//if space then split on all whitespace & trim, otherwise strictly interpret
final boolean sepIsSpace = separator.equals(" ");
if (sepIsSpace)
qstr = qstr.trim();
if (qstr.length() == 0)
return new MatchNoDocsQuery();
// -1 limit keeps trailing empty strings for non-space separators.
final String[] splitVals = sepIsSpace ? qstr.split("\\s+") : qstr.split(Pattern.quote(separator), -1);
assert splitVals.length > 0;
if (ft != null && ft.isPointField()) {
if (localParams.get(METHOD) != null) {
throw new IllegalArgumentException(String.format(Locale.ROOT, "Method '%s' not supported in TermsQParser when using PointFields", localParams.get(METHOD)));
}
return ((PointField) ft).getSetQuery(this, req.getSchema().getField(fname), Arrays.asList(splitVals));
}
BytesRef[] bytesRefs = new BytesRef[splitVals.length];
BytesRefBuilder term = new BytesRefBuilder();
for (int i = 0; i < splitVals.length; i++) {
String stringVal = splitVals[i];
//logic same as TermQParserPlugin
if (ft != null) {
// Convert the human-readable form to the indexed byte representation.
ft.readableToIndexed(stringVal, term);
} else {
term.copyChars(stringVal);
}
bytesRefs[i] = term.toBytesRef();
}
return new SolrConstantScoreQuery(method.makeFilter(fname, bytesRefs));
}
};
}
Usage of org.apache.solr.schema.FieldType in the Apache lucene-solr project — class TestValueSource, method parseTerm.
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
/**
 * Parses a (field, value) argument pair from the function parser and resolves
 * the value's indexed byte representation. For TextField types the query-time
 * analysis chain is applied; for all other (or unknown) types the readable
 * value is converted directly.
 *
 * @param fp the function query parser positioned at the field argument
 * @return a TInfo populated with field, indexedField, val and indexedBytes
 * @throws SyntaxError if argument parsing fails
 */
private static TInfo parseTerm(FunctionQParser fp) throws SyntaxError {
  final TInfo info = new TInfo();
  info.indexedField = info.field = fp.parseArg();
  info.val = fp.parseArg();
  info.indexedBytes = new BytesRefBuilder();

  // Unknown fields fall back to plain-string semantics.
  FieldType fieldType = fp.getReq().getSchema().getFieldTypeNoEx(info.field);
  if (fieldType == null) {
    fieldType = new StrField();
  }

  if (!(fieldType instanceof TextField)) {
    fieldType.readableToIndexed(info.val, info.indexedBytes);
    return info;
  }

  // TextField: run analysis by building a field query; a resulting TermQuery
  // yields both the (possibly rewritten) field name and the analyzed term text.
  String analyzed = info.val;
  final Query q = fieldType.getFieldQuery(fp, fp.getReq().getSchema().getFieldOrNull(info.field), info.val);
  if (q instanceof TermQuery) {
    final Term term = ((TermQuery) q).getTerm();
    info.indexedField = term.field();
    analyzed = term.text();
  }
  info.indexedBytes.copyChars(analyzed);
  return info;
}
Aggregations