Use of org.apache.solr.schema.FieldType in project lucene-solr by apache.
Class CursorMark, method parseSerializedTotem.
/**
* Parses the serialized version of a CursorMark from a client
* (which must conform to the existing sortSpec) and populates this object.
*
* @see #getSerializedTotem
*/
public void parseSerializedTotem(final String serialized) {
  if (CURSOR_MARK_START.equals(serialized)) {
    values = null;
    return;
  }
  final SortField[] sortFields = sortSpec.getSort().getSort();
  final List<SchemaField> schemaFields = sortSpec.getSchemaFields();
  List<Object> pieces = null;
  try {
    final byte[] rawData = Base64.base64ToByteArray(serialized);
    ByteArrayInputStream in = new ByteArrayInputStream(rawData);
    try {
      pieces = (List<Object>) new JavaBinCodec().unmarshal(in);
      boolean b = false;
      for (Object o : pieces) {
        if (o instanceof BytesRefBuilder || o instanceof BytesRef || o instanceof String) {
          b = true;
          break;
        }
      }
      if (b) {
        in.reset();
        pieces = (List<Object>) new JavaBinCodec().unmarshal(in);
      }
    } finally {
      in.close();
    }
  } catch (Exception ex) {
    throw new SolrException(ErrorCode.BAD_REQUEST, "Unable to parse '" + CURSOR_MARK_PARAM + "' after totem: " + "value must either be '" + CURSOR_MARK_START + "' or the " + "'" + CURSOR_MARK_NEXT + "' returned by a previous search: " + serialized, ex);
  }
  assert null != pieces : "pieces wasn't parsed?";
  if (sortFields.length != pieces.size()) {
    throw new SolrException(ErrorCode.BAD_REQUEST, CURSOR_MARK_PARAM + " does not work with current sort (wrong size): " + serialized);
  }
  this.values = new ArrayList<>(sortFields.length);
  final BytesRef tmpBytes = new BytesRef();
  for (int i = 0; i < sortFields.length; i++) {
    SortField curSort = sortFields[i];
    SchemaField curField = schemaFields.get(i);
    Object rawValue = pieces.get(i);
    if (null != curField) {
      FieldType curType = curField.getType();
      rawValue = curType.unmarshalSortValue(rawValue);
    }
    this.values.add(rawValue);
  }
}
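For reference, the wire format consumed above is simply a Base64 string wrapping a JavaBin-encoded List with one raw sort value per SortField. A minimal decoding sketch (not part of Solr; the class and method names are made up for illustration):

import java.io.ByteArrayInputStream;
import java.util.List;

import org.apache.solr.common.util.Base64;
import org.apache.solr.common.util.JavaBinCodec;

public class TotemDecodeSketch {
  // Mirrors the decode steps of parseSerializedTotem: Base64 -> JavaBin -> List of raw sort values.
  @SuppressWarnings("unchecked")
  public static List<Object> decode(String serializedTotem) throws Exception {
    byte[] rawData = Base64.base64ToByteArray(serializedTotem);
    try (ByteArrayInputStream in = new ByteArrayInputStream(rawData)) {
      return (List<Object>) new JavaBinCodec().unmarshal(in);
    }
  }
}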
Use of org.apache.solr.schema.FieldType in project lucene-solr by apache.
Class CursorMark, method getSerializedTotem.
/**
* Generates a Base64 encoded serialized representation of the sort values
* encapsulated by this object, for use in cursor requests.
*
* @see #parseSerializedTotem
*/
public String getSerializedTotem() {
  if (null == this.values) {
    return CURSOR_MARK_START;
  }
  final List<SchemaField> schemaFields = sortSpec.getSchemaFields();
  final ArrayList<Object> marshalledValues = new ArrayList<>(values.size() + 1);
  for (int i = 0; i < schemaFields.size(); i++) {
    SchemaField fld = schemaFields.get(i);
    Object safeValue = values.get(i);
    if (null != fld) {
      FieldType type = fld.getType();
      safeValue = type.marshalSortValue(safeValue);
    }
    marshalledValues.add(safeValue);
  }
  try {
    ByteArrayOutputStream out = new ByteArrayOutputStream(256);
    try {
      new JavaBinCodec().marshal(marshalledValues, out);
      byte[] rawData = out.toByteArray();
      return Base64.byteArrayToBase64(rawData, 0, rawData.length);
    } finally {
      out.close();
    }
  } catch (Exception ex) {
    throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to format search after totem", ex);
  }
}
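From the client side, the totem produced by getSerializedTotem() is what comes back as nextCursorMark and is echoed on the next request. A minimal SolrJ paging sketch, assuming a core at http://localhost:8983/solr/collection1 and a uniqueKey field named id (both illustrative):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.CursorMarkParams;

public class CursorPagingSketch {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
      SolrQuery q = new SolrQuery("*:*");
      q.setRows(100);
      q.setSort("id", SolrQuery.ORDER.asc);                 // cursors require a sort ending on the uniqueKey
      String cursor = CursorMarkParams.CURSOR_MARK_START;   // "*", i.e. getSerializedTotem() when values is null
      while (true) {
        q.set(CursorMarkParams.CURSOR_MARK_PARAM, cursor);
        QueryResponse rsp = client.query(q);
        String next = rsp.getNextCursorMark();              // the serialized totem from getSerializedTotem()
        // ... process rsp.getResults() ...
        if (cursor.equals(next)) break;                     // same totem back => no more results
        cursor = next;
      }
    }
  }
}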
Use of org.apache.solr.schema.FieldType in project lucene-solr by apache.
Class PayloadScoreQParserPlugin, method createParser.
@Override
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
  return new QParser(qstr, localParams, params, req) {
    @Override
    public Query parse() throws SyntaxError {
      String field = localParams.get(QueryParsing.F);
      String value = localParams.get(QueryParsing.V);
      String func = localParams.get("func");
      boolean includeSpanScore = localParams.getBool("includeSpanScore", false);
      if (field == null) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'f' not specified");
      }
      if (value == null) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "query string missing");
      }
      FieldType ft = req.getCore().getLatestSchema().getFieldType(field);
      Analyzer analyzer = ft.getQueryAnalyzer();
      SpanQuery query = null;
      try {
        query = PayloadUtils.createSpanQuery(field, value, analyzer);
      } catch (IOException e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
      }
      if (query == null) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "SpanQuery is null");
      }
      // note: this query(/parser) does not support func=first; 'first' is a payload() value source feature only
      PayloadFunction payloadFunction = PayloadUtils.getPayloadFunction(func);
      if (payloadFunction == null)
        throw new SyntaxError("Unknown payload function: " + func);
      return new PayloadScoreQuery(query, payloadFunction, includeSpanScore);
    }
  };
}
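The local params read above map directly onto a {!payload_score} request. A hedged SolrJ sketch, assuming a core at http://localhost:8983/solr/collection1 and a field named payloads whose type indexes payloads (e.g. a delimited-payload field); the names are illustrative:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class PayloadScoreQuerySketch {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
      // f -> 'field', v -> 'value', func -> PayloadUtils.getPayloadFunction(func) in the parser above
      SolrQuery q = new SolrQuery("{!payload_score f=payloads v='A B' func=max includeSpanScore=false}");
      System.out.println(client.query(q).getResults().getNumFound());
    }
  }
}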
Use of org.apache.solr.schema.FieldType in project lucene-solr by apache.
Class QueryParsing, method toString.
/**
* @see #toString(Query,IndexSchema)
*/
public static void toString(Query query, IndexSchema schema, Appendable out, int flags) throws IOException {
  // clear the boosted / is clause flags for recursion
  int subflag = flags & ~(FLAG_BOOSTED | FLAG_IS_CLAUSE);
  if (query instanceof TermQuery) {
    TermQuery q = (TermQuery) query;
    Term t = q.getTerm();
    FieldType ft = writeFieldName(t.field(), schema, out, flags);
    writeFieldVal(t.bytes(), ft, out, flags);
  } else if (query instanceof TermRangeQuery) {
    TermRangeQuery q = (TermRangeQuery) query;
    String fname = q.getField();
    FieldType ft = writeFieldName(fname, schema, out, flags);
    out.append(q.includesLower() ? '[' : '{');
    BytesRef lt = q.getLowerTerm();
    BytesRef ut = q.getUpperTerm();
    if (lt == null) {
      out.append('*');
    } else {
      writeFieldVal(lt, ft, out, flags);
    }
    out.append(" TO ");
    if (ut == null) {
      out.append('*');
    } else {
      writeFieldVal(ut, ft, out, flags);
    }
    out.append(q.includesUpper() ? ']' : '}');
  } else if (query instanceof LegacyNumericRangeQuery) {
    LegacyNumericRangeQuery q = (LegacyNumericRangeQuery) query;
    String fname = q.getField();
    FieldType ft = writeFieldName(fname, schema, out, flags);
    out.append(q.includesMin() ? '[' : '{');
    Number lt = q.getMin();
    Number ut = q.getMax();
    if (lt == null) {
      out.append('*');
    } else {
      out.append(lt.toString());
    }
    out.append(" TO ");
    if (ut == null) {
      out.append('*');
    } else {
      out.append(ut.toString());
    }
    out.append(q.includesMax() ? ']' : '}');
  } else if (query instanceof BooleanQuery) {
    BooleanQuery q = (BooleanQuery) query;
    boolean needParens = false;
    if (q.getMinimumNumberShouldMatch() != 0 || (flags & (FLAG_IS_CLAUSE | FLAG_BOOSTED)) != 0) {
      needParens = true;
    }
    if (needParens) {
      out.append('(');
    }
    boolean first = true;
    for (BooleanClause c : q.clauses()) {
      if (!first) {
        out.append(' ');
      } else {
        first = false;
      }
      if (c.isProhibited()) {
        out.append('-');
      } else if (c.isRequired()) {
        out.append('+');
      }
      Query subQuery = c.getQuery();
      toString(subQuery, schema, out, subflag | FLAG_IS_CLAUSE);
    }
    if (needParens) {
      out.append(')');
    }
    if (q.getMinimumNumberShouldMatch() > 0) {
      out.append('~');
      out.append(Integer.toString(q.getMinimumNumberShouldMatch()));
    }
  } else if (query instanceof PrefixQuery) {
    PrefixQuery q = (PrefixQuery) query;
    Term prefix = q.getPrefix();
    FieldType ft = writeFieldName(prefix.field(), schema, out, flags);
    out.append(prefix.text());
    out.append('*');
  } else if (query instanceof WildcardQuery) {
    out.append(query.toString());
  } else if (query instanceof FuzzyQuery) {
    out.append(query.toString());
  } else if (query instanceof ConstantScoreQuery) {
    out.append(query.toString());
  } else if (query instanceof WrappedQuery) {
    WrappedQuery q = (WrappedQuery) query;
    out.append(q.getOptions());
    toString(q.getWrappedQuery(), schema, out, subflag);
  } else if (query instanceof BoostQuery) {
    BoostQuery q = (BoostQuery) query;
    toString(q.getQuery(), schema, out, subflag | FLAG_BOOSTED);
    out.append("^");
    out.append(Float.toString(q.getBoost()));
  } else {
    out.append(query.getClass().getSimpleName() + '(' + query.toString() + ')');
  }
}
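A small sketch of calling the convenience overload referenced by the @see tag, which builds up a string and delegates to the method above. The schema would normally come from req.getSchema(); the field names title and body are illustrative:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.search.QueryParsing;

public class QueryToStringSketch {
  // Renders something like "+title:solr body:cursor", using the schema's FieldTypes to format the values.
  static String describe(IndexSchema schema) {
    BooleanQuery bq = new BooleanQuery.Builder()
        .add(new TermQuery(new Term("title", "solr")), BooleanClause.Occur.MUST)
        .add(new TermQuery(new Term("body", "cursor")), BooleanClause.Occur.SHOULD)
        .build();
    return QueryParsing.toString(bq, schema);
  }
}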
Use of org.apache.solr.schema.FieldType in project Solbase by Photobucket.
Class SolbaseQueryComponent, method process.
/**
* Actually run the query
*/
@Override
public void process(ResponseBuilder rb) throws IOException {
  SolrQueryRequest req = rb.req;
  SolrQueryResponse rsp = rb.rsp;
  SolrParams params = req.getParams();
  if (!params.getBool(COMPONENT_NAME, true)) {
    return;
  }
  SolrIndexSearcher searcher = req.getSearcher();
  if (rb.getQueryCommand().getOffset() < 0) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'start' parameter cannot be negative");
  }
  // -1 as flag if not set.
  long timeAllowed = (long) params.getInt(CommonParams.TIME_ALLOWED, -1);
  // Optional: This could also be implemented by the top-level searcher sending a filter that
  // lists the ids... that would be transparent to the request handler, but would be more
  // expensive (and would preserve score too if desired).
  String ids = params.get(ShardParams.IDS);
  if (ids != null) {
    List<String> idArr = StrUtils.splitSmart(ids, ",", true);
    int[] luceneIds = new int[idArr.size()];
    int docs = 0;
    for (int i = 0; i < idArr.size(); i++) {
      luceneIds[docs++] = Integer.parseInt(idArr.get(i));
    }
    // we are indexing docId as solr uniq_id. by doing this, we are bound to INTEGER.MAX_VALUE ~= 2 billion
    // docs is number of docs
    DocListAndSet res = new DocListAndSet();
    res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0, null);
    if (rb.isNeedDocSet()) {
      List<Query> queries = new ArrayList<Query>();
      queries.add(rb.getQuery());
      List<Query> filters = rb.getFilters();
      if (filters != null)
        queries.addAll(filters);
      res.docSet = searcher.getDocSet(queries);
    }
    rb.setResults(res);
    rsp.add("response", rb.getResults().docList);
    return;
  }
  SolrIndexSearcher.QueryCommand cmd = rb.getQueryCommand();
  cmd.setTimeAllowed(timeAllowed);
  SolrIndexSearcher.QueryResult result = new SolrIndexSearcher.QueryResult();
  searcher.search(result, cmd);
  rb.setResult(result);
  rsp.add("response", rb.getResults().docList);
  rsp.getToLog().add("hits", rb.getResults().docList.matches());
  // The query cache doesn't currently store sort field values, and SolrIndexSearcher doesn't
  // currently have an option to return sort field values. Because of this, we take the
  // documents given and re-derive the sort values.
  boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES, false);
  if (fsv) {
    Sort sort = rb.getSortSpec().getSort();
    SortField[] sortFields = sort == null ? new SortField[] { SortField.FIELD_SCORE } : sort.getSort();
    NamedList sortVals = new NamedList(); // order is important for the sort fields
    Field field = new Field("dummy", "", Field.Store.YES, Field.Index.NO); // a dummy Field
    SolrIndexReader reader = searcher.getReader();
    SolrIndexReader[] readers = reader.getLeafReaders();
    SolrIndexReader subReader = reader;
    if (readers.length == 1) {
      // if there is a single segment, use that subReader and avoid looking up each time
      subReader = readers[0];
      readers = null;
    }
    int[] offsets = reader.getLeafOffsets();
    // TODO: need to fetch sort value from collector instead of re-derive lookup from id
    for (SortField sortField : sortFields) {
      int type = sortField.getType();
      if (type == SortField.SCORE || type == SortField.DOC)
        continue;
      FieldComparator comparator = null;
      FieldComparator[] comparators = (readers == null) ? null : new FieldComparator[readers.length];
      String fieldname = sortField.getField();
      FieldType ft = fieldname == null ? null : req.getSchema().getFieldTypeNoEx(fieldname);
      DocSlice docList = (DocSlice) rb.getResults().docList;
      ArrayList<Object> vals = new ArrayList<Object>(docList.size());
      for (int i = docList.offset; i < docList.len; i++) {
        vals.add(new Integer(docList.sorts[i][((EmbeddedSortField) sortField).getFieldNumber() - 1]));
      }
      sortVals.add(fieldname, vals);
    }
    rsp.add("sort_values", sortVals);
  }
  // pre-fetch returned documents
  if (!req.getParams().getBool(ShardParams.IS_SHARD, false) && rb.getResults().docList != null && rb.getResults().docList.size() <= 50) {
    // TODO: this may depend on the highlighter component (or other components?)
    SolrPluginUtils.optimizePreFetchDocs(rb.getResults().docList, rb.getQuery(), req, rsp);
  }
}
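The ids branch above boils down to turning a comma-separated list of Lucene doc ids into a DocSlice. A standalone sketch of that conversion against stock Solr's six-argument DocSlice constructor (the Solbase fork above uses an extended constructor that also carries per-document sort values); class and method names are illustrative:

import java.util.List;

import org.apache.solr.common.util.StrUtils;
import org.apache.solr.search.DocList;
import org.apache.solr.search.DocSlice;

public class IdsToDocListSketch {
  // "101,102,103" -> DocSlice of those doc ids; viable because Solbase indexes the docId as the unique id.
  static DocList fromIdsParam(String ids) {
    List<String> idArr = StrUtils.splitSmart(ids, ",", true);
    int[] luceneIds = new int[idArr.size()];
    int docs = 0;
    for (String id : idArr) {
      luceneIds[docs++] = Integer.parseInt(id);
    }
    // offset 0, length docs, no scores, matches == docs, maxScore 0
    return new DocSlice(0, docs, luceneIds, null, docs, 0);
  }
}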