Use of org.apache.solr.common.util.JavaBinCodec in project lucene-solr by apache: the write method of the ExportWriter class.
public void write(OutputStream os) throws IOException {
  QueryResponseWriter rw = req.getCore().getResponseWriters().get(wt);
  if (rw instanceof BinaryResponseWriter) {
    // TODO: add support for other writers after testing
    writer = new JavaBinCodec(os, null);
  } else {
    respWriter = new OutputStreamWriter(os, StandardCharsets.UTF_8);
    writer = JSONResponseWriter.getPushWriter(respWriter, req, res);
  }
  Exception exception = res.getException();
  if (exception != null) {
    if (!(exception instanceof IgnoreException)) {
      writeException(exception, writer, false);
    }
    return;
  }
  SolrRequestInfo info = SolrRequestInfo.getRequestInfo();
  SortSpec sortSpec = info.getResponseBuilder().getSortSpec();
  if (sortSpec == null) {
    writeException((new IOException(new SyntaxError("No sort criteria was provided."))), writer, true);
    return;
  }
  SolrIndexSearcher searcher = req.getSearcher();
  Sort sort = searcher.weightSort(sortSpec.getSort());
  if (sort == null) {
    writeException((new IOException(new SyntaxError("No sort criteria was provided."))), writer, true);
    return;
  }
  if (sort != null && sort.needsScores()) {
    writeException((new IOException(new SyntaxError("Scoring is not currently supported with xsort."))), writer, true);
    return;
  }
  // This came to light in the very artificial case of indexing a single doc to Cloud.
  if (req.getContext().get("totalHits") != null) {
    totalHits = ((Integer) req.getContext().get("totalHits")).intValue();
    sets = (FixedBitSet[]) req.getContext().get("export");
    if (sets == null) {
      writeException((new IOException(new SyntaxError("xport RankQuery is required for xsort: rq={!xport}"))), writer, true);
      return;
    }
  }
  SolrParams params = req.getParams();
  String fl = params.get("fl");
  String[] fields = null;
  if (fl == null) {
    writeException((new IOException(new SyntaxError("export field list (fl) must be specified."))), writer, true);
    return;
  } else {
    fields = fl.split(",");
    for (int i = 0; i < fields.length; i++) {
      fields[i] = fields[i].trim();
      if (fields[i].equals("score")) {
        writeException((new IOException(new SyntaxError("Scoring is not currently supported with xsort."))), writer, true);
        return;
      }
    }
  }
  try {
    fieldWriters = getFieldWriters(fields, req.getSearcher());
  } catch (Exception e) {
    writeException(e, writer, true);
    return;
  }
  writer.writeMap(m -> {
    m.put("responseHeader", singletonMap("status", 0));
    m.put("response", (MapWriter) mw -> {
      mw.put("numFound", totalHits);
      mw.put("docs", (IteratorWriter) iw -> writeDocs(req, iw, sort));
    });
  });
}
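For context, here is a minimal, self-contained sketch (not from lucene-solr) of the javabin branch above: serializing a simple response structure to an OutputStream with JavaBinCodec. The class name, output file name, and NamedList contents are illustrative assumptions, and the single marshal call stands in for the streaming writeMap/IteratorWriter machinery used by ExportWriter.

// Hedged sketch: write a simple response structure as javabin to an OutputStream.
// Everything except the JavaBinCodec and NamedList APIs is made up for illustration.
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.util.Collections;
import org.apache.solr.common.util.JavaBinCodec;
import org.apache.solr.common.util.NamedList;

public class JavaBinStreamSketch {
  public static void main(String[] args) throws Exception {
    NamedList<Object> response = new NamedList<>();
    response.add("responseHeader", Collections.singletonMap("status", 0));
    response.add("numFound", 0L);
    try (OutputStream os = new FileOutputStream("response.javabin")) {
      // Serialize the whole structure in one call; ExportWriter instead streams
      // documents incrementally through the codec.
      new JavaBinCodec().marshal(response, os);
    }
  }
}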
Use of org.apache.solr.common.util.JavaBinCodec in project lucene-solr by apache: the parseSerializedTotem method of the CursorMark class.
/**
* Parses the serialized version of a CursorMark from a client
* (which must conform to the existing sortSpec) and populates this object.
*
* @see #getSerializedTotem
*/
public void parseSerializedTotem(final String serialized) {
  if (CURSOR_MARK_START.equals(serialized)) {
    values = null;
    return;
  }
  final SortField[] sortFields = sortSpec.getSort().getSort();
  final List<SchemaField> schemaFields = sortSpec.getSchemaFields();
  List<Object> pieces = null;
  try {
    final byte[] rawData = Base64.base64ToByteArray(serialized);
    ByteArrayInputStream in = new ByteArrayInputStream(rawData);
    try {
      pieces = (List<Object>) new JavaBinCodec().unmarshal(in);
      boolean b = false;
      for (Object o : pieces) {
        if (o instanceof BytesRefBuilder || o instanceof BytesRef || o instanceof String) {
          b = true;
          break;
        }
      }
      if (b) {
        in.reset();
        pieces = (List<Object>) new JavaBinCodec().unmarshal(in);
      }
    } finally {
      in.close();
    }
  } catch (Exception ex) {
    throw new SolrException(ErrorCode.BAD_REQUEST, "Unable to parse '" + CURSOR_MARK_PARAM + "' after totem: " + "value must either be '" + CURSOR_MARK_START + "' or the " + "'" + CURSOR_MARK_NEXT + "' returned by a previous search: " + serialized, ex);
  }
  assert null != pieces : "pieces wasn't parsed?";
  if (sortFields.length != pieces.size()) {
    throw new SolrException(ErrorCode.BAD_REQUEST, CURSOR_MARK_PARAM + " does not work with current sort (wrong size): " + serialized);
  }
  this.values = new ArrayList<>(sortFields.length);
  final BytesRef tmpBytes = new BytesRef();
  for (int i = 0; i < sortFields.length; i++) {
    SortField curSort = sortFields[i];
    SchemaField curField = schemaFields.get(i);
    Object rawValue = pieces.get(i);
    if (null != curField) {
      FieldType curType = curField.getType();
      rawValue = curType.unmarshalSortValue(rawValue);
    }
    this.values.add(rawValue);
  }
}
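The decode path boils down to Base64-decoding the cursor totem and handing the bytes to JavaBinCodec.unmarshal. Below is a stripped-down sketch of just that step, assuming input produced by the encode path shown in getSerializedTotem further down; the class and method names are made up for illustration.

// Hedged sketch: Base64 text -> bytes -> JavaBinCodec.unmarshal.
// Omits the per-field unmarshalSortValue conversion CursorMark performs afterwards.
import java.io.ByteArrayInputStream;
import java.util.List;
import org.apache.solr.common.util.Base64;
import org.apache.solr.common.util.JavaBinCodec;

public class TotemDecodeSketch {
  @SuppressWarnings("unchecked")
  static List<Object> decode(String serialized) throws Exception {
    byte[] rawData = Base64.base64ToByteArray(serialized);
    try (ByteArrayInputStream in = new ByteArrayInputStream(rawData)) {
      // The totem was marshalled as a List of sort values, so the cast is safe
      // for well-formed input.
      return (List<Object>) new JavaBinCodec().unmarshal(in);
    }
  }
}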
Use of org.apache.solr.common.util.JavaBinCodec in project lucene-solr by apache: the getSerializedTotem method of the CursorMark class.
/**
* Generates a Base64 encoded serialized representation of the sort values
* encapsulated by this object, for use in cursor requests.
*
* @see #parseSerializedTotem
*/
public String getSerializedTotem() {
  if (null == this.values) {
    return CURSOR_MARK_START;
  }
  final List<SchemaField> schemaFields = sortSpec.getSchemaFields();
  final ArrayList<Object> marshalledValues = new ArrayList<>(values.size() + 1);
  for (int i = 0; i < schemaFields.size(); i++) {
    SchemaField fld = schemaFields.get(i);
    Object safeValue = values.get(i);
    if (null != fld) {
      FieldType type = fld.getType();
      safeValue = type.marshalSortValue(safeValue);
    }
    marshalledValues.add(safeValue);
  }
  try {
    ByteArrayOutputStream out = new ByteArrayOutputStream(256);
    try {
      new JavaBinCodec().marshal(marshalledValues, out);
      byte[] rawData = out.toByteArray();
      return Base64.byteArrayToBase64(rawData, 0, rawData.length);
    } finally {
      out.close();
    }
  } catch (Exception ex) {
    throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to format search after totem", ex);
  }
}
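Conversely, the encode path is marshal-then-Base64. A minimal sketch of that half of the round trip, again omitting the per-field marshalSortValue conversion; the class name, method name, and example values are illustrative.

// Hedged sketch: marshal a list of sort values with JavaBinCodec, then Base64-encode
// the resulting bytes, mirroring getSerializedTotem.
import java.io.ByteArrayOutputStream;
import java.util.Arrays;
import java.util.List;
import org.apache.solr.common.util.Base64;
import org.apache.solr.common.util.JavaBinCodec;

public class TotemEncodeSketch {
  static String encode(List<Object> sortValues) throws Exception {
    try (ByteArrayOutputStream out = new ByteArrayOutputStream(256)) {
      new JavaBinCodec().marshal(sortValues, out);
      byte[] rawData = out.toByteArray();
      return Base64.byteArrayToBase64(rawData, 0, rawData.length);
    }
  }

  public static void main(String[] args) throws Exception {
    // Example sort values; a real totem would come from the documents of the last page.
    System.out.println(encode(Arrays.asList("apache", 42L)));
  }
}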
Use of org.apache.solr.common.util.JavaBinCodec in project lucene-solr by apache: the getParsedResponse method of the BinaryResponseWriter class.
/**
* TODO -- there may be a way to do this without marshal at all...
*
* @return a response object equivalent to what you get from the XML/JSON/javabin parser. Documents become
* SolrDocuments, DocList becomes SolrDocumentList etc.
*
* @since solr 1.4
*/
@SuppressWarnings("unchecked")
public static NamedList<Object> getParsedResponse(SolrQueryRequest req, SolrQueryResponse rsp) {
try {
Resolver resolver = new Resolver(req, rsp.getReturnFields());
ByteArrayOutputStream out = new ByteArrayOutputStream();
new JavaBinCodec(resolver).setWritableDocFields(resolver).marshal(rsp.getValues(), out);
InputStream in = out.toInputStream();
return (NamedList<Object>) new JavaBinCodec(resolver).unmarshal(in);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
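The same marshal-then-unmarshal round trip can be exercised without the Solr-internal Resolver, which is what turns index documents into SolrDocument instances here. A hedged sketch using a plain SolrDocumentList; the class name and field values are illustrative.

// Hedged sketch: round-trip a SolrDocumentList through JavaBinCodec, analogous to what
// getParsedResponse does with the full response NamedList (Resolver omitted).
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.util.JavaBinCodec;

public class ParsedResponseSketch {
  public static void main(String[] args) throws Exception {
    SolrDocument doc = new SolrDocument();
    doc.setField("id", "1");
    SolrDocumentList docs = new SolrDocumentList();
    docs.add(doc);
    docs.setNumFound(1);

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    new JavaBinCodec().marshal(docs, out);
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    SolrDocumentList parsed = (SolrDocumentList) new JavaBinCodec().unmarshal(in);
    System.out.println(parsed.getNumFound()); // prints 1
  }
}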
Use of org.apache.solr.common.util.JavaBinCodec in project lucene-solr by apache: the createJavaBinCodec method of the EmbeddedSolrServer class.
private JavaBinCodec createJavaBinCodec(final StreamingResponseCallback callback, final BinaryResponseWriter.Resolver resolver) {
  return new JavaBinCodec(resolver) {

    @Override
    public void writeSolrDocument(SolrDocument doc) {
      callback.streamSolrDocument(doc);
      //super.writeSolrDocument( doc, fields );
    }

    @Override
    public void writeSolrDocumentList(SolrDocumentList docs) throws IOException {
      if (docs.size() > 0) {
        SolrDocumentList tmp = new SolrDocumentList();
        tmp.setMaxScore(docs.getMaxScore());
        tmp.setNumFound(docs.getNumFound());
        tmp.setStart(docs.getStart());
        docs = tmp;
      }
      callback.streamDocListInfo(docs.getNumFound(), docs.getStart(), docs.getMaxScore());
      super.writeSolrDocumentList(docs);
    }
  };
}
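The technique here is to subclass JavaBinCodec and override writeSolrDocument so that each document is diverted to a streaming callback during marshalling instead of being serialized into the output. A simplified sketch of the same idea, using java.util.function.Consumer as a stand-in for StreamingResponseCallback and no Resolver; class and method names are illustrative.

// Hedged sketch: intercept each SolrDocument while a SolrDocumentList is marshalled.
import java.io.ByteArrayOutputStream;
import java.util.function.Consumer;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.util.JavaBinCodec;

public class StreamingCodecSketch {
  static JavaBinCodec interceptingCodec(Consumer<SolrDocument> onDoc) {
    return new JavaBinCodec() {
      @Override
      public void writeSolrDocument(SolrDocument doc) {
        onDoc.accept(doc); // divert the document instead of serializing it
      }
    };
  }

  public static void main(String[] args) throws Exception {
    SolrDocumentList docs = new SolrDocumentList();
    SolrDocument d = new SolrDocument();
    d.setField("id", "1");
    docs.add(d);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // Marshalling the list triggers writeSolrDocument for each document, so the
    // consumer sees every document as it streams by.
    interceptingCodec(doc -> System.out.println(doc.getFieldValue("id"))).marshal(docs, out);
  }
}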