Use of org.apache.solr.common.SolrDocumentList in project lucene-solr by apache.
From class SmileWriterTest, method constructSolrDocList:
public static SolrDocumentList constructSolrDocList(SolrQueryResponse response) {
  SolrDocumentList l = new SolrDocumentList();
  for (int i = 0; i < 10; i++) {
    l.add(sampleDoc(random(), i));
  }
  response.getValues().add("results", l);
  return l;
}
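The helper above relies on sampleDoc(random(), i), a test utility local to SmileWriterTest. A minimal self-contained sketch of the same idea, building the documents by hand with plain SolrJ types (the class and field names below are illustrative, not from the test):

import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.response.SolrQueryResponse;

public class SolrDocListSketch {
  public static SolrDocumentList buildDocList(SolrQueryResponse response) {
    SolrDocumentList list = new SolrDocumentList();
    for (int i = 0; i < 10; i++) {
      SolrDocument doc = new SolrDocument();
      doc.addField("id", String.valueOf(i));     // illustrative field names
      doc.addField("title_s", "doc-" + i);
      list.add(doc);
    }
    list.setNumFound(list.size());               // header metadata carried by SolrDocumentList
    response.getValues().add("results", list);   // attach under the same key the test helper uses
    return list;
  }
}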
Use of org.apache.solr.common.SolrDocumentList in project lucene-solr by apache.
From class TestSubQueryTransformerDistrib, method test:
@SuppressWarnings("serial")
@Test
public void test() throws SolrServerException, IOException {
int peopleMultiplier = atLeast(1);
int deptMultiplier = atLeast(1);
createIndex(people, peopleMultiplier, depts, deptMultiplier);
Random random1 = random();
{
final QueryRequest qr = new QueryRequest(params(new String[] { "q", "name_s:dave", "indent", "true", "fl", "*,depts:[subquery " + ((random1.nextBoolean() ? "" : "separator=,")) + "]", "rows", "" + peopleMultiplier, "depts.q", "{!terms f=dept_id_s v=$row.dept_ss_dv " + ((random1.nextBoolean() ? "" : "separator=,")) + "}", "depts.fl", "text_t" + (differentUniqueId ? ",id:notid" : ""), "depts.indent", "true", "depts.collection", "departments", differentUniqueId ? "depts.distrib.singlePass" : "notnecessary", "true", "depts.rows", "" + (deptMultiplier * 2), "depts.logParamsList", "q,fl,rows,row.dept_ss_dv", random().nextBoolean() ? "depts.wt" : "whatever", anyWt(), random().nextBoolean() ? "wt" : "whatever", anyWt() }));
final QueryResponse rsp = new QueryResponse();
rsp.setResponse(cluster.getSolrClient().request(qr, people));
final SolrDocumentList hits = rsp.getResults();
assertEquals(peopleMultiplier, hits.getNumFound());
int engineerCount = 0;
int supportCount = 0;
for (int res : new int[] { 0, (peopleMultiplier - 1) / 2, peopleMultiplier - 1 }) {
SolrDocument doc = hits.get(res);
assertEquals("dave", doc.getFieldValue("name_s_dv"));
SolrDocumentList relDepts = (SolrDocumentList) doc.getFieldValue("depts");
assertEquals("dave works in both depts " + rsp, deptMultiplier * 2, relDepts.getNumFound());
for (int deptN = 0; deptN < relDepts.getNumFound(); deptN++) {
SolrDocument deptDoc = relDepts.get(deptN);
String actual = (String) deptDoc.get("text_t");
assertTrue(deptDoc + "should be either " + engineering + " or " + support, (engineering.equals(actual) && ++engineerCount > 0) || (support.equals(actual) && ++supportCount > 0));
}
}
assertEquals(hits.toString(), engineerCount, supportCount);
}
}
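Stripped of the randomized options, the client-side pattern this test exercises can be sketched as follows. The collection and field names (people, departments, dept_id_s, dept_ss_dv, text_t) mirror the test; the injected SolrClient and the printing are assumptions for illustration:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;

public class SubqueryReadSketch {
  static void printDepartments(SolrClient client) throws Exception {
    SolrQuery q = new SolrQuery("name_s:dave");
    q.setFields("*", "depts:[subquery]");                 // attach joined docs under "depts"
    q.set("depts.q", "{!terms f=dept_id_s v=$row.dept_ss_dv}");
    q.set("depts.fl", "text_t");
    q.set("depts.collection", "departments");
    QueryResponse rsp = client.query("people", q);
    for (SolrDocument person : rsp.getResults()) {
      // the transformer stores the nested results as a SolrDocumentList field value
      SolrDocumentList depts = (SolrDocumentList) person.getFieldValue("depts");
      for (SolrDocument dept : depts) {
        System.out.println(person.getFieldValue("id") + " -> " + dept.getFieldValue("text_t"));
      }
    }
  }
}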
Use of org.apache.solr.common.SolrDocumentList in project lucene-solr by apache.
From class TestPHPSerializedResponseWriter, method testSolrDocuments:
@Test
public void testSolrDocuments() throws IOException {
  SolrQueryRequest req = req("q", "*:*");
  SolrQueryResponse rsp = new SolrQueryResponse();
  QueryResponseWriter w = new PHPSerializedResponseWriter();
  StringWriter buf = new StringWriter();
  SolrDocument d = new SolrDocument();
  SolrDocument d1 = d;
  d.addField("id", "1");
  d.addField("data1", "hello");
  d.addField("data2", 42);
  d.addField("data3", true);
  // multivalued fields:
  // extremely odd edge case: value is a map
  // we use LinkedHashMap because we are doing a string comparison
  // later and we need predictable ordering
  LinkedHashMap<String, String> nl = new LinkedHashMap<>();
  nl.put("data4.1", "hashmap");
  nl.put("data4.2", "hello");
  d.addField("data4", nl);
  // array value
  d.addField("data5", Arrays.asList("data5.1", "data5.2", "data5.3"));
  // adding one more document to test array indexes
  d = new SolrDocument();
  SolrDocument d2 = d;
  d.addField("id", "2");
  SolrDocumentList sdl = new SolrDocumentList();
  sdl.add(d1);
  sdl.add(d2);
  rsp.addResponse(sdl);
  w.write(buf, req, rsp);
  assertEquals("a:1:{s:8:\"response\";a:3:{s:8:\"numFound\";i:0;s:5:\"start\";i:0;s:4:\"docs\";a:2:{i:0;a:6:{s:2:\"id\";s:1:\"1\";s:5:\"data1\";s:5:\"hello\";s:5:\"data2\";i:42;s:5:\"data3\";b:1;s:5:\"data4\";a:2:{s:7:\"data4.1\";s:7:\"hashmap\";s:7:\"data4.2\";s:5:\"hello\";}s:5:\"data5\";a:3:{i:0;s:7:\"data5.1\";i:1;s:7:\"data5.2\";i:2;s:7:\"data5.3\";}}i:1;a:1:{s:2:\"id\";s:1:\"2\";}}}}", buf.toString());
  req.close();
}
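Outside of a test, PHPSerializedResponseWriter is normally selected per request with wt=phps rather than invoked directly. A hedged sketch of such a request using only JDK classes; the host, port, collection name, and the wt=phps registration are assumptions about the target Solr instance:

import java.io.InputStream;
import java.net.URL;
import java.util.Scanner;

public class PhpsRequestSketch {
  public static void main(String[] args) throws Exception {
    // wt=phps asks Solr to render the response with PHPSerializedResponseWriter
    URL url = new URL("http://localhost:8983/solr/techproducts/select?q=*:*&wt=phps");
    try (InputStream in = url.openStream(); Scanner s = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
      // the body is a PHP-serialized array in the same shape as the expected string above
      System.out.println(s.hasNext() ? s.next() : "");
    }
  }
}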
Use of org.apache.solr.common.SolrDocumentList in project lucene-solr by apache.
From class StreamingBinaryResponseParser, method processResponse:
@Override
public NamedList<Object> processResponse(InputStream body, String encoding) {
  try {
    JavaBinCodec codec = new JavaBinCodec() {
      @Override
      public SolrDocument readSolrDocument(DataInputInputStream dis) throws IOException {
        SolrDocument doc = super.readSolrDocument(dis);
        // hand each document to the callback as it is decoded; return null so it is not retained
        callback.streamSolrDocument(doc);
        return null;
      }
      @Override
      public SolrDocumentList readSolrDocumentList(DataInputInputStream dis) throws IOException {
        SolrDocumentList solrDocs = new SolrDocumentList();
        List list = (List) readVal(dis);
        solrDocs.setNumFound((Long) list.get(0));
        solrDocs.setStart((Long) list.get(1));
        solrDocs.setMaxScore((Float) list.get(2));
        callback.streamDocListInfo(solrDocs.getNumFound(), solrDocs.getStart(), solrDocs.getMaxScore());
        // Read the Array
        tagByte = dis.readByte();
        if ((tagByte >>> 5) != (ARR >>> 5)) {
          throw new RuntimeException("doclist must have an array");
        }
        int sz = readSize(dis);
        for (int i = 0; i < sz; i++) {
          // must be a SolrDocument
          readVal(dis);
        }
        return solrDocs;
      }
    };
    return (NamedList<Object>) codec.unmarshal(body);
  } catch (IOException e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "parsing error", e);
  }
}
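This parser backs SolrJ's streaming query API: instead of materializing the full SolrDocumentList, each document is pushed to a StreamingResponseCallback as it comes off the wire. A minimal usage sketch, assuming a local Solr at the default port and a collection named techproducts:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.StreamingResponseCallback;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrDocument;

public class StreamingQuerySketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      client.queryAndStreamResponse("techproducts", new SolrQuery("*:*"), new StreamingResponseCallback() {
        @Override
        public void streamDocListInfo(long numFound, long start, Float maxScore) {
          // header info from readSolrDocumentList arrives once, before the documents
          System.out.println("numFound=" + numFound);
        }
        @Override
        public void streamSolrDocument(SolrDocument doc) {
          // called once per document as it is decoded; documents are not accumulated in memory
          System.out.println(doc.getFieldValue("id"));
        }
      });
    }
  }
}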
Use of org.apache.solr.common.SolrDocumentList in project lucene-solr by apache.
From class XMLResponseParser, method readDocuments:
protected SolrDocumentList readDocuments(XMLStreamReader parser) throws XMLStreamException {
  SolrDocumentList docs = new SolrDocumentList();
  // Parse the attributes
  for (int i = 0; i < parser.getAttributeCount(); i++) {
    String n = parser.getAttributeLocalName(i);
    String v = parser.getAttributeValue(i);
    if ("numFound".equals(n)) {
      docs.setNumFound(Long.parseLong(v));
    } else if ("start".equals(n)) {
      docs.setStart(Long.parseLong(v));
    } else if ("maxScore".equals(n)) {
      docs.setMaxScore(Float.parseFloat(v));
    }
  }
  // Read through each document
  int event;
  while (true) {
    event = parser.next();
    if (XMLStreamConstants.START_ELEMENT == event) {
      if (!"doc".equals(parser.getLocalName())) {
        throw new RuntimeException("should be doc! " + parser.getLocalName() + " :: " + parser.getLocation());
      }
      docs.add(readDocument(parser));
    } else if (XMLStreamConstants.END_ELEMENT == event) {
      // only happens once
      return docs;
    }
  }
}
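XMLResponseParser is typically plugged into a SolrJ client to switch the transport format from javabin to XML; readDocuments then rebuilds the SolrDocumentList (numFound, start, maxScore plus the nested docs) from the result element. A brief usage sketch, with the URL and collection name as assumptions:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.impl.XMLResponseParser;
import org.apache.solr.common.SolrDocumentList;

public class XmlParserSketch {
  public static void main(String[] args) throws Exception {
    // the parser switches the client's response format from the default javabin to XML
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr")
        .withResponseParser(new XMLResponseParser())
        .build()) {
      SolrDocumentList docs = client.query("techproducts", new SolrQuery("*:*")).getResults();
      System.out.println("numFound=" + docs.getNumFound() + ", returned=" + docs.size());
    }
  }
}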