Use of org.apache.solr.client.solrj.io.SolrClientCache in project lucene-solr by apache: class StreamingTest, method testUniqueStream.
@Test
public void testUniqueStream() throws Exception {
  //Test CloudSolrStream and UniqueStream
  new UpdateRequest()
      .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
      .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
      .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
      .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
      .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
      .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
  StreamContext streamContext = new StreamContext();
  SolrClientCache solrClientCache = new SolrClientCache();
  streamContext.setSolrClientCache(solrClientCache);
  try {
    SolrParams sParams = StreamingTest.mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_f asc,a_i asc");
    CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams);
    UniqueStream ustream = new UniqueStream(stream, new FieldEqualitor("a_f"));
    ustream.setStreamContext(streamContext);
    List<Tuple> tuples = getTuples(ustream);
    // Docs 0 and 2 share a_f=0, so UniqueStream keeps only one of them.
    assertEquals(4, tuples.size());
    assertOrder(tuples, 0, 1, 3, 4);
  } finally {
    solrClientCache.close();
  }
}
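The tests in this listing lean on a handful of StreamingTest helpers that the excerpt never shows. The most central is getTuples, which drains a stream into a list; the comment in checkSort further down confirms it does not return the trailing EOF tuple. Below is a minimal sketch of such a helper, not necessarily the exact body in StreamingTest.

// Sketch: open the stream, read until the EOF marker tuple, and return
// everything before it (the EOF tuple itself is not included).
private List<Tuple> getTuples(TupleStream tupleStream) throws IOException {
  List<Tuple> tuples = new ArrayList<>();
  tupleStream.open();
  try {
    for (Tuple t = tupleStream.read(); !t.EOF; t = tupleStream.read()) {
      tuples.add(t);
    }
  } finally {
    tupleStream.close();
  }
  return tuples;
}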
Use of org.apache.solr.client.solrj.io.SolrClientCache in project lucene-solr by apache: class StreamingTest, method trySortWithQt.
private void trySortWithQt(String which) throws Exception {
  //Basic CloudSolrStream Test bools asc
  SolrParams sParams = mapParams("q", "*:*", "qt", which, "fl", "id,b_sing", "sort", "b_sing asc,id asc");
  StreamContext streamContext = new StreamContext();
  SolrClientCache solrClientCache = new SolrClientCache();
  streamContext.setSolrClientCache(solrClientCache);
  CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams);
  try {
    stream.setStreamContext(streamContext);
    List<Tuple> tuples = getTuples(stream);
    assertEquals(5, tuples.size());
    assertOrder(tuples, 0, 2, 1, 3, 4);
    //Basic CloudSolrStream Test bools desc
    sParams = mapParams("q", "*:*", "qt", which, "fl", "id,b_sing", "sort", "b_sing desc,id desc");
    stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams);
    stream.setStreamContext(streamContext);
    tuples = getTuples(stream);
    assertEquals(5, tuples.size());
    assertOrder(tuples, 4, 3, 1, 2, 0);
    //Basic CloudSolrStream Test dates desc
    sParams = mapParams("q", "*:*", "qt", which, "fl", "id,dt_sing", "sort", "dt_sing desc,id asc");
    stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams);
    stream.setStreamContext(streamContext);
    tuples = getTuples(stream);
    assertEquals(5, tuples.size());
    assertOrder(tuples, 2, 0, 1, 4, 3);
    //Basic CloudSolrStream Test dates asc
    sParams = mapParams("q", "*:*", "qt", which, "fl", "id,dt_sing", "sort", "dt_sing asc,id desc");
    stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParams);
    stream.setStreamContext(streamContext);
    tuples = getTuples(stream);
    assertEquals(5, tuples.size());
    assertOrder(tuples, 3, 4, 1, 0, 2);
  } finally {
    solrClientCache.close();
  }
}
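mapParams, called by every test here, evidently packs alternating key/value strings into a SolrParams object. A plausible sketch, assuming solrj's ModifiableSolrParams:

// Sketch: build SolrParams from varargs given as key/value pairs.
public static SolrParams mapParams(String... vals) {
  assertEquals("Parameters must be given in key/value pairs", 0, vals.length % 2);
  ModifiableSolrParams params = new ModifiableSolrParams();
  for (int idx = 0; idx < vals.length; idx += 2) {
    params.add(vals[idx], vals[idx + 1]);
  }
  return params;
}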
Use of org.apache.solr.client.solrj.io.SolrClientCache in project lucene-solr by apache: class StreamingTest, method testParallelMergeStream.
@Test
public void testParallelMergeStream() throws Exception {
  new UpdateRequest()
      .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "0")
      .add(id, "2", "a_s", "hello2", "a_i", "2", "a_f", "0")
      .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
      .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
      .add(id, "1", "a_s", "hello1", "a_i", "1", "a_f", "1")
      .add(id, "5", "a_s", "hello0", "a_i", "10", "a_f", "0")
      .add(id, "6", "a_s", "hello2", "a_i", "8", "a_f", "0")
      .add(id, "7", "a_s", "hello3", "a_i", "7", "a_f", "3")
      .add(id, "8", "a_s", "hello4", "a_i", "11", "a_f", "4")
      .add(id, "9", "a_s", "hello1", "a_i", "100", "a_f", "1")
      .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
  StreamContext streamContext = new StreamContext();
  SolrClientCache solrClientCache = new SolrClientCache();
  streamContext.setSolrClientCache(solrClientCache);
  try {
    //Test ascending
    SolrParams sParamsA = mapParams("q", "id:(4 1 8 7 9)", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i");
    CloudSolrStream streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA);
    SolrParams sParamsB = mapParams("q", "id:(0 2 3 6)", "fl", "id,a_s,a_i", "sort", "a_i asc", "partitionKeys", "a_i");
    CloudSolrStream streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB);
    MergeStream mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i", ComparatorOrder.ASCENDING));
    ParallelStream pstream = parallelStream(mstream, new FieldComparator("a_i", ComparatorOrder.ASCENDING));
    attachStreamFactory(pstream);
    pstream.setStreamContext(streamContext);
    List<Tuple> tuples = getTuples(pstream);
    assertEquals(9, tuples.size());
    assertOrder(tuples, 0, 1, 2, 3, 4, 7, 6, 8, 9);
    //Test descending
    sParamsA = mapParams("q", "id:(4 1 8 9)", "fl", "id,a_s,a_i", "sort", "a_i desc", "partitionKeys", "a_i");
    streamA = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA);
    sParamsB = mapParams("q", "id:(0 2 3 6)", "fl", "id,a_s,a_i", "sort", "a_i desc", "partitionKeys", "a_i");
    streamB = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsB);
    mstream = new MergeStream(streamA, streamB, new FieldComparator("a_i", ComparatorOrder.DESCENDING));
    pstream = parallelStream(mstream, new FieldComparator("a_i", ComparatorOrder.DESCENDING));
    attachStreamFactory(pstream);
    pstream.setStreamContext(streamContext);
    tuples = getTuples(pstream);
    assertEquals(8, tuples.size());
    assertOrder(tuples, 9, 8, 6, 4, 3, 2, 1, 0);
  } finally {
    solrClientCache.close();
  }
}
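The parallel tests also call two helpers this excerpt omits: parallelStream, which wraps the inner stream in a ParallelStream, and attachStreamFactory, which supplies the StreamFactory the workers need to deserialize and rebuild that inner stream. One plausible shape, where numWorkers and streamFactory are assumed test fixtures rather than names confirmed by this excerpt:

// Hypothetical sketch: distribute the wrapped stream across numWorkers
// workers, merging their results with the given comparator.
private ParallelStream parallelStream(TupleStream stream, FieldComparator comparator) throws IOException {
  return new ParallelStream(zkHost, COLLECTIONORALIAS, stream, numWorkers, comparator);
}

// Hypothetical sketch: ParallelStream ships an expression of its inner
// stream to the workers, so a StreamFactory must be available to rebuild it.
private void attachStreamFactory(TupleStream tupleStream) {
  StreamContext streamContext = new StreamContext();
  streamContext.setStreamFactory(streamFactory); // assumed pre-configured fixture
  tupleStream.setStreamContext(streamContext);
}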
Use of org.apache.solr.client.solrj.io.SolrClientCache in project lucene-solr by apache: class StreamingTest, method checkSort.
// Select and export should produce identical sort orders, I think.
private void checkSort(JettySolrRunner jetty, String field, String sortDir, String[] fields) throws IOException, SolrServerException {
  // Comes back after LUCENE-7548
  // SolrQuery query = new SolrQuery("*:*");
  // query.addSort(field, ("asc".equals(sortDir) ? SolrQuery.ORDER.asc : SolrQuery.ORDER.desc));
  // query.addSort("id", SolrQuery.ORDER.asc);
  // query.addField("id");
  // query.addField(field);
  // query.setRequestHandler("standard");
  // query.setRows(100);
  //
  // List<String> selectOrder = new ArrayList<>();
  //
  // String url = jetty.getBaseUrl() + "/" + COLLECTION;
  //
  // try (HttpSolrClient client = getHttpSolrClient(url)) {
  //   client.setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
  //   QueryResponse rsp = client.query(query);
  //   for (SolrDocument doc : rsp.getResults()) {
  //     selectOrder.add((String) doc.getFieldValue("id"));
  //   }
  // }
  // SolrParams exportParams = mapParams("q", "*:*", "qt", "/export", "fl", "id," + field, "sort", field + " " + sortDir + ",id asc");
  // try (CloudSolrStream solrStream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, exportParams)) {
  //   List<Tuple> tuples = getTuples(solrStream);
  //   assertEquals("There should be exactly 32 responses returned", 32, tuples.size());
  //   // Since the getTuples method doesn't return the EOF tuple, these two entries should be the same size.
  //   assertEquals("Tuple count should exactly match sort array size for field " + field + " sort order " + sortDir, selectOrder.size(), tuples.size());
  //
  //   for (int idx = 0; idx < selectOrder.size(); ++idx) { // Tuples should be in lock step with the orders from select.
  //     assertEquals("Order for missing docValues fields wrong for field '" + field + "' sort direction '" + sortDir,
  //         tuples.get(idx).getString("id"), selectOrder.get(idx));
  //   }
  // }
  // Remove below and uncomment above after LUCENE-7548
  List<String> selectOrder = ("asc".equals(sortDir)) ? Arrays.asList(ascOrder) : Arrays.asList(descOrder);
  List<String> selectOrderBool = ("asc".equals(sortDir)) ? Arrays.asList(ascOrderBool) : Arrays.asList(descOrderBool);
  SolrParams exportParams = mapParams("q", "*:*", "qt", "/export", "fl", "id," + field, "sort", field + " " + sortDir + ",id asc");
  StreamContext streamContext = new StreamContext();
  SolrClientCache solrClientCache = new SolrClientCache();
  streamContext.setSolrClientCache(solrClientCache);
  try (CloudSolrStream solrStream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, exportParams)) {
    solrStream.setStreamContext(streamContext);
    List<Tuple> tuples = getTuples(solrStream);
    assertEquals("There should be exactly 32 responses returned", 32, tuples.size());
    // Since the getTuples method doesn't return the EOF tuple, these two entries should be the same size.
    assertEquals("Tuple count should exactly match sort array size for field " + field + " sort order " + sortDir, selectOrder.size(), tuples.size());
    for (int idx = 0; idx < selectOrder.size(); ++idx) {
      // Tuples should be in lock step with the orders passed in.
      assertEquals("Order for missing docValues fields wrong for field '" + field + "' sort direction '" + sortDir + "' RESTORE GETTING selectOrder from select statement after LUCENE-7548",
          tuples.get(idx).getString("id"),
          (field.startsWith("b_") ? selectOrderBool.get(idx) : selectOrder.get(idx)));
    }
  } finally {
    solrClientCache.close();
  }
}
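assertOrder, used throughout this listing, presumably checks that the returned tuples' id fields match the expected sequence. A minimal sketch of that idea:

// Sketch: verify the tuples' "id" fields appear in exactly the given order.
protected void assertOrder(List<Tuple> tuples, int... ids) throws Exception {
  assertEquals(ids.length, tuples.size());
  for (int i = 0; i < ids.length; i++) {
    assertEquals("Wrong tuple at position " + i, String.valueOf(ids[i]), tuples.get(i).getString("id"));
  }
}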
Use of org.apache.solr.client.solrj.io.SolrClientCache in project lucene-solr by apache: class StreamingTest, method testParallelReducerStream.
@Test
public void testParallelReducerStream() throws Exception {
  new UpdateRequest()
      .add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
      .add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
      .add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
      .add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
      .add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
      .add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
      .add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
      .add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
      .add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
      .add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
      .commit(cluster.getSolrClient(), COLLECTIONORALIAS);
  StreamContext streamContext = new StreamContext();
  SolrClientCache solrClientCache = new SolrClientCache();
  streamContext.setSolrClientCache(solrClientCache);
  try {
    SolrParams sParamsA = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_s asc,a_f asc", "partitionKeys", "a_s");
    CloudSolrStream stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA);
    ReducerStream rstream = new ReducerStream(stream, new FieldEqualitor("a_s"), new GroupOperation(new FieldComparator("a_f", ComparatorOrder.DESCENDING), 5));
    ParallelStream pstream = parallelStream(rstream, new FieldComparator("a_s", ComparatorOrder.ASCENDING));
    attachStreamFactory(pstream);
    pstream.setStreamContext(streamContext);
    List<Tuple> tuples = getTuples(pstream);
    assertEquals(3, tuples.size());
    Tuple t0 = tuples.get(0);
    List<Map> maps0 = t0.getMaps("group");
    assertMaps(maps0, 9, 1, 2, 0);
    Tuple t1 = tuples.get(1);
    List<Map> maps1 = t1.getMaps("group");
    assertMaps(maps1, 8, 7, 5, 3);
    Tuple t2 = tuples.get(2);
    List<Map> maps2 = t2.getMaps("group");
    assertMaps(maps2, 6, 4);
    //Test Descending with Ascending subsort
    sParamsA = mapParams("q", "*:*", "fl", "id,a_s,a_i,a_f", "sort", "a_s desc,a_f asc", "partitionKeys", "a_s");
    stream = new CloudSolrStream(zkHost, COLLECTIONORALIAS, sParamsA);
    rstream = new ReducerStream(stream, new FieldEqualitor("a_s"), new GroupOperation(new FieldComparator("a_f", ComparatorOrder.ASCENDING), 3));
    pstream = parallelStream(rstream, new FieldComparator("a_s", ComparatorOrder.DESCENDING));
    attachStreamFactory(pstream);
    pstream.setStreamContext(streamContext);
    tuples = getTuples(pstream);
    assertEquals(3, tuples.size());
    t0 = tuples.get(0);
    maps0 = t0.getMaps("group");
    assertMaps(maps0, 4, 6);
    t1 = tuples.get(1);
    maps1 = t1.getMaps("group");
    assertMaps(maps1, 3, 5, 7);
    t2 = tuples.get(2);
    maps2 = t2.getMaps("group");
    assertMaps(maps2, 0, 2, 1);
  } finally {
    solrClientCache.close();
  }
}
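Finally, assertMaps evidently plays the same role as assertOrder for grouped results, checking the id inside each map of a tuple's "group" list. A plausible sketch:

// Sketch: each map in the group should carry the expected id, in order.
protected void assertMaps(List<Map> maps, int... ids) throws Exception {
  assertEquals(ids.length, maps.size());
  for (int i = 0; i < ids.length; i++) {
    assertEquals("Wrong group member at position " + i, String.valueOf(ids[i]), maps.get(i).get("id"));
  }
}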