Usage example of org.apache.solr.client.solrj.SolrClient in the Apache lucene-solr project — class TestCustomStream, method setupHarnesses:
/** Creates one REST test harness per configured Solr client and registers it. */
private void setupHarnesses() {
  for (final SolrClient solrClient : clients) {
    // The harness resolves the base URL lazily via the supplied provider.
    restTestHarnesses.add(new RestTestHarness(() -> ((HttpSolrClient) solrClient).getBaseURL()));
  }
}
Usage example of org.apache.solr.client.solrj.SolrClient in the Apache lucene-solr project — class TestJsonFacetRefinement, method doBasicRefinement:
/**
 * Exercises distributed JSON-facet refinement: the second "refine" phase that
 * corrects facet counts and stats when {@code limit}/{@code overrequest}
 * hide buckets on some shards during the first phase.
 * Field names are read from {@code p} so the same scenario can run against
 * different field configurations.
 *
 * @param p parameters naming the facet fields (cat_s, xy_s, qw_s, er_s, num_d)
 * @throws Exception on any indexing or query failure
 */
public void doBasicRefinement(ModifiableSolrParams p) throws Exception {
initServers();
Client client = servers.getClient(random().nextInt());
client.queryDefaults().set("shards", servers.getShards(), "debugQuery", Boolean.toString(random().nextBoolean()));
// At least 3 shards are required so that one shard's top bucket can differ
// from the merged top bucket (the condition refinement exists to fix).
List<SolrClient> clients = client.getClientProvider().all();
assertTrue(clients.size() >= 3);
client.deleteByQuery("*:*", null);
String cat_s = p.get("cat_s");
String xy_s = p.get("xy_s");
String qw_s = p.get("qw_s");
// this field is designed to test numBuckets refinement... the first phase will only have a single bucket returned for the top count bucket of cat_s
String er_s = p.get("er_s");
String num_d = p.get("num_d");
// Docs are added directly to specific shard clients so the per-shard counts
// are deterministic; refinement behavior depends on exactly this layout.
// A wins count tie
clients.get(0).add(sdoc("id", "01", "all_s", "all", cat_s, "A", xy_s, "X", num_d, -1, qw_s, "Q", er_s, "E"));
clients.get(0).add(sdoc("id", "02", "all_s", "all", cat_s, "B", xy_s, "Y", num_d, 3));
// B highest count
clients.get(1).add(sdoc("id", "11", "all_s", "all", cat_s, "B", xy_s, "X", num_d, -5, er_s, "E"));
clients.get(1).add(sdoc("id", "12", "all_s", "all", cat_s, "B", xy_s, "Y", num_d, -11, qw_s, "W"));
// "R" will only be picked up via refinement when parent facet is cat_s
clients.get(1).add(sdoc("id", "13", "all_s", "all", cat_s, "A", xy_s, "X", num_d, 7, er_s, "R"));
// A highest count
clients.get(2).add(sdoc("id", "21", "all_s", "all", cat_s, "A", xy_s, "X", num_d, 17, qw_s, "W", er_s, "E"));
clients.get(2).add(sdoc("id", "22", "all_s", "all", cat_s, "A", xy_s, "Y", num_d, -19));
clients.get(2).add(sdoc("id", "23", "all_s", "all", cat_s, "B", xy_s, "X", num_d, 11));
client.commit();
// Shard responses should be A=1, B=2, A=2, merged should be "A=3, B=2"
// One shard will have _facet_={"refine":{"cat0":{"_l":["A"]}}} on the second phase
/****
// fake a refinement request... good for development/debugging
assertJQ(clients.get(1),
params(p, "q", "*:*", "_facet_","{refine:{cat0:{_l:[A]}}}", "isShard","true", "distrib","false", "shards.purpose","2097216", "ids","11,12,13",
"json.facet", "{" +
"cat0:{type:terms, field:cat_s, sort:'count desc', limit:1, overrequest:0, refine:true}" +
"}"
)
, "facets=={foo:555}"
);
****/
// Baseline: with refine:false the merged count for "A" is too low (3, not 4).
client.testJQ(params(p, "q", "*:*", "json.facet", "{" + "cat0:{${terms} type:terms, field:${cat_s}, sort:'count desc', limit:1, overrequest:0, refine:false}" + "}"), "facets=={ count:8" + // w/o overrequest and refinement, count is lower than it should be (we don't see the A from the middle shard)
", cat0:{ buckets:[ {val:A,count:3} ] }" + "}");
client.testJQ(params(p, "q", "*:*", "json.facet", "{" + "cat0:{${terms} type:terms, field:${cat_s}, sort:'count desc', limit:1, overrequest:0, refine:true}" + "}"), "facets=={ count:8" + // w/o overrequest, we need refining to get the correct count.
", cat0:{ buckets:[ {val:A,count:4} ] }" + "}");
// test that basic stats work for refinement
client.testJQ(params(p, "q", "*:*", "json.facet", "{" + "cat0:{${terms} type:terms, field:${cat_s}, sort:'count desc', limit:1, overrequest:0, refine:true, facet:{ stat1:'sum(${num_d})'} }" + "}"), "facets=={ count:8" + ", cat0:{ buckets:[ {val:A,count:4, stat1:4.0} ] }" + "}");
// test sorting buckets by a different stat
client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " cat0:{${terms} type:terms, field:${cat_s}, sort:'min1 asc', limit:1, overrequest:0, refine:false, facet:{ min1:'min(${num_d})'} }" + ",cat1:{${terms} type:terms, field:${cat_s}, sort:'min1 asc', limit:1, overrequest:0, refine:true, facet:{ min1:'min(${num_d})'} }" + // refinement needed through a query facet
",qfacet:{type:query, q:'*:*', facet:{ cat2:{${terms} type:terms, field:${cat_s}, sort:'min1 asc', limit:1, overrequest:0, refine:true, facet:{ min1:'min(${num_d})'} } }}" + // refinement needed through field facet
",allf:{${terms} type:terms, field:all_s, facet:{ cat3:{${terms} type:terms, field:${cat_s}, sort:'min1 asc', limit:1, overrequest:0, refine:true, facet:{ min1:'min(${num_d})'} } }}" + // make sure that root bucket stats aren't affected by refinement
",sum1:'sum(${num_d})'" + "}"), "facets=={ count:8" + // B wins in shard2, so we're missing the "A" count for that shard w/o refinement.
", cat0:{ buckets:[ {val:A,count:3, min1:-19.0} ] }" + // with refinement, we get the right count
", cat1:{ buckets:[ {val:A,count:4, min1:-19.0} ] }" + // just like the previous response, just nested under a query facet
", qfacet:{ count:8, cat2:{ buckets:[ {val:A,count:4, min1:-19.0} ] } }" + // just like the previous response, just nested under a field facet
", allf:{ buckets:[ {cat3:{ buckets:[ {val:A,count:4, min1:-19.0} ] } ,count:8,val:all }] }" + ", sum1:2.0" + "}");
// test partial buckets (field facet within field facet)
client.testJQ(params(p, "q", "*:*", "json.facet", "{" + "ab:{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true, facet:{ xy:{${terms} type:terms, field:${xy_s}, limit:1, overrequest:0, refine:true } }}" + "}"), "facets=={ count:8" + // just like the previous response, just nested under a field facet
", ab:{ buckets:[ {val:A, count:4, xy:{buckets:[ {val:X,count:3}]} }] }" + "}");
// test that sibling facets and stats are included for _p buckets, but skipped for _s buckets
client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " ab :{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true, facet:{ xy:{${terms} type:terms, field:${xy_s}, limit:1, overrequest:0, refine:true}, qq:{query:'*:*'},ww:'sum(${num_d})' }}" + // top level refine=false shouldn't matter
",ab2:{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:false, facet:{ xy:{${terms} type:terms, field:${xy_s}, limit:1, overrequest:0, refine:true}, qq:{query:'*:*'},ww:'sum(${num_d})' }}" + ",allf :{${terms} type:terms, field:all_s, limit:1, overrequest:0, refine:true, facet:{cat:{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true}, qq:{query:'*:*'},ww:'sum(${num_d})' }}" + // top level refine=false shouldn't matter
",allf2:{${terms} type:terms, field:all_s, limit:1, overrequest:0, refine:false, facet:{cat:{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true}, qq:{query:'*:*'},ww:'sum(${num_d})' }}" + "}"), "facets=={ count:8" + // make sure qq and ww are included for _p buckets
", ab:{ buckets:[ {val:A, count:4, xy:{buckets:[ {val:X,count:3}]} ,qq:{count:4}, ww:4.0 }] }" + // make sure qq and ww are excluded (not calculated again in another phase) for _s buckets
", allf:{ buckets:[ {count:8, val:all, cat:{buckets:[{val:A,count:4}]} ,qq:{count:8}, ww:2.0 }] }" + // make sure qq and ww are included for _p buckets
", ab2:{ buckets:[ {val:A, count:4, xy:{buckets:[ {val:X,count:3}]} ,qq:{count:4}, ww:4.0 }] }" + // make sure qq and ww are excluded (not calculated again in another phase) for _s buckets
", allf2:{ buckets:[ {count:8, val:all, cat:{buckets:[{val:A,count:4}]} ,qq:{count:8}, ww:2.0 }] }" + "}");
// test refining under the special "missing" bucket of a field facet
client.testJQ(params(p, "q", "*:*", "json.facet", "{" + "f:{${terms} type:terms, field:missing_s, limit:1, overrequest:0, missing:true, refine:true, facet:{ cat:{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true } }}" + "}"), "facets=={ count:8" + // just like the previous response, just nested under a field facet
", f:{ buckets:[], missing:{count:8, cat:{buckets:[{val:A,count:4}]} } }" + "}");
// test filling in "missing" bucket for partially refined facets
client.testJQ(params(p, "q", "*:*", "json.facet", "{" + // test all values missing in sub-facet
" ab :{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:false, facet:{ zz:{${terms} type:terms, field:missing_s, limit:1, overrequest:0, refine:false, missing:true} }}" + ",ab2:{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true , facet:{ zz:{${terms} type:terms, field:missing_s, limit:1, overrequest:0, refine:true , missing:true} }}" + // test some values missing in sub-facet (and test that this works with normal partial bucket refinement)
", cd :{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:false, facet:{ qw:{${terms} type:terms, field:${qw_s}, limit:1, overrequest:0, refine:false, missing:true, facet:{qq:{query:'*:*'}} } }}" + ", cd2:{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true , facet:{ qw:{${terms} type:terms, field:${qw_s}, limit:1, overrequest:0, refine:true , missing:true, facet:{qq:{query:'*:*'}} } }}" + "}"), "facets=={ count:8" + ", ab:{ buckets:[ {val:A, count:3, zz:{buckets:[], missing:{count:3}}}] }" + ",ab2:{ buckets:[ {val:A, count:4, zz:{buckets:[], missing:{count:4}}}] }" + ", cd:{ buckets:[ {val:A, count:3, qw:{buckets:[{val:Q, count:1, qq:{count:1}}], missing:{count:1,qq:{count:1}}}}] }" + ",cd2:{ buckets:[ {val:A, count:4, qw:{buckets:[{val:Q, count:1, qq:{count:1}}], missing:{count:2,qq:{count:2}}}}] }" + "}");
// test filling in missing "allBuckets"
client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " cat :{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:false, allBuckets:true, facet:{ xy:{${terms} type:terms, field:${xy_s}, limit:1, overrequest:0, allBuckets:true, refine:false} } }" + ", cat2:{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true , allBuckets:true, facet:{ xy:{${terms} type:terms, field:${xy_s}, limit:1, overrequest:0, allBuckets:true, refine:true } } }" + ", cat3:{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true , allBuckets:true, facet:{ xy:{${terms} type:terms, field:${xy_s}, limit:1, overrequest:0, allBuckets:true, refine:true , facet:{f:'sum(${num_d})'} } } }" + "}"), "facets=={ count:8" + ", cat:{ allBuckets:{count:8}, buckets:[ {val:A, count:3, xy:{buckets:[{count:2, val:X}], allBuckets:{count:3}}}] }" + ",cat2:{ allBuckets:{count:8}, buckets:[ {val:A, count:4, xy:{buckets:[{count:3, val:X}], allBuckets:{count:4}}}] }" + ",cat3:{ allBuckets:{count:8}, buckets:[ {val:A, count:4, xy:{buckets:[{count:3, val:X, f:23.0}], allBuckets:{count:4, f:4.0}}}] }" + "}");
// test filling in missing numBuckets
client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " cat :{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:false, numBuckets:true, facet:{ er:{${terms} type:terms, field:${er_s}, limit:1, overrequest:0, numBuckets:true, refine:false} } }" + ", cat2:{${terms} type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true , numBuckets:true, facet:{ er:{${terms} type:terms, field:${er_s}, limit:1, overrequest:0, numBuckets:true, refine:true } } }" + "}"), "facets=={ count:8" + // the "R" bucket will not be seen w/o refinement
", cat:{ numBuckets:2, buckets:[ {val:A, count:3, er:{numBuckets:1,buckets:[{count:2, val:E}] }}] }" + ",cat2:{ numBuckets:2, buckets:[ {val:A, count:4, er:{numBuckets:2,buckets:[{count:2, val:E}] }}] }" + "}");
// Shared facet options reused by the join-domain tests below.
final String sort_limit_over = "sort:'count desc', limit:1, overrequest:0, ";
// simplistic join domain testing: no refinement == low count
client.testJQ(params(// query only matches one doc per shard
p, // query only matches one doc per shard
"q", // query only matches one doc per shard
"${xy_s}:Y", "json.facet", "{" + " cat0:{${terms} type:terms, field:${cat_s}, " + sort_limit_over + " refine:false," + // self join on all_s ensures every doc on every shard included in facets
" domain: { join: { from:all_s, to:all_s } } }" + "}"), "/response/numFound==3", "facets=={ count:3, " + // (we don't see the A from the middle shard)
" cat0:{ buckets:[ {val:A,count:3} ] } }");
// simplistic join domain testing: refinement == correct count
client.testJQ(params(// query only matches one doc per shard
p, // query only matches one doc per shard
"q", // query only matches one doc per shard
"${xy_s}:Y", "json.facet", "{" + " cat0:{${terms} type:terms, field:${cat_s}, " + sort_limit_over + " refine:true," + // self join on all_s ensures every doc on every shard included in facets
" domain: { join: { from:all_s, to:all_s } } }" + "}"), "/response/numFound==3", "facets=={ count:3," + // w/o overrequest, we need refining to get the correct count for 'A'.
" cat0:{ buckets:[ {val:A,count:4} ] } }");
// contrived join domain + refinement (at second level) + testing
client.testJQ(params(// query only matches one doc per shard
p, // query only matches one doc per shard
"q", // query only matches one doc per shard
"${xy_s}:Y", "json.facet", "{" + // top level facet has a single term
" all:{${terms} type:terms, field:all_s, " + sort_limit_over + " refine:true, " + " facet:{ " + // subfacet will facet on cat after joining on all (so all docs should be included in subfacet)
" cat0:{${terms} type:terms, field:${cat_s}, " + sort_limit_over + " refine:true," + " domain: { join: { from:all_s, to:all_s } } } } }" + "}"), "/response/numFound==3", "facets=={ count:3," + // all 3 docs matching base query have same 'all' value in top facet
" all:{ buckets:[ { val:all, count:3, " + // sub facet has refinement, so count for 'A' should be correct
" cat0:{ buckets: [{val:A,count:4}] } } ] } }");
}
Usage example of org.apache.solr.client.solrj.SolrClient in the Apache lucene-solr project — class DatabaseMetaDataImpl, method getDatabaseProductVersion:
@Override
public String getDatabaseProductVersion() throws SQLException {
  // Returns the Solr version reported by the first live node that answers.
  SolrQuery sysQuery = new SolrQuery();
  sysQuery.setRequestHandler("/admin/info/system");
  CloudSolrClient cloudSolrClient = this.connection.getClient();
  Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes();
  for (String node : liveNodes) {
    String nodeURL = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(node);
    // try-with-resources guarantees the per-node client is always closed
    // (the original manual close in a finally block is no longer needed).
    try (SolrClient solrClient = new Builder(nodeURL).build()) {
      QueryResponse rsp = solrClient.query(sysQuery);
      return String.valueOf(((SimpleOrderedMap) rsp.getResponse().get("lucene")).get("solr-spec-version"));
    } catch (SolrServerException | IOException ignored) {
      // Bug fix: the previous code returned "" on the FIRST failure, so the
      // loop over live nodes never tried a second node. Fall through and try
      // the next live node instead.
    }
  }
  // If no live node responded, just return an empty string.
  return "";
}
Usage example of org.apache.solr.client.solrj.SolrClient in the Apache lucene-solr project — class AbstractFullDistribZkTestBase, method createJettys:
/**
 * Starts {@code numJettys} Jetty/Solr instances for the test cluster, creates a
 * SolrClient per instance, waits for the cluster state to reflect the expected
 * replica count, and builds the distributed "shards" string.
 *
 * @param numJettys number of Jetty instances (replicas) to start
 * @return the started Jetty runners (also accumulated into {@code this.jettys})
 * @throws Exception if startup, ZK interaction, or leader election fails
 */
protected List<JettySolrRunner> createJettys(int numJettys) throws Exception {
List<JettySolrRunner> jettys = new ArrayList<>();
List<SolrClient> clients = new ArrayList<>();
StringBuilder sb = new StringBuilder();
// With stateFormat=2 the collection must be pre-created via the Overseer queue.
if ("2".equals(getStateFormat())) {
log.info("Creating " + DEFAULT_COLLECTION + " with stateFormat=2");
SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT);
Overseer.getStateUpdateQueue(zkClient).offer(Utils.toJSON(Utils.makeMap(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(), "name", DEFAULT_COLLECTION, "numShards", String.valueOf(sliceCount), DocCollection.STATE_FORMAT, getStateFormat(), ZkStateReader.NRT_REPLICAS, useTlogReplicas() ? "0" : "1", ZkStateReader.TLOG_REPLICAS, useTlogReplicas() ? "1" : "0", ZkStateReader.PULL_REPLICAS, String.valueOf(getPullReplicaCount()))));
zkClient.close();
}
int numPullReplicas = getPullReplicaCount() * sliceCount;
// Start each Jetty; PULL replicas are created first (while the quota lasts),
// then TLOG or NRT depending on configuration.
for (int i = 1; i <= numJettys; i++) {
if (sb.length() > 0)
sb.append(',');
int cnt = this.jettyIntCntr.incrementAndGet();
File jettyDir = createTempDir("shard-" + i).toFile();
jettyDir.mkdirs();
setupJettySolrHome(jettyDir);
JettySolrRunner j;
if (numPullReplicas > 0) {
numPullReplicas--;
log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.PULL);
j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty" + cnt) : null, null, "solrconfig.xml", null, Replica.Type.PULL);
} else if (useTlogReplicas()) {
log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.TLOG);
j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty" + cnt) : null, null, "solrconfig.xml", null, Replica.Type.TLOG);
} else {
log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.NRT);
// NOTE(review): replica type is passed as null here (not Replica.Type.NRT) —
// presumably null defaults to NRT downstream; confirm in createJetty.
j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty" + cnt) : null, null, "solrconfig.xml", null, null);
}
jettys.add(j);
SolrClient client = createNewSolrClient(j.getLocalPort());
clients.add(client);
}
this.jettys.addAll(jettys);
this.clients.addAll(clients);
int numReplicas = getTotalReplicas(DEFAULT_COLLECTION);
int expectedNumReplicas = numJettys;
// now wait until we see that the number of shards in the cluster state
// matches what we expect
int retries = 0;
// Poll the cluster state (up to ~30s: 60 retries x 500ms) for the replicas.
while (numReplicas != expectedNumReplicas) {
numReplicas = getTotalReplicas(DEFAULT_COLLECTION);
if (numReplicas == expectedNumReplicas)
break;
if (retries++ == 60) {
printLayoutOnTearDown = true;
fail("Number of replicas in the state does not match what we set:" + numReplicas + " vs " + expectedNumReplicas);
}
Thread.sleep(500);
}
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
// make sure we have a leader for each shard
for (int i = 1; i <= sliceCount; i++) {
zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + i, 10000);
}
if (numReplicas > 0) {
updateMappingsFromZk(this.jettys, this.clients);
}
// build the shard string
// Pairs jetty i with jetty i + (numJettys/2 - 1) as "url1|url2" entries,
// joined by commas into the distributed-search "shards" parameter.
for (int i = 1; i <= numJettys / 2; i++) {
JettySolrRunner j = this.jettys.get(i);
JettySolrRunner j2 = this.jettys.get(i + (numJettys / 2 - 1));
if (sb.length() > 0)
sb.append(',');
sb.append(buildUrl(j.getLocalPort()));
sb.append("|").append(buildUrl(j2.getLocalPort()));
}
shards = sb.toString();
return jettys;
}
Usage example of org.apache.solr.client.solrj.SolrClient in the Apache lucene-solr project — class AbstractFullDistribZkTestBase, method checkShardConsistency:
/**
 * Checks consistency between the replicas of every shard, and optionally
 * verifies the cloud collection's document count against the control shard.
 * The test is failed if any difference is found.
 *
 * @param checkVsControl if true, also compare cloud totals against the control client
 * @param verbose        if true, print per-replica counts to stderr
 * @param addFails       collector for add discrepancies found by CloudInspectUtil
 * @param deleteFails    collector for delete discrepancies found by CloudInspectUtil
 * @throws Exception if a query fails on every replica of a shard, or consistency fails
 */
protected void checkShardConsistency(boolean checkVsControl, boolean verbose, Set<String> addFails, Set<String> deleteFails) throws Exception {
updateMappingsFromZk(jettys, clients, true);
Set<String> theShards = shardToJetty.keySet();
String failMessage = null;
// First pass: check replica-vs-replica consistency within each shard.
// Only the first failure message is kept for the fail() call below.
for (String shard : theShards) {
String shardFailMessage = checkShardConsistency(shard, false, verbose);
if (shardFailMessage != null && failMessage == null) {
failMessage = shardFailMessage;
}
}
if (failMessage != null) {
fail(failMessage);
}
if (!checkVsControl)
return;
// add a tag to aid in debugging via logs
SolrParams q = params("q", "*:*", "rows", "0", "tests", "checkShardConsistency(vsControl)");
SolrDocumentList controlDocList = controlClient.query(q).getResults();
long controlDocs = controlDocList.getNumFound();
SolrDocumentList cloudDocList = cloudClient.query(q).getResults();
long cloudClientDocs = cloudDocList.getNumFound();
// now check that the right # are on each shard
theShards = shardToJetty.keySet();
int cnt = 0;
// Sum per-shard counts by querying one ACTIVE replica per shard
// (distrib=false); on failure, fall through to the next replica.
for (String s : theShards) {
int times = shardToJetty.get(s).size();
for (int i = 0; i < times; i++) {
try {
CloudJettyRunner cjetty = shardToJetty.get(s).get(i);
ZkNodeProps props = cjetty.info;
SolrClient client = cjetty.client.solrClient;
boolean active = Replica.State.getState(props.getStr(ZkStateReader.STATE_PROP)) == Replica.State.ACTIVE;
if (active) {
SolrQuery query = new SolrQuery("*:*");
query.set("distrib", false);
long results = client.query(query).getResults().getNumFound();
if (verbose)
System.err.println(new ZkCoreNodeProps(props).getCoreUrl() + " : " + results);
if (verbose)
System.err.println("shard:" + props.getStr(ZkStateReader.SHARD_ID_PROP));
cnt += results;
// One active replica is enough for this shard; move to the next shard.
break;
}
} catch (Exception e) {
// if we have a problem, try the next one
if (i == times - 1) {
throw e;
}
}
}
}
// Any mismatch between control, shard-sum, and cloud counts is a failure;
// CloudInspectUtil.compareResults gathers the differing doc ids first.
if (controlDocs != cnt || cloudClientDocs != controlDocs) {
String msg = "document count mismatch. control=" + controlDocs + " sum(shards)=" + cnt + " cloudClient=" + cloudClientDocs;
log.error(msg);
boolean shouldFail = CloudInspectUtil.compareResults(controlClient, cloudClient, addFails, deleteFails);
if (shouldFail) {
fail(msg);
}
}
}
End of aggregated SolrClient usage examples.