Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
From class TestDistributedSearch, method queryRandomUpServer.
protected QueryResponse queryRandomUpServer(ModifiableSolrParams params, List<SolrClient> upClients) throws SolrServerException, IOException {
// query a random "up" server
int which = r.nextInt(upClients.size());
SolrClient client = upClients.get(which);
QueryResponse rsp = client.query(params);
return rsp;
}
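For context, queryRandomUpServer is a thin wrapper around SolrClient.query(SolrParams). A minimal, self-contained sketch of that underlying call against a single core might look like the following; the base URL and core name are placeholders, not values taken from the test.
import java.io.IOException;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class SimpleQueryExample {

    public static void main(String[] args) throws SolrServerException, IOException {
        // Placeholder URL; point this at a running core.
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/mycore").build()) {
            SolrQuery query = new SolrQuery("*:*");
            query.setRows(10);
            QueryResponse rsp = client.query(query);
            // getResults() returns the matching documents as a SolrDocumentList.
            System.out.println("numFound=" + rsp.getResults().getNumFound());
        }
    }
}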
Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
From class TestDistributedSearch, method queryPartialResults.
protected void queryPartialResults(final List<String> upShards, final List<SolrClient> upClients, Object... q) throws Exception {
final ModifiableSolrParams params = new ModifiableSolrParams();
for (int i = 0; i < q.length; i += 2) {
    params.add(q[i].toString(), q[i + 1].toString());
}
// TODO: look into why passing true causes failures
params.set("distrib", "false");
final QueryResponse controlRsp = controlClient.query(params);
// if timeAllowed is specified then even a control response can return a partialResults header
if (params.get(CommonParams.TIME_ALLOWED) == null) {
    validateControlData(controlRsp);
}
params.remove("distrib");
setDistributedParams(params);
QueryResponse rsp = queryRandomUpServer(params, upClients);
comparePartialResponses(rsp, controlRsp, upShards);
if (stress > 0) {
    log.info("starting stress...");
    Thread[] threads = new Thread[nThreads];
    for (int i = 0; i < threads.length; i++) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                for (int j = 0; j < stress; j++) {
                    int which = r.nextInt(upClients.size());
                    SolrClient client = upClients.get(which);
                    try {
                        QueryResponse rsp = client.query(new ModifiableSolrParams(params));
                        if (verifyStress) {
                            comparePartialResponses(rsp, controlRsp, upShards);
                        }
                    } catch (SolrServerException | IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        };
        threads[i].start();
    }
    for (Thread thread : threads) {
        thread.join();
    }
}
}
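Outside the test harness, the partialResults flag that queryPartialResults compares can be read straight off a QueryResponse header. A hedged sketch, assuming an already-constructed SolrClient and the standard "partialResults" response-header key:
import java.io.IOException;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

public class PartialResultsCheck {

    // Returns true if the response header reports that the result set is partial.
    static boolean isPartial(SolrClient client) throws SolrServerException, IOException {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("q", "*:*");
        // A very small time budget makes a partial result likely on a non-trivial index.
        params.set(CommonParams.TIME_ALLOWED, 1);
        QueryResponse rsp = client.query(params);
        NamedList<Object> header = rsp.getHeader();
        Object partial = (header == null) ? null : header.get("partialResults");
        return Boolean.TRUE.equals(partial);
    }
}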
Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
From class PeerSyncTest, method test.
@Test
@ShardsFixed(num = 3)
public void test() throws Exception {
Set<Integer> docsAdded = new LinkedHashSet<>();
handle.clear();
handle.put("timestamp", SKIPVAL);
handle.put("score", SKIPVAL);
handle.put("maxScore", SKIPVAL);
SolrClient client0 = clients.get(0);
SolrClient client1 = clients.get(1);
SolrClient client2 = clients.get(2);
long v = 0;
add(client0, seenLeader, sdoc("id", "1", "_version_", ++v));
// this fails because client1 has no context (i.e. no updates of its own to judge whether applying the updates
// from client0 will bring it into sync with client0)
assertSync(client1, numVersions, false, shardsArr[0]);
// bring client1 back into sync with client0 by adding the doc
add(client1, seenLeader, sdoc("id", "1", "_version_", v));
// both have the same version list, so sync should now return true
assertSync(client1, numVersions, true, shardsArr[0]);
// TODO: test that updates weren't necessary
client0.commit();
client1.commit();
queryAndCompare(params("q", "*:*"), client0, client1);
add(client0, seenLeader, addRandFields(sdoc("id", "2", "_version_", ++v)));
// now client1 has the context to sync
assertSync(client1, numVersions, true, shardsArr[0]);
client0.commit();
client1.commit();
queryAndCompare(params("q", "*:*"), client0, client1);
add(client0, seenLeader, addRandFields(sdoc("id", "3", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "4", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "5", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "6", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "7", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "8", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "9", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "10", "_version_", ++v)));
for (int i = 0; i < 10; i++) docsAdded.add(i + 1);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
int toAdd = (int) (numVersions * .95);
for (int i = 0; i < toAdd; i++) {
    add(client0, seenLeader, sdoc("id", Integer.toString(i + 11), "_version_", v + i + 1));
    docsAdded.add(i + 11);
}
// sync should fail since there's not enough overlap to give us confidence
assertSync(client1, numVersions, false, shardsArr[0]);
// add some of the docs that were missing... just enough to give enough overlap
int toAdd2 = (int) (numVersions * .25);
for (int i = 0; i < toAdd2; i++) {
    add(client1, seenLeader, sdoc("id", Integer.toString(i + 11), "_version_", v + i + 1));
}
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// test delete and deleteByQuery
v = 1000;
SolrInputDocument doc = sdoc("id", "1000", "_version_", ++v);
add(client0, seenLeader, doc);
add(client0, seenLeader, sdoc("id", "1001", "_version_", ++v));
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "id:1001 OR id:1002");
add(client0, seenLeader, sdoc("id", "1002", "_version_", ++v));
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "1000");
// 1002 added
docsAdded.add(1002);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// test that delete by query is returned even if not requested, and that it doesn't delete newer stuff than it should
v = 2000;
SolrClient client = client0;
add(client, seenLeader, sdoc("id", "2000", "_version_", ++v));
add(client, seenLeader, sdoc("id", "2001", "_version_", ++v));
delQ(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "id:2001 OR id:2002");
add(client, seenLeader, sdoc("id", "2002", "_version_", ++v));
del(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "2000");
// 2002 added
docsAdded.add(2002);
v = 2000;
client = client1;
add(client, seenLeader, sdoc("id", "2000", "_version_", ++v));
// pretend we missed the add of 2001. peersync should retrieve it, but should also retrieve any deleteByQuery objects after it
++v;
// add(client, seenLeader, sdoc("id","2001","_version_",++v));
delQ(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "id:2001 OR id:2002");
add(client, seenLeader, sdoc("id", "2002", "_version_", ++v));
del(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "2000");
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
//
// Test that handling reorders work when applying docs retrieved from peer
//
// this should cause us to retrieve the delete (but not the following add)
// the reorder in application shouldn't affect anything
add(client0, seenLeader, sdoc("id", "3000", "_version_", 3001));
add(client1, seenLeader, sdoc("id", "3000", "_version_", 3001));
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "3000"), "3000");
docsAdded.add(3000);
// this should cause us to retrieve an add that was previously deleted
add(client0, seenLeader, sdoc("id", "3001", "_version_", 3003));
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "3001"), "3004");
del(client1, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "3001"), "3004");
// this should cause us to retrieve an older add that was overwritten
add(client0, seenLeader, sdoc("id", "3002", "_version_", 3004));
add(client0, seenLeader, sdoc("id", "3002", "_version_", 3005));
add(client1, seenLeader, sdoc("id", "3002", "_version_", 3005));
// 3001 added
docsAdded.add(3001);
// 3002 added
docsAdded.add(3002);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// now let's check that fingerprinting causes the appropriate failures
v = 4000;
add(client0, seenLeader, sdoc("id", Integer.toString((int) v), "_version_", v));
docsAdded.add(4000);
toAdd = numVersions + 10;
for (int i = 0; i < toAdd; i++) {
    add(client0, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
    add(client1, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
    docsAdded.add((int) v + i + 1);
}
// client0 now has an additional add beyond our window and the fingerprint should cause this to fail
assertSync(client1, numVersions, false, shardsArr[0]);
// if we turn off fingerprinting, it should succeed
System.setProperty("solr.disableFingerprint", "true");
try {
    assertSync(client1, numVersions, true, shardsArr[0]);
} finally {
    System.clearProperty("solr.disableFingerprint");
}
// let's add the missing document and verify that order doesn't matter
add(client1, seenLeader, sdoc("id", Integer.toString((int) v), "_version_", v));
assertSync(client1, numVersions, true, shardsArr[0]);
// let's do some overwrites to ensure that repeated updates and maxDoc don't matter
for (int i = 0; i < 10; i++) {
    add(client0, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
}
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// let's add some in-place updates
// full update
add(client0, seenLeader, sdoc("id", "5000", "val_i_dvo", 0, "title", "mytitle", "_version_", 5000));
docsAdded.add(5000);
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(0, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
ModifiableSolrParams inPlaceParams = new ModifiableSolrParams(seenLeader);
inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5000");
// in-place update
add(client0, inPlaceParams, sdoc("id", "5000", "val_i_dvo", 1, "_version_", 5001));
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(1, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
// interleave the in-place updates with a few deletes to other documents
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5002"), 4001);
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5003"), "id:4002");
docsAdded.remove(4001);
docsAdded.remove(4002);
inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5001");
// in-place update
add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 2, "_version_", 5004));
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(2, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
// a DBQ with value
// current val is 2, so this should not delete anything
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5005"), "val_i_dvo:1");
assertSync(client1, numVersions, true, shardsArr[0]);
// full update
add(client0, seenLeader, sdoc("id", "5000", "val_i_dvo", 0, "title", "mytitle", "_version_", 5000));
docsAdded.add(5000);
assertSync(client1, numVersions, true, shardsArr[0]);
inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5004");
add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 3, "_version_", 5006));
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(3, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
validateDocs(docsAdded, client0, client1);
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5007"), 5000);
docsAdded.remove(5000);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// if the doc with id=6000 is deleted, further in-place updates should fail
// full update
add(client0, seenLeader, sdoc("id", "6000", "val_i_dvo", 6, "title", "mytitle", "_version_", 6000));
// current val_i_dvo is 6, so this DBQ will delete id=6000
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "6004"), "val_i_dvo:6");
assertSync(client1, numVersions, true, shardsArr[0]);
SolrException ex = expectThrows(SolrException.class, () -> {
    inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "6000");
    add(client0, inPlaceParams, sdoc("id", 6000, "val_i_dvo", 6003, "_version_", 5007));
});
assertEquals(ex.toString(), SolrException.ErrorCode.SERVER_ERROR.code, ex.code());
assertThat(ex.getMessage(), containsString("Can't find document with id=6000"));
// Reordered DBQ with Child-nodes (SOLR-10114)
docsAdded.clear();
// Reordered full delete should not delete child-docs
// add with later version
add(client0, seenLeader, sdocWithChildren(7001, "7001", 2));
docsAdded.add(7001);
docsAdded.add(7001001);
docsAdded.add(7001002);
// reordered delete
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "7000"), "id:*");
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// Reordered DBQ should not affect update
// add with later version
add(client0, seenLeader, sdocWithChildren(8000, "8000", 5));
// not found, arrives earlier
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "8002"), "id:8500");
// update with two children
add(client0, seenLeader, sdocWithChildren(8000, "8001", 2));
docsAdded.add(8000);
docsAdded.add(8000001);
docsAdded.add(8000002);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
}
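The add, del, delQ, sdoc, and assertSync calls above are helpers inherited from the test base classes and are not shown here. Roughly speaking, an add with the seenLeader params boils down to an UpdateRequest that carries the update.distrib parameter and an explicit _version_ field; the sketch below illustrates that shape, with the literal parameter values being assumptions rather than values copied from the helpers.
import java.io.IOException;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

public class VersionedAddSketch {

    // Sends one document with an explicit _version_, flagged as if it were forwarded by a leader.
    static void addFromLeader(SolrClient client, String id, long version)
            throws SolrServerException, IOException {
        SolrInputDocument doc = new SolrInputDocument();
        doc.setField("id", id);
        doc.setField("_version_", version);

        UpdateRequest req = new UpdateRequest();
        req.add(doc);
        // Assumed values: "update.distrib" / "FROMLEADER" tell the update chain to accept the
        // supplied version instead of allocating a new one.
        req.setParam("update.distrib", "FROMLEADER");
        req.process(client);
    }
}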
Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
From class CloudSolrClientTest, method stateVersionParamTest.
@Test
public void stateVersionParamTest() throws Exception {
DocCollection coll = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
Replica r = coll.getSlices().iterator().next().getReplicas().iterator().next();
SolrQuery q = new SolrQuery().setQuery("*:*");
HttpSolrClient.RemoteSolrException sse = null;
final String url = r.getStr(ZkStateReader.BASE_URL_PROP) + "/" + COLLECTION;
try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
    log.info("should work query, result {}", solrClient.query(q));
    // no problem
    q.setParam(CloudSolrClient.STATE_VERSION, COLLECTION + ":" + coll.getZNodeVersion());
    log.info("2nd query, result {}", solrClient.query(q));
    // still no error, good
    // with an older state version, the server should flag the stale state in its response
    q.setParam(CloudSolrClient.STATE_VERSION, COLLECTION + ":" + (coll.getZNodeVersion() - 1));
    QueryResponse rsp = solrClient.query(q);
    Map m = (Map) rsp.getResponse().get(CloudSolrClient.STATE_VERSION, rsp.getResponse().size() - 1);
    assertNotNull("Expected extra information from the server with the list of invalid collection states", m);
    assertNotNull(m.get(COLLECTION));
}
//now send the request to another node that does not serve the collection
Set<String> allNodesOfColl = new HashSet<>();
for (Slice slice : coll.getSlices()) {
    for (Replica replica : slice.getReplicas()) {
        allNodesOfColl.add(replica.getStr(ZkStateReader.BASE_URL_PROP));
    }
}
String theNode = null;
Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes();
for (String s : liveNodes) {
    String n = cluster.getSolrClient().getZkStateReader().getBaseUrlForNodeName(s);
    if (!allNodesOfColl.contains(n)) {
        theNode = n;
        break;
    }
}
log.info("the node which does not serve this collection{} ", theNode);
assertNotNull(theNode);
final String solrClientUrl = theNode + "/" + COLLECTION;
try (SolrClient solrClient = getHttpSolrClient(solrClientUrl)) {
    q.setParam(CloudSolrClient.STATE_VERSION, COLLECTION + ":" + (coll.getZNodeVersion() - 1));
    try {
        QueryResponse rsp = solrClient.query(q);
        log.info("error was expected");
    } catch (HttpSolrClient.RemoteSolrException e) {
        sse = e;
    }
    assertNotNull(sse);
    assertEquals("Error code should be 510", SolrException.ErrorCode.INVALID_STATE.code, sse.code());
}
}
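The _stateVer_ parameter (CloudSolrClient.STATE_VERSION) is normally attached and refreshed by CloudSolrClient itself; the test sets it by hand through HttpSolrClient to provoke the stale-state handling. For comparison, a routine CloudSolrClient query, with a placeholder ZooKeeper address and collection name, looks roughly like this:
import java.io.IOException;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class CloudQueryExample {

    public static void main(String[] args) throws SolrServerException, IOException {
        // Placeholder ZooKeeper ensemble and collection name.
        try (CloudSolrClient client = new CloudSolrClient.Builder().withZkHost("localhost:9983").build()) {
            client.setDefaultCollection("gettingstarted");
            QueryResponse rsp = client.query(new SolrQuery("*:*"));
            // The client manages _stateVer_ internally, refreshing its cached cluster state
            // when the server reports that the supplied version is stale.
            System.out.println("numFound=" + rsp.getResults().getNumFound());
        }
    }
}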
Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
From class TestSolrProperties, method testProperties.
@Test
public void testProperties() throws Exception {
UpdateRequest up = new UpdateRequest();
up.setAction(ACTION.COMMIT, true, true);
up.deleteByQuery("*:*");
up.process(getSolrCore0());
up.process(getSolrCore1());
up.clear();
// Add something to each core
SolrInputDocument doc = new SolrInputDocument();
doc.setField("id", "AAA");
doc.setField("core0", "yup stopfra stopfrb stopena stopenb");
// Add to core0
up.add(doc);
up.process(getSolrCore0());
SolrTestCaseJ4.ignoreException("unknown field");
// You can't add it to core1
try {
    up.process(getSolrCore1());
    fail("Can't add core0 field to core1!");
} catch (Exception ex) {
}
// Add to core1
doc.setField("id", "BBB");
doc.setField("core1", "yup stopfra stopfrb stopena stopenb");
doc.removeField("core0");
up.add(doc);
up.process(getSolrCore1());
// You can't add it to core0
try {
    SolrTestCaseJ4.ignoreException("core0");
    up.process(getSolrCore0());
    fail("Can't add core1 field to core0!");
} catch (Exception ex) {
}
SolrTestCaseJ4.resetExceptionIgnores();
// now make sure AAA is in core0 and BBB is in core1
SolrQuery q = new SolrQuery();
QueryRequest r = new QueryRequest(q);
q.setQuery("id:AAA");
assertEquals(1, r.process(getSolrCore0()).getResults().size());
assertEquals(0, r.process(getSolrCore1()).getResults().size());
// Now test Changing the default core
assertEquals(1, getSolrCore0().query(new SolrQuery("id:AAA")).getResults().size());
assertEquals(0, getSolrCore0().query(new SolrQuery("id:BBB")).getResults().size());
assertEquals(0, getSolrCore1().query(new SolrQuery("id:AAA")).getResults().size());
assertEquals(1, getSolrCore1().query(new SolrQuery("id:BBB")).getResults().size());
// Now test reloading; the core should have a more recent start time after the reload
String name = "core0";
SolrClient coreadmin = getSolrAdmin();
CoreAdminResponse mcr = CoreAdminRequest.getStatus(name, coreadmin);
long before = mcr.getStartTime(name).getTime();
CoreAdminRequest.reloadCore(name, coreadmin);
mcr = CoreAdminRequest.getStatus(name, coreadmin);
long after = mcr.getStartTime(name).getTime();
assertTrue("should have more recent time: " + after + "," + before, after > before);
}
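getSolrCore0(), getSolrCore1(), and getSolrAdmin() are helpers from the test's base class; in plain SolrJ terms they amount to HttpSolrClient instances pointed at a core URL or at the server root for CoreAdmin calls. A rough, self-contained equivalent with placeholder URLs:
import java.io.IOException;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.response.CoreAdminResponse;

public class CoreClientsSketch {

    public static void main(String[] args) throws SolrServerException, IOException {
        String baseUrl = "http://localhost:8983/solr"; // placeholder
        // Per-core client, analogous to what getSolrCore0() returns.
        try (SolrClient core0 = new HttpSolrClient.Builder(baseUrl + "/core0").build();
             // Root-level client for CoreAdmin operations, analogous to getSolrAdmin().
             SolrClient admin = new HttpSolrClient.Builder(baseUrl).build()) {
            // A quick query through the per-core client.
            System.out.println("core0 numFound=" + core0.query(new SolrQuery("*:*")).getResults().getNumFound());
            // CoreAdmin status and reload, mirroring the end of testProperties.
            CoreAdminResponse status = CoreAdminRequest.getStatus("core0", admin);
            System.out.println("core0 start time: " + status.getStartTime("core0"));
            CoreAdminRequest.reloadCore("core0", admin);
        }
    }
}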