Use of org.apache.solr.client.solrj.SolrQuery in project lucene-solr by apache.
From class BasicDistributedZk2Test, method brindDownShardIndexSomeDocsAndRecover.
private void brindDownShardIndexSomeDocsAndRecover() throws Exception {
  SolrQuery query = new SolrQuery("*:*");
  query.set("distrib", false);
  commit();
  long deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient.query(query).getResults().getNumFound();
  query("q", "*:*", "sort", "n_tl1 desc");
  int oldLiveNodes = cloudClient.getZkStateReader().getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true).size();
  assertEquals(5, oldLiveNodes);
  // kill a shard
  CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD1, 0);
  // ensure the shard is dead
  try {
    index_specific(deadShard.client.solrClient, id, 999, i1, 107, t1, "specific doc!");
    fail("This server should be down and this update should have failed");
  } catch (SolrServerException e) {
    // expected
  }
  commit();
  query("q", "*:*", "sort", "n_tl1 desc");
  // long cloudClientDocs = cloudClient.query(new
  // SolrQuery("*:*")).getResults().getNumFound();
  // System.out.println("clouddocs:" + cloudClientDocs);
  // try to index to the replica of shard1 that is still alive
  long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
  cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1, 60000);
  index_specific(shardToJetty.get(SHARD1).get(1).client.solrClient, id, 1000, i1, 108, t1, "specific doc!");
  commit();
  checkShardConsistency(true, false);
  query("q", "*:*", "sort", "n_tl1 desc");
  cloudClient.setDefaultCollection(DEFAULT_COLLECTION);
  long numFound2 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
  assertEquals(numFound1 + 1, numFound2);
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", 1001);
  controlClient.add(doc);
  // try adding a doc with CloudSolrServer
  UpdateRequest ureq = new UpdateRequest();
  ureq.add(doc);
  try {
    ureq.process(cloudClient);
  } catch (SolrServerException e) {
    // try again
    Thread.sleep(3500);
    ureq.process(cloudClient);
  }
  commit();
  query("q", "*:*", "sort", "n_tl1 desc");
  long numFound3 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
  // let's just check that the one doc added since the last commit made it in...
  assertEquals(numFound2 + 1, numFound3);
  // test debugging
  testDebugQueries();
  if (VERBOSE) {
    System.err.println(controlClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    for (SolrClient client : clients) {
      try {
        SolrQuery q = new SolrQuery("*:*");
        q.set("distrib", false);
        System.err.println(client.query(q).getResults().getNumFound());
      } catch (Exception e) {
        // ignore per-client failures in this verbose dump
      }
    }
  }
  // TODO: This test currently fails because debug info is obtained only
  // on shards with matches.
  // query("q","matchesnothing","fl","*,score", "debugQuery", "true");
  // this should trigger a recovery phase on deadShard
  ChaosMonkey.start(deadShard.jetty);
  // make sure we have published that we are recovering
  Thread.sleep(1500);
  waitForRecoveriesToFinish(false);
  deadShardCount = shardToJetty.get(SHARD1).get(0).client.solrClient.query(query).getResults().getNumFound();
  // if we properly recovered, we should now have the couple of missing docs that
  // came in while the shard was down
  checkShardConsistency(true, false);
  // recover over 100 docs so we do more than just peer sync (replicate recovery)
  chaosMonkey.stopJetty(deadShard);
  for (int i = 0; i < 226; i++) {
    doc = new SolrInputDocument();
    doc.addField("id", 2000 + i);
    controlClient.add(doc);
    ureq = new UpdateRequest();
    ureq.add(doc);
    // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
    ureq.process(cloudClient);
  }
  commit();
  Thread.sleep(1500);
  ChaosMonkey.start(deadShard.jetty);
  // make sure we have published that we are recovering
  Thread.sleep(1500);
  waitForThingsToLevelOut(60);
  Thread.sleep(500);
  waitForRecoveriesToFinish(false);
  checkShardConsistency(true, false);
  // try a backup command
  try (final HttpSolrClient client = getHttpSolrClient((String) shardToJetty.get(SHARD2).get(0).info.get("base_url"))) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("qt", ReplicationHandler.PATH);
    params.set("command", "backup");
    Path location = createTempDir();
    location = FilterPath.unwrap(location).toRealPath();
    params.set("location", location.toString());
    QueryRequest request = new QueryRequest(params);
    client.request(request, DEFAULT_TEST_COLLECTION_NAME);
    checkForBackupSuccess(client, location);
    // no explicit close needed; try-with-resources closes the client
  }
}
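The pattern worth noting in this test is the distrib=false toggle, which restricts a query to the single core behind the client instead of fanning out across the collection. A minimal, self-contained sketch of the same idea, where the class name and the core URL are illustrative assumptions:

import java.io.IOException;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public final class LocalCountSketch {
  // coreUrl is an assumption, e.g. "http://localhost:8983/solr/collection1_shard1_replica1"
  static long localDocCount(String coreUrl) throws SolrServerException, IOException {
    try (HttpSolrClient core = new HttpSolrClient.Builder(coreUrl).build()) {
      SolrQuery q = new SolrQuery("*:*");
      q.set("distrib", false); // count only the docs held by this core
      return core.query(q).getResults().getNumFound();
    }
  }
}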
Use of org.apache.solr.client.solrj.SolrQuery in project lucene-solr by apache.
From class CdcrBootstrapTest, method testConvertClusterToCdcrAndBootstrap.
/**
 * Starts a source cluster with no CDCR configuration and indexes enough documents that
 * at least one old tlog is closed and thrown away, so the source cluster does not have
 * all updates available in tlogs alone.
 * <p>
 * Then we start a target cluster with a CDCR configuration, change the source cluster configuration
 * to use CDCR (i.e. CdcrUpdateLog, CdcrRequestHandler and CdcrUpdateProcessor), and restart it.
 * <p>
 * We test that all updates eventually make it to the target cluster and that the COLLECTIONCHECKPOINT
 * call returns the same version as the last update indexed on the source.
 */
@Test
public void testConvertClusterToCdcrAndBootstrap() throws Exception {
  // start the target first so that we know its zkhost
  MiniSolrCloudCluster target = new MiniSolrCloudCluster(1, createTempDir("cdcr-target"), buildJettyConfig("/solr"));
  try {
    target.waitForAllNodes(30);
    System.out.println("Target zkHost = " + target.getZkServer().getZkAddress());
    System.setProperty("cdcr.target.zkHost", target.getZkServer().getZkAddress());
    // start a cluster with no cdcr
    MiniSolrCloudCluster source = new MiniSolrCloudCluster(1, createTempDir("cdcr-source"), buildJettyConfig("/solr"));
    try {
      source.waitForAllNodes(30);
      source.uploadConfigSet(configset("cdcr-source-disabled"), "cdcr-source");
      // create a collection with the cdcr-source-disabled configset
      CollectionAdminRequest.createCollection("cdcr-source", "cdcr-source", 1, 1)
          .withProperty("solr.directoryFactory", "solr.StandardDirectoryFactory")
          .process(source.getSolrClient());
      CloudSolrClient sourceSolrClient = source.getSolrClient();
      sourceSolrClient.setDefaultCollection("cdcr-source");
      int docs = (TEST_NIGHTLY ? 100 : 10); // number of 100-doc batches to index
      int numDocs = 0;
      for (int k = 0; k < docs; k++) {
        UpdateRequest req = new UpdateRequest();
        for (; numDocs < (k + 1) * 100; numDocs++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "source_" + numDocs);
          doc.addField("xyz", numDocs);
          req.add(doc);
        }
        req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
        System.out.println("Adding 100 docs with commit=true, numDocs=" + numDocs);
        req.process(sourceSolrClient);
      }
      QueryResponse response = sourceSolrClient.query(new SolrQuery("*:*"));
      assertEquals("Document mismatch on source", numDocs, response.getResults().getNumFound());
      // let's find and keep the maximum version assigned by the source cluster across all our updates
      long maxVersion = Long.MIN_VALUE;
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set(CommonParams.QT, "/get");
      params.set("getVersions", numDocs);
      response = sourceSolrClient.query(params);
      List<Long> versions = (List<Long>) response.getResponse().get("versions");
      for (Long version : versions) {
        maxVersion = Math.max(maxVersion, version);
      }
      // upload the cdcr-enabled config and restart the source cluster
      source.uploadConfigSet(configset("cdcr-source"), "cdcr-source");
      JettySolrRunner runner = source.stopJettySolrRunner(0);
      source.startJettySolrRunner(runner);
      assertTrue(runner.isRunning());
      AbstractDistribZkTestBase.waitForRecoveriesToFinish("cdcr-source", source.getSolrClient().getZkStateReader(), true, true, 330);
      response = sourceSolrClient.query(new SolrQuery("*:*"));
      assertEquals("Document mismatch on source after restart", numDocs, response.getResults().getNumFound());
      // set up the target cluster
      target.uploadConfigSet(configset("cdcr-target"), "cdcr-target");
      CollectionAdminRequest.createCollection("cdcr-target", "cdcr-target", 1, 1).process(target.getSolrClient());
      CloudSolrClient targetSolrClient = target.getSolrClient();
      targetSolrClient.setDefaultCollection("cdcr-target");
      Thread.sleep(1000);
      cdcrStart(targetSolrClient);
      cdcrStart(sourceSolrClient);
      response = getCdcrQueue(sourceSolrClient);
      System.out.println("Cdcr queue response: " + response.getResponse());
      long foundDocs = waitForTargetToSync(numDocs, targetSolrClient);
      assertEquals("Document mismatch on target after sync", numDocs, foundDocs);
      params = new ModifiableSolrParams();
      params.set(CommonParams.ACTION, CdcrParams.CdcrAction.COLLECTIONCHECKPOINT.toString());
      params.set(CommonParams.QT, "/cdcr");
      response = targetSolrClient.query(params);
      Long checkpoint = (Long) response.getResponse().get(CdcrParams.CHECKPOINT);
      assertNotNull(checkpoint);
      assertEquals("COLLECTIONCHECKPOINT from target cluster should have returned the maximum "
          + "version across all updates made to source", maxVersion, checkpoint.longValue());
    } finally {
      source.shutdown();
    }
  } finally {
    target.shutdown();
  }
}
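The maxVersion bookkeeping above relies on the real-time /get handler's getVersions parameter, which returns the versions of recent updates from the update log. A minimal sketch of that call in isolation; the class and method names are illustrative, and capping the request at 500 versions is an assumption:

import java.io.IOException;
import java.util.List;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;

public final class MaxVersionSketch {
  static long maxAssignedVersion(SolrClient client) throws SolrServerException, IOException {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(CommonParams.QT, "/get"); // real-time get handler
    params.set("getVersions", 500);      // return up to 500 recent update versions
    QueryResponse rsp = client.query(params);
    @SuppressWarnings("unchecked")
    List<Long> versions = (List<Long>) rsp.getResponse().get("versions");
    long max = Long.MIN_VALUE;
    for (Long v : versions) {
      max = Math.max(max, v);
    }
    return max;
  }
}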
Use of org.apache.solr.client.solrj.SolrQuery in project lucene-solr by apache.
From class CdcrBootstrapTest, method waitForTargetToSync.
private long waitForTargetToSync(int numDocs, CloudSolrClient targetSolrClient) throws SolrServerException, IOException, InterruptedException {
  long start = System.nanoTime();
  QueryResponse response = null;
  while (System.nanoTime() - start <= TimeUnit.NANOSECONDS.convert(120, TimeUnit.SECONDS)) {
    try {
      targetSolrClient.commit();
      response = targetSolrClient.query(new SolrQuery("*:*"));
      if (response.getResults().getNumFound() == numDocs) {
        break;
      }
    } catch (Exception e) {
      log.warn("Exception trying to commit on target. This is expected and safe to ignore.", e);
    }
    Thread.sleep(1000);
  }
  return response != null ? response.getResults().getNumFound() : 0;
}
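Note the design choice here: the commit-then-query pair sits inside the try/catch because the target collection may not yet be able to service requests while its replicas bootstrap, so failures are logged and retried rather than propagated. The method polls once per second for up to two minutes and returns whatever count the last successful query observed.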
Use of org.apache.solr.client.solrj.SolrQuery in project lucene-solr by apache.
From class BasicDistributedZkTest, method testANewCollectionInOneInstanceWithManualShardAssignement.
private void testANewCollectionInOneInstanceWithManualShardAssignement() throws Exception {
  log.info("### STARTING testANewCollectionInOneInstanceWithManualShardAssignement");
  System.clearProperty("numShards");
  List<SolrClient> collectionClients = new ArrayList<>();
  SolrClient client = clients.get(0);
  final String baseUrl = ((HttpSolrClient) client).getBaseURL()
      .substring(0, ((HttpSolrClient) client).getBaseURL().length() - DEFAULT_COLLECTION.length() - 1);
  createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 1, "slice1");
  createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 2, "slice2");
  createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 3, "slice2");
  createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 4, "slice1");
  while (pending != null && pending.size() > 0) {
    Future<Object> future = completionService.take();
    pending.remove(future);
  }
  SolrClient client1 = collectionClients.get(0);
  SolrClient client2 = collectionClients.get(1);
  SolrClient client3 = collectionClients.get(2);
  SolrClient client4 = collectionClients.get(3);
  // no one should be recovering
  waitForRecoveriesToFinish(oneInstanceCollection2, getCommonCloudSolrClient().getZkStateReader(), false, true);
  assertAllActive(oneInstanceCollection2, getCommonCloudSolrClient().getZkStateReader());
  //printLayout();
  // TODO: enable when we don't falsely get slice1...
  // solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice1", 30000);
  // solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice2", 30000);
  client2.add(getDoc(id, "1"));
  client3.add(getDoc(id, "2"));
  client4.add(getDoc(id, "3"));
  client1.commit();
  SolrQuery query = new SolrQuery("*:*");
  query.set("distrib", false);
  long oneDocs = client1.query(query).getResults().getNumFound();
  long twoDocs = client2.query(query).getResults().getNumFound();
  long threeDocs = client3.query(query).getResults().getNumFound();
  long fourDocs = client4.query(query).getResults().getNumFound();
  query.set("collection", oneInstanceCollection2);
  query.set("distrib", true);
  long allDocs = getCommonCloudSolrClient().query(query).getResults().getNumFound();
  // System.out.println("1:" + oneDocs);
  // System.out.println("2:" + twoDocs);
  // System.out.println("3:" + threeDocs);
  // System.out.println("4:" + fourDocs);
  // System.out.println("All Docs:" + allDocs);
  // assertEquals(oneDocs, threeDocs);
  // assertEquals(twoDocs, fourDocs);
  // assertNotSame(oneDocs, twoDocs);
  assertEquals(3, allDocs);
  // we added a role of none on these creates - check for it
  ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
  zkStateReader.forceUpdateCollection(oneInstanceCollection2);
  Map<String, Slice> slices = zkStateReader.getClusterState().getSlicesMap(oneInstanceCollection2);
  assertNotNull(slices);
  String roles = slices.get("slice1").getReplicasMap().values().iterator().next().getStr(ZkStateReader.ROLES_PROP);
  assertEquals("none", roles);
  ZkCoreNodeProps props = new ZkCoreNodeProps(getCommonCloudSolrClient().getZkStateReader().getClusterState().getLeader(oneInstanceCollection2, "slice1"));
  // now test that unloading a core gets us a new leader
  try (HttpSolrClient unloadClient = getHttpSolrClient(baseUrl)) {
    unloadClient.setConnectionTimeout(15000);
    unloadClient.setSoTimeout(60000);
    Unload unloadCmd = new Unload(true);
    unloadCmd.setCoreName(props.getCoreName());
    String leader = props.getCoreUrl();
    unloadClient.request(unloadCmd);
    int tries = 50;
    while (leader.equals(zkStateReader.getLeaderUrl(oneInstanceCollection2, "slice1", 10000))) {
      Thread.sleep(100);
      if (tries-- == 0) {
        fail("Leader never changed");
      }
    }
  }
  IOUtils.close(collectionClients);
}
Use of org.apache.solr.client.solrj.SolrQuery in project lucene-solr by apache.
From class TestSolrEntityProcessorUnit, method testCursorQuery.
public void testCursorQuery() {
  SolrEntityProcessor processor = new NoNextMockProcessor();
  HashMap<String, String> entityAttrs = new HashMap<String, String>() {
    {
      put(SolrEntityProcessor.SOLR_SERVER, "http://route:66/no");
      put(CursorMarkParams.CURSOR_MARK_PARAM, "true");
    }
  };
  processor.init(getContext(null, null, null, null, Collections.emptyList(), entityAttrs));
  try {
    processor.buildIterator();
    SolrQuery query = new SolrQuery();
    ((SolrDocumentListIterator) processor.rowIterator).passNextPage(query);
    assertNull(query.get(CommonParams.START));
    assertEquals(CursorMarkParams.CURSOR_MARK_START, query.get(CursorMarkParams.CURSOR_MARK_PARAM));
    assertNull(query.get(CommonParams.TIME_ALLOWED));
  } finally {
    processor.destroy();
  }
}
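The test verifies that when cursor paging is enabled, the processor sends cursorMark=* (CURSOR_MARK_START) rather than a start offset, and drops timeAllowed, since both are incompatible with cursors. For context, a minimal sketch of the cursor-paging loop a SolrJ client drives; the class name, the client parameter, and the uniqueKey field "id" are assumptions:

import java.io.IOException;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.CursorMarkParams;

public final class CursorPagingSketch {
  static void pageThroughAll(SolrClient client) throws SolrServerException, IOException {
    SolrQuery q = new SolrQuery("*:*");
    q.setRows(100);
    q.setSort(SolrQuery.SortClause.asc("id")); // cursors require a sort on the uniqueKey
    String cursor = CursorMarkParams.CURSOR_MARK_START; // "*"
    while (true) {
      q.set(CursorMarkParams.CURSOR_MARK_PARAM, cursor);
      QueryResponse rsp = client.query(q);
      // ... consume rsp.getResults() here ...
      String next = rsp.getNextCursorMark();
      if (cursor.equals(next)) {
        break; // the same mark returned twice means we are done
      }
      cursor = next;
    }
  }
}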