Example use of org.apache.solr.client.solrj.SolrServerException in project lucene-solr (apache):
class BasicDistributedZk2Test, method brindDownShardIndexSomeDocsAndRecover.
/**
 * End-to-end recovery test: kills a shard replica, indexes while it is down,
 * restarts it and verifies that recovery (first peer-sync, then a full
 * replication recovery over 100+ docs) brings it back in sync with the rest
 * of the collection. Finally exercises the replication handler's "backup"
 * command against a live replica.
 *
 * NOTE(review): the method name contains a typo ("brind" -> "bring"); kept
 * as-is because the name is part of the class's interface.
 */
private void brindDownShardIndexSomeDocsAndRecover() throws Exception {
  SolrQuery query = new SolrQuery("*:*");
  // non-distributed query: counts docs on a single core only
  query.set("distrib", false);
  commit();
  // NOTE(review): deadShardCount is assigned here and again after recovery but
  // never asserted; the queries still act as liveness checks on the cores.
  long deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient.query(query).getResults().getNumFound();
  query("q", "*:*", "sort", "n_tl1 desc");
  int oldLiveNodes = cloudClient.getZkStateReader().getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true).size();
  assertEquals(5, oldLiveNodes);
  // kill a shard
  CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD1, 0);
  // ensure shard is dead: a direct update against it must fail
  try {
    index_specific(deadShard.client.solrClient, id, 999, i1, 107, t1, "specific doc!");
    fail("This server should be down and this update should have failed");
  } catch (SolrServerException e) {
    // expected..
  }
  commit();
  query("q", "*:*", "sort", "n_tl1 desc");
  // long cloudClientDocs = cloudClient.query(new
  // SolrQuery("*:*")).getResults().getNumFound();
  // System.out.println("clouddocs:" + cloudClientDocs);
  // try to index to a living shard at shard2
  long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
  // wait for SHARD1 to elect a new leader before indexing to its replica
  cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1, 60000);
  index_specific(shardToJetty.get(SHARD1).get(1).client.solrClient, id, 1000, i1, 108, t1, "specific doc!");
  commit();
  checkShardConsistency(true, false);
  query("q", "*:*", "sort", "n_tl1 desc");
  cloudClient.setDefaultCollection(DEFAULT_COLLECTION);
  long numFound2 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
  assertEquals(numFound1 + 1, numFound2);
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", 1001);
  controlClient.add(doc);
  // try adding a doc with CloudSolrServer
  UpdateRequest ureq = new UpdateRequest();
  ureq.add(doc);
  try {
    ureq.process(cloudClient);
  } catch (SolrServerException e) {
    // try again after a pause; cluster state may still be settling
    Thread.sleep(3500);
    ureq.process(cloudClient);
  }
  commit();
  query("q", "*:*", "sort", "n_tl1 desc");
  long numFound3 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
  // lets just check that the one doc since last commit made it in...
  assertEquals(numFound2 + 1, numFound3);
  // test debugging
  testDebugQueries();
  if (VERBOSE) {
    System.err.println(controlClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    for (SolrClient client : clients) {
      try {
        SolrQuery q = new SolrQuery("*:*");
        q.set("distrib", false);
        System.err.println(client.query(q).getResults().getNumFound());
      } catch (Exception e) {
        // best-effort diagnostics only; a dead client is fine here
      }
    }
  }
  // TODO: This test currently fails because debug info is obtained only
  // on shards with matches.
  // query("q","matchesnothing","fl","*,score", "debugQuery", "true");
  // this should trigger a recovery phase on deadShard
  ChaosMonkey.start(deadShard.jetty);
  // make sure we have published we are recovering
  Thread.sleep(1500);
  waitForRecoveriesToFinish(false);
  deadShardCount = shardToJetty.get(SHARD1).get(0).client.solrClient.query(query).getResults().getNumFound();
  // if we properly recovered, we should now have the couple missing docs that
  // came in while shard was down
  checkShardConsistency(true, false);
  // recover over 100 docs so we do more than just peer sync (replicate recovery)
  chaosMonkey.stopJetty(deadShard);
  for (int i = 0; i < 226; i++) {
    doc = new SolrInputDocument();
    doc.addField("id", 2000 + i);
    controlClient.add(doc);
    ureq = new UpdateRequest();
    ureq.add(doc);
    // ureq.setParam("update.chain", DISTRIB_UPDATE_CHAIN);
    ureq.process(cloudClient);
  }
  commit();
  Thread.sleep(1500);
  ChaosMonkey.start(deadShard.jetty);
  // make sure we have published we are recovering
  Thread.sleep(1500);
  waitForThingsToLevelOut(60);
  Thread.sleep(500);
  waitForRecoveriesToFinish(false);
  checkShardConsistency(true, false);
  // try a backup command
  try (final HttpSolrClient client = getHttpSolrClient((String) shardToJetty.get(SHARD2).get(0).info.get("base_url"))) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("qt", ReplicationHandler.PATH);
    params.set("command", "backup");
    Path location = createTempDir();
    location = FilterPath.unwrap(location).toRealPath();
    params.set("location", location.toString());
    QueryRequest request = new QueryRequest(params);
    client.request(request, DEFAULT_TEST_COLLECTION_NAME);
    checkForBackupSuccess(client, location);
    // fix: removed redundant explicit client.close(); the try-with-resources
    // block already closes the client exactly once on exit.
  }
}
Example use of org.apache.solr.client.solrj.SolrServerException in project lucene-solr (apache):
class CdcrBootstrapTest, method waitForTargetToSync.
/**
 * Polls the target cluster for up to two minutes, committing before each
 * check, until it reports exactly {@code numDocs} documents.
 *
 * @param numDocs expected document count on the target
 * @param targetSolrClient client pointed at the target cluster
 * @return the last observed document count, or 0 if no query ever succeeded
 */
private long waitForTargetToSync(int numDocs, CloudSolrClient targetSolrClient) throws SolrServerException, IOException, InterruptedException {
  QueryResponse lastSeen = null;
  final long begin = System.nanoTime();
  final long limit = TimeUnit.NANOSECONDS.convert(120, TimeUnit.SECONDS);
  while (System.nanoTime() - begin <= limit) {
    try {
      targetSolrClient.commit();
      lastSeen = targetSolrClient.query(new SolrQuery("*:*"));
      if (lastSeen.getResults().getNumFound() == numDocs) {
        break;
      }
    } catch (Exception e) {
      // commits/queries may fail while the target is still bootstrapping
      log.warn("Exception trying to commit on target. This is expected and safe to ignore.", e);
    }
    Thread.sleep(1000);
  }
  return lastSeen == null ? 0 : lastSeen.getResults().getNumFound();
}
Example use of org.apache.solr.client.solrj.SolrServerException in project lucene-solr (apache):
class BasicDistributedZkTest, method createCores.
/**
 * Schedules {@code cnt} asynchronous core-creation requests on the given
 * executor. Each core is named {@code collection + index} and is created with
 * its own temporary data directory; checked failures inside a task are
 * rethrown as RuntimeException.
 */
protected void createCores(final HttpSolrClient client, ThreadPoolExecutor executor, final String collection, final int numShards, int cnt) {
  for (int idx = 0; idx < cnt; idx++) {
    final int coreIndex = idx; // effectively-final copy for the lambda
    executor.execute(() -> {
      Create cmd = new Create();
      cmd.setCoreName(collection + coreIndex);
      cmd.setCollection(collection);
      cmd.setNumShards(numShards);
      try {
        String dataDir = createTempDir(collection).toFile().getAbsolutePath();
        cmd.setDataDir(getDataDir(dataDir));
        client.request(cmd);
      } catch (SolrServerException | IOException e) {
        throw new RuntimeException(e);
      }
    });
  }
}
Example use of org.apache.solr.client.solrj.SolrServerException in project lucene-solr (apache):
class SolrSchema, method getFieldInfo.
/**
 * Fetches per-field Luke metadata for a collection by opening a short-lived
 * CloudSolrClient against the ZooKeeper host configured in {@code properties}.
 * Checked failures are rethrown as RuntimeException.
 */
private Map<String, LukeResponse.FieldInfo> getFieldInfo(String collection) {
  final String zkHost = this.properties.getProperty("zk");
  try (CloudSolrClient client = new CloudSolrClient.Builder().withZkHost(zkHost).build()) {
    client.connect();
    final LukeRequest request = new LukeRequest();
    request.setNumTerms(0); // field metadata only; skip top-terms computation
    return request.process(client, collection).getFieldInfo();
  } catch (SolrServerException | IOException e) {
    throw new RuntimeException(e);
  }
}
Example use of org.apache.solr.client.solrj.SolrServerException in project lucene-solr (apache):
class SolrCLI, method getJson.
/**
 * Utility function for sending HTTP GET request to Solr and then doing some
 * validation of the response.
 *
 * @param httpClient client used to execute the request
 * @param getUrl URL to GET; {@code wt=json} is appended automatically
 * @return the parsed JSON response as a map
 * @throws SolrServerException if the response status is missing/-1/non-zero,
 *         or a "failure" section is present
 * @throws SolrException with code 401/403 when the server demands authentication
 */
@SuppressWarnings({ "unchecked" })
public static Map<String, Object> getJson(HttpClient httpClient, String getUrl) throws Exception {
  try {
    // ensure we're requesting JSON back from Solr
    HttpGet httpGet = new HttpGet(new URIBuilder(getUrl).setParameter(CommonParams.WT, CommonParams.JSON).build());
    // make the request and get back a parsed JSON object
    Map<String, Object> json = httpClient.execute(httpGet, new SolrResponseHandler(), HttpClientUtil.createNewHttpClientRequestContext());
    // check the response JSON from Solr to see if it is an error
    Long statusCode = asLong("/responseHeader/status", json);
    // fix: a missing status header previously NPE'd on unboxing; treat it the
    // same as an indeterminate (-1) status
    if (statusCode == null || statusCode == -1) {
      throw new SolrServerException("Unable to determine outcome of GET request to: " + getUrl + "! Response: " + json);
    } else if (statusCode != 0) {
      String errMsg = asString("/error/msg", json);
      if (errMsg == null)
        errMsg = String.valueOf(json);
      throw new SolrServerException(errMsg);
    } else {
      // make sure no "failure" object in there either
      Object failureObj = json.get("failure");
      if (failureObj != null) {
        if (failureObj instanceof Map) {
          Object err = ((Map<?, ?>) failureObj).get("");
          if (err != null)
            throw new SolrServerException(err.toString());
        }
        throw new SolrServerException(failureObj.toString());
      }
    }
    return json;
  } catch (ClientProtocolException cpe) {
    // Perhaps SolrClient should have thrown an exception itself??
    // fix: guard against a null message, and use contains() for 403 as well —
    // the original's exact-match contentEquals("HTTP ERROR 403") could never
    // fire when the message carried any surrounding text
    String msg = cpe.getMessage();
    if (msg != null && (msg.contains("HTTP ERROR 401") || msg.contains("HTTP ERROR 403"))) {
      int code = msg.contains("HTTP ERROR 401") ? 401 : 403;
      throw new SolrException(SolrException.ErrorCode.getErrorCode(code), "Solr requires authentication for " + getUrl + ". Please supply valid credentials. HTTP code=" + code);
    } else {
      throw cpe;
    }
  }
}
Aggregations