Use of org.apache.solr.util.TimeOut in project lucene-solr by apache.
Class HttpSolrCall, method autoCreateSystemColl:
protected void autoCreateSystemColl(String corename) throws Exception {
  if (core == null && SYSTEM_COLL.equals(corename) && "POST".equals(req.getMethod())
      && !cores.getZkController().getClusterState().hasCollection(SYSTEM_COLL)) {
    log.info("Going to auto-create .system collection");
    SolrQueryResponse rsp = new SolrQueryResponse();
    String repFactor = String.valueOf(Math.min(3, cores.getZkController().getClusterState().getLiveNodes().size()));
    cores.getCollectionsHandler().handleRequestBody(new LocalSolrQueryRequest(null,
        new ModifiableSolrParams()
            .add(ACTION, CREATE.toString())
            .add(NAME, SYSTEM_COLL)
            .add(REPLICATION_FACTOR, repFactor)), rsp);
    if (rsp.getValues().get("success") == null) {
      throw new SolrException(ErrorCode.SERVER_ERROR,
          "Could not auto-create .system collection: " + Utils.toJSONString(rsp.getValues()));
    }
    TimeOut timeOut = new TimeOut(3, TimeUnit.SECONDS);
    for (; ; ) {
      if (cores.getZkController().getClusterState().getCollectionOrNull(SYSTEM_COLL) != null) {
        break;
      } else {
        if (timeOut.hasTimedOut()) {
          throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find .system collection even after 3 seconds");
        }
        Thread.sleep(50);
      }
    }
    action = RETRY;
  }
}
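This is the recurring TimeOut idiom in the project: create a deadline, poll a condition, sleep briefly, and fail once hasTimedOut() returns true. Below is a minimal sketch of that pattern distilled into a helper; the helper name, the BooleanSupplier parameter, and the error message are illustrative additions, not part of HttpSolrCall.

  // Hypothetical helper distilling the poll-until-ready idiom above.
  // Assumes the imports already present in HttpSolrCall: org.apache.solr.util.TimeOut,
  // java.util.concurrent.TimeUnit, org.apache.solr.common.SolrException and its ErrorCode.
  private static void waitUntil(java.util.function.BooleanSupplier condition,
                                long maxWait, TimeUnit unit, String what) throws InterruptedException {
    final TimeOut timeOut = new TimeOut(maxWait, unit);
    while (!condition.getAsBoolean()) {
      if (timeOut.hasTimedOut()) {
        throw new SolrException(ErrorCode.SERVER_ERROR,
            "Timed out after " + maxWait + " " + unit + " waiting for " + what);
      }
      Thread.sleep(50); // same short back-off autoCreateSystemColl uses between polls
    }
  }

With such a helper, the for-loop above collapses to a single call, e.g. waitUntil(() -> cores.getZkController().getClusterState().getCollectionOrNull(SYSTEM_COLL) != null, 3, TimeUnit.SECONDS, ".system collection").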
Use of org.apache.solr.util.TimeOut in project lucene-solr by apache.
Class TransformerProvider, method getTemplates:
/** Return a Templates object for the given filename */
private Templates getTemplates(ResourceLoader loader, String filename, int cacheLifetimeSeconds) throws IOException {
  Templates result = null;
  lastFilename = null;
  try {
    if (log.isDebugEnabled()) {
      log.debug("compiling XSLT templates:" + filename);
    }
    final String fn = "xslt/" + filename;
    final TransformerFactory tFactory = TransformerFactory.newInstance();
    tFactory.setURIResolver(new SystemIdResolver(loader).asURIResolver());
    tFactory.setErrorListener(xmllog);
    final StreamSource src = new StreamSource(loader.openResource(fn),
        SystemIdResolver.createSystemIdFromResourceName(fn));
    try {
      result = tFactory.newTemplates(src);
    } finally {
      // some XML parsers are broken and don't close the byte stream (but they should according to spec)
      IOUtils.closeQuietly(src.getInputStream());
    }
  } catch (Exception e) {
    log.error(getClass().getName(), "newTemplates", e);
    throw new IOException("Unable to initialize Templates '" + filename + "'", e);
  }
  lastFilename = filename;
  lastTemplates = result;
  cacheExpiresTimeout = new TimeOut(cacheLifetimeSeconds, TimeUnit.SECONDS);
  return result;
}
Use of org.apache.solr.util.TimeOut in project lucene-solr by apache.
Class AutoCommitTest, method waitForNewSearcher:
boolean waitForNewSearcher(int timeoutMs) {
  TimeOut timeout = new TimeOut(timeoutMs, TimeUnit.MILLISECONDS);
  while (!timeout.hasTimedOut()) {
    if (triggered) {
      // check if the new searcher has been registered yet
      RefCounted<SolrIndexSearcher> registeredSearcherH = newSearcher.getCore().getSearcher();
      SolrIndexSearcher registeredSearcher = registeredSearcherH.get();
      registeredSearcherH.decref();
      if (registeredSearcher == newSearcher)
        return true;
      // log.info("TEST: waiting for searcher " + newSearcher + " to be registered. current=" + registeredSearcher);
    }
    try {
      Thread.sleep(250);
    } catch (InterruptedException e) {
    }
  }
  return false;
}
Use of org.apache.solr.util.TimeOut in project lucene-solr by apache.
Class TestLBHttpSolrClient, method waitForServer:
// wait up to maxSeconds for serverName to come back up
private void waitForServer(int maxSeconds, LBHttpSolrClient client, int nServers, String serverName) throws Exception {
  final TimeOut timeout = new TimeOut(maxSeconds, TimeUnit.SECONDS);
  while (!timeout.hasTimedOut()) {
    QueryResponse resp;
    try {
      resp = client.query(new SolrQuery("*:*"));
    } catch (Exception e) {
      log.warn("", e);
      continue;
    }
    String name = resp.getResults().get(0).getFieldValue("name").toString();
    if (name.equals(serverName))
      return;
    Thread.sleep(500);
  }
}
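One detail worth noting: when the query throws, the catch branch retries immediately, so while the server is completely down this loop issues back-to-back failing requests. The following hedged variant applies the same 500 ms back-off to the failure path; it is illustrative only and drops the unused nServers parameter.

  // Illustrative variant of waitForServer(): back off after failures as well as mismatches.
  private void waitForServerWithBackoff(int maxSeconds, LBHttpSolrClient client, String serverName) throws Exception {
    final TimeOut timeout = new TimeOut(maxSeconds, TimeUnit.SECONDS);
    while (!timeout.hasTimedOut()) {
      try {
        QueryResponse resp = client.query(new SolrQuery("*:*"));
        String name = resp.getResults().get(0).getFieldValue("name").toString();
        if (name.equals(serverName)) {
          return; // the expected server is answering again
        }
      } catch (Exception e) {
        log.warn("query failed while waiting for " + serverName, e); // server still down
      }
      Thread.sleep(500); // back off whether the query failed or hit a different server
    }
  }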
Use of org.apache.solr.util.TimeOut in project lucene-solr by apache.
Class PeerSyncReplicationTest, method test:
@Test
public void test() throws Exception {
  handle.clear();
  handle.put("timestamp", SKIPVAL);
  waitForThingsToLevelOut(30);
  del("*:*");
  // index enough docs and commit to establish frame of reference for PeerSync
  for (int i = 0; i < 100; i++) {
    indexDoc(id, docId, i1, 50, tlong, 50, t1, "document number " + docId++);
  }
  commit();
  waitForThingsToLevelOut(30);
  try {
    checkShardConsistency(false, true);
    long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
    assertEquals(docId, cloudClientDocs);
    CloudJettyRunner initialLeaderJetty = shardToLeaderJetty.get("shard1");
    List<CloudJettyRunner> otherJetties = getOtherAvailableJetties(initialLeaderJetty);
    CloudJettyRunner neverLeader = otherJetties.get(otherJetties.size() - 1);
    otherJetties.remove(neverLeader);
    // first shut down a node that will never be a leader
    forceNodeFailures(singletonList(neverLeader));
    // node failure and recovery via PeerSync
    log.info("Forcing PeerSync");
    CloudJettyRunner nodePeerSynced = forceNodeFailureAndDoPeerSync(false);
    // add a few more docs
    indexDoc(id, docId, i1, 50, tlong, 50, t1, "document number " + docId++);
    indexDoc(id, docId, i1, 50, tlong, 50, t1, "document number " + docId++);
    commit();
    cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
    assertEquals(docId, cloudClientDocs);
    // now shut down all other nodes except for 'nodePeerSynced'
    otherJetties.remove(nodePeerSynced);
    forceNodeFailures(otherJetties);
    waitForThingsToLevelOut(30);
    checkShardConsistency(false, true);
    // now shut down the original leader
    log.info("Now shutting down initial leader");
    forceNodeFailures(singletonList(initialLeaderJetty));
    log.info("Updating mappings from zk");
    waitForNewLeader(cloudClient, "shard1", (Replica) initialLeaderJetty.client.info, new TimeOut(15, SECONDS));
    updateMappingsFromZk(jettys, clients, true);
    assertEquals("PeerSynced node did not become leader", nodePeerSynced, shardToLeaderJetty.get("shard1"));
    // bring up the node that was down all along, and let it PeerSync from the node that was forced to PeerSync
    bringUpDeadNodeAndEnsureNoReplication(neverLeader, false);
    waitTillNodesActive();
    checkShardConsistency(false, true);
    // bring back all the nodes including the initial leader
    // (commented out: it reports "Maximum concurrent create/delete watches above limit" violations and thread leaks)
    /*for (int i = 0; i < nodesDown.size(); i++) {
      bringUpDeadNodeAndEnsureNoReplication(shardToLeaderJetty.get("shard1"), neverLeader, false);
    }
    checkShardConsistency(false, true);*/
    // make sure the leader has not changed after bringing the initial leader back
    assertEquals(nodePeerSynced, shardToLeaderJetty.get("shard1"));
    // assert metrics
    MetricRegistry registry = nodePeerSynced.jetty.getCoreContainer().getMetricManager().registry("solr.core.collection1");
    Map<String, Metric> metrics = registry.getMetrics();
    assertTrue("REPLICATION.time present", metrics.containsKey("REPLICATION.time"));
    assertTrue("REPLICATION.errors present", metrics.containsKey("REPLICATION.errors"));
    Timer timer = (Timer) metrics.get("REPLICATION.time");
    assertEquals(1L, timer.getCount());
    Counter counter = (Counter) metrics.get("REPLICATION.errors");
    assertEquals(0L, counter.getCount());
    success = true;
  } finally {
    System.clearProperty("solr.disableFingerprint");
  }
}
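The TimeOut here is used a little differently from the previous examples: the caller builds it and hands it to waitForNewLeader(cloudClient, "shard1", ..., new TimeOut(15, SECONDS)), so the test decides the budget and the helper only consumes it. Below is a hedged sketch of a helper written in that style; the real waitForNewLeader lives in the test base class and its body is not shown here, so the ZooKeeper calls used below (forceUpdateCollection, getClusterState, getLeader) are an assumption about one reasonable implementation.

  // Illustrative deadline-as-parameter helper in the spirit of waitForNewLeader();
  // not the base-class implementation.
  static void waitForDifferentLeader(CloudSolrClient client, String collection, String shard,
                                     Replica oldLeader, TimeOut timeOut) throws Exception {
    while (!timeOut.hasTimedOut()) {
      client.getZkStateReader().forceUpdateCollection(collection);
      Replica leader = client.getZkStateReader().getClusterState().getCollection(collection).getLeader(shard);
      if (leader != null && !leader.getName().equals(oldLeader.getName())) {
        return; // a different replica has taken over the shard
      }
      Thread.sleep(100);
    }
    throw new AssertionError("no new leader elected for " + shard + " within the given TimeOut");
  }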