Use of org.apache.solr.util.TimeOut in project lucene-solr by apache: the class AbstractFullDistribZkTestBase, method waitForNon403or404or503.
public static void waitForNon403or404or503(HttpSolrClient collectionClient) throws Exception {
  SolrException exp = null;
  final TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS);
  while (!timeout.hasTimedOut()) {
    boolean missing = false;
    try {
      collectionClient.query(new SolrQuery("*:*"));
    } catch (SolrException e) {
      if (!(e.code() == 403 || e.code() == 503 || e.code() == 404)) {
        throw e;
      }
      exp = e;
      missing = true;
    }
    if (!missing) {
      return;
    }
    Thread.sleep(50);
  }
  fail("Could not find the new collection - " + exp.code() + " : " + collectionClient.getBaseURL());
}
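TimeOut wraps a deadline behind hasTimedOut() and timeLeft(TimeUnit), and the snippets on this page all follow the same poll, sleep, recheck idiom. A minimal standalone sketch of that idiom, where waitForCondition and its BooleanSupplier argument are illustrative helpers rather than part of the Solr test framework:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;
import org.apache.solr.util.TimeOut;

static void waitForCondition(BooleanSupplier condition, long maxWaitSeconds) throws InterruptedException {
  final TimeOut timeout = new TimeOut(maxWaitSeconds, TimeUnit.SECONDS);
  while (!timeout.hasTimedOut()) {
    if (condition.getAsBoolean()) {
      return; // condition met before the deadline
    }
    Thread.sleep(100); // back off briefly between polls, as the examples here do
  }
  throw new AssertionError("condition not met within " + maxWaitSeconds + " seconds");
}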
Use of org.apache.solr.util.TimeOut in project lucene-solr by apache: the class ChaosMonkey, method wait.
/**
 * Call this method to wait while the ChaosMonkey is running. It waits approximately
 * the specified time and periodically logs the status of the collection.
 * @param runLength the time in ms to wait
 * @param collectionName the main collection being used for the ChaosMonkey
 * @param zkStateReader the current state reader
 */
public static void wait(long runLength, String collectionName, ZkStateReader zkStateReader) throws InterruptedException {
  TimeOut t = new TimeOut(runLength, TimeUnit.MILLISECONDS);
  while (!t.hasTimedOut()) {
    // sleep for at most one second at a time, but never past the deadline
    Thread.sleep(Math.min(1000, t.timeLeft(TimeUnit.MILLISECONDS)));
    logCollectionStateSummary(collectionName, zkStateReader);
  }
}
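A typical call site starts the monkey, waits, then stops it. A hedged sketch, assuming the startTheMonkey/stopTheMonkey methods of the same test framework; the monkey field, collection name, and durations are placeholders:

// sketch only: run the monkey for about 90 seconds while logging collection state
monkey.startTheMonkey(true, 1500);
ChaosMonkey.wait(90 * 1000, "collection1", cloudClient.getZkStateReader());
monkey.stopTheMonkey();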
Use of org.apache.solr.util.TimeOut in project lucene-solr by apache: the class AbstractFullDistribZkTestBase, method getRequestStateAfterCompletion.
static RequestStatusState getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrClient client) throws IOException, SolrServerException {
  RequestStatusState state = null;
  final TimeOut timeout = new TimeOut(waitForSeconds, TimeUnit.SECONDS);
  while (!timeout.hasTimedOut()) {
    state = getRequestState(requestId, client);
    if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
      return state;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // ignore the interrupt and keep polling until the deadline
    }
  }
  return state;
}
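Callers submit an async collection API request and then poll for a terminal state. A minimal hedged sketch of such a call, where the request id and wait budget are arbitrary:

RequestStatusState state = getRequestStateAfterCompletion("1000", 60, cloudClient);
assertEquals("async request did not complete in time", RequestStatusState.COMPLETED, state);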
Use of org.apache.solr.util.TimeOut in project lucene-solr by apache: the class StressHdfsTest, method createAndDeleteCollection.
private void createAndDeleteCollection() throws Exception {
  boolean overshard = random().nextBoolean();
  int rep;
  int nShards;
  int maxReplicasPerNode;
  if (overshard) {
    nShards = getShardCount() * 2;
    maxReplicasPerNode = 8;
    rep = 1;
  } else {
    nShards = getShardCount() / 2;
    maxReplicasPerNode = 1;
    rep = 2;
    if (nShards == 0) {
      nShards = 1;
    }
  }
  createCollection(DELETE_DATA_DIR_COLLECTION, nShards, rep, maxReplicasPerNode);
  waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
  // data dirs should be in zk, SOLR-8913
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  Slice slice = clusterState.getSlice(DELETE_DATA_DIR_COLLECTION, "shard1");
  assertNotNull(clusterState.getSlices(DELETE_DATA_DIR_COLLECTION).toString(), slice);
  Collection<Replica> replicas = slice.getReplicas();
  for (Replica replica : replicas) {
    assertNotNull(replica.getProperties().toString(), replica.get("dataDir"));
    assertNotNull(replica.getProperties().toString(), replica.get("ulogDir"));
  }
  cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
  cloudClient.getZkStateReader().forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);
  for (int i = 1; i < nShards + 1; i++) {
    cloudClient.getZkStateReader().getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 30000);
  }
  // collect the data dirs
  List<String> dataDirs = new ArrayList<>();
  int i = 0;
  for (SolrClient client : clients) {
    try (HttpSolrClient c = getHttpSolrClient(getBaseUrl(client) + "/" + DELETE_DATA_DIR_COLLECTION)) {
      int docCnt = random().nextInt(1000) + 1;
      for (int j = 0; j < docCnt; j++) {
        c.add(getDoc("id", i++, "txt_t", "just some random text for a doc"));
      }
      if (random().nextBoolean()) {
        c.commit();
      } else {
        c.commit(true, true, true);
      }
      c.setConnectionTimeout(30000);
      NamedList<Object> response = c.query(new SolrQuery().setRequestHandler("/admin/system")).getResponse();
      NamedList<Object> coreInfo = (NamedList<Object>) response.get("core");
      String dataDir = (String) ((NamedList<Object>) coreInfo.get("directory")).get("data");
      dataDirs.add(dataDir);
    }
  }
  if (random().nextBoolean()) {
    cloudClient.deleteByQuery("*:*");
    cloudClient.commit();
    assertEquals(0, cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound());
  }
  cloudClient.commit();
  cloudClient.query(new SolrQuery("*:*"));
  // delete collection
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("action", CollectionAction.DELETE.toString());
  params.set("name", DELETE_DATA_DIR_COLLECTION);
  QueryRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");
  cloudClient.request(request);
  final TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS);
  while (cloudClient.getZkStateReader().getClusterState().hasCollection(DELETE_DATA_DIR_COLLECTION)) {
    if (timeout.hasTimedOut()) {
      throw new AssertionError("Timeout waiting to see removed collection leave clusterstate");
    }
    Thread.sleep(200);
  }
  // check that all dirs are gone
  for (String dataDir : dataDirs) {
    Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    FileSystem fs = FileSystem.get(new URI(HdfsTestUtil.getURI(dfsCluster)), conf);
    assertFalse("Data directory exists after collection removal : " + dataDir, fs.exists(new Path(dataDir)));
    fs.close();
  }
}
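The hand-built delete request above can also be expressed through SolrJ's typed collection API; a sketch, assuming a SolrJ version that provides the CollectionAdminRequest.deleteCollection factory:

// equivalent to the ModifiableSolrParams/QueryRequest delete above
CollectionAdminRequest.deleteCollection(DELETE_DATA_DIR_COLLECTION).process(cloudClient);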
Use of org.apache.solr.util.TimeOut in project lucene-solr by apache: the class PeerSyncReplicationTest, method waitTillNodesActive.
private void waitTillNodesActive() throws Exception {
  // poll up to 60 times with a 3 second pause: roughly a 3 minute budget
  for (int i = 0; i < 60; i++) {
    Thread.sleep(3000);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection1 = clusterState.getCollection("collection1");
    Slice slice = collection1.getSlice("shard1");
    Collection<Replica> replicas = slice.getReplicas();
    boolean allActive = true;
    Collection<String> nodesDownNames = nodesDown.stream().map(n -> n.coreNodeName).collect(Collectors.toList());
    Collection<Replica> replicasToCheck = replicas.stream().filter(r -> !nodesDownNames.contains(r.getName())).collect(Collectors.toList());
    for (Replica replica : replicasToCheck) {
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return;
    }
  }
  printLayout();
  fail("timeout waiting to see all nodes active");
}
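Unlike the other snippets on this page, this method bounds its wait with a fixed iteration count rather than a TimeOut. The same wait expressed against a deadline, as a sketch in which allReplicasActive is a hypothetical helper wrapping the replica checks above:

final TimeOut timeout = new TimeOut(180, TimeUnit.SECONDS); // 60 iterations of 3 seconds
while (!timeout.hasTimedOut()) {
  Thread.sleep(3000);
  if (allReplicasActive()) { // hypothetical: encapsulates the liveNodes and Replica.State.ACTIVE checks
    return;
  }
}
printLayout();
fail("timeout waiting to see all nodes active");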