Search in sources :

Example 26 with TimeOut

use of org.apache.solr.util.TimeOut in project lucene-solr by apache.

From the class AbstractFullDistribZkTestBase, the method waitForNon403or404or503:

/**
 * Polls the given client with a match-all query until it stops returning
 * 403/404/503 (i.e. the collection has become visible), or fails the test
 * after a 30 second timeout. Any other SolrException code is rethrown
 * immediately as a real failure.
 *
 * @param collectionClient client pointed at the collection being waited on
 */
public static void waitForNon403or404or503(HttpSolrClient collectionClient) throws Exception {
    SolrException exp = null;
    final TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS);
    while (!timeout.hasTimedOut()) {
        boolean missing = false;
        try {
            collectionClient.query(new SolrQuery("*:*"));
        } catch (SolrException e) {
            // 403/404/503 mean "not there yet" while the collection is being
            // created; anything else is unexpected and rethrown.
            if (!(e.code() == 403 || e.code() == 503 || e.code() == 404)) {
                throw e;
            }
            exp = e;
            missing = true;
        }
        if (!missing) {
            return;
        }
        Thread.sleep(50);
    }
    // exp can still be null if the timeout elapsed before any attempt recorded
    // an exception; guard so an NPE does not mask the real timeout failure.
    fail("Could not find the new collection - "
        + (exp == null ? "no error code recorded" : exp.code())
        + " : " + collectionClient.getBaseURL());
}
Also used : TimeOut(org.apache.solr.util.TimeOut) SolrException(org.apache.solr.common.SolrException) SolrQuery(org.apache.solr.client.solrj.SolrQuery)

Example 27 with TimeOut

use of org.apache.solr.util.TimeOut in project lucene-solr by apache.

From the class ChaosMonkey, the method wait:

/**
   * You can call this method to wait while the ChaosMonkey is running, it waits approximately the specified time, and periodically
   * logs the status of the collection
   * @param runLength The time in ms to wait
   * @param collectionName The main collection being used for the ChaosMonkey
   * @param zkStateReader current state reader
   */
public static void wait(long runLength, String collectionName, ZkStateReader zkStateReader) throws InterruptedException {
    TimeOut t = new TimeOut(runLength, TimeUnit.MILLISECONDS);
    while (!t.hasTimedOut()) {
        // Clamp to >= 0: timeLeft() can go negative between the hasTimedOut()
        // check above and this call, and Thread.sleep throws
        // IllegalArgumentException for negative arguments.
        Thread.sleep(Math.max(0, Math.min(1000, t.timeLeft(TimeUnit.MILLISECONDS))));
        logCollectionStateSummary(collectionName, zkStateReader);
    }
}
Also used : TimeOut(org.apache.solr.util.TimeOut)

Example 28 with TimeOut

use of org.apache.solr.util.TimeOut in project lucene-solr by apache.

From the class AbstractFullDistribZkTestBase, the method getRequestStateAfterCompletion:

/**
 * Polls the async request status for {@code requestId} until it reaches a
 * terminal state (COMPLETED or FAILED) or {@code waitForSeconds} elapses.
 *
 * @param requestId      the async collection-API request id to poll
 * @param waitForSeconds maximum time to wait, in seconds
 * @param client         client used to issue the status requests
 * @return the last observed state; may be a non-terminal state (or null if no
 *         poll completed) when the timeout expires or the thread is interrupted
 */
static RequestStatusState getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrClient client) throws IOException, SolrServerException {
    RequestStatusState state = null;
    final TimeOut timeout = new TimeOut(waitForSeconds, TimeUnit.SECONDS);
    while (!timeout.hasTimedOut()) {
        state = getRequestState(requestId, client);
        if (state == RequestStatusState.COMPLETED || state == RequestStatusState.FAILED) {
            return state;
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Restore the interrupt flag (previously swallowed) and stop
            // polling so callers can observe the interruption.
            Thread.currentThread().interrupt();
            return state;
        }
    }
    return state;
}
Also used : RequestStatusState(org.apache.solr.client.solrj.response.RequestStatusState) TimeOut(org.apache.solr.util.TimeOut)

Example 29 with TimeOut

use of org.apache.solr.util.TimeOut in project lucene-solr by apache.

From the class StressHdfsTest, the method createAndDeleteCollection:

/**
 * End-to-end HDFS collection lifecycle check: creates a collection (randomly
 * over- or under-sharded), verifies the HDFS data/ulog dirs are registered in
 * ZK (SOLR-8913), indexes random docs against every node, deletes the
 * collection, and asserts every HDFS data directory was removed with it.
 */
private void createAndDeleteCollection() throws SolrServerException, IOException, Exception, KeeperException, InterruptedException, URISyntaxException {
    boolean overshard = random().nextBoolean();
    int rep;
    int nShards;
    int maxReplicasPerNode;
    if (overshard) {
        // More shards than nodes: shards must co-locate on hosts.
        nShards = getShardCount() * 2;
        maxReplicasPerNode = 8;
        rep = 1;
    } else {
        // Fewer shards than nodes, but replicated.
        nShards = getShardCount() / 2;
        maxReplicasPerNode = 1;
        rep = 2;
        if (nShards == 0)
            nShards = 1;
    }
    createCollection(DELETE_DATA_DIR_COLLECTION, nShards, rep, maxReplicasPerNode);
    waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
    // data dirs should be in zk, SOLR-8913
    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    Slice slice = clusterState.getSlice(DELETE_DATA_DIR_COLLECTION, "shard1");
    assertNotNull(clusterState.getSlices(DELETE_DATA_DIR_COLLECTION).toString(), slice);
    Collection<Replica> replicas = slice.getReplicas();
    for (Replica replica : replicas) {
        assertNotNull(replica.getProperties().toString(), replica.get("dataDir"));
        assertNotNull(replica.getProperties().toString(), replica.get("ulogDir"));
    }
    cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
    cloudClient.getZkStateReader().forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);
    // Wait until every shard has an elected leader before indexing.
    for (int i = 1; i < nShards + 1; i++) {
        cloudClient.getZkStateReader().getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 30000);
    }
    // collect the data dirs
    List<String> dataDirs = new ArrayList<>();
    int i = 0;
    for (SolrClient client : clients) {
        try (HttpSolrClient c = getHttpSolrClient(getBaseUrl(client) + "/" + DELETE_DATA_DIR_COLLECTION)) {
            int docCnt = random().nextInt(1000) + 1;
            for (int j = 0; j < docCnt; j++) {
                c.add(getDoc("id", i++, "txt_t", "just some random text for a doc"));
            }
            // Randomly exercise both commit flavors.
            if (random().nextBoolean()) {
                c.commit();
            } else {
                c.commit(true, true, true);
            }
            c.setConnectionTimeout(30000);
            // Ask the core admin handler where this core keeps its data.
            NamedList<Object> response = c.query(new SolrQuery().setRequestHandler("/admin/system")).getResponse();
            NamedList<Object> coreInfo = (NamedList<Object>) response.get("core");
            String dataDir = (String) ((NamedList<Object>) coreInfo.get("directory")).get("data");
            dataDirs.add(dataDir);
        }
    }
    if (random().nextBoolean()) {
        cloudClient.deleteByQuery("*:*");
        cloudClient.commit();
        assertEquals(0, cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }
    cloudClient.commit();
    cloudClient.query(new SolrQuery("*:*"));
    // delete collection
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionAction.DELETE.toString());
    params.set("name", DELETE_DATA_DIR_COLLECTION);
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    cloudClient.request(request);
    final TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS);
    while (cloudClient.getZkStateReader().getClusterState().hasCollection(DELETE_DATA_DIR_COLLECTION)) {
        if (timeout.hasTimedOut()) {
            throw new AssertionError("Timeout waiting to see removed collection leave clusterstate");
        }
        Thread.sleep(200);
    }
    // check that all dirs are gone
    for (String dataDir : dataDirs) {
        Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
        // Disable the FS cache so we get a fresh, independently closeable instance.
        conf.setBoolean("fs.hdfs.impl.disable.cache", true);
        // try-with-resources: the original leaked the FileSystem when the
        // assertion failed, since fs.close() was only reached on success.
        try (FileSystem fs = FileSystem.get(new URI(HdfsTestUtil.getURI(dfsCluster)), conf)) {
            assertFalse("Data directory exists after collection removal : " + dataDir, fs.exists(new Path(dataDir)));
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ClusterState(org.apache.solr.common.cloud.ClusterState) QueryRequest(org.apache.solr.client.solrj.request.QueryRequest) Configuration(org.apache.hadoop.conf.Configuration) NamedList(org.apache.solr.common.util.NamedList) TimeOut(org.apache.solr.util.TimeOut) ArrayList(java.util.ArrayList) Replica(org.apache.solr.common.cloud.Replica) URI(java.net.URI) SolrQuery(org.apache.solr.client.solrj.SolrQuery) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) SolrClient(org.apache.solr.client.solrj.SolrClient) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) Slice(org.apache.solr.common.cloud.Slice) FileSystem(org.apache.hadoop.fs.FileSystem)

Example 30 with TimeOut

use of org.apache.solr.util.TimeOut in project lucene-solr by apache.

From the class PeerSyncReplicationTest, the method waitTillNodesActive:

/**
 * Polls (up to 60 attempts, 3 s apart — roughly 3 minutes) until every
 * shard1 replica of collection1 that was not deliberately taken down reports
 * ACTIVE on a live node; otherwise prints the cluster layout and fails.
 */
private void waitTillNodesActive() throws Exception {
    for (int attempt = 0; attempt < 60; attempt++) {
        Thread.sleep(3000);
        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
        ClusterState clusterState = zkStateReader.getClusterState();
        Slice shard1 = clusterState.getCollection("collection1").getSlice("shard1");
        // Replicas we intentionally took down are exempt from the check.
        Collection<String> downNames =
            nodesDown.stream().map(n -> n.coreNodeName).collect(Collectors.toList());
        boolean allActive = shard1.getReplicas().stream()
            .filter(r -> !downNames.contains(r.getName()))
            .allMatch(r -> clusterState.liveNodesContain(r.getNodeName())
                && r.getState() == Replica.State.ACTIVE);
        if (allActive) {
            return;
        }
    }
    printLayout();
    fail("timeout waiting to see all nodes active");
}
Also used : ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) Arrays(java.util.Arrays) Slow(org.apache.lucene.util.LuceneTestCase.Slow) DocCollection(org.apache.solr.common.cloud.DocCollection) ClusterState(org.apache.solr.common.cloud.ClusterState) LoggerFactory(org.slf4j.LoggerFactory) ArrayList(java.util.ArrayList) Collections.singletonList(java.util.Collections.singletonList) HashSet(java.util.HashSet) SolrServerException(org.apache.solr.client.solrj.SolrServerException) Map(java.util.Map) Counter(com.codahale.metrics.Counter) RandomStringUtils(org.apache.commons.lang.RandomStringUtils) ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) MetricRegistry(com.codahale.metrics.MetricRegistry) Slice(org.apache.solr.common.cloud.Slice) Logger(org.slf4j.Logger) Files(java.nio.file.Files) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) MethodHandles(java.lang.invoke.MethodHandles) Collection(java.util.Collection) Set(java.util.Set) Metric(com.codahale.metrics.Metric) IOException(java.io.IOException) Test(org.junit.Test) TimeOut(org.apache.solr.util.TimeOut) Collectors(java.util.stream.Collectors) Replica(org.apache.solr.common.cloud.Replica) BadApple(org.apache.lucene.util.LuceneTestCase.BadApple) List(java.util.List) Paths(java.nio.file.Paths) SolrQuery(org.apache.solr.client.solrj.SolrQuery) UpdateRequest(org.apache.solr.client.solrj.request.UpdateRequest) LimitViolationAction(org.apache.solr.cloud.ZkTestServer.LimitViolationAction) Timer(com.codahale.metrics.Timer) SECONDS(java.util.concurrent.TimeUnit.SECONDS) SolrInputDocument(org.apache.solr.common.SolrInputDocument) ClusterState(org.apache.solr.common.cloud.ClusterState) Slice(org.apache.solr.common.cloud.Slice) DocCollection(org.apache.solr.common.cloud.DocCollection) Replica(org.apache.solr.common.cloud.Replica)

Aggregations

TimeOut (org.apache.solr.util.TimeOut)48 SolrException (org.apache.solr.common.SolrException)15 Slice (org.apache.solr.common.cloud.Slice)15 DocCollection (org.apache.solr.common.cloud.DocCollection)14 Replica (org.apache.solr.common.cloud.Replica)13 SolrQuery (org.apache.solr.client.solrj.SolrQuery)11 ZkStateReader (org.apache.solr.common.cloud.ZkStateReader)8 ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams)8 HashMap (java.util.HashMap)7 Test (org.junit.Test)7 IOException (java.io.IOException)6 ArrayList (java.util.ArrayList)6 SolrInputDocument (org.apache.solr.common.SolrInputDocument)6 ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps)6 NamedList (org.apache.solr.common.util.NamedList)6 HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient)5 Map (java.util.Map)4 SolrServerException (org.apache.solr.client.solrj.SolrServerException)4 Collections.singletonList (java.util.Collections.singletonList)3 HashSet (java.util.HashSet)3