Example 6 with ZkStateReader

Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.

From the class HttpPartitionTest, method testMinRf:

protected void testMinRf() throws Exception {
    // create a collection that has 1 shard and 3 replicas
    String testCollectionName = "collMinRf_1x3";
    createCollection(testCollectionName, 1, 3, 1);
    cloudClient.setDefaultCollection(testCollectionName);
    sendDoc(1, 2);
    List<Replica> notLeaders = ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 3, maxWaitSecsToSeeAllActive);
    assertTrue("Expected 2 non-leader replicas for collection " + testCollectionName + " but found " + notLeaders.size() + "; clusterState: " + printClusterStateInfo(testCollectionName), notLeaders.size() == 2);
    assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 1);
    // Now introduce a network partition between the leader and 1 replica, so a minRf of 2 is still achieved
    SocketProxy proxy0 = getProxyForReplica(notLeaders.get(0));
    proxy0.close();
    // indexing during a partition
    int achievedRf = sendDoc(2, 2);
    assertEquals("Unexpected achieved replication factor", 2, achievedRf);
    Thread.sleep(sleepMsBeforeHealPartition);
    // Verify that the partitioned replica is DOWN
    ZkStateReader zkr = cloudClient.getZkStateReader();
    // force the state to be fresh
    zkr.forceUpdateCollection(testCollectionName);
    ClusterState cs = zkr.getClusterState();
    Collection<Slice> slices = cs.getActiveSlices(testCollectionName);
    Slice slice = slices.iterator().next();
    Replica partitionedReplica = slice.getReplica(notLeaders.get(0).getName());
    assertEquals("The partitioned replica did not get marked down", Replica.State.DOWN.toString(), partitionedReplica.getStr(ZkStateReader.STATE_PROP));
    proxy0.reopen();
    notLeaders = ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 3, maxWaitSecsToSeeAllActive);
    // Since minRf is achieved, we expect recovery, so we expect seeing 2 documents
    assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 2);
    // Now introduce a network partition between the leader and both of its replicas, so a minRf of 2 is NOT achieved
    proxy0 = getProxyForReplica(notLeaders.get(0));
    proxy0.close();
    SocketProxy proxy1 = getProxyForReplica(notLeaders.get(1));
    proxy1.close();
    achievedRf = sendDoc(3, 2);
    assertEquals("Unexpected achieved replication factor", 1, achievedRf);
    Thread.sleep(sleepMsBeforeHealPartition);
    // Verify that the partitioned replicas are NOT DOWN since minRf wasn't achieved
    ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 3, 1);
    proxy0.reopen();
    proxy1.reopen();
    notLeaders = ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 3, maxWaitSecsToSeeAllActive);
    // Check that doc 3 is on the leader but not on the notLeaders
    Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1", 10000);
    try (HttpSolrClient leaderSolr = getHttpSolrClient(leader, testCollectionName)) {
        assertDocExists(leaderSolr, testCollectionName, "3");
    }
    for (Replica notLeader : notLeaders) {
        try (HttpSolrClient notLeaderSolr = getHttpSolrClient(notLeader, testCollectionName)) {
            assertDocNotExists(notLeaderSolr, testCollectionName, "3");
        }
    }
    // Retry sending doc 3
    achievedRf = sendDoc(3, 2);
    assertEquals("Unexpected achieved replication factor", 3, achievedRf);
    // Now doc 3 should be on all replicas
    assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 3);
}
Also used: ZkStateReader (org.apache.solr.common.cloud.ZkStateReader), HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient), ClusterState (org.apache.solr.common.cloud.ClusterState), Slice (org.apache.solr.common.cloud.Slice), Replica (org.apache.solr.common.cloud.Replica)
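
The state check in testMinRf follows a reusable pattern: force a fresh view of the collection, read the cluster state, and inspect the target replica. Below is a minimal sketch of that pattern, assuming a connected CloudSolrClient named cloudClient; readReplicaState is a hypothetical helper, not part of the test.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkStateReader;

// Hypothetical helper: return the current state of one named replica, or null if it cannot be found.
static Replica.State readReplicaState(CloudSolrClient cloudClient, String collection,
                                      String shard, String replicaName) throws Exception {
    ZkStateReader reader = cloudClient.getZkStateReader();
    // refresh the cached state for this collection before reading it
    reader.forceUpdateCollection(collection);
    ClusterState clusterState = reader.getClusterState();
    Slice slice = clusterState.getCollection(collection).getSlice(shard);
    Replica replica = (slice == null) ? null : slice.getReplica(replicaName);
    return (replica == null) ? null : replica.getState();
}

Comparing the returned enum against Replica.State.DOWN is equivalent to the string comparison on ZkStateReader.STATE_PROP used in the test.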

Example 7 with ZkStateReader

Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.

From the class ForceLeaderTest, method unsetLeader:

protected void unsetLeader(String collection, String slice) throws Exception {
    DistributedQueue inQueue = Overseer.getStateUpdateQueue(cloudClient.getZkStateReader().getZkClient());
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower(), ZkStateReader.SHARD_ID_PROP, slice, ZkStateReader.COLLECTION_PROP, collection);
    inQueue.offer(Utils.toJSON(m));
    ClusterState clusterState = null;
    boolean transition = false;
    for (int counter = 10; counter > 0; counter--) {
        clusterState = zkStateReader.getClusterState();
        Replica newLeader = clusterState.getSlice(collection, slice).getLeader();
        if (newLeader == null) {
            transition = true;
            break;
        }
        Thread.sleep(1000);
    }
    if (!transition) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not unset replica leader" + ". Cluster state: " + printClusterStateInfo(collection));
    }
}
Also used: ZkStateReader (org.apache.solr.common.cloud.ZkStateReader), ClusterState (org.apache.solr.common.cloud.ClusterState), ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps), Replica (org.apache.solr.common.cloud.Replica), SolrException (org.apache.solr.common.SolrException)
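
unsetLeader is the usual publish-then-poll pattern: an OverseerAction.LEADER message is offered to the Overseer's state-update queue, and the test then waits for the cluster state to reflect the change. The polling half can be isolated; the sketch below assumes the caller already holds a ZkStateReader, and waitForLeaderAbsence is a hypothetical helper, not part of ForceLeaderTest.

import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;

// Hypothetical helper: poll once per second until the shard reports no leader, or give up.
static boolean waitForLeaderAbsence(ZkStateReader zkStateReader, String collection,
                                    String shard, int maxAttempts) throws InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
        ClusterState clusterState = zkStateReader.getClusterState();
        Replica leader = clusterState.getCollection(collection).getSlice(shard).getLeader();
        if (leader == null) {
            return true; // the queued LEADER operation has taken effect
        }
        Thread.sleep(1000); // the reader's watchers refresh the state; just wait and re-read
    }
    return false;
}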

Example 8 with ZkStateReader

Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.

From the class TestMiniSolrCloudCluster, method testStopAllStartAll:

@Test
public void testStopAllStartAll() throws Exception {
    final String collectionName = "testStopAllStartAllCollection";
    final MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
    try {
        assertNotNull(miniCluster.getZkServer());
        List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
        assertEquals(NUM_SERVERS, jettys.size());
        for (JettySolrRunner jetty : jettys) {
            assertTrue(jetty.isRunning());
        }
        createCollection(miniCluster, collectionName, null, null, Boolean.TRUE, null);
        final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
        cloudSolrClient.setDefaultCollection(collectionName);
        final SolrQuery query = new SolrQuery("*:*");
        final SolrInputDocument doc = new SolrInputDocument();
        try (SolrZkClient zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
            ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
            zkStateReader.createClusterStateWatchersAndUpdate();
            AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
            // modify collection
            final int numDocs = 1 + random().nextInt(10);
            for (int ii = 1; ii <= numDocs; ++ii) {
                doc.setField("id", "" + ii);
                cloudSolrClient.add(doc);
                if (ii * 2 == numDocs)
                    cloudSolrClient.commit();
            }
            cloudSolrClient.commit();
            // query collection
            {
                final QueryResponse rsp = cloudSolrClient.query(query);
                assertEquals(numDocs, rsp.getResults().getNumFound());
            }
            // the test itself
            zkStateReader.forceUpdateCollection(collectionName);
            final ClusterState clusterState = zkStateReader.getClusterState();
            final HashSet<Integer> leaderIndices = new HashSet<Integer>();
            final HashSet<Integer> followerIndices = new HashSet<Integer>();
            {
                final HashMap<String, Boolean> shardLeaderMap = new HashMap<String, Boolean>();
                for (final Slice slice : clusterState.getSlices(collectionName)) {
                    for (final Replica replica : slice.getReplicas()) {
                        shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
                    }
                    shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
                }
                for (int ii = 0; ii < jettys.size(); ++ii) {
                    final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
                    final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
                    final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
                    if (Boolean.TRUE.equals(isLeader)) {
                        leaderIndices.add(new Integer(ii));
                    } else if (Boolean.FALSE.equals(isLeader)) {
                        followerIndices.add(new Integer(ii));
                    }
                // else neither leader nor follower i.e. node without a replica (for our collection)
                }
            }
            final List<Integer> leaderIndicesList = new ArrayList<Integer>(leaderIndices);
            final List<Integer> followerIndicesList = new ArrayList<Integer>(followerIndices);
            // first stop the followers (in no particular order)
            Collections.shuffle(followerIndicesList, random());
            for (Integer ii : followerIndicesList) {
                if (!leaderIndices.contains(ii)) {
                    miniCluster.stopJettySolrRunner(jettys.get(ii.intValue()));
                }
            }
            // then stop the leaders (again in no particular order)
            Collections.shuffle(leaderIndicesList, random());
            for (Integer ii : leaderIndicesList) {
                miniCluster.stopJettySolrRunner(jettys.get(ii.intValue()));
            }
            // calculate restart order
            final List<Integer> restartIndicesList = new ArrayList<Integer>();
            Collections.shuffle(leaderIndicesList, random());
            restartIndicesList.addAll(leaderIndicesList);
            Collections.shuffle(followerIndicesList, random());
            restartIndicesList.addAll(followerIndicesList);
            if (random().nextBoolean())
                Collections.shuffle(restartIndicesList, random());
            // and then restart jettys in that order
            for (Integer ii : restartIndicesList) {
                final JettySolrRunner jetty = jettys.get(ii.intValue());
                if (!jetty.isRunning()) {
                    miniCluster.startJettySolrRunner(jetty);
                    assertTrue(jetty.isRunning());
                }
            }
            AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
            zkStateReader.forceUpdateCollection(collectionName);
            // re-query collection
            {
                final QueryResponse rsp = cloudSolrClient.query(query);
                assertEquals(numDocs, rsp.getResults().getNumFound());
            }
        }
    } finally {
        miniCluster.shutdown();
    }
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) HashMap(java.util.HashMap) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) ArrayList(java.util.ArrayList) SolrZkClient(org.apache.solr.common.cloud.SolrZkClient) Replica(org.apache.solr.common.cloud.Replica) SolrQuery(org.apache.solr.client.solrj.SolrQuery) URL(java.net.URL) CloudSolrClient(org.apache.solr.client.solrj.impl.CloudSolrClient) ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) SolrInputDocument(org.apache.solr.common.SolrInputDocument) Slice(org.apache.solr.common.cloud.Slice) QueryResponse(org.apache.solr.client.solrj.response.QueryResponse) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 9 with ZkStateReader

Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.

From the class TestMiniSolrCloudCluster, method testCollectionCreateSearchDelete:

@Test
public void testCollectionCreateSearchDelete() throws Exception {
    final String collectionName = "testcollection";
    MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
    final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
    try {
        assertNotNull(miniCluster.getZkServer());
        List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
        assertEquals(NUM_SERVERS, jettys.size());
        for (JettySolrRunner jetty : jettys) {
            assertTrue(jetty.isRunning());
        }
        // shut down a server
        log.info("#### Stopping a server");
        JettySolrRunner stoppedServer = miniCluster.stopJettySolrRunner(0);
        assertTrue(stoppedServer.isStopped());
        assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
        // create a server
        log.info("#### Starting a server");
        JettySolrRunner startedServer = miniCluster.startJettySolrRunner();
        assertTrue(startedServer.isRunning());
        assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
        // create collection
        log.info("#### Creating a collection");
        final String asyncId = (random().nextBoolean() ? null : "asyncId(" + collectionName + ".create)=" + random().nextInt());
        createCollection(miniCluster, collectionName, null, asyncId, null, null);
        ZkStateReader zkStateReader = miniCluster.getSolrClient().getZkStateReader();
        AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
        // modify/query collection
        log.info("#### updating a querying collection");
        cloudSolrClient.setDefaultCollection(collectionName);
        SolrInputDocument doc = new SolrInputDocument();
        doc.setField("id", "1");
        cloudSolrClient.add(doc);
        cloudSolrClient.commit();
        SolrQuery query = new SolrQuery();
        query.setQuery("*:*");
        QueryResponse rsp = cloudSolrClient.query(query);
        assertEquals(1, rsp.getResults().getNumFound());
        // remove a server not hosting any replicas
        zkStateReader.forceUpdateCollection(collectionName);
        ClusterState clusterState = zkStateReader.getClusterState();
        HashMap<String, JettySolrRunner> jettyMap = new HashMap<String, JettySolrRunner>();
        for (JettySolrRunner jetty : miniCluster.getJettySolrRunners()) {
            String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
            jettyMap.put(key, jetty);
        }
        Collection<Slice> slices = clusterState.getSlices(collectionName);
        // track the servers not hosting replicas
        for (Slice slice : slices) {
            jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
            for (Replica replica : slice.getReplicas()) {
                jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
            }
        }
        assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
        log.info("#### Stopping a server");
        JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
        jettys = miniCluster.getJettySolrRunners();
        for (int i = 0; i < jettys.size(); ++i) {
            if (jettys.get(i).equals(jettyToStop)) {
                miniCluster.stopJettySolrRunner(i);
                assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
            }
        }
        // re-create a server (to restore original NUM_SERVERS count)
        log.info("#### Starting a server");
        startedServer = miniCluster.startJettySolrRunner(jettyToStop);
        assertTrue(startedServer.isRunning());
        assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
        CollectionAdminRequest.deleteCollection(collectionName).process(miniCluster.getSolrClient());
        // create it again
        String asyncId2 = (random().nextBoolean() ? null : "asyncId(" + collectionName + ".create)=" + random().nextInt());
        createCollection(miniCluster, collectionName, null, asyncId2, null, null);
        AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
        // check that there's no left-over state
        assertEquals(0, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
        cloudSolrClient.add(doc);
        cloudSolrClient.commit();
        assertEquals(1, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    } finally {
        miniCluster.shutdown();
    }
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) HashMap(java.util.HashMap) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) Replica(org.apache.solr.common.cloud.Replica) SolrQuery(org.apache.solr.client.solrj.SolrQuery) CloudSolrClient(org.apache.solr.client.solrj.impl.CloudSolrClient) ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) SolrInputDocument(org.apache.solr.common.SolrInputDocument) Slice(org.apache.solr.common.cloud.Slice) QueryResponse(org.apache.solr.client.solrj.response.QueryResponse) Test(org.junit.Test)

Example 10 with ZkStateReader

Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.

From the class TestMiniSolrCloudClusterSSL, method checkCreateCollection:

/**
   * Verify that we can create a collection that involves one replica per node using the
   * CloudSolrClient available for the cluster
   */
private static void checkCreateCollection(final MiniSolrCloudCluster cluster, final String collection) throws Exception {
    final CloudSolrClient cloudClient = cluster.getSolrClient();
    CollectionAdminRequest.createCollection(collection, CONF_NAME, NUM_SERVERS, 1).withProperty("config", "solrconfig-tlog.xml").process(cloudClient);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collection, zkStateReader, true, true, 330);
    assertEquals("sanity query", 0, cloudClient.query(collection, params("q", "*:*")).getStatus());
}
Also used : ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) CloudSolrClient(org.apache.solr.client.solrj.impl.CloudSolrClient)

Aggregations

ZkStateReader (org.apache.solr.common.cloud.ZkStateReader): 129 usages
ClusterState (org.apache.solr.common.cloud.ClusterState): 54 usages
Replica (org.apache.solr.common.cloud.Replica): 47 usages
Slice (org.apache.solr.common.cloud.Slice): 47 usages
HashMap (java.util.HashMap): 33 usages
SolrZkClient (org.apache.solr.common.cloud.SolrZkClient): 33 usages
ArrayList (java.util.ArrayList): 32 usages
DocCollection (org.apache.solr.common.cloud.DocCollection): 30 usages
Test (org.junit.Test): 26 usages
SolrException (org.apache.solr.common.SolrException): 25 usages
CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient): 20 usages
ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps): 20 usages
IOException (java.io.IOException): 18 usages
Map (java.util.Map): 18 usages
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 18 usages
SolrQuery (org.apache.solr.client.solrj.SolrQuery): 15 usages
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient): 15 usages
KeeperException (org.apache.zookeeper.KeeperException): 14 usages
SolrServerException (org.apache.solr.client.solrj.SolrServerException): 12 usages
JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner): 12 usages