Example 41 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class ShardSplitTest method splitByUniqueKeyTest.

private void splitByUniqueKeyTest() throws Exception {
    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
    Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
    List<DocRouter.Range> subRanges = new ArrayList<>();
    if (usually()) {
        List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
        // 75% of range goes to shard1_0 and the rest to shard1_1
        subRanges.add(new DocRouter.Range(ranges.get(0).min, ranges.get(2).max));
        subRanges.add(ranges.get(3));
    } else {
        subRanges = router.partitionRange(2, shard1Range);
    }
    final List<DocRouter.Range> ranges = subRanges;
    final int[] docCounts = new int[ranges.size()];
    int numReplicas = shard1.getReplicas().size();
    del("*:*");
    for (int id = 0; id <= 100; id++) {
        // See comment in ShardRoutingTest for hash distribution
        String shardKey = "" + (char) ('a' + (id % 26));
        indexAndUpdateCount(router, ranges, docCounts, shardKey + "!" + String.valueOf(id), id);
    }
    commit();
    Thread indexThread = new Thread() {

        @Override
        public void run() {
            Random random = random();
            int max = atLeast(random, 401);
            int sleep = atLeast(random, 25);
            log.info("SHARDSPLITTEST: Going to add " + max + " number of docs at 1 doc per " + sleep + "ms");
            Set<String> deleted = new HashSet<>();
            for (int id = 101; id < max; id++) {
                try {
                    indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id), id);
                    Thread.sleep(sleep);
                    if (usually(random)) {
                        String delId = String.valueOf(random.nextInt(id - 101 + 1) + 101);
                        if (deleted.contains(delId))
                            continue;
                        try {
                            deleteAndUpdateCount(router, ranges, docCounts, delId);
                            deleted.add(delId);
                        } catch (Exception e) {
                            log.error("Exception while deleting docs", e);
                        }
                    }
                } catch (Exception e) {
                    log.error("Exception while adding doc id = " + id, e);
                    // do not select this id for deletion ever
                    deleted.add(String.valueOf(id));
                }
            }
        }
    };
    indexThread.start();
    try {
        for (int i = 0; i < 3; i++) {
            try {
                splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
                log.info("Layout after split: \n");
                printLayout();
                break;
            } catch (HttpSolrClient.RemoteSolrException e) {
                if (e.code() != 500) {
                    throw e;
                }
                log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
                if (i == 2) {
                    fail("SPLITSHARD was not successful even after three tries");
                }
            }
        }
    } finally {
        try {
            indexThread.join();
        } catch (InterruptedException e) {
            log.error("Indexing thread interrupted", e);
        }
    }
    waitForRecoveriesToFinish(true);
    checkDocCountsAndShardStates(docCounts, numReplicas);
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) ArrayList(java.util.ArrayList) SolrServerException(org.apache.solr.client.solrj.SolrServerException) IOException(java.io.IOException) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) Random(java.util.Random) Slice(org.apache.solr.common.cloud.Slice) DocRouter(org.apache.solr.common.cloud.DocRouter) HashSet(java.util.HashSet)
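In the usually() branch above, partitionRange(4, shard1Range) cuts the shard's hash range into four equal sub-ranges; joining the min of the first with the max of the third produces one sub-range covering roughly 75% of the original, and the remaining quarter becomes the second sub-shard. Below is a minimal standalone sketch of that arithmetic, not taken from the test; the class name RangeSplitSketch and the use of the default compositeId router over the full hash range are illustrative assumptions.

import java.util.ArrayList;
import java.util.List;
import org.apache.solr.common.cloud.DocRouter;

public class RangeSplitSketch {

    public static void main(String[] args) {
        // The default router is the compositeId router; fullRange() spans the whole 32-bit hash space.
        DocRouter router = DocRouter.DEFAULT;
        DocRouter.Range fullRange = router.fullRange();
        // Cut the range into four equal sub-ranges.
        List<DocRouter.Range> quarters = router.partitionRange(4, fullRange);
        List<DocRouter.Range> subRanges = new ArrayList<>();
        // First sub-shard gets quarters 0..2 (about 75% of the range); the second sub-shard gets the last quarter.
        subRanges.add(new DocRouter.Range(quarters.get(0).min, quarters.get(2).max));
        subRanges.add(quarters.get(3));
        for (DocRouter.Range r : subRanges) {
            // Range.toString() prints the hex "min-max" form of each sub-range.
            System.out.println(r);
        }
    }
}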

Example 42 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class ShardSplitTest method checkSubShardConsistency.

protected void checkSubShardConsistency(String shard) throws SolrServerException, IOException {
    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
    query.set("distrib", false);
    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    Slice slice = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, shard);
    long[] numFound = new long[slice.getReplicasMap().size()];
    int c = 0;
    for (Replica replica : slice.getReplicas()) {
        String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
        QueryResponse response;
        try (HttpSolrClient client = getHttpSolrClient(coreUrl)) {
            response = client.query(query);
        }
        numFound[c++] = response.getResults().getNumFound();
        log.info("Shard: " + shard + " Replica: {} has {} docs", coreUrl, String.valueOf(response.getResults().getNumFound()));
        assertTrue("Shard: " + shard + " Replica: " + coreUrl + " has 0 docs", response.getResults().getNumFound() > 0);
    }
    for (int i = 0; i < slice.getReplicasMap().size(); i++) {
        assertEquals(shard + " is not consistent", numFound[0], numFound[i]);
    }
}
Also used : HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) ClusterState(org.apache.solr.common.cloud.ClusterState) ZkCoreNodeProps(org.apache.solr.common.cloud.ZkCoreNodeProps) Slice(org.apache.solr.common.cloud.Slice) QueryResponse(org.apache.solr.client.solrj.response.QueryResponse) Replica(org.apache.solr.common.cloud.Replica) SolrQuery(org.apache.solr.client.solrj.SolrQuery)

Example 43 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class DistribJoinFromCollectionTest method setupCluster.

@BeforeClass
public static void setupCluster() throws Exception {
    final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");
    String configName = "solrCloudCollectionConfig";
    int nodeCount = 5;
    configureCluster(nodeCount).addConfig(configName, configDir).configure();
    Map<String, String> collectionProperties = new HashMap<>();
    collectionProperties.put("config", "solrconfig-tlog.xml");
    collectionProperties.put("schema", "schema.xml");
    // create a collection holding data for the "to" side of the JOIN
    int shards = 2;
    int replicas = 2;
    CollectionAdminRequest.createCollection(toColl, configName, shards, replicas).setProperties(collectionProperties).process(cluster.getSolrClient());
    // get the set of nodes where replicas for the "to" collection exist
    Set<String> nodeSet = new HashSet<>();
    ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
    ClusterState cs = zkStateReader.getClusterState();
    for (Slice slice : cs.getCollection(toColl).getActiveSlices()) {
        for (Replica replica : slice.getReplicas()) {
            nodeSet.add(replica.getNodeName());
        }
    }
    assertTrue(nodeSet.size() > 0);
    // deploy the "from" collection to all nodes where the "to" collection exists
    CollectionAdminRequest.createCollection(fromColl, configName, 1, 4).setCreateNodeSet(StringUtils.join(nodeSet, ",")).setProperties(collectionProperties).process(cluster.getSolrClient());
    toDocId = indexDoc(toColl, 1001, "a", null, "b");
    indexDoc(fromColl, 2001, "a", "c", null);
    // so the commits fire
    Thread.sleep(1000);
}
Also used : Path(java.nio.file.Path) ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) ClusterState(org.apache.solr.common.cloud.ClusterState) HashMap(java.util.HashMap) Slice(org.apache.solr.common.cloud.Slice) Replica(org.apache.solr.common.cloud.Replica) HashSet(java.util.HashSet) BeforeClass(org.junit.BeforeClass)

Example 44 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class SyncSliceTest method waitTillAllNodesActive.

private void waitTillAllNodesActive() throws Exception {
    for (int i = 0; i < 60; i++) {
        Thread.sleep(3000);
        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
        ClusterState clusterState = zkStateReader.getClusterState();
        DocCollection collection1 = clusterState.getCollection("collection1");
        Slice slice = collection1.getSlice("shard1");
        Collection<Replica> replicas = slice.getReplicas();
        boolean allActive = true;
        for (Replica replica : replicas) {
            if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
                allActive = false;
                break;
            }
        }
        if (allActive) {
            return;
        }
    }
    printLayout();
    fail("timeout waiting to see all nodes active");
}
Also used : ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) ClusterState(org.apache.solr.common.cloud.ClusterState) Slice(org.apache.solr.common.cloud.Slice) DocCollection(org.apache.solr.common.cloud.DocCollection) Replica(org.apache.solr.common.cloud.Replica)
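The same condition can also be handed to ZkStateReader.waitForState as a predicate instead of a hand-rolled polling loop. This is a hedged sketch, assuming a Solr version whose ZkStateReader exposes waitForState(collection, wait, unit, CollectionStatePredicate); the helper name waitTillShard1Active is illustrative, and java.util.concurrent.TimeUnit must be imported alongside the cloud classes used above.

private void waitTillShard1Active() throws Exception {
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    // Block for up to three minutes until every replica of shard1 is ACTIVE and hosted on a live node.
    zkStateReader.waitForState("collection1", 3, TimeUnit.MINUTES, (liveNodes, collectionState) -> {
        if (collectionState == null) {
            return false;
        }
        Slice slice = collectionState.getSlice("shard1");
        if (slice == null) {
            return false;
        }
        for (Replica replica : slice.getReplicas()) {
            if (!liveNodes.contains(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
                return false;
            }
        }
        return true;
    });
}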

Example 45 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class SliceStateTest method testDefaultSliceState.

@Test
public void testDefaultSliceState() {
    Map<String, DocCollection> collectionStates = new HashMap<>();
    Set<String> liveNodes = new HashSet<>();
    liveNodes.add("node1");
    Map<String, Slice> slices = new HashMap<>();
    Map<String, Replica> sliceToProps = new HashMap<>();
    Map<String, Object> props = new HashMap<>();
    Replica replica = new Replica("node1", props);
    sliceToProps.put("node1", replica);
    Slice slice = new Slice("shard1", sliceToProps, null);
    assertSame("Default state not set to active", Slice.State.ACTIVE, slice.getState());
    slices.put("shard1", slice);
    collectionStates.put("collection1", new DocCollection("collection1", slices, null, DocRouter.DEFAULT));
    ClusterState clusterState = new ClusterState(-1, liveNodes, collectionStates);
    byte[] bytes = Utils.toJSON(clusterState);
    ClusterState loadedClusterState = ClusterState.load(-1, bytes, liveNodes);
    assertSame("Default state not set to active", Slice.State.ACTIVE, loadedClusterState.getSlice("collection1", "shard1").getState());
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) HashMap(java.util.HashMap) Replica(org.apache.solr.common.cloud.Replica) Slice(org.apache.solr.common.cloud.Slice) DocCollection(org.apache.solr.common.cloud.DocCollection) HashSet(java.util.HashSet) Test(org.junit.Test)
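For contrast with the default, a Slice can carry an explicit state through its property map. A small sketch, assuming the standard ZkStateReader.STATE_PROP ("state") key is honored by the Slice constructor and reusing the sliceToProps replica map from the test above:

    Map<String, Object> sliceProps = new HashMap<>();
    // An explicit "state" property overrides the ACTIVE default asserted above.
    sliceProps.put(ZkStateReader.STATE_PROP, Slice.State.CONSTRUCTION.toString());
    Slice constructionSlice = new Slice("shard1_0", sliceToProps, sliceProps);
    assertSame("Explicit state should override the default", Slice.State.CONSTRUCTION, constructionSlice.getState());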

Aggregations

ClusterState (org.apache.solr.common.cloud.ClusterState): 122 usages
Slice (org.apache.solr.common.cloud.Slice): 78 usages
Replica (org.apache.solr.common.cloud.Replica): 65 usages
ZkStateReader (org.apache.solr.common.cloud.ZkStateReader): 56 usages
DocCollection (org.apache.solr.common.cloud.DocCollection): 49 usages
HashMap (java.util.HashMap): 42 usages
ArrayList (java.util.ArrayList): 36 usages
Map (java.util.Map): 25 usages
IOException (java.io.IOException): 20 usages
Test (org.junit.Test): 18 usages
HashSet (java.util.HashSet): 17 usages
SolrException (org.apache.solr.common.SolrException): 16 usages
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient): 15 usages
SolrQuery (org.apache.solr.client.solrj.SolrQuery): 13 usages
JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner): 13 usages
CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient): 13 usages
ZkCoreNodeProps (org.apache.solr.common.cloud.ZkCoreNodeProps): 13 usages
ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps): 13 usages
List (java.util.List): 12 usages
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 12 usages
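Taken together, the aggregated classes trace the usual read path: a ZkStateReader supplies the ClusterState, which is walked down through DocCollection, Slice, and Replica. A minimal sketch of that traversal, assuming a CloudSolrClient named cloudClient and a collection named "collection1" (both placeholders):

    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection docCollection = clusterState.getCollection("collection1");
    for (Slice slice : docCollection.getSlices()) {
        for (Replica replica : slice.getReplicas()) {
            // A replica is usable when it reports ACTIVE and its node appears in the live-nodes set.
            boolean live = clusterState.liveNodesContain(replica.getNodeName());
            boolean active = replica.getState() == Replica.State.ACTIVE;
            System.out.println(slice.getName() + " -> " + replica.getName() + " live=" + live + " active=" + active);
        }
    }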