
Example 71 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class ZkStateWriterTest method testZkStateWriterBatching.

public void testZkStateWriterBatching() throws Exception {
    String zkDir = createTempDir("testZkStateWriterBatching").toFile().getAbsolutePath();
    ZkTestServer server = new ZkTestServer(zkDir);
    SolrZkClient zkClient = null;
    try {
        server.run();
        AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
        AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
        zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
        ZkController.createClusterZkNodes(zkClient);
        try (ZkStateReader reader = new ZkStateReader(zkClient)) {
            reader.createClusterStateWatchersAndUpdate();
            ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
            assertFalse("Deletes can always be batched", writer.maybeFlushBefore(new ZkWriteCommand("xyz", null)));
            assertFalse("Deletes can always be batched", writer.maybeFlushAfter(new ZkWriteCommand("xyz", null)));
            zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
            zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c2", true);
            // create new collection with stateFormat = 2
            ZkWriteCommand c1 = new ZkWriteCommand("c1", new DocCollection("c1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c1"));
            assertFalse("First requests can always be batched", writer.maybeFlushBefore(c1));
            ClusterState clusterState = writer.enqueueUpdate(reader.getClusterState(), c1, null);
            ZkWriteCommand c2 = new ZkWriteCommand("c2", new DocCollection("c2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c2"));
            assertFalse("Different (new) collection create can be batched together with another create", writer.maybeFlushBefore(c2));
            // simulate three state changes on same collection, all should be batched together before
            assertFalse(writer.maybeFlushBefore(c1));
            assertFalse(writer.maybeFlushBefore(c1));
            assertFalse(writer.maybeFlushBefore(c1));
            // and after too
            assertFalse(writer.maybeFlushAfter(c1));
            assertFalse(writer.maybeFlushAfter(c1));
            assertFalse(writer.maybeFlushAfter(c1));
            // simulate three state changes on two different collections with stateFormat=2, all should be batched
            assertFalse(writer.maybeFlushBefore(c1));
            // flushAfter has to be called as it updates the internal batching related info
            assertFalse(writer.maybeFlushAfter(c1));
            assertFalse(writer.maybeFlushBefore(c2));
            assertFalse(writer.maybeFlushAfter(c2));
            assertFalse(writer.maybeFlushBefore(c1));
            assertFalse(writer.maybeFlushAfter(c1));
            // create a collection in stateFormat = 1 i.e. inside the main cluster state
            ZkWriteCommand c3 = new ZkWriteCommand("c3", new DocCollection("c3", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE));
            clusterState = writer.enqueueUpdate(clusterState, c3, null);
            // simulate three state changes in c3, all should be batched
            for (int i = 0; i < 3; i++) {
                assertFalse(writer.maybeFlushBefore(c3));
                assertFalse(writer.maybeFlushAfter(c3));
            }
            // simulate state change in c3 (stateFormat=1) interleaved with state changes from c1,c2 (stateFormat=2)
            // none should be batched together
            assertFalse(writer.maybeFlushBefore(c3));
            assertFalse(writer.maybeFlushAfter(c3));
            assertTrue("different stateFormat, should be flushed", writer.maybeFlushBefore(c1));
            assertFalse(writer.maybeFlushAfter(c1));
            assertTrue("different stateFormat, should be flushed", writer.maybeFlushBefore(c3));
            assertFalse(writer.maybeFlushAfter(c3));
            assertTrue("different stateFormat, should be flushed", writer.maybeFlushBefore(c2));
            assertFalse(writer.maybeFlushAfter(c2));
        }
    } finally {
        IOUtils.close(zkClient);
        server.shutdown();
    }
}
Also used: ZkStateReader (org.apache.solr.common.cloud.ZkStateReader), ClusterState (org.apache.solr.common.cloud.ClusterState), ZkTestServer (org.apache.solr.cloud.ZkTestServer), Overseer (org.apache.solr.cloud.Overseer), HashMap (java.util.HashMap), DocCollection (org.apache.solr.common.cloud.DocCollection), SolrZkClient (org.apache.solr.common.cloud.SolrZkClient)
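
Taken together, the maybeFlushBefore/maybeFlushAfter assertions above describe the contract an Overseer-style write loop relies on. A minimal sketch of such a loop, assuming a hypothetical commands list of pending ZkWriteCommand objects (the method name and the list are illustrative, not part of the test):

private ClusterState applyBatched(ZkStateReader reader, ZkStateWriter writer, List<ZkWriteCommand> commands) throws Exception {
    ClusterState state = reader.getClusterState();
    for (ZkWriteCommand cmd : commands) {
        // command is incompatible with the pending batch (e.g. a different stateFormat): write out first
        if (writer.maybeFlushBefore(cmd)) {
            state = writer.writePendingUpdates();
        }
        state = writer.enqueueUpdate(state, cmd, null);
        // the batch has hit the writer's internal size/age limits: write out now
        if (writer.maybeFlushAfter(cmd)) {
            state = writer.writePendingUpdates();
        }
    }
    // flush whatever is still pending
    return writer.writePendingUpdates();
}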

Example 72 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class ZkStateWriterTest method testSingleLegacyCollection.

public void testSingleLegacyCollection() throws Exception {
    String zkDir = createTempDir("testSingleLegacyCollection").toFile().getAbsolutePath();
    ZkTestServer server = new ZkTestServer(zkDir);
    SolrZkClient zkClient = null;
    try {
        server.run();
        AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
        AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
        zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
        ZkController.createClusterZkNodes(zkClient);
        try (ZkStateReader reader = new ZkStateReader(zkClient)) {
            reader.createClusterStateWatchersAndUpdate();
            ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
            zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
            // create new collection with stateFormat = 1
            ZkWriteCommand c1 = new ZkWriteCommand("c1", new DocCollection("c1", new HashMap<String, Slice>(), new HashMap<String, Object>(), DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE));
            ClusterState clusterState = writer.enqueueUpdate(reader.getClusterState(), c1, null);
            writer.writePendingUpdates();
            Map map = (Map) Utils.fromJSON(zkClient.getData("/clusterstate.json", null, null, true));
            assertNotNull(map.get("c1"));
            boolean exists = zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", true);
            assertFalse(exists);
        }
    } finally {
        IOUtils.close(zkClient);
        server.shutdown();
    }
}
Also used: ClusterState (org.apache.solr.common.cloud.ClusterState), ZkTestServer (org.apache.solr.cloud.ZkTestServer), Overseer (org.apache.solr.cloud.Overseer), HashMap (java.util.HashMap), SolrZkClient (org.apache.solr.common.cloud.SolrZkClient), ZkStateReader (org.apache.solr.common.cloud.ZkStateReader), DocCollection (org.apache.solr.common.cloud.DocCollection), Map (java.util.Map)
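
The two assertions at the end distinguish the on-disk locations by stateFormat: format 1 collections live inside the shared /clusterstate.json, while format 2 collections get their own state.json under the collection's znode. A minimal sketch of reading both locations, assuming an open SolrZkClient zkClient as in the test:

// shared cluster state (stateFormat=1); ZkStateReader.CLUSTER_STATE is "/clusterstate.json"
Map<?, ?> shared = (Map<?, ?>) Utils.fromJSON(zkClient.getData(ZkStateReader.CLUSTER_STATE, null, null, true));
Object c1State = shared.get("c1"); // non-null only for stateFormat=1 collections

// per-collection state (stateFormat=2)
String statePath = ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json";
if (zkClient.exists(statePath, true)) {
    Map<?, ?> own = (Map<?, ?>) Utils.fromJSON(zkClient.getData(statePath, null, null, true));
}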

Example 73 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class ShardSplitTest method splitByRouteKeyTest.

private void splitByRouteKeyTest() throws Exception {
    log.info("Starting splitByRouteKeyTest");
    String collectionName = "splitByRouteKeyTest";
    int numShards = 4;
    int replicationFactor = 2;
    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
    HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
    try (CloudSolrClient client = createCloudClient(null)) {
        Map<String, Object> props = Utils.makeMap(REPLICATION_FACTOR, replicationFactor, MAX_SHARDS_PER_NODE, maxShardsPerNode, NUM_SLICES, numShards);
        createCollection(collectionInfos, collectionName, props, client);
    }
    List<Integer> list = collectionInfos.get(collectionName);
    checkForCollection(collectionName, list, null);
    waitForRecoveriesToFinish(false);
    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
        String splitKey = "b!";
        ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
        final DocRouter router = clusterState.getCollection(collectionName).getRouter();
        Slice shard1 = clusterState.getSlice(collectionName, SHARD1);
        DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
        final List<DocRouter.Range> ranges = ((CompositeIdRouter) router).partitionRangeByKey(splitKey, shard1Range);
        final int[] docCounts = new int[ranges.size()];
        int uniqIdentifier = (1 << 12);
        int splitKeyDocCount = 0;
        for (int i = 100; i <= 200; i++) {
            // See comment in ShardRoutingTest for hash distribution
            String shardKey = "" + (char) ('a' + (i % 26));
            String idStr = shardKey + "!" + i;
            collectionClient.add(getDoc(id, idStr, "n_ti", (shardKey + "!").equals(splitKey) ? uniqIdentifier : i));
            int idx = getHashRangeIdx(router, ranges, idStr);
            if (idx != -1) {
                docCounts[idx]++;
            }
            if (splitKey.equals(shardKey + "!"))
                splitKeyDocCount++;
        }
        for (int i = 0; i < docCounts.length; i++) {
            int docCount = docCounts[i];
            log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
        }
        log.info("Route key doc count = {}", splitKeyDocCount);
        collectionClient.commit();
        for (int i = 0; i < 3; i++) {
            try {
                splitShard(collectionName, null, null, splitKey);
                break;
            } catch (HttpSolrClient.RemoteSolrException e) {
                if (e.code() != 500) {
                    throw e;
                }
                log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
                if (i == 2) {
                    fail("SPLITSHARD was not successful even after three tries");
                }
            }
        }
        waitForRecoveriesToFinish(collectionName, false);
        SolrQuery solrQuery = new SolrQuery("*:*");
        assertEquals("DocCount on shard1_0 does not match", docCounts[0], collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
        assertEquals("DocCount on shard1_1 does not match", docCounts[1], collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
        assertEquals("DocCount on shard1_2 does not match", docCounts[2], collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
        solrQuery = new SolrQuery("n_ti:" + uniqIdentifier);
        assertEquals("shard1_0 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
        assertEquals("Wrong number of docs on shard1_1 for route key: " + splitKey, splitKeyDocCount, collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
        assertEquals("shard1_2 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
    }
}
Also used: ClusterState (org.apache.solr.common.cloud.ClusterState), HashMap (java.util.HashMap), SolrQuery (org.apache.solr.client.solrj.SolrQuery), CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient), HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient), CompositeIdRouter (org.apache.solr.common.cloud.CompositeIdRouter), Slice (org.apache.solr.common.cloud.Slice), DocRouter (org.apache.solr.common.cloud.DocRouter), List (java.util.List), ArrayList (java.util.ArrayList)
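
The route key does the heavy lifting here: CompositeIdRouter hashes the prefix before the '!' separator to a contiguous span of the hash ring, which is what lets partitionRangeByKey carve shard1's range around it. A brief sketch of that relationship; the key "b!" mirrors the test, and the three-way split is the typical outcome when the key's span falls strictly inside the shard range:

CompositeIdRouter router = new CompositeIdRouter();
// the contiguous hash span owned by route key "b!"; every id of the form "b!<suffix>" hashes inside it
DocRouter.Range keyRange = router.keyHashRange("b!");
// carving a range around the key typically yields three sub-ranges: before the key's
// span, the key's span itself, and after it. That is why the test expects all splitKey
// docs on shard1_1 and none on shard1_0 or shard1_2.
List<DocRouter.Range> parts = router.partitionRangeByKey("b!", router.fullRange());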

Example 74 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class ShardSplitTest method testSplitWithChaosMonkey.

@Test
public void testSplitWithChaosMonkey() throws Exception {
    waitForThingsToLevelOut(15);
    List<StoppableIndexingThread> indexers = new ArrayList<>();
    try {
        for (int i = 0; i < 1; i++) {
            StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, cloudClient, String.valueOf(i), true);
            indexers.add(thread);
            thread.start();
        }
        // give the indexers some time to do their work
        Thread.sleep(1000);
    } catch (Exception e) {
        log.error("Error in test", e);
    } finally {
        for (StoppableIndexingThread indexer : indexers) {
            indexer.safeStop();
            indexer.join();
        }
    }
    cloudClient.commit();
    controlClient.commit();
    AtomicBoolean stop = new AtomicBoolean();
    AtomicBoolean killed = new AtomicBoolean(false);
    Runnable monkey = new Runnable() {

        @Override
        public void run() {
            ZkStateReader zkStateReader = cloudClient.getZkStateReader();
            zkStateReader.registerCollectionStateWatcher(AbstractDistribZkTestBase.DEFAULT_COLLECTION, new CollectionStateWatcher() {

                @Override
                public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
                    if (stop.get()) {
                        // abort and remove the watch
                        return true;
                    }
                    Slice slice = collectionState.getSlice(SHARD1_0);
                    if (slice != null && slice.getReplicas().size() > 1) {
                        // ensure that only one watcher invocation thread can kill!
                        if (killed.compareAndSet(false, true)) {
                            log.info("Monkey thread found 2 replicas for {} {}", AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
                            CloudJettyRunner cjetty = shardToLeaderJetty.get(SHARD1);
                            try {
                                Thread.sleep(1000 + random().nextInt(500));
                                ChaosMonkey.kill(cjetty);
                                stop.set(true);
                                return true;
                            } catch (Exception e) {
                                log.error("Monkey unable to kill jetty at port " + cjetty.jetty.getLocalPort(), e);
                            }
                        }
                    }
                    log.info("Monkey thread found only one replica for {} {}", AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
                    return false;
                }
            });
        }
    };
    Thread monkeyThread = null;
    /*
     Somehow the cluster state object inside this ZkStateReader has a static copy of the collection which is never
     updated, so any call to waitForRecoveriesToFinish just keeps looping until timeout.
     We work around this by explicitly registering the collection as an interesting one so that it is watched by
     ZkStateReader. See SOLR-9440. TODO: remove this hack after SOLR-9440 is fixed.
    */
    cloudClient.getZkStateReader().registerCore(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
    monkeyThread = new Thread(monkey);
    monkeyThread.start();
    try {
        CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
        splitShard.setShardName(SHARD1);
        String asyncId = splitShard.processAsync(cloudClient);
        RequestStatusState splitStatus = null;
        try {
            splitStatus = CollectionAdminRequest.requestStatus(asyncId).waitFor(cloudClient, 120);
        } catch (Exception e) {
            log.warn("Failed to get request status, maybe because the overseer node was shutdown by monkey", e);
        }
        // we don't care if the split failed because we are injecting faults and it is likely
        // that the split has failed but in any case we want to assert that all docs that got
        // indexed are available in SolrCloud and if the split succeeded then all replicas of the sub-shard
        // must be consistent (i.e. have same numdocs)
        log.info("Shard split request state is COMPLETED");
        stop.set(true);
        monkeyThread.join();
        Set<String> addFails = new HashSet<>();
        Set<String> deleteFails = new HashSet<>();
        for (StoppableIndexingThread indexer : indexers) {
            addFails.addAll(indexer.getAddFails());
            deleteFails.addAll(indexer.getDeleteFails());
        }
        CloudJettyRunner cjetty = shardToLeaderJetty.get(SHARD1);
        log.info("Starting shard1 leader jetty at port {}", cjetty.jetty.getLocalPort());
        ChaosMonkey.start(cjetty.jetty);
        cloudClient.getZkStateReader().forceUpdateCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
        log.info("Current collection state: {}", printClusterStateInfo(AbstractDistribZkTestBase.DEFAULT_COLLECTION));
        boolean replicaCreationsFailed = false;
        if (splitStatus == RequestStatusState.FAILED) {
            // either one or more replica creation failed (because it may have been created on the same parent shard leader node)
            // or the split may have failed while trying to soft-commit *after* all replicas have been created
            // the latter counts as a successful switch even if the API doesn't say so
            // so we must find a way to distinguish between the two
            // an easy way to do that is to look at the sub-shard replicas and check if the replica core actually exists
            // instead of existing solely inside the cluster state
            DocCollection collectionState = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
            Slice slice10 = collectionState.getSlice(SHARD1_0);
            Slice slice11 = collectionState.getSlice(SHARD1_1);
            if (slice10 != null && slice11 != null) {
                for (Replica replica : slice10) {
                    if (!doesReplicaCoreExist(replica)) {
                        replicaCreationsFailed = true;
                        break;
                    }
                }
                for (Replica replica : slice11) {
                    if (!doesReplicaCoreExist(replica)) {
                        replicaCreationsFailed = true;
                        break;
                    }
                }
            }
        }
        // true if sub-shard states switch to 'active' eventually
        AtomicBoolean areSubShardsActive = new AtomicBoolean(false);
        if (!replicaCreationsFailed) {
            // all sub-shard replicas were created successfully so all cores must recover eventually
            waitForRecoveriesToFinish(AbstractDistribZkTestBase.DEFAULT_COLLECTION, true);
            // let's wait for the overseer to switch shard states
            CountDownLatch latch = new CountDownLatch(1);
            cloudClient.getZkStateReader().registerCollectionStateWatcher(AbstractDistribZkTestBase.DEFAULT_COLLECTION, new CollectionStateWatcher() {

                @Override
                public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
                    Slice parent = collectionState.getSlice(SHARD1);
                    Slice slice10 = collectionState.getSlice(SHARD1_0);
                    Slice slice11 = collectionState.getSlice(SHARD1_1);
                    if (slice10 != null && slice11 != null && parent.getState() == Slice.State.INACTIVE && slice10.getState() == Slice.State.ACTIVE && slice11.getState() == Slice.State.ACTIVE) {
                        areSubShardsActive.set(true);
                        latch.countDown();
                        // removes the watch
                        return true;
                    } else if (slice10 != null && slice11 != null && parent.getState() == Slice.State.ACTIVE && slice10.getState() == Slice.State.RECOVERY_FAILED && slice11.getState() == Slice.State.RECOVERY_FAILED) {
                        areSubShardsActive.set(false);
                        latch.countDown();
                        return true;
                    }
                    return false;
                }
            });
            latch.await(2, TimeUnit.MINUTES);
            if (latch.getCount() != 0) {
                // sanity check
                fail("We think that split was successful but sub-shard states were not updated even after 2 minutes.");
            }
        }
        // for visibility of results on sub-shards
        cloudClient.commit();
        checkShardConsistency(true, true, addFails, deleteFails);
        long ctrlDocs = controlClient.query(new SolrQuery("*:*")).getResults().getNumFound();
        // ensure we have added more than 0 docs
        long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
        assertTrue("Found " + ctrlDocs + " control docs", cloudClientDocs > 0);
        assertEquals("Found " + ctrlDocs + " control docs and " + cloudClientDocs + " cloud docs", ctrlDocs, cloudClientDocs);
        // check consistency of sub-shard replicas explicitly because checkShardConsistency doesn't
        // handle new shards/replicas so well.
        if (areSubShardsActive.get()) {
            ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
            DocCollection collection = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
            int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
            assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
            numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_1));
            assertEquals("We should have checked consistency for exactly 2 replicas of shard1_1", 2, numReplicasChecked);
        }
    } finally {
        stop.set(true);
        monkeyThread.join();
    }
}
Also used: ArrayList (java.util.ArrayList), CollectionAdminRequest (org.apache.solr.client.solrj.request.CollectionAdminRequest), CollectionStateWatcher (org.apache.solr.common.cloud.CollectionStateWatcher), SolrQuery (org.apache.solr.client.solrj.SolrQuery), ZkStateReader (org.apache.solr.common.cloud.ZkStateReader), DocCollection (org.apache.solr.common.cloud.DocCollection), HashSet (java.util.HashSet), ClusterState (org.apache.solr.common.cloud.ClusterState), CountDownLatch (java.util.concurrent.CountDownLatch), Replica (org.apache.solr.common.cloud.Replica), SolrServerException (org.apache.solr.client.solrj.SolrServerException), IOException (java.io.IOException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Slice (org.apache.solr.common.cloud.Slice), RequestStatusState (org.apache.solr.client.solrj.response.RequestStatusState), Test (org.junit.Test)
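
The watcher-plus-latch pattern used twice above (register a CollectionStateWatcher, record the outcome in an AtomicBoolean, count down, and return true to deregister) is reusable whenever a test must block until the cluster reaches a target state. A minimal standalone sketch, with the collection name and the awaited condition as placeholders:

CountDownLatch latch = new CountDownLatch(1);
zkStateReader.registerCollectionStateWatcher("myCollection", (liveNodes, collectionState) -> {
    // replace with the condition you are waiting for; collectionState is null if the collection was deleted
    if (collectionState != null && collectionState.getSlice("shard1") != null) {
        latch.countDown();
        return true; // returning true removes the watcher
    }
    return false; // keep watching
});
if (!latch.await(2, TimeUnit.MINUTES)) {
    fail("cluster never reached the expected state within the timeout");
}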

Example 75 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class TestCloudDeleteByQuery method createMiniSolrCloudCluster.

@BeforeClass
private static void createMiniSolrCloudCluster() throws Exception {
    final String configName = "solrCloudCollectionConfig";
    final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");
    configureCluster(NUM_SERVERS).addConfig(configName, configDir).configure();
    Map<String, String> collectionProperties = new HashMap<>();
    collectionProperties.put("config", "solrconfig-tlog.xml");
    // string id for doc routing prefix
    collectionProperties.put("schema", "schema15.xml");
    CollectionAdminRequest.createCollection(COLLECTION_NAME, configName, NUM_SHARDS, REPLICATION_FACTOR).setProperties(collectionProperties).process(cluster.getSolrClient());
    CLOUD_CLIENT = cluster.getSolrClient();
    CLOUD_CLIENT.setDefaultCollection(COLLECTION_NAME);
    ZkStateReader zkStateReader = CLOUD_CLIENT.getZkStateReader();
    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION_NAME, zkStateReader, true, true, 330);
    // really hackish way to get a URL for specific nodes based on shard/replica hosting
    // inspired by TestMiniSolrCloudCluster
    HashMap<String, String> urlMap = new HashMap<>();
    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
        URL jettyURL = jetty.getBaseUrl();
        String nodeKey = jettyURL.getHost() + ":" + jettyURL.getPort() + jettyURL.getPath().replace("/", "_");
        urlMap.put(nodeKey, jettyURL.toString());
    }
    ClusterState clusterState = zkStateReader.getClusterState();
    for (Slice slice : clusterState.getSlices(COLLECTION_NAME)) {
        String shardName = slice.getName();
        Replica leader = slice.getLeader();
        assertNotNull("slice has null leader: " + slice.toString(), leader);
        assertNotNull("slice leader has null node name: " + slice.toString(), leader.getNodeName());
        String leaderUrl = urlMap.remove(leader.getNodeName());
        assertNotNull("could not find URL for " + shardName + " leader: " + leader.getNodeName(), leaderUrl);
        assertEquals("expected two total replicas for: " + slice.getName(), 2, slice.getReplicas().size());
        String passiveUrl = null;
        for (Replica replica : slice.getReplicas()) {
            if (!replica.equals(leader)) {
                passiveUrl = urlMap.remove(replica.getNodeName());
                assertNotNull("could not find URL for " + shardName + " replica: " + replica.getNodeName(), passiveUrl);
            }
        }
        assertNotNull("could not find URL for " + shardName + " replica", passiveUrl);
        if (shardName.equals("shard1")) {
            S_ONE_LEADER_CLIENT = getHttpSolrClient(leaderUrl + "/" + COLLECTION_NAME + "/");
            S_ONE_NON_LEADER_CLIENT = getHttpSolrClient(passiveUrl + "/" + COLLECTION_NAME + "/");
        } else if (shardName.equals("shard2")) {
            S_TWO_LEADER_CLIENT = getHttpSolrClient(leaderUrl + "/" + COLLECTION_NAME + "/");
            S_TWO_NON_LEADER_CLIENT = getHttpSolrClient(passiveUrl + "/" + COLLECTION_NAME + "/");
        } else {
            fail("unexpected shard: " + shardName);
        }
    }
    assertEquals("Should be exactly one server left (nost hosting either shard)", 1, urlMap.size());
    NO_COLLECTION_CLIENT = getHttpSolrClient(urlMap.values().iterator().next() + "/" + COLLECTION_NAME + "/");
    assertNotNull(S_ONE_LEADER_CLIENT);
    assertNotNull(S_TWO_LEADER_CLIENT);
    assertNotNull(S_ONE_NON_LEADER_CLIENT);
    assertNotNull(S_TWO_NON_LEADER_CLIENT);
    assertNotNull(NO_COLLECTION_CLIENT);
    // sanity check that our S_ONE_PRE & S_TWO_PRE really do map to shard1 & shard2 with default routing
    assertEquals(0, CLOUD_CLIENT.add(doc(f("id", S_ONE_PRE + random().nextInt()), f("expected_shard_s", "shard1"))).getStatus());
    assertEquals(0, CLOUD_CLIENT.add(doc(f("id", S_TWO_PRE + random().nextInt()), f("expected_shard_s", "shard2"))).getStatus());
    assertEquals(0, CLOUD_CLIENT.commit().getStatus());
    SolrDocumentList docs = CLOUD_CLIENT.query(params("q", "*:*", "fl", "id,expected_shard_s,[shard]")).getResults();
    assertEquals(2, docs.getNumFound());
    assertEquals(2, docs.size());
    for (SolrDocument doc : docs) {
        String expected = COLLECTION_NAME + "_" + doc.getFirstValue("expected_shard_s") + "_replica";
        String docShard = doc.getFirstValue("[shard]").toString();
        assertTrue("shard routing prefixes don't seem to be aligned anymore, " + "did someone change the default routing rules? " + "and/or the the default core name rules? " + "and/or the numShards used by this test? ... " + "couldn't find " + expected + " as substring of [shard] == '" + docShard + "' ... for docId == " + doc.getFirstValue("id"), docShard.contains(expected));
    }
}
Also used: Path (java.nio.file.Path), ClusterState (org.apache.solr.common.cloud.ClusterState), HashMap (java.util.HashMap), JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner), SolrDocumentList (org.apache.solr.common.SolrDocumentList), Replica (org.apache.solr.common.cloud.Replica), URL (java.net.URL), ZkStateReader (org.apache.solr.common.cloud.ZkStateReader), SolrDocument (org.apache.solr.common.SolrDocument), Slice (org.apache.solr.common.cloud.Slice), BeforeClass (org.junit.BeforeClass)
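
The node-key mapping above is, as its own comment admits, hackish; when a per-core URL is all that is needed, ZkCoreNodeProps can derive one directly from each Replica. A minimal sketch under that assumption (the test keeps the node map because it also needs to find the one node hosting neither shard):

for (Slice slice : clusterState.getSlices(COLLECTION_NAME)) {
    Replica leader = slice.getLeader();
    // core URL of the leader, e.g. http://127.0.0.1:<port>/solr/<core name>/
    String leaderCoreUrl = new ZkCoreNodeProps(leader).getCoreUrl();
    for (Replica replica : slice.getReplicas()) {
        if (!replica.equals(leader)) {
            String followerCoreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
        }
    }
}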

Aggregations

ClusterState (org.apache.solr.common.cloud.ClusterState): 122 usages
Slice (org.apache.solr.common.cloud.Slice): 78 usages
Replica (org.apache.solr.common.cloud.Replica): 65 usages
ZkStateReader (org.apache.solr.common.cloud.ZkStateReader): 56 usages
DocCollection (org.apache.solr.common.cloud.DocCollection): 49 usages
HashMap (java.util.HashMap): 42 usages
ArrayList (java.util.ArrayList): 36 usages
Map (java.util.Map): 25 usages
IOException (java.io.IOException): 20 usages
Test (org.junit.Test): 18 usages
HashSet (java.util.HashSet): 17 usages
SolrException (org.apache.solr.common.SolrException): 16 usages
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient): 15 usages
SolrQuery (org.apache.solr.client.solrj.SolrQuery): 13 usages
JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner): 13 usages
CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient): 13 usages
ZkCoreNodeProps (org.apache.solr.common.cloud.ZkCoreNodeProps): 13 usages
ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps): 13 usages
List (java.util.List): 12 usages
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 12 usages