Example 76 with DocCollection

Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by Apache.

The class DistribDocExpirationUpdateProcessorTest, method test(): indexes a batch of documents without a TTL, adds one document with a 30-second TTL, waits for it to be deleted automatically, and then compares per-core index versions recorded before and after to verify that only one shard's index changed.

@Test
public void test() throws Exception {
    // some docs with no expiration
    UpdateRequest req1 = new UpdateRequest();
    for (int i = 1; i <= 100; i++) {
        req1.add(sdoc("id", i));
    }
    req1.commit(cluster.getSolrClient(), COLLECTION);
    // this doc better not already exist
    waitForNoResults(0, params("q", "id:999", "rows", "0", "_trace", "sanity_check"));
    // record the indexversion for each server so we can check later
    // that it only changes for one shard
    final Map<String, Long> initIndexVersions = getIndexVersionOfAllReplicas();
    assertTrue("WTF? no versions?", 0 < initIndexVersions.size());
    // add a doc with a short TTL 
    new UpdateRequest().add(sdoc("id", "999", "tTl_s", "+30SECONDS")).commit(cluster.getSolrClient(), COLLECTION);
    // wait for one doc to be deleted
    waitForNoResults(180, params("q", "id:999", "rows", "0", "_trace", "did_it_expire_yet"));
    // verify only one shard changed
    final Map<String, Long> finalIndexVersions = getIndexVersionOfAllReplicas();
    assertEquals("WTF? not same num versions?", initIndexVersions.size(), finalIndexVersions.size());
    final Set<String> nodesThatChange = new HashSet<String>();
    final Set<String> shardsThatChange = new HashSet<String>();
    int coresCompared = 0;
    DocCollection collectionState = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
    for (Replica replica : collectionState.getReplicas()) {
        coresCompared++;
        String name = replica.getName();
        String core = replica.getCoreName();
        Long initVersion = initIndexVersions.get(core);
        Long finalVersion = finalIndexVersions.get(core);
        assertNotNull(name + ": no init version for core: " + core, initVersion);
        assertNotNull(name + ": no final version for core: " + core, finalVersion);
        if (!initVersion.equals(finalVersion)) {
            nodesThatChange.add(core + "(" + name + ")");
            shardsThatChange.add(name);
        }
    }
    assertEquals("Exactly one shard should have changed, instead: " + shardsThatChange + " nodes=(" + nodesThatChange + ")", 1, shardsThatChange.size());
    assertEquals("somehow we missed some cores?", initIndexVersions.size(), coresCompared);
// TODO: above logic verifies that deleteByQuery happens on all nodes, and ...
// doesn't affect searcher re-open on shards w/o expired docs ... can we also verify 
// that *only* one node is sending the deletes ?
// (ie: no flood of redundant deletes?)
}
Also used : UpdateRequest(org.apache.solr.client.solrj.request.UpdateRequest) DocCollection(org.apache.solr.common.cloud.DocCollection) Replica(org.apache.solr.common.cloud.Replica) HashSet(java.util.HashSet) Test(org.junit.Test)
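
The getIndexVersionOfAllReplicas() helper is not shown on this page. A minimal sketch of what such a helper could look like, reusing the cluster and COLLECTION fields from the test above and the standard ReplicationHandler indexversion command; the name, parameters, and details here are illustrative rather than the project's actual implementation.

private Map<String, Long> getIndexVersionOfAllReplicas() throws IOException, SolrServerException {
    Map<String, Long> results = new HashMap<>();
    DocCollection collectionState = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
    for (Replica replica : collectionState.getReplicas()) {
        // talk to the core directly via its base URL, e.g. http://host:port/solr/collection1_shard1_replica1
        try (HttpSolrClient coreClient = new HttpSolrClient.Builder(replica.getCoreUrl()).build()) {
            ModifiableSolrParams params = new ModifiableSolrParams();
            // a "qt" value starting with "/" makes QueryRequest hit that handler instead of /select
            params.set("qt", "/replication");
            params.set("command", "indexversion");
            NamedList<Object> rsp = coreClient.request(new QueryRequest(params));
            results.put(replica.getCoreName(), (Long) rsp.get("indexversion"));
        }
    }
    return results;
}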

Example 77 with DocCollection

Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by Apache.

The class SyncSliceTest, method waitTillAllNodesActive(): polls the cluster state for up to three minutes (60 iterations, 3 seconds apart) until every replica of shard1 in collection1 is on a live node and in the ACTIVE state, and fails the test if that never happens.

private void waitTillAllNodesActive() throws Exception {
    for (int i = 0; i < 60; i++) {
        Thread.sleep(3000);
        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
        ClusterState clusterState = zkStateReader.getClusterState();
        DocCollection collection1 = clusterState.getCollection("collection1");
        Slice slice = collection1.getSlice("shard1");
        Collection<Replica> replicas = slice.getReplicas();
        boolean allActive = true;
        for (Replica replica : replicas) {
            if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
                allActive = false;
                break;
            }
        }
        if (allActive) {
            return;
        }
    }
    printLayout();
    fail("timeout waiting to see all nodes active");
}
Also used : ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) ClusterState(org.apache.solr.common.cloud.ClusterState) Slice(org.apache.solr.common.cloud.Slice) DocCollection(org.apache.solr.common.cloud.DocCollection) Replica(org.apache.solr.common.cloud.Replica)
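
The walk over ClusterState, DocCollection, Slice and Replica shown above generalizes beyond shard1 of collection1. A minimal sketch of a reusable check using the same cloudClient and the same calls; the helper name is illustrative, not part of the project.

private boolean allReplicasActive(String collectionName) {
    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    DocCollection collection = clusterState.getCollection(collectionName);
    for (Slice slice : collection.getSlices()) {
        for (Replica replica : slice.getReplicas()) {
            // a replica only counts if its node is live and it reports the ACTIVE state
            if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
                return false;
            }
        }
    }
    return true;
}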

Example 78 with DocCollection

Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by Apache.

The class SliceStateTest, method testDefaultSliceState(): builds a ClusterState by hand and asserts that a Slice created without an explicit state defaults to ACTIVE, both before and after the cluster state is serialized to JSON and loaded back.

@Test
public void testDefaultSliceState() {
    Map<String, DocCollection> collectionStates = new HashMap<>();
    Set<String> liveNodes = new HashSet<>();
    liveNodes.add("node1");
    Map<String, Slice> slices = new HashMap<>();
    Map<String, Replica> sliceToProps = new HashMap<>();
    Map<String, Object> props = new HashMap<>();
    Replica replica = new Replica("node1", props);
    sliceToProps.put("node1", replica);
    Slice slice = new Slice("shard1", sliceToProps, null);
    assertSame("Default state not set to active", Slice.State.ACTIVE, slice.getState());
    slices.put("shard1", slice);
    collectionStates.put("collection1", new DocCollection("collection1", slices, null, DocRouter.DEFAULT));
    ClusterState clusterState = new ClusterState(-1, liveNodes, collectionStates);
    byte[] bytes = Utils.toJSON(clusterState);
    ClusterState loadedClusterState = ClusterState.load(-1, bytes, liveNodes);
    assertSame("Default state not set to active", Slice.State.ACTIVE, loadedClusterState.getSlice("collection1", "shard1").getState());
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) HashMap(java.util.HashMap) Replica(org.apache.solr.common.cloud.Replica) Slice(org.apache.solr.common.cloud.Slice) DocCollection(org.apache.solr.common.cloud.DocCollection) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 79 with DocCollection

Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by Apache.

The class TestTlogReplica, method testKillTlogReplica(): kills the Jetty node hosting a TLOG replica, verifies that indexing continues against the remaining active replica, then restarts the node and waits for the restarted replica to catch up.

public void testKillTlogReplica() throws Exception {
    DocCollection docCollection = createAndWaitForCollection(1, 0, 2, 0);
    waitForNumDocsInAllActiveReplicas(0);
    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
    cluster.getSolrClient().commit(collectionName);
    waitForNumDocsInAllActiveReplicas(1);
    // note: despite the variable name, this Jetty hosts the TLOG replica that is about to be killed
    JettySolrRunner pullReplicaJetty = cluster.getReplicaJetty(docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.TLOG)).get(0));
    ChaosMonkey.kill(pullReplicaJetty);
    waitForState("Replica not removed", collectionName, activeReplicaCount(0, 1, 0));
    //    // Also wait for the replica to be placed in state="down"
    //    waitForState("Didn't update state", collectionName, clusterStateReflectsActiveAndDownReplicas());
    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "bar"));
    cluster.getSolrClient().commit(collectionName);
    waitForNumDocsInAllActiveReplicas(2);
    ChaosMonkey.start(pullReplicaJetty);
    waitForState("Replica not added", collectionName, activeReplicaCount(0, 2, 0));
    waitForNumDocsInAllActiveReplicas(2);
}
Also used : SolrInputDocument(org.apache.solr.common.SolrInputDocument) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) DocCollection(org.apache.solr.common.cloud.DocCollection)
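
The activeReplicaCount(nrt, tlog, pull) predicate passed to waitForState above is a test helper that is not shown on this page. A plausible sketch, assuming the CollectionStatePredicate interface from org.apache.solr.common.cloud used by the SolrCloud test framework; the actual helper in the project may differ in detail.

private CollectionStatePredicate activeReplicaCount(int numNrt, int numTlog, int numPull) {
    return (liveNodes, collectionState) -> {
        if (collectionState == null) {
            return false;
        }
        int nrt = 0, tlog = 0, pull = 0;
        for (Replica replica : collectionState.getReplicas()) {
            // only count replicas that are active on a live node
            if (!replica.isActive(liveNodes)) {
                continue;
            }
            switch (replica.getType()) {
                case NRT:  nrt++;  break;
                case TLOG: tlog++; break;
                case PULL: pull++; break;
            }
        }
        return nrt == numNrt && tlog == numTlog && pull == numPull;
    };
}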

Example 80 with DocCollection

Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by Apache.

The class TestTlogReplica, method doReplaceLeader(boolean removeReplica): either deletes the shard1 leader replica or kills its Jetty node, waits for a new leader to be elected, and verifies that indexing and replication continue on the remaining TLOG replicas before the removed replica is added back or the node is restarted.

/*
   * validate leader election and that replication still happens on a new leader
   */
private void doReplaceLeader(boolean removeReplica) throws Exception {
    DocCollection docCollection = createAndWaitForCollection(1, 0, 2, 0);
    // Add a document and commit
    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
    cluster.getSolrClient().commit(collectionName);
    Slice s = docCollection.getSlices().iterator().next();
    try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
        assertEquals(1, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }
    waitForNumDocsInAllReplicas(1, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)), REPLICATION_TIMEOUT_SECS);
    // Delete leader replica from shard1
    JettySolrRunner leaderJetty = null;
    if (removeReplica) {
        CollectionAdminRequest.deleteReplica(collectionName, "shard1", s.getLeader().getName()).process(cluster.getSolrClient());
    } else {
        leaderJetty = cluster.getReplicaJetty(s.getLeader());
        ChaosMonkey.kill(leaderJetty);
        waitForState("Leader replica not removed", collectionName, clusterShape(1, 1));
        // Wait for cluster state to be updated
        waitForState("Replica state not updated in cluster state", collectionName, clusterStateReflectsActiveAndDownReplicas());
    }
    docCollection = assertNumberOfReplicas(0, 1, 0, true, true);
    // Wait until a new leader is elected
    TimeOut t = new TimeOut(30, TimeUnit.SECONDS);
    while (!t.hasTimedOut()) {
        docCollection = getCollectionState(collectionName);
        Replica leader = docCollection.getSlice("shard1").getLeader();
        if (leader != null && leader.isActive(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes())) {
            break;
        }
        Thread.sleep(500);
    }
    assertFalse("Timeout waiting for a new leader to be elected", t.hasTimedOut());
    // There is a new leader, I should be able to add and commit
    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "zoo"));
    cluster.getSolrClient().commit(collectionName);
    // Queries should still work
    waitForNumDocsInAllReplicas(2, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)), REPLICATION_TIMEOUT_SECS);
    // Start back the node
    if (removeReplica) {
        CollectionAdminRequest.addReplicaToShard(collectionName, "shard1", Replica.Type.TLOG).process(cluster.getSolrClient());
    } else {
        ChaosMonkey.start(leaderJetty);
    }
    waitForState("Expected collection to be 1x2", collectionName, clusterShape(1, 2));
    // added replica should replicate from the leader
    waitForNumDocsInAllReplicas(2, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)), REPLICATION_TIMEOUT_SECS);
}
Also used : HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) SolrInputDocument(org.apache.solr.common.SolrInputDocument) Slice(org.apache.solr.common.cloud.Slice) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) TimeOut(org.apache.solr.util.TimeOut) DocCollection(org.apache.solr.common.cloud.DocCollection) Replica(org.apache.solr.common.cloud.Replica) SolrQuery(org.apache.solr.client.solrj.SolrQuery)

Aggregations

DocCollection (org.apache.solr.common.cloud.DocCollection): 187
Slice (org.apache.solr.common.cloud.Slice): 120
Replica (org.apache.solr.common.cloud.Replica): 86
HashMap (java.util.HashMap): 55
ClusterState (org.apache.solr.common.cloud.ClusterState): 52
ArrayList (java.util.ArrayList): 50
Map (java.util.Map): 42
SolrException (org.apache.solr.common.SolrException): 41
Test (org.junit.Test): 39
ZkStateReader (org.apache.solr.common.cloud.ZkStateReader): 32
List (java.util.List): 23
NamedList (org.apache.solr.common.util.NamedList): 23
HashSet (java.util.HashSet): 21
JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner): 19
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient): 19
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 19
CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient): 17
SolrQuery (org.apache.solr.client.solrj.SolrQuery): 16
SolrInputDocument (org.apache.solr.common.SolrInputDocument): 16
ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps): 15