
Example 71 with SolrZkClient

use of org.apache.solr.common.cloud.SolrZkClient in project lucene-solr by apache.

the class ChaosMonkey method causeConnectionLoss.

public static void causeConnectionLoss(JettySolrRunner jetty) {
    CoreContainer cores = jetty.getCoreContainer();
    if (cores != null) {
        monkeyLog("Will cause connection loss on " + jetty.getLocalPort());
        SolrZkClient zkClient = cores.getZkController().getZkClient();
        // Close the low-level ZooKeeper connection to simulate connection loss;
        // the session is kept, so the node is expected to reconnect on its own.
        zkClient.getSolrZooKeeper().closeCnxn();
    }
}
Also used : CoreContainer(org.apache.solr.core.CoreContainer) SolrZkClient(org.apache.solr.common.cloud.SolrZkClient)
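
For context, a hedged sketch of how a chaos test might invoke this helper against one node of a test cluster; the jettys list is an assumed test fixture, not part of the example above:

import java.util.List;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.cloud.ChaosMonkey;

// Illustrative only: sever the ZooKeeper connection of the first node. The node keeps
// its session and should reconnect on its own, which is what recovery tests rely on.
static void dropOneZkConnection(List<JettySolrRunner> jettys) {
    ChaosMonkey.causeConnectionLoss(jettys.get(0));
}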

Example 72 with SolrZkClient

use of org.apache.solr.common.cloud.SolrZkClient in project lucene-solr by apache.

the class MiniSolrCloudCluster method uploadConfigSet.

/**
 * Upload a config set
 * @param configDir a path to the config set to upload
 * @param configName the name to give the configset
 */
public void uploadConfigSet(Path configDir, String configName) throws IOException, KeeperException, InterruptedException {
    try (SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null)) {
        ZkConfigManager manager = new ZkConfigManager(zkClient);
        manager.uploadConfigDir(configDir, configName);
    }
}
Also used : ZkConfigManager(org.apache.solr.common.cloud.ZkConfigManager) SolrZkClient(org.apache.solr.common.cloud.SolrZkClient)
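
A hedged sketch of how a SolrCloud test might combine uploadConfigSet with collection creation; the config path and the collection/config names here are illustrative assumptions:

import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.MiniSolrCloudCluster;

// Illustrative only: upload a local config directory as "conf1", then create a
// single-shard collection that references it through the cluster's CloudSolrClient.
void createCollectionWithConfig(MiniSolrCloudCluster cluster) throws Exception {
    Path configDir = Paths.get("src/test/resources/configsets/minimal/conf"); // assumed path
    cluster.uploadConfigSet(configDir, "conf1");
    CollectionAdminRequest.createCollection("test_collection", "conf1", 1, 1)
        .process(cluster.getSolrClient());
}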

Example 73 with SolrZkClient

use of org.apache.solr.common.cloud.SolrZkClient in project lucene-solr by apache.

the class ZkStateReaderTest method testStateFormatUpdate.

public void testStateFormatUpdate(boolean explicitRefresh, boolean isInteresting) throws Exception {
    String zkDir = createTempDir("testStateFormatUpdate").toFile().getAbsolutePath();
    ZkTestServer server = new ZkTestServer(zkDir);
    SolrZkClient zkClient = null;
    ZkStateReader reader = null;
    try {
        server.run();
        AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
        AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
        zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
        ZkController.createClusterZkNodes(zkClient);
        reader = new ZkStateReader(zkClient);
        reader.createClusterStateWatchersAndUpdate();
        if (isInteresting) {
            reader.registerCore("c1");
        }
        ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
        zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
        {
            // create new collection with stateFormat = 1
            DocCollection stateV1 = new DocCollection("c1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE);
            ZkWriteCommand c1 = new ZkWriteCommand("c1", stateV1);
            writer.enqueueUpdate(reader.getClusterState(), c1, null);
            writer.writePendingUpdates();
            Map map = (Map) Utils.fromJSON(zkClient.getData("/clusterstate.json", null, null, true));
            assertNotNull(map.get("c1"));
            boolean exists = zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", true);
            assertFalse(exists);
            if (explicitRefresh) {
                reader.forceUpdateCollection("c1");
            } else {
                reader.waitForState("c1", TIMEOUT, TimeUnit.SECONDS, (n, c) -> c != null);
            }
            DocCollection collection = reader.getClusterState().getCollection("c1");
            assertEquals(1, collection.getStateFormat());
        }
        {
            // Now update the collection to stateFormat = 2
            DocCollection stateV2 = new DocCollection("c1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json");
            ZkWriteCommand c2 = new ZkWriteCommand("c1", stateV2);
            writer.enqueueUpdate(reader.getClusterState(), c2, null);
            writer.writePendingUpdates();
            Map map = (Map) Utils.fromJSON(zkClient.getData("/clusterstate.json", null, null, true));
            assertNull(map.get("c1"));
            boolean exists = zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", true);
            assertTrue(exists);
            if (explicitRefresh) {
                reader.forceUpdateCollection("c1");
            } else {
                reader.waitForState("c1", TIMEOUT, TimeUnit.SECONDS, (n, c) -> c != null && c.getStateFormat() == 2);
            }
            DocCollection collection = reader.getClusterState().getCollection("c1");
            assertEquals(2, collection.getStateFormat());
        }
    } finally {
        IOUtils.close(reader, zkClient);
        server.shutdown();
    }
}
Also used : ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) DocCollection(org.apache.solr.common.cloud.DocCollection) ClusterState(org.apache.solr.common.cloud.ClusterState) Utils(org.apache.solr.common.util.Utils) IOUtils(org.apache.lucene.util.IOUtils) HashMap(java.util.HashMap) SolrTestCaseJ4(org.apache.solr.SolrTestCaseJ4) OverseerTest(org.apache.solr.cloud.OverseerTest) TimeUnit(java.util.concurrent.TimeUnit) Map(java.util.Map) Overseer(org.apache.solr.cloud.Overseer) DocRouter(org.apache.solr.common.cloud.DocRouter) AbstractZkTestCase(org.apache.solr.cloud.AbstractZkTestCase) ZkController(org.apache.solr.cloud.ZkController) SolrZkClient(org.apache.solr.common.cloud.SolrZkClient) ZkTestServer(org.apache.solr.cloud.ZkTestServer)
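
The test exercises both refresh paths: forceUpdateCollection fetches the collection's state from ZooKeeper immediately, while waitForState registers a watch and blocks until the predicate matches. As a standalone, hedged sketch of the watch-driven variant (collection name and timeout are hypothetical):

import java.util.concurrent.TimeUnit;
import org.apache.solr.common.cloud.ZkStateReader;

// Illustrative only: block until the collection is visible with stateFormat = 2, or
// time out after 30 seconds. The predicate receives the live nodes and the current
// DocCollection, which is null until the collection's state has been read.
void waitForStateFormat2(ZkStateReader reader) throws Exception {
    reader.waitForState("c1", 30, TimeUnit.SECONDS,
        (liveNodes, collectionState) ->
            collectionState != null && collectionState.getStateFormat() == 2);
}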

Example 74 with SolrZkClient

use of org.apache.solr.common.cloud.SolrZkClient in project lucene-solr by apache.

the class ZkStateWriterTest method testZkStateWriterBatching.

public void testZkStateWriterBatching() throws Exception {
    String zkDir = createTempDir("testZkStateWriterBatching").toFile().getAbsolutePath();
    ZkTestServer server = new ZkTestServer(zkDir);
    SolrZkClient zkClient = null;
    try {
        server.run();
        AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
        AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
        zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
        ZkController.createClusterZkNodes(zkClient);
        try (ZkStateReader reader = new ZkStateReader(zkClient)) {
            reader.createClusterStateWatchersAndUpdate();
            ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
            assertFalse("Deletes can always be batched", writer.maybeFlushBefore(new ZkWriteCommand("xyz", null)));
            assertFalse("Deletes can always be batched", writer.maybeFlushAfter(new ZkWriteCommand("xyz", null)));
            zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
            zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c2", true);
            // create new collection with stateFormat = 2
            ZkWriteCommand c1 = new ZkWriteCommand("c1", new DocCollection("c1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c1"));
            assertFalse("First requests can always be batched", writer.maybeFlushBefore(c1));
            ClusterState clusterState = writer.enqueueUpdate(reader.getClusterState(), c1, null);
            ZkWriteCommand c2 = new ZkWriteCommand("c2", new DocCollection("c2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c2"));
            assertFalse("Different (new) collection create can be batched together with another create", writer.maybeFlushBefore(c2));
            // simulate three state changes on same collection, all should be batched together before
            assertFalse(writer.maybeFlushBefore(c1));
            assertFalse(writer.maybeFlushBefore(c1));
            assertFalse(writer.maybeFlushBefore(c1));
            // and after too
            assertFalse(writer.maybeFlushAfter(c1));
            assertFalse(writer.maybeFlushAfter(c1));
            assertFalse(writer.maybeFlushAfter(c1));
            // simulate three state changes on two different collections with stateFormat=2, all should be batched
            assertFalse(writer.maybeFlushBefore(c1));
            // flushAfter has to be called as it updates the internal batching related info
            assertFalse(writer.maybeFlushAfter(c1));
            assertFalse(writer.maybeFlushBefore(c2));
            assertFalse(writer.maybeFlushAfter(c2));
            assertFalse(writer.maybeFlushBefore(c1));
            assertFalse(writer.maybeFlushAfter(c1));
            // create a collection in stateFormat = 1 i.e. inside the main cluster state
            ZkWriteCommand c3 = new ZkWriteCommand("c3", new DocCollection("c3", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE));
            clusterState = writer.enqueueUpdate(clusterState, c3, null);
            // simulate three state changes in c3, all should be batched
            for (int i = 0; i < 3; i++) {
                assertFalse(writer.maybeFlushBefore(c3));
                assertFalse(writer.maybeFlushAfter(c3));
            }
            // simulate state change in c3 (stateFormat=1) interleaved with state changes from c1,c2 (stateFormat=2)
            // none should be batched together
            assertFalse(writer.maybeFlushBefore(c3));
            assertFalse(writer.maybeFlushAfter(c3));
            assertTrue("different stateFormat, should be flushed", writer.maybeFlushBefore(c1));
            assertFalse(writer.maybeFlushAfter(c1));
            assertTrue("different stateFormat, should be flushed", writer.maybeFlushBefore(c3));
            assertFalse(writer.maybeFlushAfter(c3));
            assertTrue("different stateFormat, should be flushed", writer.maybeFlushBefore(c2));
            assertFalse(writer.maybeFlushAfter(c2));
        }
    } finally {
        IOUtils.close(zkClient);
        server.shutdown();
    }
}
Also used : ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) ClusterState(org.apache.solr.common.cloud.ClusterState) ZkTestServer(org.apache.solr.cloud.ZkTestServer) Overseer(org.apache.solr.cloud.Overseer) HashMap(java.util.HashMap) DocCollection(org.apache.solr.common.cloud.DocCollection) SolrZkClient(org.apache.solr.common.cloud.SolrZkClient)
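
A hedged sketch of how a caller draining a queue of ZkWriteCommand updates might combine these calls; it illustrates the batching contract the assertions above verify and is not the actual Overseer loop:

import org.apache.solr.cloud.overseer.ZkStateWriter;
import org.apache.solr.cloud.overseer.ZkWriteCommand;
import org.apache.solr.common.cloud.ClusterState;

// Illustrative only: flush pending writes whenever the writer indicates the next
// command cannot be batched with what is already enqueued, then flush once more
// at the end so nothing stays buffered.
ClusterState drain(ZkStateWriter writer, ClusterState state, Iterable<ZkWriteCommand> commands)
        throws Exception {
    for (ZkWriteCommand cmd : commands) {
        if (writer.maybeFlushBefore(cmd)) {
            writer.writePendingUpdates();
        }
        state = writer.enqueueUpdate(state, cmd, null);
        if (writer.maybeFlushAfter(cmd)) {
            writer.writePendingUpdates();
        }
    }
    writer.writePendingUpdates();
    return state;
}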

Example 75 with SolrZkClient

use of org.apache.solr.common.cloud.SolrZkClient in project lucene-solr by apache.

the class ZkStateWriterTest method testSingleLegacyCollection.

public void testSingleLegacyCollection() throws Exception {
    String zkDir = createTempDir("testSingleLegacyCollection").toFile().getAbsolutePath();
    ZkTestServer server = new ZkTestServer(zkDir);
    SolrZkClient zkClient = null;
    try {
        server.run();
        AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
        AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
        zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
        ZkController.createClusterZkNodes(zkClient);
        try (ZkStateReader reader = new ZkStateReader(zkClient)) {
            reader.createClusterStateWatchersAndUpdate();
            ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
            zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
            // create new collection with stateFormat = 1
            ZkWriteCommand c1 = new ZkWriteCommand("c1", new DocCollection("c1", new HashMap<String, Slice>(), new HashMap<String, Object>(), DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE));
            ClusterState clusterState = writer.enqueueUpdate(reader.getClusterState(), c1, null);
            writer.writePendingUpdates();
            Map map = (Map) Utils.fromJSON(zkClient.getData("/clusterstate.json", null, null, true));
            assertNotNull(map.get("c1"));
            boolean exists = zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", true);
            assertFalse(exists);
        }
    } finally {
        IOUtils.close(zkClient);
        server.shutdown();
    }
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) ZkTestServer(org.apache.solr.cloud.ZkTestServer) Overseer(org.apache.solr.cloud.Overseer) HashMap(java.util.HashMap) SolrZkClient(org.apache.solr.common.cloud.SolrZkClient) ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) DocCollection(org.apache.solr.common.cloud.DocCollection) Map(java.util.Map)
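
For clarity, the only structural difference between the legacy and per-collection layouts in these tests is the znode path passed to the DocCollection constructor; a hedged side-by-side sketch using the same arguments as the examples above:

import java.util.HashMap;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.ZkStateReader;

// stateFormat = 1: the collection is stored inside the shared /clusterstate.json.
DocCollection legacy = new DocCollection("c1", new HashMap<>(), new HashMap<>(),
    DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE);
// stateFormat = 2: the collection gets its own /collections/c1/state.json znode.
DocCollection perCollection = new DocCollection("c1", new HashMap<>(), new HashMap<>(),
    DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json");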

Aggregations

SolrZkClient (org.apache.solr.common.cloud.SolrZkClient): 130
Test (org.junit.Test): 46
ZkStateReader (org.apache.solr.common.cloud.ZkStateReader): 34
HashMap (java.util.HashMap): 21
KeeperException (org.apache.zookeeper.KeeperException): 18
SolrException (org.apache.solr.common.SolrException): 15
ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps): 14
IOException (java.io.IOException): 13
ClusterState (org.apache.solr.common.cloud.ClusterState): 13
DocCollection (org.apache.solr.common.cloud.DocCollection): 12
Map (java.util.Map): 11
Slice (org.apache.solr.common.cloud.Slice): 11
Replica (org.apache.solr.common.cloud.Replica): 10
ArrayList (java.util.ArrayList): 9
CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient): 8
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient): 8
Overseer (org.apache.solr.cloud.Overseer): 8
ZkTestServer (org.apache.solr.cloud.ZkTestServer): 8
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 7
NamedList (org.apache.solr.common.util.NamedList): 7