Use of org.apache.solr.cloud.ZkTestServer in project lucene-solr by Apache.
From class TestZKPropertiesWriter, method dihZk_beforeClass:
@BeforeClass
public static void dihZk_beforeClass() throws Exception {
  // Start an embedded ZooKeeper test server backed by a temp directory.
  zkDir = createTempDir("zkData").toFile().getAbsolutePath();
  zkServer = new ZkTestServer(zkDir);
  zkServer.run();

  System.setProperty("solrcloud.skip.autorecovery", "true");
  System.setProperty("zkHost", zkServer.getZkAddress());
  System.setProperty("jetty.port", "0000");

  // Upload the DIH solrconfig/schema into ZooKeeper, then create the core container.
  AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), getFile("dih/solr"),
      "dataimport-solrconfig.xml", "dataimport-schema.xml");

  //initCore("solrconfig.xml", "schema.xml", getFile("dih/solr").getAbsolutePath());
  cc = createDefaultCoreContainer(getFile("dih/solr").toPath());
}
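The snippet above covers only setup. A matching teardown would typically shut down the CoreContainer and the ZkTestServer and clear the system properties that were set. The sketch below is illustrative: the method name dihZk_afterClass and the cleanup order are assumptions, not the project's actual @AfterClass.

// Illustrative teardown to pair with the setup above (names and ordering are assumptions;
// the real test class defines its own @AfterClass).
@AfterClass
public static void dihZk_afterClass() throws Exception {
  if (cc != null) {
    cc.shutdown();        // release the CoreContainer created in setup
  }
  zkServer.shutdown();    // stop the embedded ZooKeeper test server
  System.clearProperty("solrcloud.skip.autorecovery");
  System.clearProperty("zkHost");
  System.clearProperty("jetty.port");
}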
Use of org.apache.solr.cloud.ZkTestServer in project lucene-solr by Apache.
From class ZkStateReaderTest, method testWatchedCollectionCreation:
public void testWatchedCollectionCreation() throws Exception {
  String zkDir = createTempDir("testWatchedCollectionCreation").toFile().getAbsolutePath();
  ZkTestServer server = new ZkTestServer(zkDir);
  SolrZkClient zkClient = null;
  ZkStateReader reader = null;
  try {
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
    ZkController.createClusterZkNodes(zkClient);

    reader = new ZkStateReader(zkClient);
    reader.createClusterStateWatchersAndUpdate();
    reader.registerCore("c1");

    // Initially there should be no c1 collection.
    assertNull(reader.getClusterState().getCollectionRef("c1"));

    zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
    reader.forceUpdateCollection("c1");

    // Still no c1 collection, despite a collection path.
    assertNull(reader.getClusterState().getCollectionRef("c1"));

    ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());

    // create new collection with stateFormat = 2
    DocCollection state = new DocCollection("c1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT, 0,
        ZkStateReader.CLUSTER_STATE + "/c1/state.json");
    ZkWriteCommand wc = new ZkWriteCommand("c1", state);
    writer.enqueueUpdate(reader.getClusterState(), wc, null);
    writer.writePendingUpdates();
    assertTrue(zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", true));

    //reader.forceUpdateCollection("c1");
    reader.waitForState("c1", TIMEOUT, TimeUnit.SECONDS, (n, c) -> c != null);

    ClusterState.CollectionRef ref = reader.getClusterState().getCollectionRef("c1");
    assertNotNull(ref);
    assertFalse(ref.isLazilyLoaded());
    assertEquals(2, ref.get().getStateFormat());
  } finally {
    IOUtils.close(reader, zkClient);
    server.shutdown();
  }
}
Use of org.apache.solr.cloud.ZkTestServer in project lucene-solr by Apache.
From class ZkStateReaderTest, method testExternalCollectionWatchedNotWatched:
public void testExternalCollectionWatchedNotWatched() throws Exception {
  String zkDir = createTempDir("testExternalCollectionWatchedNotWatched").toFile().getAbsolutePath();
  ZkTestServer server = new ZkTestServer(zkDir);
  SolrZkClient zkClient = null;
  ZkStateReader reader = null;
  try {
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
    ZkController.createClusterZkNodes(zkClient);

    reader = new ZkStateReader(zkClient);
    reader.createClusterStateWatchersAndUpdate();

    ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
    zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);

    // create new collection with stateFormat = 2
    ZkWriteCommand c1 = new ZkWriteCommand("c1", new DocCollection("c1", new HashMap<>(), new HashMap<>(),
        DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json"));
    writer.enqueueUpdate(reader.getClusterState(), c1, null);
    writer.writePendingUpdates();
    reader.forceUpdateCollection("c1");

    assertTrue(reader.getClusterState().getCollectionRef("c1").isLazilyLoaded());
    reader.registerCore("c1");
    assertFalse(reader.getClusterState().getCollectionRef("c1").isLazilyLoaded());
    reader.unregisterCore("c1");
    assertTrue(reader.getClusterState().getCollectionRef("c1").isLazilyLoaded());
  } finally {
    IOUtils.close(reader, zkClient);
    server.shutdown();
  }
}
Use of org.apache.solr.cloud.ZkTestServer in project lucene-solr by Apache.
From class ZkStateWriterTest, method testExternalModificationToStateFormat2:
public void testExternalModificationToStateFormat2() throws Exception {
  String zkDir = createTempDir("testExternalModificationToStateFormat2").toFile().getAbsolutePath();
  ZkTestServer server = new ZkTestServer(zkDir);
  SolrZkClient zkClient = null;
  try {
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
    ZkController.createClusterZkNodes(zkClient);
    try (ZkStateReader reader = new ZkStateReader(zkClient)) {
      reader.createClusterStateWatchersAndUpdate();

      ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
      zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
      zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c2", true);

      ClusterState state = reader.getClusterState();

      // create collection 2 with stateFormat = 2
      ZkWriteCommand c2 = new ZkWriteCommand("c2", new DocCollection("c2", new HashMap<String, Slice>(),
          new HashMap<String, Object>(), DocRouter.DEFAULT, 0, ZkStateReader.getCollectionPath("c2")));
      state = writer.enqueueUpdate(reader.getClusterState(), c2, null);

      // first write is flushed immediately
      assertFalse(writer.hasPendingUpdates());

      int sharedClusterStateVersion = state.getZkClusterStateVersion();
      int stateFormat2Version = state.getCollection("c2").getZNodeVersion();

      // Simulate an external modification to /collections/c2/state.json
      byte[] data = zkClient.getData(ZkStateReader.getCollectionPath("c2"), null, null, true);
      zkClient.setData(ZkStateReader.getCollectionPath("c2"), data, true);

      // get the most up-to-date state
      reader.forceUpdateCollection("c2");
      state = reader.getClusterState();
      log.info("Cluster state: {}", state);
      assertTrue(state.hasCollection("c2"));
      assertEquals(sharedClusterStateVersion, (int) state.getZkClusterStateVersion());
      assertEquals(stateFormat2Version + 1, state.getCollection("c2").getZNodeVersion());

      // enqueue an update to stateFormat2 collection such that update is pending
      state = writer.enqueueUpdate(state, c2, null);
      assertTrue(writer.hasPendingUpdates());

      // get the most up-to-date state
      reader.forceUpdateCollection("c2");
      state = reader.getClusterState();

      // enqueue a stateFormat=1 collection which should cause a flush
      ZkWriteCommand c1 = new ZkWriteCommand("c1", new DocCollection("c1", new HashMap<String, Slice>(),
          new HashMap<String, Object>(), DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE));
      try {
        writer.enqueueUpdate(state, c1, null);
        fail("Enqueue should not have succeeded");
      } catch (KeeperException.BadVersionException bve) {
        // expected
      }
      try {
        writer.enqueueUpdate(reader.getClusterState(), c2, null);
        fail("enqueueUpdate after BadVersionException should not have succeeded");
      } catch (IllegalStateException e) {
        // expected
      }
      try {
        writer.writePendingUpdates();
        fail("writePendingUpdates after BadVersionException should not have succeeded");
      } catch (IllegalStateException e) {
        // expected
      }
    }
  } finally {
    IOUtils.close(zkClient);
    server.shutdown();
  }
}
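The last three try/catch blocks show that once enqueueUpdate fails with a KeeperException.BadVersionException, the ZkStateWriter refuses further work: both enqueueUpdate and writePendingUpdates then throw IllegalStateException. A caller that wanted to keep writing would have to abandon that instance. The fragment below is a hypothetical continuation inside the same try block, not part of the test; it refreshes the reader and retries with a freshly constructed writer, using only the calls already shown above.

// Hypothetical continuation, not in the original test: discard the invalidated writer,
// refresh the reader, and retry the update with a new ZkStateWriter.
reader.forceUpdateCollection("c2");
ZkStateWriter freshWriter = new ZkStateWriter(reader, new Overseer.Stats());
freshWriter.enqueueUpdate(reader.getClusterState(), c2, null);
freshWriter.writePendingUpdates();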
Use of org.apache.solr.cloud.ZkTestServer in project lucene-solr by Apache.
From class ZkStateWriterTest, method testSingleExternalCollection:
public void testSingleExternalCollection() throws Exception {
  String zkDir = createTempDir("testSingleExternalCollection").toFile().getAbsolutePath();
  ZkTestServer server = new ZkTestServer(zkDir);
  SolrZkClient zkClient = null;
  try {
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
    ZkController.createClusterZkNodes(zkClient);
    try (ZkStateReader reader = new ZkStateReader(zkClient)) {
      reader.createClusterStateWatchersAndUpdate();

      ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
      zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);

      // create new collection with stateFormat = 2
      ZkWriteCommand c1 = new ZkWriteCommand("c1", new DocCollection("c1", new HashMap<String, Slice>(),
          new HashMap<String, Object>(), DocRouter.DEFAULT, 0, ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json"));
      ClusterState clusterState = writer.enqueueUpdate(reader.getClusterState(), c1, null);
      writer.writePendingUpdates();

      Map map = (Map) Utils.fromJSON(zkClient.getData("/clusterstate.json", null, null, true));
      assertNull(map.get("c1"));
      map = (Map) Utils.fromJSON(zkClient.getData(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", null, null, true));
      assertNotNull(map.get("c1"));
    }
  } finally {
    IOUtils.close(zkClient);
    server.shutdown();
  }
}
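The four ZkStateReaderTest/ZkStateWriterTest methods above repeat the same boilerplate: temp directory, ZkTestServer, cleaning and recreating the Solr chroot, SolrZkClient, cluster znodes, and a watching ZkStateReader. A fixture like the one sketched below could factor that out; the class and field names are assumptions, and it relies on the same inherited test utilities (createTempDir, OverseerTest.DEFAULT_CONNECTION_TIMEOUT) the tests themselves use.

// Illustrative fixture, not part of lucene-solr, distilling the repeated setup/teardown.
static class ZkFixture implements AutoCloseable {
  final ZkTestServer server;
  final SolrZkClient zkClient;
  final ZkStateReader reader;

  ZkFixture(String testName) throws Exception {
    String zkDir = createTempDir(testName).toFile().getAbsolutePath();
    server = new ZkTestServer(zkDir);
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
    ZkController.createClusterZkNodes(zkClient);
    reader = new ZkStateReader(zkClient);
    reader.createClusterStateWatchersAndUpdate();
  }

  @Override
  public void close() throws Exception {
    IOUtils.close(reader, zkClient);  // same teardown order as the tests above
    server.shutdown();
  }
}

With such a fixture, each test body would reduce to a try-with-resources block around the assertions.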