Example use of org.apache.solr.common.cloud.SolrZkClient in the Apache lucene-solr project: class TestConfigReload, method reloadTest.
/**
 * Exercises configuration reloading: verifies (via {@code checkConfReload})
 * that an update to solrconfig.xml under /configs/conf1/ in ZooKeeper is
 * reflected by the collection's /config endpoint.
 */
private void reloadTest() throws Exception {
    SolrZkClient client = cloudClient.getZkStateReader().getZkClient();
    // Parameterized logging (no eager concatenation); log the count to match
    // the "live_nodes_count" label — getLiveNodes() returns the set itself.
    log.info("live_nodes_count : {}", cloudClient.getZkStateReader().getClusterState().getLiveNodes().size());
    String confPath = ZkConfigManager.CONFIGS_ZKNODE + "/conf1/";
    // Overlay reload check intentionally disabled — only the main config file is exercised here.
    // checkConfReload(client, confPath + ConfigOverlay.RESOURCE_NAME, "overlay");
    checkConfReload(client, confPath + SolrConfig.DEFAULT_CONF_FILE, "config", "/config");
}
Example use of org.apache.solr.common.cloud.SolrZkClient in the Apache lucene-solr project: class TestManagedResourceStorage, method testZkBasedJsonStorage.
/**
 * Runs the persisted managed-resource create/update tests against
 * ZooKeeper-backed storage.
 */
@Test
public void testZkBasedJsonStorage() throws Exception {
    // This test only makes sense on a cloud (ZooKeeper-aware) core container.
    assertTrue("Not using ZooKeeper", h.getCoreContainer().isZooKeeperAware());
    SolrZkClient zkClient = h.getCoreContainer().getZkController().getZkClient();
    // Solr unit tests can only write to their working directory due to
    // a custom Java Security Manager installed in the test environment
    SolrResourceLoader loader = new SolrResourceLoader(Paths.get("./"));
    NamedList<String> initArgs = new NamedList<>();
    try {
        ZooKeeperStorageIO storage = new ZooKeeperStorageIO(zkClient, "/test");
        storage.configure(loader, initArgs);
        doStorageTests(loader, storage);
    } finally {
        // Always release the resource loader, even if the storage tests fail.
        loader.close();
    }
}
Example use of org.apache.solr.common.cloud.SolrZkClient in the Apache lucene-solr project: class LeaderInitiatedRecoveryOnShardRestartTest, method testRestartWithAllInLIR.
/**
 * Restarts a whole shard while every replica is marked down in
 * leader-initiated recovery (LIR), then verifies the shard can still elect a
 * leader and finish recovery (exercises SOLR-8075 / SOLR-8367).
 */
@Test
public void testRestartWithAllInLIR() throws Exception {
    // still waiting to be able to properly start with no default collection1,
    // delete to remove confusion
    waitForRecoveriesToFinish(false);
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionAction.DELETE.toString());
    params.set("name", DEFAULT_COLLECTION);
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    String baseUrl = ((HttpSolrClient) clients.get(0)).getBaseURL();
    // try-with-resources: the original leaked delClient if request(...) threw.
    try (HttpSolrClient delClient = getHttpSolrClient(baseUrl.substring(0, baseUrl.lastIndexOf("/")))) {
        delClient.request(request);
    }
    String testCollectionName = "all_in_lir";
    String shardId = "shard1";
    createCollection(testCollectionName, 1, 3, 1);
    waitForRecoveriesToFinish(testCollectionName, false);
    cloudClient.setDefaultCollection(testCollectionName);
    // Fabricate an LIR "down" state znode payload for every replica of shard1.
    Map<String, Object> stateObj = Utils.makeMap();
    stateObj.put(ZkStateReader.STATE_PROP, "down");
    stateObj.put("createdByNodeName", "test");
    stateObj.put("createdByCoreNodeName", "test");
    byte[] znodeData = Utils.toJSON(stateObj);
    SolrZkClient zkClient = cloudClient.getZkStateReader().getZkClient();
    zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node1", znodeData, true);
    zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node2", znodeData, true);
    zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node3", znodeData, true);
    // everyone gets a couple docs so that everyone has tlog entries
    // and won't become leader simply because they have no tlog versions
    SolrInputDocument doc = new SolrInputDocument();
    addFields(doc, "id", "1");
    SolrInputDocument doc2 = new SolrInputDocument();
    addFields(doc2, "id", "2");
    cloudClient.add(doc);
    cloudClient.add(doc2);
    cloudClient.commit();
    assertEquals("We just added 2 docs, we should be able to find them", 2, cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    // randomly add too many docs to peer sync to one replica so that only one random replica is the valid leader
    // the versions don't matter, they just have to be higher than what the last 2 docs got
    HttpSolrClient client = (HttpSolrClient) clients.get(random().nextInt(clients.size()));
    client.setBaseURL(client.getBaseURL().substring(0, client.getBaseURL().lastIndexOf("/")) + "/" + testCollectionName);
    params = new ModifiableSolrParams();
    params.set(DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
    try {
        for (int i = 0; i < 101; i++) {
            add(client, params, sdoc("id", 3 + i, "_version_", Long.MAX_VALUE - 1 - i));
        }
    } catch (RemoteSolrException e) {
        // resend without version
        if (e.getMessage().contains("conflict")) {
            for (int i = 0; i < 101; i++) {
                add(client, params, sdoc("id", 3 + i));
            }
        }
    }
    client.commit();
    // Stop the entire cluster so every replica restarts while flagged in LIR.
    for (JettySolrRunner jetty : jettys) {
        ChaosMonkey.stop(jetty);
    }
    ChaosMonkey.stop(controlJetty);
    Thread.sleep(10000);
    log.info("Start back up");
    for (JettySolrRunner jetty : jettys) {
        ChaosMonkey.start(jetty);
    }
    ChaosMonkey.start(controlJetty);
    // recoveries will not finish without SOLR-8075 and SOLR-8367
    waitForRecoveriesToFinish(testCollectionName, true);
    // now expire each node
    // Best-effort re-creation of the LIR nodes: they may already exist from
    // before the restart, in which case NodeExistsException is expected.
    try {
        zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node1", znodeData, true);
    } catch (NodeExistsException ignored) {
        // node already present — nothing to do
    }
    try {
        zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node2", znodeData, true);
    } catch (NodeExistsException ignored) {
        // node already present — nothing to do
    }
    try {
        zkClient.makePath("/collections/" + testCollectionName + "/leader_initiated_recovery/" + shardId + "/core_node3", znodeData, true);
    } catch (NodeExistsException ignored) {
        // node already present — nothing to do
    }
    for (JettySolrRunner jetty : jettys) {
        chaosMonkey.expireSession(jetty);
    }
    Thread.sleep(2000);
    // recoveries will not finish without SOLR-8075 and SOLR-8367
    waitForRecoveriesToFinish(testCollectionName, true);
}
Example use of org.apache.solr.common.cloud.SolrZkClient in the Apache lucene-solr project: class OverseerTest, method testShardAssignment.
/**
 * Publishes six replicas into a three-shard collection and verifies the
 * Overseer assigns two replicas to each shard and that every shard has a
 * leader registered in the cluster state.
 */
@Test
public void testShardAssignment() throws Exception {
    String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
    ZkTestServer server = new ZkTestServer(zkDir);
    MockZKController zkController = null;
    SolrZkClient zkClient = null;
    SolrZkClient overseerClient = null;
    // Hoisted so it can be closed in finally — the original leaked the reader
    // (ZkStateReader is closeable and registers watchers).
    ZkStateReader reader = null;
    try {
        server.run();
        AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
        AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
        zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
        ZkController.createClusterZkNodes(zkClient);
        overseerClient = electNewOverseer(server.getZkAddress());
        reader = new ZkStateReader(zkClient);
        reader.createClusterStateWatchersAndUpdate();
        zkController = new MockZKController(server.getZkAddress(), "127.0.0.1");
        final int numShards = 6;
        for (int i = 0; i < numShards; i++) {
            assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i + 1), "node" + (i + 1), Replica.State.ACTIVE, 3));
        }
        // Six replicas over three shards => two replicas per shard.
        final Map<String, Replica> rmap = reader.getClusterState().getSlice(COLLECTION, "shard1").getReplicasMap();
        assertEquals(rmap.toString(), 2, rmap.size());
        assertEquals(rmap.toString(), 2, reader.getClusterState().getSlice(COLLECTION, "shard2").getReplicasMap().size());
        assertEquals(rmap.toString(), 2, reader.getClusterState().getSlice(COLLECTION, "shard3").getReplicasMap().size());
        //make sure leaders are in cloud state
        assertNotNull(reader.getLeaderUrl(COLLECTION, "shard1", 15000));
        assertNotNull(reader.getLeaderUrl(COLLECTION, "shard2", 15000));
        assertNotNull(reader.getLeaderUrl(COLLECTION, "shard3", 15000));
    } finally {
        if (reader != null) {
            reader.close();
        }
        close(zkClient);
        if (zkController != null) {
            zkController.close();
        }
        close(overseerClient);
        server.shutdown();
    }
}
Example use of org.apache.solr.common.cloud.SolrZkClient in the Apache lucene-solr project: class LeaderElectionIntegrationTest, method setUp.
/**
 * Starts an embedded ZK test server, brings up NUM_SHARD_REPLICAS containers
 * for shard1 plus one for shard2, and waits until all nodes register under
 * /live_nodes before the test proceeds.
 */
@Override
public void setUp() throws Exception {
    super.setUp();
    ignoreException("No UpdateLog found - cannot sync");
    ignoreException("No UpdateLog found - cannot recover");
    System.setProperty("zkClientTimeout", "8000");
    zkDir = createTempDir("zkData").toFile().getAbsolutePath();
    zkServer = new ZkTestServer(zkDir);
    zkServer.run();
    System.setProperty("zkHost", zkServer.getZkAddress());
    AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), "solrconfig.xml", "schema.xml");
    log.info("####SETUP_START {}", getTestName());
    // set some system properties for use by tests
    System.setProperty("solr.test.sys.prop1", "propone");
    System.setProperty("solr.test.sys.prop2", "proptwo");
    for (int i = 7000; i < 7000 + NUM_SHARD_REPLICAS; i++) {
        try {
            setupContainer(i, "shard1");
        } catch (Throwable t) {
            // Pass the throwable to the logger so the stack trace is preserved
            // (the original logged only class + message).
            log.error("!!!Could not start container:" + i, t);
            fail("Could not start container:" + i + ". Reason:" + t.getClass() + " " + t.getMessage());
        }
    }
    try {
        setupContainer(3333, "shard2");
    } catch (Throwable t) {
        log.error("!!!Could not start container 3333.", t);
        fail("Could not start container: 3333");
    }
    zkClient = new SolrZkClient(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT);
    reader = new ZkStateReader(zkClient);
    reader.createClusterStateWatchersAndUpdate();
    // Poll /live_nodes for up to ~30s until every container (plus the control
    // node) has registered.
    boolean initSuccessful = false;
    for (int i = 0; i < 30; i++) {
        List<String> liveNodes = zkClient.getChildren("/live_nodes", null, true);
        if (liveNodes.size() == NUM_SHARD_REPLICAS + 1) {
            // all nodes up
            initSuccessful = true;
            break;
        }
        Thread.sleep(1000);
        log.info("Waiting for more nodes to come up, now: {}/{}", liveNodes.size(), NUM_SHARD_REPLICAS + 1);
    }
    if (!initSuccessful) {
        fail("Init was not successful!");
    }
    log.info("####SETUP_END {}", getTestName());
}
End of aggregated SolrZkClient usage examples.