Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
The class TestCloudDeleteByQuery, method createMiniSolrCloudCluster.
@BeforeClass
private static void createMiniSolrCloudCluster() throws Exception {
  final String configName = "solrCloudCollectionConfig";
  final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");
  configureCluster(NUM_SERVERS).addConfig(configName, configDir).configure();
  Map<String, String> collectionProperties = new HashMap<>();
  collectionProperties.put("config", "solrconfig-tlog.xml");
  // string id for doc routing prefix
  collectionProperties.put("schema", "schema15.xml");
  CollectionAdminRequest.createCollection(COLLECTION_NAME, configName, NUM_SHARDS, REPLICATION_FACTOR)
      .setProperties(collectionProperties)
      .process(cluster.getSolrClient());
  CLOUD_CLIENT = cluster.getSolrClient();
  CLOUD_CLIENT.setDefaultCollection(COLLECTION_NAME);
  ZkStateReader zkStateReader = CLOUD_CLIENT.getZkStateReader();
  AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION_NAME, zkStateReader, true, true, 330);
  // really hackish way to get a URL for specific nodes based on shard/replica hosting
  // inspired by TestMiniSolrCloudCluster
  HashMap<String, String> urlMap = new HashMap<>();
  for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
    URL jettyURL = jetty.getBaseUrl();
    String nodeKey = jettyURL.getHost() + ":" + jettyURL.getPort() + jettyURL.getPath().replace("/", "_");
    urlMap.put(nodeKey, jettyURL.toString());
  }
  ClusterState clusterState = zkStateReader.getClusterState();
  for (Slice slice : clusterState.getSlices(COLLECTION_NAME)) {
    String shardName = slice.getName();
    Replica leader = slice.getLeader();
    assertNotNull("slice has null leader: " + slice.toString(), leader);
    assertNotNull("slice leader has null node name: " + slice.toString(), leader.getNodeName());
    String leaderUrl = urlMap.remove(leader.getNodeName());
    assertNotNull("could not find URL for " + shardName + " leader: " + leader.getNodeName(), leaderUrl);
    assertEquals("expected two total replicas for: " + slice.getName(), 2, slice.getReplicas().size());
    String passiveUrl = null;
    for (Replica replica : slice.getReplicas()) {
      if (!replica.equals(leader)) {
        passiveUrl = urlMap.remove(replica.getNodeName());
        assertNotNull("could not find URL for " + shardName + " replica: " + replica.getNodeName(), passiveUrl);
      }
    }
    assertNotNull("could not find URL for " + shardName + " replica", passiveUrl);
    if (shardName.equals("shard1")) {
      S_ONE_LEADER_CLIENT = getHttpSolrClient(leaderUrl + "/" + COLLECTION_NAME + "/");
      S_ONE_NON_LEADER_CLIENT = getHttpSolrClient(passiveUrl + "/" + COLLECTION_NAME + "/");
    } else if (shardName.equals("shard2")) {
      S_TWO_LEADER_CLIENT = getHttpSolrClient(leaderUrl + "/" + COLLECTION_NAME + "/");
      S_TWO_NON_LEADER_CLIENT = getHttpSolrClient(passiveUrl + "/" + COLLECTION_NAME + "/");
    } else {
      fail("unexpected shard: " + shardName);
    }
  }
  assertEquals("Should be exactly one server left (not hosting either shard)", 1, urlMap.size());
  NO_COLLECTION_CLIENT = getHttpSolrClient(urlMap.values().iterator().next() + "/" + COLLECTION_NAME + "/");
  assertNotNull(S_ONE_LEADER_CLIENT);
  assertNotNull(S_TWO_LEADER_CLIENT);
  assertNotNull(S_ONE_NON_LEADER_CLIENT);
  assertNotNull(S_TWO_NON_LEADER_CLIENT);
  assertNotNull(NO_COLLECTION_CLIENT);
  // sanity check that our S_ONE_PRE & S_TWO_PRE really do map to shard1 & shard2 with default routing
  assertEquals(0, CLOUD_CLIENT.add(doc(f("id", S_ONE_PRE + random().nextInt()),
                                       f("expected_shard_s", "shard1"))).getStatus());
  assertEquals(0, CLOUD_CLIENT.add(doc(f("id", S_TWO_PRE + random().nextInt()),
                                       f("expected_shard_s", "shard2"))).getStatus());
  assertEquals(0, CLOUD_CLIENT.commit().getStatus());
  SolrDocumentList docs = CLOUD_CLIENT.query(params("q", "*:*",
                                                    "fl", "id,expected_shard_s,[shard]")).getResults();
  assertEquals(2, docs.getNumFound());
  assertEquals(2, docs.size());
  for (SolrDocument doc : docs) {
    String expected = COLLECTION_NAME + "_" + doc.getFirstValue("expected_shard_s") + "_replica";
    String docShard = doc.getFirstValue("[shard]").toString();
    assertTrue("shard routing prefixes don't seem to be aligned anymore, "
               + "did someone change the default routing rules? "
               + "and/or the default core name rules? "
               + "and/or the numShards used by this test? ... "
               + "couldn't find " + expected + " as substring of [shard] == '" + docShard
               + "' ... for docId == " + doc.getFirstValue("id"),
               docShard.contains(expected));
  }
}
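The node-key/URL map above is, as its comment admits, hackish. Since Replica exposes getCoreUrl() (used in the CollectionsHandler example further down), a leader client can also be resolved directly from cluster state. A minimal sketch, reusing CLOUD_CLIENT and COLLECTION_NAME from the test:

// Sketch: resolve per-shard clients straight from cluster state.
// Note this targets the core URL (http://host:port/solr/<core>), not the
// collection endpoint the test builds above.
ClusterState state = CLOUD_CLIENT.getZkStateReader().getClusterState();
for (Slice slice : state.getSlices(COLLECTION_NAME)) {
  Replica leader = slice.getLeader();
  HttpSolrClient leaderClient = getHttpSolrClient(leader.getCoreUrl());
  // non-leaders: any r in slice.getReplicas() with !r.equals(leader)
}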
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
The class OverseerTest, method electNewOverseer.
private SolrZkClient electNewOverseer(String address) throws InterruptedException, TimeoutException, IOException, KeeperException, ParserConfigurationException, SAXException {
  SolrZkClient zkClient = new SolrZkClient(address, TIMEOUT);
  ZkStateReader reader = new ZkStateReader(zkClient);
  readers.add(reader);
  LeaderElector overseerElector = new LeaderElector(zkClient);
  // only one Overseer at a time: shut down the previous one and its ZK connection
  if (overseers.size() > 0) {
    overseers.get(overseers.size() - 1).close();
    overseers.get(overseers.size() - 1).getZkStateReader().getZkClient().close();
  }
  UpdateShardHandler updateShardHandler = new UpdateShardHandler(UpdateShardHandlerConfig.DEFAULT);
  updateShardHandlers.add(updateShardHandler);
  HttpShardHandlerFactory httpShardHandlerFactory = new HttpShardHandlerFactory();
  httpShardHandlerFactorys.add(httpShardHandlerFactory);
  Overseer overseer = new Overseer(httpShardHandlerFactory.getShardHandler(), updateShardHandler,
      "/admin/cores", reader, null,
      new CloudConfig.CloudConfigBuilder("127.0.0.1", 8983, "").build());
  overseers.add(overseer);
  ElectionContext ec = new OverseerElectionContext(zkClient, overseer, address.replaceAll("/", "_"));
  overseerElector.setup(ec);
  overseerElector.joinElection(ec, false);
  return zkClient;
}
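For context, a hypothetical call site: the helper hands back the SolrZkClient that owns the election node, so the test must close it afterwards. Here server.getZkAddress() stands in for the test ZooKeeper connect string and is an assumption, not part of the snippet above:

// Hypothetical usage inside a test: stand up a fresh Overseer, then make
// sure the returned client (which owns the election node) gets closed.
SolrZkClient zkClient = electNewOverseer(server.getZkAddress());
try {
  // ... drive state updates and assert on what the new Overseer publishes ...
} finally {
  zkClient.close();
}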
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
The class PeerSyncReplicationTest, method waitTillNodesActive.
private void waitTillNodesActive() throws Exception {
  // poll for up to 3 minutes (60 iterations of 3 seconds each)
  for (int i = 0; i < 60; i++) {
    Thread.sleep(3000);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection1 = clusterState.getCollection("collection1");
    Slice slice = collection1.getSlice("shard1");
    Collection<Replica> replicas = slice.getReplicas();
    boolean allActive = true;
    // ignore replicas hosted on nodes we deliberately took down
    Collection<String> nodesDownNames = nodesDown.stream().map(n -> n.coreNodeName).collect(Collectors.toList());
    Collection<Replica> replicasToCheck = replicas.stream().filter(r -> !nodesDownNames.contains(r.getName())).collect(Collectors.toList());
    for (Replica replica : replicasToCheck) {
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return;
    }
  }
  printLayout();
  fail("timeout waiting to see all nodes active");
}
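Newer ZkStateReader versions add waitForState(collection, wait, unit, predicate), which can replace this sleep-and-poll loop. A sketch under that assumption (it also skips the nodesDown filtering above):

cloudClient.getZkStateReader().waitForState("collection1", 180, TimeUnit.SECONDS,
    (liveNodes, collectionState) -> {
      if (collectionState == null) {
        return false;
      }
      for (Replica r : collectionState.getSlice("shard1").getReplicas()) {
        // same test as the loop above: live node AND ACTIVE state
        if (!liveNodes.contains(r.getNodeName()) || r.getState() != Replica.State.ACTIVE) {
          return false;
        }
      }
      return true;
    });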
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
The class SolrSchema, method getTableMap.
@Override
protected Map<String, Table> getTableMap() {
  String zk = this.properties.getProperty("zk");
  try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder().withZkHost(zk).build()) {
    cloudSolrClient.connect();
    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    final ImmutableMap.Builder<String, Table> builder = ImmutableMap.builder();
    for (String collection : clusterState.getCollectionsMap().keySet()) {
      builder.put(collection, new SolrTable(this, collection));
    }
    Aliases aliases = zkStateReader.getAliases();
    if (aliases.collectionAliasSize() > 0) {
      for (Map.Entry<String, String> alias : aliases.getCollectionAliasMap().entrySet()) {
        // expose each alias as a table backed by its target collection
        builder.put(alias.getKey(), new SolrTable(this, alias.getValue()));
      }
    }
    return builder.build();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
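Note that getCollectionAliasMap() is only consulted behind the collectionAliasSize() > 0 guard, since it may be empty when no aliases are defined. The same enumeration works outside Calcite; a standalone sketch, where zkHost is a hypothetical ZooKeeper connect string:

// Sketch: list collections and aliases the same way SolrSchema does.
static void printTables(String zkHost) throws IOException {
  try (CloudSolrClient client = new CloudSolrClient.Builder().withZkHost(zkHost).build()) {
    client.connect();
    ZkStateReader reader = client.getZkStateReader();
    reader.getClusterState().getCollectionsMap().keySet().forEach(System.out::println);
    Aliases aliases = reader.getAliases();
    if (aliases.collectionAliasSize() > 0) {
      aliases.getCollectionAliasMap().forEach((alias, target) -> System.out.println(alias + " -> " + target));
    }
  }
}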
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
The class CollectionsHandler, method waitForActiveCollection.
private static void waitForActiveCollection(String collectionName, ZkNodeProps message, CoreContainer cc, SolrResponse response) throws KeeperException, InterruptedException {
  if (response.getResponse().get("exception") != null) {
    // the main call failed, don't wait
    log.info("Not waiting for active collection due to exception: " + response.getResponse().get("exception"));
    return;
  }
  if (response.getResponse().get("failure") != null) {
    // TODO: we should not wait for Replicas we know failed
  }
  String replicaNotAlive = null;
  String replicaState = null;
  String nodeNotLive = null;
  CloudConfig ccfg = cc.getConfig().getCloudConfig();
  int numRetries = ccfg.getCreateCollectionWaitTimeTillActive();
  boolean checkLeaderOnly = ccfg.isCreateCollectionCheckLeaderActive();
  log.info("Wait for new collection to be active for at most " + numRetries + " seconds. Check all shard " + (checkLeaderOnly ? "leaders" : "replicas"));
  ZkStateReader zkStateReader = cc.getZkController().getZkStateReader();
  for (int i = 0; i < numRetries; i++) {
    ClusterState clusterState = zkStateReader.getClusterState();
    Collection<Slice> shards = clusterState.getSlices(collectionName);
    if (shards != null) {
      replicaNotAlive = null;
      for (Slice shard : shards) {
        Collection<Replica> replicas;
        if (!checkLeaderOnly) {
          replicas = shard.getReplicas();
        } else {
          replicas = new ArrayList<>();
          replicas.add(shard.getLeader());
        }
        for (Replica replica : replicas) {
          String state = replica.getStr(ZkStateReader.STATE_PROP);
          log.debug("Checking replica status, collection={} replica={} state={}", collectionName, replica.getCoreUrl(), state);
          if (!clusterState.liveNodesContain(replica.getNodeName()) || !state.equals(Replica.State.ACTIVE.toString())) {
            replicaNotAlive = replica.getCoreUrl();
            nodeNotLive = replica.getNodeName();
            replicaState = state;
            break;
          }
        }
        if (replicaNotAlive != null) {
          break;
        }
      }
      // every replica checked out as active
      if (replicaNotAlive == null) {
        return;
      }
    }
    Thread.sleep(1000);
  }
  if (nodeNotLive != null && replicaState != null) {
    log.error("Timed out waiting for new collection's replicas to become ACTIVE "
        + (replicaState.equals(Replica.State.ACTIVE.toString())
            ? "node " + nodeNotLive + " is not live"
            : "replica " + replicaNotAlive + " is in state of " + replicaState)
        + " with timeout=" + numRetries);
  } else {
    log.error("Timed out waiting for new collection's replicas to become ACTIVE with timeout=" + numRetries);
  }
}
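The liveness check above deliberately combines two signals, since a replica's published state can go stale when its node dies. A hypothetical helper making that condition reusable:

// Sketch: a replica only counts as ACTIVE if its node is also live,
// because a crashed node cannot update its replicas' published state.
private static boolean isReplicaActive(ClusterState clusterState, Replica replica) {
  return clusterState.liveNodesContain(replica.getNodeName())
      && Replica.State.ACTIVE.toString().equals(replica.getStr(ZkStateReader.STATE_PROP));
}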