Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
From the class SyncSliceTest, method waitTillAllNodesActive:
private void waitTillAllNodesActive() throws Exception {
  // Poll the cluster state every 3 seconds, for up to 3 minutes.
  for (int i = 0; i < 60; i++) {
    Thread.sleep(3000);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection1 = clusterState.getCollection("collection1");
    Slice slice = collection1.getSlice("shard1");
    Collection<Replica> replicas = slice.getReplicas();
    boolean allActive = true;
    for (Replica replica : replicas) {
      // A replica only counts if its node is live AND its state is ACTIVE.
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return;
    }
  }
  printLayout();
  fail("timeout waiting to see all nodes active");
}
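The same liveness-plus-state check can be factored into a reusable helper. A minimal sketch under the same assumptions as the test above (a cloudClient field; the method name and parameters here are hypothetical, not part of the test):

// Sketch: wait until every replica of the given shard is live and ACTIVE.
private boolean waitForActiveReplicas(String collection, String shard, int maxTries, long sleepMs) throws InterruptedException {
  for (int i = 0; i < maxTries; i++) {
    Thread.sleep(sleepMs);
    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    Slice slice = clusterState.getCollection(collection).getSlice(shard);
    boolean allActive = true;
    for (Replica replica : slice.getReplicas()) {
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return true;
    }
  }
  return false; // let the caller decide whether to fail the test
}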
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
From the class TestTolerantUpdateProcessorCloud, method createMiniSolrCloudCluster:
@BeforeClass
public static void createMiniSolrCloudCluster() throws Exception {
  final String configName = "solrCloudCollectionConfig";
  final File configDir = new File(TEST_HOME() + File.separator + "collection1" + File.separator + "conf");
  configureCluster(NUM_SERVERS).addConfig(configName, configDir.toPath()).configure();
  assertSpinLoopAllJettyAreRunning(cluster);
  CLOUD_CLIENT = cluster.getSolrClient();
  CLOUD_CLIENT.setDefaultCollection(COLLECTION_NAME);
  CollectionAdminRequest.createCollection(COLLECTION_NAME, configName, NUM_SHARDS, REPLICATION_FACTOR)
      .withProperty("config", "solrconfig-distrib-update-processor-chains.xml")
      .withProperty("schema", "schema15.xml") // string id for doc routing prefix
      .process(CLOUD_CLIENT);
  ZkStateReader zkStateReader = CLOUD_CLIENT.getZkStateReader();
  AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION_NAME, zkStateReader, true, true, 330);
  // really hackish way to get a URL for specific nodes based on shard/replica hosting
  // inspired by TestMiniSolrCloudCluster
  HashMap<String, String> urlMap = new HashMap<>();
  for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
    URL jettyURL = jetty.getBaseUrl();
    String nodeKey = jettyURL.getHost() + ":" + jettyURL.getPort() + jettyURL.getPath().replace("/", "_");
    urlMap.put(nodeKey, jettyURL.toString());
  }
  zkStateReader.updateClusterState();
  ClusterState clusterState = zkStateReader.getClusterState();
  for (Slice slice : clusterState.getSlices(COLLECTION_NAME)) {
    String shardName = slice.getName();
    Replica leader = slice.getLeader();
    assertNotNull("slice has null leader: " + slice.toString(), leader);
    assertNotNull("slice leader has null node name: " + slice.toString(), leader.getNodeName());
    String leaderUrl = urlMap.remove(leader.getNodeName());
    assertNotNull("could not find URL for " + shardName + " leader: " + leader.getNodeName(), leaderUrl);
    assertEquals("expected two total replicas for: " + slice.getName(), 2, slice.getReplicas().size());
    String passiveUrl = null;
    for (Replica replica : slice.getReplicas()) {
      if (!replica.equals(leader)) {
        passiveUrl = urlMap.remove(replica.getNodeName());
        assertNotNull("could not find URL for " + shardName + " replica: " + replica.getNodeName(), passiveUrl);
      }
    }
    assertNotNull("could not find URL for " + shardName + " replica", passiveUrl);
    if (shardName.equals("shard1")) {
      S_ONE_LEADER_CLIENT = getHttpSolrClient(leaderUrl + "/" + COLLECTION_NAME + "/");
      S_ONE_NON_LEADER_CLIENT = getHttpSolrClient(passiveUrl + "/" + COLLECTION_NAME + "/");
    } else if (shardName.equals("shard2")) {
      S_TWO_LEADER_CLIENT = getHttpSolrClient(leaderUrl + "/" + COLLECTION_NAME + "/");
      S_TWO_NON_LEADER_CLIENT = getHttpSolrClient(passiveUrl + "/" + COLLECTION_NAME + "/");
    } else {
      fail("unexpected shard: " + shardName);
    }
  }
  assertEquals("Should be exactly one server left (not hosting either shard)", 1, urlMap.size());
  NO_COLLECTION_CLIENT = getHttpSolrClient(urlMap.values().iterator().next() + "/" + COLLECTION_NAME + "/");
  assertNotNull(S_ONE_LEADER_CLIENT);
  assertNotNull(S_TWO_LEADER_CLIENT);
  assertNotNull(S_ONE_NON_LEADER_CLIENT);
  assertNotNull(S_TWO_NON_LEADER_CLIENT);
  assertNotNull(NO_COLLECTION_CLIENT);
  // sanity check that our S_ONE_PRE & S_TWO_PRE really do map to shard1 & shard2 with default routing
  assertEquals(0, CLOUD_CLIENT.add(doc(f("id", S_ONE_PRE + random().nextInt()), f("expected_shard_s", "shard1"))).getStatus());
  assertEquals(0, CLOUD_CLIENT.add(doc(f("id", S_TWO_PRE + random().nextInt()), f("expected_shard_s", "shard2"))).getStatus());
  assertEquals(0, CLOUD_CLIENT.commit().getStatus());
  SolrDocumentList docs = CLOUD_CLIENT.query(params("q", "*:*", "fl", "id,expected_shard_s,[shard]")).getResults();
  assertEquals(2, docs.getNumFound());
  assertEquals(2, docs.size());
  for (SolrDocument doc : docs) {
    String expected = COLLECTION_NAME + "_" + doc.getFirstValue("expected_shard_s") + "_replica";
    String docShard = doc.getFirstValue("[shard]").toString();
    assertTrue("shard routing prefixes don't seem to be aligned anymore, "
        + "did someone change the default routing rules? "
        + "and/or the default core name rules? "
        + "and/or the numShards used by this test? ... "
        + "couldn't find " + expected + " as substring of [shard] == '" + docShard
        + "' ... for docId == " + doc.getFirstValue("id"),
        docShard.contains(expected));
  }
}
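The node-key construction above is, as its own comment admits, hackish: it re-derives Solr's internal node-name encoding from each Jetty URL. A less manual sketch (not what this test does) resolves base URLs straight from the cluster state with ZkStateReader.getBaseUrlForNodeName, the same call the TestStressInPlaceUpdates example below relies on:

// Sketch: map each replica to its base URL via the cluster state.
// Assumes the zkStateReader and COLLECTION_NAME from the setup above.
Map<String, String> replicaUrls = new HashMap<>();
for (Slice slice : zkStateReader.getClusterState().getSlices(COLLECTION_NAME)) {
  for (Replica replica : slice.getReplicas()) {
    replicaUrls.put(replica.getName(), zkStateReader.getBaseUrlForNodeName(replica.getNodeName()));
  }
}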
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
From the class TestStressInPlaceUpdates, method getClientForLeader:
/**
 * Gets the SolrClient for the leader replica of SHARD1. Needed as a workaround for SOLR-8733.
 */
public SolrClient getClientForLeader() throws KeeperException, InterruptedException {
  ZkStateReader zkStateReader = cloudClient.getZkStateReader();
  // Force a fresh read of the collection state so the leader is current.
  zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
  ClusterState clusterState = zkStateReader.getClusterState();
  Slice shard1 = clusterState.getCollection(DEFAULT_COLLECTION).getSlice(SHARD1);
  Replica leader = shard1.getLeader();
  String leaderBaseUrl = zkStateReader.getBaseUrlForNodeName(leader.getNodeName());
  for (int i = 0; i < clients.size(); i++) {
    if (((HttpSolrClient) clients.get(i)).getBaseURL().startsWith(leaderBaseUrl)) {
      return clients.get(i);
    }
  }
  return null;
}
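A usage sketch for the workaround (the field names and atomic-update payload below are illustrative, not taken from the test):

// Sketch: send an update directly to the shard1 leader to work around SOLR-8733.
SolrClient leaderClient = getClientForLeader();
assertNotNull("no client matched the shard1 leader's base URL", leaderClient);
SolrInputDocument doc = new SolrInputDocument();
doc.setField("id", "1");
doc.setField("val_i", Collections.singletonMap("inc", 1)); // atomic-update syntax
leaderClient.add(doc);
leaderClient.commit();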
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
From the class TestSolrCloudWithKerberosAlt, method testCollectionCreateSearchDelete:
protected void testCollectionCreateSearchDelete() throws Exception {
  String collectionName = "testkerberoscollection";
  MiniSolrCloudCluster miniCluster = new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), JettyConfig.builder().setContext("/solr").build());
  CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
  cloudSolrClient.setDefaultCollection(collectionName);
  try {
    assertNotNull(miniCluster.getZkServer());
    List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
    assertEquals(NUM_SERVERS, jettys.size());
    for (JettySolrRunner jetty : jettys) {
      assertTrue(jetty.isRunning());
    }
    // create collection
    String configName = "solrCloudCollectionConfig";
    miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1/conf"), configName);
    CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, NUM_SHARDS, REPLICATION_FACTOR);
    Properties properties = new Properties();
    properties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
    properties.put("solr.tests.maxBufferedDocs", "100000");
    properties.put("solr.tests.ramBufferSizeMB", "100");
    // use non-test classes so RandomizedRunner isn't necessary
    properties.put(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICYFACTORY, TieredMergePolicyFactory.class.getName());
    properties.put("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
    properties.put("solr.directoryFactory", "solr.RAMDirectoryFactory");
    createRequest.setProperties(properties);
    createRequest.process(cloudSolrClient);
    try (SolrZkClient zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
         ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
      zkStateReader.createClusterStateWatchersAndUpdate();
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
      // modify/query collection
      SolrInputDocument doc = new SolrInputDocument();
      doc.setField("id", "1");
      cloudSolrClient.add(doc);
      cloudSolrClient.commit();
      SolrQuery query = new SolrQuery();
      query.setQuery("*:*");
      QueryResponse rsp = cloudSolrClient.query(query);
      assertEquals(1, rsp.getResults().getNumFound());
      // delete the collection we created earlier
      CollectionAdminRequest.deleteCollection(collectionName).process(cloudSolrClient);
      AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, zkStateReader, true, true, 330);
    }
  } finally {
    cloudSolrClient.close();
    miniCluster.shutdown();
  }
}
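Note the try-with-resources block above: a standalone ZkStateReader is built over its own SolrZkClient, and createClusterStateWatchersAndUpdate() must be called before any state is read. A minimal sketch of that pattern in isolation (the ZooKeeper address and timeouts are placeholders):

// Sketch: standalone ZkStateReader usage outside of any SolrClient.
static void printLiveNodes(String zkAddress) throws Exception {
  try (SolrZkClient zkClient = new SolrZkClient(zkAddress, 30000, 30000, null);
       ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
    zkStateReader.createClusterStateWatchersAndUpdate(); // required before reading state
    System.out.println("live nodes: " + zkStateReader.getClusterState().getLiveNodes());
  }
}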
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
From the class TestSolrCloudWithSecureImpersonation, method create1ShardCollection:
private void create1ShardCollection(String name, String config, MiniSolrCloudCluster solrCluster) throws Exception {
  CollectionAdminResponse response;
  CollectionAdminRequest.Create create = new CollectionAdminRequest.Create(name, config, 1, 1, 0, 0) {
    @Override
    public SolrParams getParams() {
      // Attach the impersonated user to every request this object issues.
      ModifiableSolrParams msp = new ModifiableSolrParams(super.getParams());
      msp.set(USER_PARAM, "user");
      return msp;
    }
  };
  create.setMaxShardsPerNode(1);
  response = create.process(solrCluster.getSolrClient());
  if (response.getStatus() != 0 || response.getErrorMessages() != null) {
    fail("Could not create collection. Response: " + response.toString());
  }
  ZkStateReader zkStateReader = solrCluster.getSolrClient().getZkStateReader();
  AbstractDistribZkTestBase.waitForRecoveriesToFinish(name, zkStateReader, false, true, 100);
}
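A usage sketch under the same test setup (the collection and config names are placeholders):

// Sketch: create the collection as the impersonated user, then query it.
create1ShardCollection("proxyCollection", "conf1", solrCluster);
QueryResponse rsp = solrCluster.getSolrClient().query("proxyCollection", new SolrQuery("*:*"));
assertEquals(0, rsp.getStatus());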