Use of org.apache.solr.common.cloud.ZkCoreNodeProps in the lucene-solr project (Apache).
From the class CollectionsAPIDistributedZkTest, method collectStartTimes.
/**
 * Records the start time of every core in the named collection.
 *
 * <p>For each replica of each shard, queries core status via the core admin API and
 * stores the core's start time (epoch millis) into {@code urlToTime}, keyed by the
 * replica's core URL.
 *
 * @param collectionName the collection whose replica start times should be collected
 * @param urlToTime      output map from core URL to core start time in epoch millis
 * @throws SolrServerException      if the core admin request fails
 * @throws IOException              on communication errors
 * @throws IllegalArgumentException if the collection does not exist in cluster state
 */
private void collectStartTimes(String collectionName, Map<String, Long> urlToTime) throws SolrServerException, IOException {
  DocCollection collection = getCollectionState(collectionName);
  // Guard clause: fail fast when the collection is unknown.
  if (collection == null) {
    throw new IllegalArgumentException("Could not find collection " + collectionName);
  }
  for (Slice slice : collection) {
    for (Replica replica : slice) {
      ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(replica);
      CoreStatus status;
      // Client is scoped to this one status call so it is always closed.
      try (HttpSolrClient statusClient = getHttpSolrClient(nodeProps.getBaseUrl())) {
        status = CoreAdminRequest.getCoreStatus(nodeProps.getCoreName(), false, statusClient);
      }
      urlToTime.put(nodeProps.getCoreUrl(), status.getCoreStartTime().getTime());
    }
  }
}
Use of org.apache.solr.common.cloud.ZkCoreNodeProps in the lucene-solr project (Apache).
From the class UnloadDistributedZkTest, method testCoreUnloadAndLeaders.
/**
 * Exercises unloading leader cores in a single-shard collection.
 *
 * <p>Scenario: create a collection with several replicas across nodes, index documents,
 * unload the current leader (twice, with new replicas added in between), bring the
 * original leader's core back as a plain replica, and finally assert that all surviving
 * replicas converge on the same document count.
 *
 * <p>Fixes applied in review: removed the unused {@code p} locals that were assigned
 * from {@code unloadCmd.getParams()} but never read (dead code, and the two sites
 * declared inconsistent types), removed commented-out debug statements, and fixed a
 * duplicated word in a comment. No live statement was altered.
 *
 * @throws Exception on any problem
 */
private void testCoreUnloadAndLeaders() throws Exception {
  File tmpDir = createTempDir().toFile();
  // Unique data dir so a later re-create of this core (below) reuses the same index.
  String core1DataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection1" + "_1n";

  // create a new collection
  SolrClient client = clients.get(0);
  String url1 = getBaseUrl(client);
  try (HttpSolrClient adminClient = getHttpSolrClient(url1)) {
    adminClient.setConnectionTimeout(15000);
    adminClient.setSoTimeout(60000);
    Create createCmd = new Create();
    createCmd.setCoreName("unloadcollection1");
    createCmd.setCollection("unloadcollection");
    createCmd.setNumShards(1);
    createCmd.setDataDir(getDataDir(core1DataDir));
    adminClient.request(createCmd);
  }
  ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
  zkStateReader.forceUpdateCollection("unloadcollection");
  int slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
  assertEquals(1, slices);

  // add a second replica on another node
  client = clients.get(1);
  String url2 = getBaseUrl(client);
  try (HttpSolrClient adminClient = getHttpSolrClient(url2)) {
    Create createCmd = new Create();
    createCmd.setCoreName("unloadcollection2");
    createCmd.setCollection("unloadcollection");
    String core2dataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection1" + "_2n";
    createCmd.setDataDir(getDataDir(core2dataDir));
    adminClient.request(createCmd);
  }
  zkStateReader.forceUpdateCollection("unloadcollection");
  slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
  assertEquals(1, slices);
  waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);

  ZkCoreNodeProps leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");

  // randomly index a few docs through the leader before unload to vary starting state
  Random random = random();
  if (random.nextBoolean()) {
    try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getCoreUrl())) {
      // lets try and use the solrj client to index and retrieve a couple
      // documents
      SolrInputDocument doc1 = getDoc(id, 6, i1, -600, tlong, 600, t1, "humpty dumpy sat on a wall");
      SolrInputDocument doc2 = getDoc(id, 7, i1, -600, tlong, 600, t1, "humpty dumpy3 sat on a walls");
      SolrInputDocument doc3 = getDoc(id, 8, i1, -600, tlong, 600, t1, "humpty dumpy2 sat on a walled");
      collectionClient.add(doc1);
      collectionClient.add(doc2);
      collectionClient.add(doc3);
      collectionClient.commit();
    }
  }

  // create another replica for our collection
  client = clients.get(2);
  String url3 = getBaseUrl(client);
  try (HttpSolrClient adminClient = getHttpSolrClient(url3)) {
    Create createCmd = new Create();
    createCmd.setCoreName("unloadcollection3");
    createCmd.setCollection("unloadcollection");
    String core3dataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection" + "_3n";
    createCmd.setDataDir(getDataDir(core3dataDir));
    adminClient.request(createCmd);
  }
  waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);

  // so that we start with some versions when we reload...
  DirectUpdateHandler2.commitOnClose = false;
  try (HttpSolrClient addClient = getHttpSolrClient(url3 + "/unloadcollection3")) {
    addClient.setConnectionTimeout(30000);
    // add a few docs
    for (int x = 20; x < 100; x++) {
      SolrInputDocument doc1 = getDoc(id, x, i1, -600, tlong, 600, t1, "humpty dumpy sat on a wall");
      addClient.add(doc1);
    }
  }

  // unload the leader
  try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getBaseUrl())) {
    collectionClient.setConnectionTimeout(15000);
    collectionClient.setSoTimeout(30000);
    Unload unloadCmd = new Unload(false);
    unloadCmd.setCoreName(leaderProps.getCoreName());
    collectionClient.request(unloadCmd);
  }

  // wait (bounded) for leadership to move off the unloaded core
  int tries = 50;
  while (leaderProps.getCoreUrl().equals(zkStateReader.getLeaderUrl("unloadcollection", "shard1", 15000))) {
    Thread.sleep(100);
    if (tries-- == 0) {
      fail("Leader never changed");
    }
  }

  // ensure there is a leader
  zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);

  try (HttpSolrClient addClient = getHttpSolrClient(url2 + "/unloadcollection2")) {
    addClient.setConnectionTimeout(30000);
    addClient.setSoTimeout(90000);
    // add a few docs while the leader is down
    for (int x = 101; x < 200; x++) {
      SolrInputDocument doc1 = getDoc(id, x, i1, -600, tlong, 600, t1, "humpty dumpy sat on a wall");
      addClient.add(doc1);
    }
  }

  // create another replica for our collection
  client = clients.get(3);
  String url4 = getBaseUrl(client);
  try (HttpSolrClient adminClient = getHttpSolrClient(url4)) {
    adminClient.setConnectionTimeout(15000);
    adminClient.setSoTimeout(30000);
    Create createCmd = new Create();
    createCmd.setCoreName("unloadcollection4");
    createCmd.setCollection("unloadcollection");
    String core4dataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection" + "_4n";
    createCmd.setDataDir(getDataDir(core4dataDir));
    adminClient.request(createCmd);
  }
  waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);

  // unload the leader again
  leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
  try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getBaseUrl())) {
    collectionClient.setConnectionTimeout(15000);
    collectionClient.setSoTimeout(30000);
    Unload unloadCmd = new Unload(false);
    unloadCmd.setCoreName(leaderProps.getCoreName());
    collectionClient.request(unloadCmd);
  }
  tries = 50;
  while (leaderProps.getCoreUrl().equals(zkStateReader.getLeaderUrl("unloadcollection", "shard1", 15000))) {
    Thread.sleep(100);
    if (tries-- == 0) {
      fail("Leader never changed");
    }
  }
  zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);

  // set this back
  DirectUpdateHandler2.commitOnClose = true;

  // bring the downed leader back as replica (reusing its original data dir)
  try (HttpSolrClient adminClient = getHttpSolrClient(leaderProps.getBaseUrl())) {
    adminClient.setConnectionTimeout(15000);
    adminClient.setSoTimeout(30000);
    Create createCmd = new Create();
    createCmd.setCoreName(leaderProps.getCoreName());
    createCmd.setCollection("unloadcollection");
    createCmd.setDataDir(getDataDir(core1DataDir));
    adminClient.request(createCmd);
  }
  waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);

  // query each replica directly (distrib=false) and compare local doc counts
  long found1, found3;
  try (HttpSolrClient adminClient = getHttpSolrClient(url2 + "/unloadcollection")) {
    adminClient.setConnectionTimeout(15000);
    adminClient.setSoTimeout(30000);
    adminClient.commit();
    SolrQuery q = new SolrQuery("*:*");
    q.set("distrib", false);
    found1 = adminClient.query(q).getResults().getNumFound();
  }
  try (HttpSolrClient adminClient = getHttpSolrClient(url3 + "/unloadcollection")) {
    adminClient.setConnectionTimeout(15000);
    adminClient.setSoTimeout(30000);
    adminClient.commit();
    SolrQuery q = new SolrQuery("*:*");
    q.set("distrib", false);
    found3 = adminClient.query(q).getResults().getNumFound();
  }
  try (HttpSolrClient adminClient = getHttpSolrClient(url4 + "/unloadcollection")) {
    adminClient.setConnectionTimeout(15000);
    adminClient.setSoTimeout(30000);
    adminClient.commit();
    SolrQuery q = new SolrQuery("*:*");
    q.set("distrib", false);
    long found4 = adminClient.query(q).getResults().getNumFound();
    // all 3 shards should now have the same number of docs
    assertEquals(found1, found3);
    assertEquals(found3, found4);
  }
}
Aggregations