Use of org.apache.solr.common.cloud.Slice in project lucene-solr by Apache.
The class AssignTest, method testAssignNode.
@Test
public void testAssignNode() throws Exception {
String cname = "collection1";
Map<String, DocCollection> collectionStates = new HashMap<>();
Map<String, Slice> slices = new HashMap<>();
Map<String, Replica> replicas = new HashMap<>();
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state", ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString(), ZkStateReader.BASE_URL_PROP, "0.0.0.0", ZkStateReader.CORE_NAME_PROP, "core1", ZkStateReader.ROLES_PROP, null, ZkStateReader.NODE_NAME_PROP, "0_0_0_0", ZkStateReader.SHARD_ID_PROP, "shard1", ZkStateReader.COLLECTION_PROP, cname, ZkStateReader.NUM_SHARDS_PROP, "1", ZkStateReader.CORE_NODE_NAME_PROP, "core_node1");
Replica replica = new Replica("core_node1", m.getProperties());
replicas.put("core_node1", replica);
Slice slice = new Slice("slice1", replicas, new HashMap<String, Object>(0));
slices.put("slice1", slice);
DocRouter router = new ImplicitDocRouter();
DocCollection docCollection = new DocCollection(cname, slices, new HashMap<String, Object>(0), router);
collectionStates.put(cname, docCollection);
Set<String> liveNodes = new HashSet<>();
ClusterState state = new ClusterState(-1, liveNodes, collectionStates);
String nodeName = Assign.assignNode(state.getCollection("collection1"));
assertEquals("core_node2", nodeName);
}
Use of org.apache.solr.common.cloud.Slice in project lucene-solr by Apache.
The class BasicDistributedZkTest, method testANewCollectionInOneInstanceWithManualShardAssignement.
private void testANewCollectionInOneInstanceWithManualShardAssignement() throws Exception {
log.info("### STARTING testANewCollectionInOneInstanceWithManualShardAssignement");
System.clearProperty("numShards");
List<SolrClient> collectionClients = new ArrayList<>();
SolrClient client = clients.get(0);
final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(0, ((HttpSolrClient) client).getBaseURL().length() - DEFAULT_COLLECTION.length() - 1);
createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 1, "slice1");
createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 2, "slice2");
createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 3, "slice2");
createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 4, "slice1");
while (pending != null && pending.size() > 0) {
Future<Object> future = completionService.take();
pending.remove(future);
}
SolrClient client1 = collectionClients.get(0);
SolrClient client2 = collectionClients.get(1);
SolrClient client3 = collectionClients.get(2);
SolrClient client4 = collectionClients.get(3);
// no one should be recovering
waitForRecoveriesToFinish(oneInstanceCollection2, getCommonCloudSolrClient().getZkStateReader(), false, true);
assertAllActive(oneInstanceCollection2, getCommonCloudSolrClient().getZkStateReader());
//printLayout();
// TODO: enable when we don't falsely get slice1...
// solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice1", 30000);
// solrj.getZkStateReader().getLeaderUrl(oneInstanceCollection2, "slice2", 30000);
client2.add(getDoc(id, "1"));
client3.add(getDoc(id, "2"));
client4.add(getDoc(id, "3"));
client1.commit();
SolrQuery query = new SolrQuery("*:*");
query.set("distrib", false);
long oneDocs = client1.query(query).getResults().getNumFound();
long twoDocs = client2.query(query).getResults().getNumFound();
long threeDocs = client3.query(query).getResults().getNumFound();
long fourDocs = client4.query(query).getResults().getNumFound();
query.set("collection", oneInstanceCollection2);
query.set("distrib", true);
long allDocs = getCommonCloudSolrClient().query(query).getResults().getNumFound();
// System.out.println("1:" + oneDocs);
// System.out.println("2:" + twoDocs);
// System.out.println("3:" + threeDocs);
// System.out.println("4:" + fourDocs);
// System.out.println("All Docs:" + allDocs);
// assertEquals(oneDocs, threeDocs);
// assertEquals(twoDocs, fourDocs);
// assertNotSame(oneDocs, twoDocs);
assertEquals(3, allDocs);
// we added a role of none on these creates - check for it
ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
zkStateReader.forceUpdateCollection(oneInstanceCollection2);
Map<String, Slice> slices = zkStateReader.getClusterState().getSlicesMap(oneInstanceCollection2);
assertNotNull(slices);
String roles = slices.get("slice1").getReplicasMap().values().iterator().next().getStr(ZkStateReader.ROLES_PROP);
assertEquals("none", roles);
ZkCoreNodeProps props = new ZkCoreNodeProps(getCommonCloudSolrClient().getZkStateReader().getClusterState().getLeader(oneInstanceCollection2, "slice1"));
// now test that unloading a core gets us a new leader
try (HttpSolrClient unloadClient = getHttpSolrClient(baseUrl)) {
unloadClient.setConnectionTimeout(15000);
unloadClient.setSoTimeout(60000);
Unload unloadCmd = new Unload(true);
unloadCmd.setCoreName(props.getCoreName());
String leader = props.getCoreUrl();
unloadClient.request(unloadCmd);
int tries = 50;
while (leader.equals(zkStateReader.getLeaderUrl(oneInstanceCollection2, "slice1", 10000))) {
Thread.sleep(100);
if (tries-- == 0) {
fail("Leader never changed");
}
}
}
IOUtils.close(collectionClients);
}
Use of org.apache.solr.common.cloud.Slice in project lucene-solr by Apache.
The class BaseCdcrDistributedZkTest, method startServers.
/**
* Creates and starts a given number of servers.
*/
protected List<String> startServers(int nServer) throws Exception {
String temporaryCollection = "tmp_collection";
System.setProperty("collection", temporaryCollection);
for (int i = 1; i <= nServer; i++) {
// give everyone there own solrhome
File jettyDir = createTempDir("jetty").toFile();
jettyDir.mkdirs();
setupJettySolrHome(jettyDir);
JettySolrRunner jetty = createJetty(jettyDir, null, "shard" + i);
jettys.add(jetty);
}
ZkStateReader zkStateReader = jettys.get(0).getCoreContainer().getZkController().getZkStateReader();
// now wait till we see the leader for each shard
for (int i = 1; i <= shardCount; i++) {
this.printLayout();
zkStateReader.getLeaderRetry(temporaryCollection, "shard" + i, 15000);
}
// store the node names
List<String> nodeNames = new ArrayList<>();
for (Slice shard : zkStateReader.getClusterState().getCollection(temporaryCollection).getSlices()) {
for (Replica replica : shard.getReplicas()) {
nodeNames.add(replica.getNodeName());
}
}
this.waitForRecoveriesToFinish(temporaryCollection, zkStateReader, true);
// delete the temporary collection - we will create our own collections later
this.deleteCollection(temporaryCollection);
this.waitForCollectionToDisappear(temporaryCollection);
System.clearProperty("collection");
return nodeNames;
}
Use of org.apache.solr.common.cloud.Slice in project lucene-solr by Apache.
The class Assign, method buildCoreName.
public static String buildCoreName(DocCollection collection, String shard, Replica.Type type) {
Slice slice = collection.getSlice(shard);
int replicaNum = slice.getReplicas().size();
for (; ; ) {
String replicaName = buildCoreName(collection.getName(), shard, type, replicaNum);
boolean exists = false;
for (Replica replica : slice.getReplicas()) {
if (replicaName.equals(replica.getStr(CORE_NAME_PROP))) {
exists = true;
break;
}
}
if (exists)
replicaNum++;
else
return replicaName;
}
}
Use of org.apache.solr.common.cloud.Slice in project lucene-solr by Apache.
The class BackupCmd, method selectReplicaWithSnapshot.
private Replica selectReplicaWithSnapshot(CollectionSnapshotMetaData snapshotMeta, Slice slice) {
// The goal here is to choose the snapshot of the replica which was the leader at the time snapshot was created.
// If that is not possible, we choose any other replica for the given shard.
Collection<CoreSnapshotMetaData> snapshots = snapshotMeta.getReplicaSnapshotsForShard(slice.getName());
Optional<CoreSnapshotMetaData> leaderCore = snapshots.stream().filter(x -> x.isLeader()).findFirst();
if (leaderCore.isPresent()) {
log.info("Replica {} was the leader when snapshot {} was created.", leaderCore.get().getCoreName(), snapshotMeta.getName());
Replica r = slice.getReplica(leaderCore.get().getCoreName());
if ((r != null) && !r.getState().equals(State.DOWN)) {
return r;
}
}
Optional<Replica> r = slice.getReplicas().stream().filter(x -> x.getState() != State.DOWN && snapshotMeta.isSnapshotExists(slice.getName(), x)).findFirst();
if (!r.isPresent()) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to find any live replica with a snapshot named " + snapshotMeta.getName() + " for shard " + slice.getName());
}
return r.get();
}
Aggregations