Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
In the class PeerSyncReplicationTest, the method waitTillNodesActive:
private void waitTillNodesActive() throws Exception {
  for (int i = 0; i < 60; i++) {
    Thread.sleep(3000);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection1 = clusterState.getCollection("collection1");
    Slice slice = collection1.getSlice("shard1");
    Collection<Replica> replicas = slice.getReplicas();
    boolean allActive = true;
    Collection<String> nodesDownNames = nodesDown.stream().map(n -> n.coreNodeName).collect(Collectors.toList());
    Collection<Replica> replicasToCheck = replicas.stream().filter(r -> !nodesDownNames.contains(r.getName())).collect(Collectors.toList());
    for (Replica replica : replicasToCheck) {
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return;
    }
  }
  printLayout();
  fail("timeout waiting to see all nodes active");
}
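The same ClusterState accessors can be used to dump the live/active status of every replica directly. A minimal sketch, assuming a running CloudSolrClient named cloudClient as in the test above; the collection name is a placeholder:

  // Minimal sketch: print node, state, and liveness for each replica of a collection.
  // Assumes a CloudSolrClient named cloudClient; "collection1" is a placeholder name.
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  DocCollection coll = clusterState.getCollection("collection1");
  for (Slice slice : coll.getSlices()) {
    for (Replica replica : slice.getReplicas()) {
      boolean live = clusterState.liveNodesContain(replica.getNodeName());
      System.out.println(slice.getName() + "/" + replica.getName()
          + " on " + replica.getNodeName()
          + " state=" + replica.getState()
          + " live=" + live);
    }
  }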
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
In the class ShardSplitTest, the method incompleteOrOverlappingCustomRangeTest:
private void incompleteOrOverlappingCustomRangeTest() throws Exception {
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
  Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
  DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
  List<DocRouter.Range> subRanges = new ArrayList<>();
  List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
  // test with only one range
  subRanges.add(ranges.get(0));
  try {
    splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
    fail("Shard splitting with just one custom hash range should not succeed");
  } catch (HttpSolrClient.RemoteSolrException e) {
    log.info("Expected exception:", e);
  }
  subRanges.clear();
  // test with ranges with a hole in between them
  // order shouldn't matter
  subRanges.add(ranges.get(3));
  subRanges.add(ranges.get(0));
  try {
    splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
    fail("Shard splitting with missing hashes in between given ranges should not succeed");
  } catch (HttpSolrClient.RemoteSolrException e) {
    log.info("Expected exception:", e);
  }
  subRanges.clear();
  // test with overlapping ranges
  subRanges.add(ranges.get(0));
  subRanges.add(ranges.get(1));
  subRanges.add(ranges.get(2));
  subRanges.add(new DocRouter.Range(ranges.get(3).min - 15, ranges.get(3).max));
  try {
    splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
    fail("Shard splitting with overlapping ranges should not succeed");
  } catch (HttpSolrClient.RemoteSolrException e) {
    log.info("Expected exception:", e);
  }
  subRanges.clear();
}
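The sub-ranges passed to splitShard above come from DocRouter.partitionRange, which divides a shard's hash range into contiguous, non-overlapping pieces; the test then deliberately drops, reorders, or overlaps them. A minimal sketch of what the partitioning itself produces, assuming the same router as above:

  // Sketch: split the full hash range into 4 contiguous sub-ranges and print them.
  // Assumes a DocRouter named router obtained as in the test above.
  DocRouter.Range full = router.fullRange();
  List<DocRouter.Range> parts = router.partitionRange(4, full);
  for (DocRouter.Range r : parts) {
    // Consecutive ranges tile the full range with no gaps or overlaps,
    // which is why the malformed range lists above are rejected by SPLITSHARD.
    System.out.println(r);
  }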
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
In the class ReplicaPropertiesBase, the method verifyPropertyVal:
// The params are triplets: collection, shard, replica.
public static void verifyPropertyVal(CloudSolrClient client, String collectionName, String replicaName, String property, String val) throws InterruptedException, KeeperException {
  Replica replica = null;
  ClusterState clusterState = null;
  for (int idx = 0; idx < 300; ++idx) {
    // Keep trying while Overseer writes the ZK state for up to 30 seconds.
    clusterState = client.getZkStateReader().getClusterState();
    replica = clusterState.getReplica(collectionName, replicaName);
    if (replica == null) {
      fail("Could not find collection/replica pair! " + collectionName + "/" + replicaName);
    }
    if (StringUtils.equals(val, replica.getStr(property)))
      return;
    Thread.sleep(100);
  }
  fail("Property '" + property + "' with value " + replica.getStr(property) + " not set correctly for collection/replica pair: " + collectionName + "/" + replicaName + " property map is " + replica.getProperties().toString() + ".");
}
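A typical call, for example after setting a replica property through the Collections API; the collection, replica, and property names below are placeholders:

  // Hypothetical names; verifyPropertyVal polls until the property shows up in ClusterState.
  verifyPropertyVal(cloudClient, "collection1", "core_node1", "property.preferredleader", "true");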
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
In the class ReplicaPropertiesBase, the method verifyUnique:
public static void verifyUnique(CloudSolrClient client, String collectionName, String property, boolean balanced) throws KeeperException, InterruptedException {
  DocCollection col = null;
  for (int idx = 0; idx < 300; ++idx) {
    ClusterState clusterState = client.getZkStateReader().getClusterState();
    col = clusterState.getCollection(collectionName);
    if (col == null) {
      fail("Could not find collection " + collectionName);
    }
    Map<String, Integer> counts = new HashMap<>();
    Set<String> uniqueNodes = new HashSet<>();
    boolean allSlicesHaveProp = true;
    boolean badSlice = false;
    for (Slice slice : col.getSlices()) {
      boolean thisSliceHasProp = false;
      int propCount = 0;
      for (Replica replica : slice.getReplicas()) {
        uniqueNodes.add(replica.getNodeName());
        String propVal = replica.getStr(property);
        if (StringUtils.isNotBlank(propVal)) {
          ++propCount;
          if (counts.containsKey(replica.getNodeName()) == false) {
            counts.put(replica.getNodeName(), 0);
          }
          int count = counts.get(replica.getNodeName());
          thisSliceHasProp = true;
          counts.put(replica.getNodeName(), count + 1);
        }
      }
      // A slice is "bad" if more than one of its replicas carries the property;
      // allSlicesHaveProp stays true only while every slice seen so far has it at least once.
      badSlice = (propCount > 1) ? true : badSlice;
      allSlicesHaveProp = allSlicesHaveProp ? thisSliceHasProp : allSlicesHaveProp;
    }
    if (balanced == false && badSlice == false) {
      return;
    }
    if (allSlicesHaveProp && balanced) {
      // Check that the properties are evenly distributed.
      int minProps = col.getSlices().size() / uniqueNodes.size();
      int maxProps = minProps;
      if (col.getSlices().size() % uniqueNodes.size() > 0) {
        ++maxProps;
      }
      boolean doSleep = false;
      for (Map.Entry<String, Integer> ent : counts.entrySet()) {
        if (ent.getValue() != minProps && ent.getValue() != maxProps) {
          doSleep = true;
        }
      }
      if (doSleep == false) {
        assertTrue("We really shouldn't be calling this if there is no node with the property " + property, counts.size() > 0);
        return;
      }
    }
    Thread.sleep(100);
  }
  fail("Collection " + collectionName + " does not have roles evenly distributed. Collection is: " + col.toString());
}
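The balance check is simple integer arithmetic: with, say, 5 slices whose replicas sit on 2 nodes, minProps is 5 / 2 = 2 and maxProps is 3, so the method only returns once every node carries either 2 or 3 of the properties; otherwise it sleeps and re-reads ClusterState on the next iteration.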
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
In the class BackupManager, the method readCollectionState:
/**
 * This method reads the meta-data information for the backed-up collection.
 *
 * @param backupLoc The base path used to store the backup data.
 * @param backupId The unique name for the backup.
 * @param collectionName The name of the collection whose meta-data is to be returned.
 * @return the meta-data information for the backed-up collection.
 * @throws IOException in case of errors.
 */
public DocCollection readCollectionState(URI backupLoc, String backupId, String collectionName) throws IOException {
  Objects.requireNonNull(collectionName);
  URI zkStateDir = repository.resolve(backupLoc, backupId, ZK_STATE_DIR);
  try (IndexInput is = repository.openInput(zkStateDir, COLLECTION_PROPS_FILE, IOContext.DEFAULT)) {
    // probably ok since the json file should be small.
    byte[] arr = new byte[(int) is.length()];
    is.readBytes(arr, 0, (int) is.length());
    ClusterState c_state = ClusterState.load(-1, arr, Collections.emptySet());
    return c_state.getCollection(collectionName);
  }
}
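A minimal usage sketch, assuming an already-constructed BackupManager named backupManager; the location URI, backup name, and collection name are placeholders, and a real call would use the configured repository's own URI scheme:

  // Placeholder values; the returned DocCollection reflects the state captured at backup time.
  URI backupLoc = URI.create("file:///var/backups/solr");
  DocCollection restoredState = backupManager.readCollectionState(backupLoc, "mybackup", "collection1");
  System.out.println("Backed-up collection has " + restoredState.getSlices().size() + " slices");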