Use of org.apache.solr.common.cloud.DocRouter in project lucene-solr by apache.
From the class ShardSplitTest, method splitByRouteKeyTest:
private void splitByRouteKeyTest() throws Exception {
  log.info("Starting splitByRouteKeyTest");
  String collectionName = "splitByRouteKeyTest";
  int numShards = 4;
  int replicationFactor = 2;
  int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
  HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
  try (CloudSolrClient client = createCloudClient(null)) {
    Map<String, Object> props = Utils.makeMap(REPLICATION_FACTOR, replicationFactor, MAX_SHARDS_PER_NODE, maxShardsPerNode, NUM_SLICES, numShards);
    createCollection(collectionInfos, collectionName, props, client);
  }
  List<Integer> list = collectionInfos.get(collectionName);
  checkForCollection(collectionName, list, null);
  waitForRecoveriesToFinish(false);
  String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
  try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
    String splitKey = "b!";
    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    final DocRouter router = clusterState.getCollection(collectionName).getRouter();
    Slice shard1 = clusterState.getSlice(collectionName, SHARD1);
    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
    // partition shard1's range around the route key, so we know which sub-range each doc lands in
    final List<DocRouter.Range> ranges = ((CompositeIdRouter) router).partitionRangeByKey(splitKey, shard1Range);
    final int[] docCounts = new int[ranges.size()];
    int uniqIdentifier = (1 << 12);
    int splitKeyDocCount = 0;
    for (int i = 100; i <= 200; i++) {
      // See comment in ShardRoutingTest for hash distribution
      String shardKey = "" + (char) ('a' + (i % 26));
      String idStr = shardKey + "!" + i;
      collectionClient.add(getDoc(id, idStr, "n_ti", (shardKey + "!").equals(splitKey) ? uniqIdentifier : i));
      int idx = getHashRangeIdx(router, ranges, idStr);
      if (idx != -1) {
        docCounts[idx]++;
      }
      if (splitKey.equals(shardKey + "!"))
        splitKeyDocCount++;
    }
    for (int i = 0; i < docCounts.length; i++) {
      int docCount = docCounts[i];
      log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
    }
    log.info("Route key doc count = {}", splitKeyDocCount);
    collectionClient.commit();
    for (int i = 0; i < 3; i++) {
      try {
        splitShard(collectionName, null, null, splitKey);
        break;
      } catch (HttpSolrClient.RemoteSolrException e) {
        if (e.code() != 500) {
          throw e;
        }
        log.error("SPLITSHARD failed. " + (i < 2 ? " Retrying split" : ""), e);
        if (i == 2) {
          fail("SPLITSHARD was not successful even after three tries");
        }
      }
    }
    waitForRecoveriesToFinish(collectionName, false);
    SolrQuery solrQuery = new SolrQuery("*:*");
    assertEquals("DocCount on shard1_0 does not match", docCounts[0], collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
    assertEquals("DocCount on shard1_1 does not match", docCounts[1], collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
    assertEquals("DocCount on shard1_2 does not match", docCounts[2], collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
    solrQuery = new SolrQuery("n_ti:" + uniqIdentifier);
    assertEquals("shard1_0 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
    assertEquals("Wrong number of docs on shard1_1 for route key: " + splitKey, splitKeyDocCount, collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
    assertEquals("shard1_2 must have 0 docs for route key: " + splitKey, 0, collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
  }
}
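The snippet above relies on a getHashRangeIdx helper that the page does not show. A minimal sketch of what such a helper might look like, assuming the router is hash-based (both PlainIdRouter and CompositeIdRouter extend HashBasedRouter):

  // Hedged sketch, not necessarily the project's actual helper: map an id to
  // the index of the hash range that contains it, or -1 if no range matches.
  static int getHashRangeIdx(DocRouter router, List<DocRouter.Range> ranges, String id) {
    int hash = 0;
    if (router instanceof HashBasedRouter) {
      hash = ((HashBasedRouter) router).sliceHash(id, null, null, null);
    }
    for (int i = 0; i < ranges.size(); i++) {
      if (ranges.get(i).includes(hash)) {
        return i;
      }
    }
    return -1;
  }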
Use of org.apache.solr.common.cloud.DocRouter in project lucene-solr by apache.
From the class ShardSplitTest, method incompleteOrOverlappingCustomRangeTest:
private void incompleteOrOverlappingCustomRangeTest() throws Exception {
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
  Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
  DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
  List<DocRouter.Range> subRanges = new ArrayList<>();
  List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
  // test with only one range
  subRanges.add(ranges.get(0));
  try {
    splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
    fail("Shard splitting with just one custom hash range should not succeed");
  } catch (HttpSolrClient.RemoteSolrException e) {
    log.info("Expected exception:", e);
  }
  subRanges.clear();
  // test with ranges with a hole in between them
  // order shouldn't matter
  subRanges.add(ranges.get(3));
  subRanges.add(ranges.get(0));
  try {
    splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
    fail("Shard splitting with missing hashes in between given ranges should not succeed");
  } catch (HttpSolrClient.RemoteSolrException e) {
    log.info("Expected exception:", e);
  }
  subRanges.clear();
  // test with overlapping ranges
  subRanges.add(ranges.get(0));
  subRanges.add(ranges.get(1));
  subRanges.add(ranges.get(2));
  subRanges.add(new DocRouter.Range(ranges.get(3).min - 15, ranges.get(3).max));
  try {
    splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
    fail("Shard splitting with overlapping ranges should not succeed");
  } catch (HttpSolrClient.RemoteSolrException e) {
    log.info("Expected exception:", e);
  }
  subRanges.clear();
}
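For context on why each of these splits must fail: partitionRange(4, shard1Range) returns four contiguous, non-overlapping sub-ranges that cover the parent range exactly, and SPLITSHARD rejects custom ranges that break that invariant (a single partial range, a hole, or an overlap). A small sketch of the partitioning, assuming a shard that owns the full hash range:

  // Hedged sketch: partition the full 32-bit hash range into four sub-ranges.
  DocRouter router = DocRouter.getDocRouter(CompositeIdRouter.NAME);
  DocRouter.Range full = router.fullRange(); // Integer.MIN_VALUE .. Integer.MAX_VALUE
  List<DocRouter.Range> quarters = router.partitionRange(4, full);
  for (DocRouter.Range r : quarters) {
    // Consecutive ranges abut exactly; Solr renders them in hex, e.g. "80000000-bfffffff".
    System.out.println(r);
  }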
Use of org.apache.solr.common.cloud.DocRouter in project lucene-solr by apache.
From the class TestHashPartitioner, method testHashCodes:
public void testHashCodes() throws Exception {
  DocRouter router = DocRouter.getDocRouter(PlainIdRouter.NAME);
  assertTrue(router instanceof PlainIdRouter);
  DocCollection coll = createCollection(4, router);
  doNormalIdHashing(coll);
}
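doNormalIdHashing is not shown on this page. The check it implies can be sketched as follows, reusing router and coll from the snippet above; the murmurhash call mirrors what PlainIdRouter is understood to use internally, so treat the details as an assumption:

  // Hedged sketch: a plain id is hashed as-is (no '!' routing semantics), and
  // the target slice must be the one whose hash range includes that hash.
  String id = "b!doc1"; // PlainIdRouter hashes the whole string, '!' included
  int hash = Hash.murmurhash3_x86_32(id, 0, id.length(), 0); // org.apache.solr.common.util.Hash
  Slice target = router.getTargetSlice(id, null, null, null, coll);
  assertNotNull(target);
  assertTrue(target.getRange().includes(hash));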
Use of org.apache.solr.common.cloud.DocRouter in project lucene-solr by apache.
From the class TestHashPartitioner, method testRandomCompositeIds:
/** Make sure CompositeIdRouter can route random IDs without throwing exceptions */
public void testRandomCompositeIds() throws Exception {
  DocRouter router = DocRouter.getDocRouter(CompositeIdRouter.NAME);
  DocCollection coll = createCollection(TestUtil.nextInt(random(), 1, 10), router);
  StringBuilder idBuilder = new StringBuilder();
  for (int i = 0; i < 10000; ++i) {
    idBuilder.setLength(0);
    int numParts = TestUtil.nextInt(random(), 1, 30);
    for (int part = 0; part < numParts; ++part) {
      switch (random().nextInt(5)) {
        case 0:
          idBuilder.append('!');
          break;
        case 1:
          idBuilder.append('/');
          break;
        case 2:
          idBuilder.append(TestUtil.nextInt(random(), -100, 1000));
          break;
        default:
          {
            int length = TestUtil.nextInt(random(), 1, 10);
            char[] str = new char[length];
            TestUtil.randomFixedLengthUnicodeString(random(), str, 0, length);
            idBuilder.append(str);
            break;
          }
      }
    }
    String id = idBuilder.toString();
    try {
      Slice targetSlice = router.getTargetSlice(id, null, null, null, coll);
      assertNotNull(targetSlice);
    } catch (Exception e) {
      throw new Exception("Exception routing id '" + id + "'", e);
    }
  }
}
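The '!' and '/' characters generated above are the ones CompositeIdRouter treats specially: "tenant!doc" routes by the shared prefix, and a bits suffix such as "tenant/2!doc" narrows how much of the hash the prefix controls. A hedged sketch of inspecting the hash range a route key owns, which is what split.key-based splitting builds on:

  // Hedged sketch: the hash range into which every id prefixed with "a!" falls.
  CompositeIdRouter compositeRouter = new CompositeIdRouter();
  DocRouter.Range keyRange = compositeRouter.keyHashRange("a!");
  // Because all "a!..." ids hash into keyRange, SPLITSHARD with split.key can
  // carve that route key out into its own sub-shard.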
Use of org.apache.solr.common.cloud.DocRouter in project lucene-solr by apache.
From the class ClusterStatus, method getClusterStatus:
@SuppressWarnings("unchecked")
public void getClusterStatus(NamedList results) throws KeeperException, InterruptedException {
  // read aliases
  Aliases aliases = zkStateReader.getAliases();
  Map<String, List<String>> collectionVsAliases = new HashMap<>();
  Map<String, String> aliasVsCollections = aliases.getCollectionAliasMap();
  if (aliasVsCollections != null) {
    for (Map.Entry<String, String> entry : aliasVsCollections.entrySet()) {
      List<String> colls = StrUtils.splitSmart(entry.getValue(), ',');
      String alias = entry.getKey();
      for (String coll : colls) {
        if (collection == null || collection.equals(coll)) {
          List<String> list = collectionVsAliases.get(coll);
          if (list == null) {
            list = new ArrayList<>();
            collectionVsAliases.put(coll, list);
          }
          list.add(alias);
        }
      }
    }
  }
  Map roles = null;
  if (zkStateReader.getZkClient().exists(ZkStateReader.ROLES, true)) {
    roles = (Map) Utils.fromJSON(zkStateReader.getZkClient().getData(ZkStateReader.ROLES, null, null, true));
  }
  ClusterState clusterState = zkStateReader.getClusterState();
  // convert cluster state into a map of writable types
  byte[] bytes = Utils.toJSON(clusterState);
  Map<String, Object> stateMap = (Map<String, Object>) Utils.fromJSON(bytes);
  String routeKey = message.getStr(ShardParams._ROUTE_);
  String shard = message.getStr(ZkStateReader.SHARD_ID_PROP);
  Map<String, DocCollection> collectionsMap = null;
  if (collection == null) {
    collectionsMap = clusterState.getCollectionsMap();
  } else {
    collectionsMap = Collections.singletonMap(collection, clusterState.getCollectionOrNull(collection));
  }
  NamedList<Object> collectionProps = new SimpleOrderedMap<>();
  for (Map.Entry<String, DocCollection> entry : collectionsMap.entrySet()) {
    Map<String, Object> collectionStatus;
    String name = entry.getKey();
    DocCollection clusterStateCollection = entry.getValue();
    if (clusterStateCollection == null) {
      if (collection != null) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection: " + name + " not found");
      } else {
        // collection might have got deleted at the same time
        continue;
      }
    }
    Set<String> requestedShards = new HashSet<>();
    if (routeKey != null) {
      DocRouter router = clusterStateCollection.getRouter();
      Collection<Slice> slices = router.getSearchSlices(routeKey, null, clusterStateCollection);
      for (Slice slice : slices) {
        requestedShards.add(slice.getName());
      }
    }
    if (shard != null) {
      requestedShards.add(shard);
    }
    if (clusterStateCollection.getStateFormat() > 1) {
      bytes = Utils.toJSON(clusterStateCollection);
      Map<String, Object> docCollection = (Map<String, Object>) Utils.fromJSON(bytes);
      collectionStatus = getCollectionStatus(docCollection, name, requestedShards);
    } else {
      collectionStatus = getCollectionStatus((Map<String, Object>) stateMap.get(name), name, requestedShards);
    }
    collectionStatus.put("znodeVersion", clusterStateCollection.getZNodeVersion());
    if (collectionVsAliases.containsKey(name) && !collectionVsAliases.get(name).isEmpty()) {
      collectionStatus.put("aliases", collectionVsAliases.get(name));
    }
    String configName = zkStateReader.readConfigName(name);
    collectionStatus.put("configName", configName);
    collectionProps.add(name, collectionStatus);
  }
  List<String> liveNodes = zkStateReader.getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true);
  // now we need to walk the collectionProps tree to cross-check replica state with live nodes
  crossCheckReplicaStateWithLiveNodes(liveNodes, collectionProps);
  NamedList<Object> clusterStatus = new SimpleOrderedMap<>();
  clusterStatus.add("collections", collectionProps);
  // read cluster properties
  Map clusterProps = zkStateReader.getClusterProperties();
  if (clusterProps != null && !clusterProps.isEmpty()) {
    clusterStatus.add("properties", clusterProps);
  }
  // add the alias map too
  if (aliasVsCollections != null && !aliasVsCollections.isEmpty()) {
    clusterStatus.add("aliases", aliasVsCollections);
  }
  // add the roles map
  if (roles != null) {
    clusterStatus.add("roles", roles);
  }
  // add live_nodes
  clusterStatus.add("live_nodes", liveNodes);
  results.add("cluster", clusterStatus);
}
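The only DocRouter use in this method is the _ROUTE_ handling: getSearchSlices maps a route key to every slice whose hash range could contain it, and the status response is then filtered down to those shards. A usage sketch (the collection name "mycoll" is made up for illustration):

  // Hedged sketch: resolve which shards of a collection can hold docs for a route key.
  DocCollection docCollection = clusterState.getCollection("mycoll");
  Collection<Slice> hits = docCollection.getRouter().getSearchSlices("b!", null, docCollection);
  for (Slice slice : hits) {
    System.out.println("route key b! may live on shard " + slice.getName());
  }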