Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
From the class OverseerTest, method testPerformance:
@Test
@Ignore
public void testPerformance() throws Exception {
  String zkDir = createTempDir("OverseerTest.testPerformance").toFile().getAbsolutePath();
  ZkTestServer server = new ZkTestServer(zkDir);
  SolrZkClient controllerClient = null;
  SolrZkClient overseerClient = null;
  ZkStateReader reader = null;
  MockZKController mockController = null;
  try {
    server.run();
    controllerClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    ZkController.createClusterZkNodes(controllerClient);
    reader = new ZkStateReader(controllerClient);
    reader.createClusterStateWatchersAndUpdate();
    mockController = new MockZKController(server.getZkAddress(), "node1");
    final int MAX_COLLECTIONS = 10, MAX_CORES = 10, MAX_STATE_CHANGES = 20000, STATE_FORMAT = 2;
    for (int i = 0; i < MAX_COLLECTIONS; i++) {
      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
          "name", "perf" + i,
          ZkStateReader.NUM_SHARDS_PROP, "1",
          "stateFormat", String.valueOf(STATE_FORMAT),
          ZkStateReader.REPLICATION_FACTOR, "1",
          ZkStateReader.MAX_SHARDS_PER_NODE, "1");
      DistributedQueue q = Overseer.getStateUpdateQueue(controllerClient);
      q.offer(Utils.toJSON(m));
      controllerClient.makePath("/collections/perf" + i, true);
    }
    for (int i = 0, j = 0, k = 0; i < MAX_STATE_CHANGES; i++, j++, k++) {
      ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
          ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString(),
          ZkStateReader.NODE_NAME_PROP, "node1",
          ZkStateReader.CORE_NAME_PROP, "core" + k,
          ZkStateReader.CORE_NODE_NAME_PROP, "node1",
          ZkStateReader.COLLECTION_PROP, "perf" + j,
          ZkStateReader.NUM_SHARDS_PROP, "1",
          ZkStateReader.BASE_URL_PROP, "http://" + "node1" + "/solr/");
      DistributedQueue q = Overseer.getStateUpdateQueue(controllerClient);
      q.offer(Utils.toJSON(m));
      if (j >= MAX_COLLECTIONS - 1)
        j = 0;
      if (k >= MAX_CORES - 1)
        k = 0;
      if (i > 0 && i % 100 == 0)
        log.info("Published {} items", i);
    }
    // let's publish a sentinel collection which we'll use to wait for overseer to complete operations
    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
        ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString(),
        ZkStateReader.NODE_NAME_PROP, "node1",
        ZkStateReader.CORE_NAME_PROP, "core1",
        ZkStateReader.CORE_NODE_NAME_PROP, "node1",
        ZkStateReader.COLLECTION_PROP, "perf_sentinel",
        ZkStateReader.NUM_SHARDS_PROP, "1",
        ZkStateReader.BASE_URL_PROP, "http://" + "node1" + "/solr/");
    DistributedQueue q = Overseer.getStateUpdateQueue(controllerClient);
    q.offer(Utils.toJSON(m));
    Timer t = new Timer();
    Timer.Context context = t.time();
    try {
      overseerClient = electNewOverseer(server.getZkAddress());
      assertTrue(overseers.size() > 0);
      while (true) {
        ClusterState state = reader.getClusterState();
        if (state.hasCollection("perf_sentinel")) {
          break;
        }
        Thread.sleep(1000);
      }
    } finally {
      context.stop();
    }
    log.info("Overseer loop finished processing: ");
    printTimingStats(t);
    Overseer overseer = overseers.get(0);
    Overseer.Stats stats = overseer.getStats();
    String[] interestingOps = { "state", "update_state", "am_i_leader", "" };
    Arrays.sort(interestingOps);
    for (Map.Entry<String, Overseer.Stat> entry : stats.getStats().entrySet()) {
      String op = entry.getKey();
      if (Arrays.binarySearch(interestingOps, op) < 0)
        continue;
      Overseer.Stat stat = entry.getValue();
      log.info("op: {}, success: {}, failure: {}", op, stat.success.get(), stat.errors.get());
      Timer timer = stat.requestTime;
      printTimingStats(timer);
    }
  } finally {
    close(overseerClient);
    close(mockController);
    close(controllerClient);
    close(reader);
    server.shutdown();
  }
}
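The sentinel message above reduces "wait for the Overseer to drain its queue" to a simple ClusterState poll. A minimal standalone sketch of that polling idiom, with a bounded timeout (the helper name and the 250 ms interval are illustrative, not part of the test):

// Hypothetical helper: poll the locally cached ClusterState until the named
// collection appears or the deadline passes. Assumes a live ZkStateReader.
private static void waitForCollectionVisible(ZkStateReader reader, String name, long timeoutMs)
    throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    ClusterState state = reader.getClusterState();
    if (state != null && state.hasCollection(name)) {
      return; // the Overseer has published the collection
    }
    Thread.sleep(250);
  }
  throw new AssertionError("Collection " + name + " never appeared in cluster state");
}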
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
From the class OverseerTest, method waitForCollections:
// wait until collections are available
private void waitForCollections(ZkStateReader stateReader, String... collections) throws InterruptedException, KeeperException {
  int maxIterations = 100;
  while (0 < maxIterations--) {
    final ClusterState state = stateReader.getClusterState();
    Set<String> availableCollections = state.getCollectionsMap().keySet();
    int availableCount = 0;
    for (String requiredCollection : collections) {
      if (availableCollections.contains(requiredCollection)) {
        availableCount++;
      }
      if (availableCount == collections.length)
        return;
      Thread.sleep(50);
    }
  }
  log.warn("Timeout waiting for collections: " + Arrays.asList(collections) + " state:" + stateReader.getClusterState());
}
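A hypothetical call site, pairing this helper with collections created through the Overseer queue as in testPerformance above:

// Hypothetical usage: block until both collections are visible in the
// ZkStateReader's cached ClusterState, or exhaust the helper's iteration limit.
waitForCollections(reader, "collection1", "collection2");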
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
From the class OverseerElectionContext, method publishActiveIfRegisteredAndNotActive:
public void publishActiveIfRegisteredAndNotActive(SolrCore core) throws KeeperException, InterruptedException {
  if (core.getCoreDescriptor().getCloudDescriptor().hasRegistered()) {
    ZkStateReader zkStateReader = zkController.getZkStateReader();
    zkStateReader.forceUpdateCollection(collection);
    ClusterState clusterState = zkStateReader.getClusterState();
    Replica rep = (clusterState == null) ? null : clusterState.getReplica(collection, leaderProps.getStr(ZkStateReader.CORE_NODE_NAME_PROP));
    if (rep != null && rep.getState() != Replica.State.ACTIVE && rep.getState() != Replica.State.RECOVERING) {
      log.debug("We have become the leader after core registration but are not in an ACTIVE state - publishing ACTIVE");
      zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
    }
  }
}
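The null-safe getReplica lookup in the middle of this method is the generic way to read one replica's published state out of ClusterState. A minimal sketch of just that pattern ("myCollection" and "core_node1" are placeholder names):

// Minimal sketch, assuming a connected ZkStateReader; the collection and
// core node name are placeholders.
ClusterState clusterState = zkStateReader.getClusterState();
Replica replica = (clusterState == null) ? null : clusterState.getReplica("myCollection", "core_node1");
if (replica != null && replica.getState() != Replica.State.ACTIVE) {
  // the replica's last published state is not ACTIVE; a caller could
  // republish state here, as publishActiveIfRegisteredAndNotActive does
}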
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
From the class OverseerCollectionMessageHandler, method collectionCmd:
void collectionCmd(ZkNodeProps message, ModifiableSolrParams params, NamedList results, Replica.State stateMatcher, String asyncId, Map<String, String> requestMap, Set<String> okayExceptions) {
  log.info("Executing Collection Cmd : " + params);
  String collectionName = message.getStr(NAME);
  ShardHandler shardHandler = shardHandlerFactory.getShardHandler();
  ClusterState clusterState = zkStateReader.getClusterState();
  DocCollection coll = clusterState.getCollection(collectionName);
  for (Slice slice : coll.getSlices()) {
    sliceCmd(clusterState, params, stateMatcher, slice, shardHandler, asyncId, requestMap);
  }
  processResponses(results, shardHandler, false, null, asyncId, requestMap, okayExceptions);
}
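sliceCmd (not shown here) performs the per-slice fan-out. A minimal sketch of the slice/replica walk such a method typically performs, with the state filter applied inline; building the core URL through ZkCoreNodeProps is an assumption, not code taken from collectionCmd itself:

// Minimal sketch of a slice/replica walk with a state filter; stateMatcher == null
// is treated as "match any state". The ZkCoreNodeProps URL lookup is an assumption.
ClusterState clusterState = zkStateReader.getClusterState();
DocCollection coll = clusterState.getCollection("myCollection");
for (Slice slice : coll.getSlices()) {
  for (Replica replica : slice.getReplicas()) {
    if (stateMatcher == null || replica.getState() == stateMatcher) {
      String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
      // submit the per-core request to coreUrl via the ShardHandler
    }
  }
}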
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.
From the class ShardSplitTest, method splitByRouteKeyTest:
private void splitByRouteKeyTest() throws Exception {
  log.info("Starting splitByRouteKeyTest");
  String collectionName = "splitByRouteKeyTest";
  int numShards = 4;
  int replicationFactor = 2;
  int maxShardsPerNode = ((numShards * replicationFactor) / getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size()) + 1;
  HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
  try (CloudSolrClient client = createCloudClient(null)) {
    Map<String, Object> props = Utils.makeMap(REPLICATION_FACTOR, replicationFactor,
        MAX_SHARDS_PER_NODE, maxShardsPerNode,
        NUM_SLICES, numShards);
    createCollection(collectionInfos, collectionName, props, client);
  }
  List<Integer> list = collectionInfos.get(collectionName);
  checkForCollection(collectionName, list, null);
  waitForRecoveriesToFinish(false);
  String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
  try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
    String splitKey = "b!";
    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    final DocRouter router = clusterState.getCollection(collectionName).getRouter();
    Slice shard1 = clusterState.getSlice(collectionName, SHARD1);
    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
    final List<DocRouter.Range> ranges = ((CompositeIdRouter) router).partitionRangeByKey(splitKey, shard1Range);
    final int[] docCounts = new int[ranges.size()];
    int uniqIdentifier = (1 << 12);
    int splitKeyDocCount = 0;
    for (int i = 100; i <= 200; i++) {
      // See comment in ShardRoutingTest for hash distribution
      String shardKey = "" + (char) ('a' + (i % 26));
      String idStr = shardKey + "!" + i;
      collectionClient.add(getDoc(id, idStr, "n_ti", (shardKey + "!").equals(splitKey) ? uniqIdentifier : i));
      int idx = getHashRangeIdx(router, ranges, idStr);
      if (idx != -1) {
        docCounts[idx]++;
      }
      if (splitKey.equals(shardKey + "!"))
        splitKeyDocCount++;
    }
    for (int i = 0; i < docCounts.length; i++) {
      int docCount = docCounts[i];
      log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
    }
    log.info("Route key doc count = {}", splitKeyDocCount);
    collectionClient.commit();
    for (int i = 0; i < 3; i++) {
      try {
        splitShard(collectionName, null, null, splitKey);
        break;
      } catch (HttpSolrClient.RemoteSolrException e) {
        if (e.code() != 500) {
          throw e;
        }
        log.error("SPLITSHARD failed. " + (i < 2 ? " Retrying split" : ""), e);
        if (i == 2) {
          fail("SPLITSHARD was not successful even after three tries");
        }
      }
    }
    waitForRecoveriesToFinish(collectionName, false);
    SolrQuery solrQuery = new SolrQuery("*:*");
    assertEquals("DocCount on shard1_0 does not match", docCounts[0],
        collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
    assertEquals("DocCount on shard1_1 does not match", docCounts[1],
        collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
    assertEquals("DocCount on shard1_2 does not match", docCounts[2],
        collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
    solrQuery = new SolrQuery("n_ti:" + uniqIdentifier);
    assertEquals("shard1_0 must have 0 docs for route key: " + splitKey, 0,
        collectionClient.query(solrQuery.setParam("shards", "shard1_0")).getResults().getNumFound());
    assertEquals("Wrong number of docs on shard1_1 for route key: " + splitKey, splitKeyDocCount,
        collectionClient.query(solrQuery.setParam("shards", "shard1_1")).getResults().getNumFound());
    assertEquals("shard1_2 must have 0 docs for route key: " + splitKey, 0,
        collectionClient.query(solrQuery.setParam("shards", "shard1_2")).getResults().getNumFound());
  }
}
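The docCounts bookkeeping above rests on one routing fact: given a route key, CompositeIdRouter can report which hash sub-ranges of a shard that key occupies. A condensed sketch of that lookup ("myCollection", "shard1", and "b!" are placeholders):

// Condensed sketch of the route-key range lookup; names are placeholders.
ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
DocCollection coll = clusterState.getCollection("myCollection");
CompositeIdRouter router = (CompositeIdRouter) coll.getRouter();
Slice shard = coll.getSlice("shard1");
DocRouter.Range range = shard.getRange() != null ? shard.getRange() : router.fullRange();
// one Range per hash sub-range that documents with this route key can land in
List<DocRouter.Range> subRanges = router.partitionRangeByKey("b!", range);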