Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache: class ShardSplitTest, method splitByUniqueKeyTest.
private void splitByUniqueKeyTest() throws Exception {
ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
List<DocRouter.Range> subRanges = new ArrayList<>();
if (usually()) {
List<DocRouter.Range> ranges = router.partitionRange(4, shard1Range);
// 75% of range goes to shard1_0 and the rest to shard1_1
subRanges.add(new DocRouter.Range(ranges.get(0).min, ranges.get(2).max));
subRanges.add(ranges.get(3));
} else {
subRanges = router.partitionRange(2, shard1Range);
}
final List<DocRouter.Range> ranges = subRanges;
final int[] docCounts = new int[ranges.size()];
int numReplicas = shard1.getReplicas().size();
del("*:*");
for (int id = 0; id <= 100; id++) {
// See comment in ShardRoutingTest for hash distribution
String shardKey = "" + (char) ('a' + (id % 26));
indexAndUpdateCount(router, ranges, docCounts, shardKey + "!" + String.valueOf(id), id);
}
commit();
Thread indexThread = new Thread() {
@Override
public void run() {
Random random = random();
int max = atLeast(random, 401);
int sleep = atLeast(random, 25);
log.info("SHARDSPLITTEST: Going to add " + max + " number of docs at 1 doc per " + sleep + "ms");
Set<String> deleted = new HashSet<>();
for (int id = 101; id < max; id++) {
try {
indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id), id);
Thread.sleep(sleep);
if (usually(random)) {
String delId = String.valueOf(random.nextInt(id - 101 + 1) + 101);
if (deleted.contains(delId))
continue;
try {
deleteAndUpdateCount(router, ranges, docCounts, delId);
deleted.add(delId);
} catch (Exception e) {
log.error("Exception while deleting docs", e);
}
}
} catch (Exception e) {
log.error("Exception while adding doc id = " + id, e);
// do not select this id for deletion ever
deleted.add(String.valueOf(id));
}
}
}
};
indexThread.start();
try {
for (int i = 0; i < 3; i++) {
try {
splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
log.info("Layout after split: \n");
printLayout();
break;
} catch (HttpSolrClient.RemoteSolrException e) {
if (e.code() != 500) {
throw e;
}
log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
if (i == 2) {
fail("SPLITSHARD was not successful even after three tries");
}
}
}
} finally {
try {
indexThread.join();
} catch (InterruptedException e) {
log.error("Indexing thread interrupted", e);
}
}
waitForRecoveriesToFinish(true);
checkDocCountsAndShardStates(docCounts, numReplicas);
}
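The helper indexAndUpdateCount is not shown on this page. A minimal sketch of what it plausibly does, assuming the HashBasedRouter#sliceHash and DocRouter.Range#includes APIs (the body below is an illustration, not the actual lucene-solr source): index the doc, hash the id the same way the router does, and bump the counter of whichever sub-range the hash falls into.
protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id, int n) throws Exception {
// hypothetical sketch: index the doc through the cloud client
index("id", id, "n_ti", n);
// hash the id the same way the router does when routing updates
int hash = ((HashBasedRouter) router).sliceHash(id, null, null, null);
for (int i = 0; i < ranges.size(); i++) {
if (ranges.get(i).includes(hash)) {
// count the doc against the sub-range its hash falls into
docCounts[i]++;
break;
}
}
}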
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache: class ShardSplitTest, method checkSubShardConsistency.
protected void checkSubShardConsistency(String shard) throws SolrServerException, IOException {
SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
query.set("distrib", false);
ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
Slice slice = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, shard);
long[] numFound = new long[slice.getReplicasMap().size()];
int c = 0;
for (Replica replica : slice.getReplicas()) {
String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
QueryResponse response;
try (HttpSolrClient client = getHttpSolrClient(coreUrl)) {
response = client.query(query);
}
numFound[c++] = response.getResults().getNumFound();
log.info("Shard: " + shard + " Replica: {} has {} docs", coreUrl, String.valueOf(response.getResults().getNumFound()));
assertTrue("Shard: " + shard + " Replica: " + coreUrl + " has 0 docs", response.getResults().getNumFound() > 0);
}
// compare each remaining replica against the first; starting at 0 would only compare numFound[0] to itself
for (int i = 1; i < slice.getReplicasMap().size(); i++) {
assertEquals(shard + " is not consistent", numFound[0], numFound[i]);
}
}
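For context, a hypothetical call site would run this check against both halves produced by the split:
// hypothetical usage after a successful split of shard1
checkSubShardConsistency("shard1_0");
checkSubShardConsistency("shard1_1");
Setting distrib=false confines each query to the single core behind coreUrl, so every count reflects exactly one replica rather than a distributed, merged result.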
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache: class DistribJoinFromCollectionTest, method setupCluster.
@BeforeClass
public static void setupCluster() throws Exception {
final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");
String configName = "solrCloudCollectionConfig";
int nodeCount = 5;
configureCluster(nodeCount).addConfig(configName, configDir).configure();
Map<String, String> collectionProperties = new HashMap<>();
collectionProperties.put("config", "solrconfig-tlog.xml");
collectionProperties.put("schema", "schema.xml");
// create a collection holding data for the "to" side of the JOIN
int shards = 2;
int replicas = 2;
CollectionAdminRequest.createCollection(toColl, configName, shards, replicas).setProperties(collectionProperties).process(cluster.getSolrClient());
// get the set of nodes where replicas for the "to" collection exist
Set<String> nodeSet = new HashSet<>();
ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
ClusterState cs = zkStateReader.getClusterState();
for (Slice slice : cs.getCollection(toColl).getActiveSlices()) {
for (Replica replica : slice.getReplicas()) {
nodeSet.add(replica.getNodeName());
}
}
assertTrue(nodeSet.size() > 0);
// deploy the "from" collection to all nodes where the "to" collection exists
CollectionAdminRequest.createCollection(fromColl, configName, 1, 4).setCreateNodeSet(StringUtils.join(nodeSet, ",")).setProperties(collectionProperties).process(cluster.getSolrClient());
toDocId = indexDoc(toColl, 1001, "a", null, "b");
indexDoc(fromColl, 2001, "a", "c", null);
// brief pause so the commits fire before any test queries run
Thread.sleep(1000);
}
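Pinning the "from" collection to the nodes hosting "to" matters because Solr's {!join} parser with fromIndex resolves the from-side core locally on the node executing the query. A minimal query sketch, assuming hypothetical field names join_s and match_s (the actual schema fields are not shown here):
// hypothetical join query; field names are assumptions for illustration
SolrQuery joinQ = new SolrQuery("{!join from=join_s fromIndex=" + fromColl + " to=join_s}match_s:c");
QueryResponse rsp = cluster.getSolrClient().query(toColl, joinQ);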
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache: class SyncSliceTest, method waitTillAllNodesActive.
private void waitTillAllNodesActive() throws Exception {
for (int i = 0; i < 60; i++) {
Thread.sleep(3000);
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
ClusterState clusterState = zkStateReader.getClusterState();
DocCollection collection1 = clusterState.getCollection("collection1");
Slice slice = collection1.getSlice("shard1");
Collection<Replica> replicas = slice.getReplicas();
boolean allActive = true;
for (Replica replica : replicas) {
if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
allActive = false;
break;
}
}
if (allActive) {
return;
}
}
printLayout();
fail("timeout waiting to see all nodes active");
}
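The loop above polls for up to three minutes (60 iterations at 3 seconds each). On Solr versions that expose ZkStateReader#waitForState with a CollectionStatePredicate, the same wait can be expressed declaratively; a sketch under that assumption:
// sketch assuming ZkStateReader#waitForState is available in this Solr version
cloudClient.getZkStateReader().waitForState("collection1", 3, TimeUnit.MINUTES, (liveNodes, coll) ->
coll != null && coll.getSlice("shard1").getReplicas().stream()
.allMatch(r -> liveNodes.contains(r.getNodeName()) && r.getState() == Replica.State.ACTIVE));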
Use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache: class SliceStateTest, method testDefaultSliceState.
@Test
public void testDefaultSliceState() {
Map<String, DocCollection> collectionStates = new HashMap<>();
Set<String> liveNodes = new HashSet<>();
liveNodes.add("node1");
Map<String, Slice> slices = new HashMap<>();
Map<String, Replica> sliceToProps = new HashMap<>();
Map<String, Object> props = new HashMap<>();
Replica replica = new Replica("node1", props);
sliceToProps.put("node1", replica);
Slice slice = new Slice("shard1", sliceToProps, null);
assertSame("Default state not set to active", Slice.State.ACTIVE, slice.getState());
slices.put("shard1", slice);
collectionStates.put("collection1", new DocCollection("collection1", slices, null, DocRouter.DEFAULT));
ClusterState clusterState = new ClusterState(-1, liveNodes, collectionStates);
byte[] bytes = Utils.toJSON(clusterState);
ClusterState loadedClusterState = ClusterState.load(-1, bytes, liveNodes);
assertSame("Default state not set to active", Slice.State.ACTIVE, loadedClusterState.getSlice("collection1", "shard1").getState());
}
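A complementary assertion, not part of the original test, would set the state property explicitly and verify it is honored on the non-default path (a sketch; the "state" key mirrors ZkStateReader.STATE_PROP):
// hypothetical complement: an explicitly inactive slice should report INACTIVE
Map<String, Object> sliceProps = new HashMap<>();
sliceProps.put("state", Slice.State.INACTIVE.toString());
Slice inactiveSlice = new Slice("shard2", replicas, sliceProps);
assertSame("Explicit state not honored", Slice.State.INACTIVE, inactiveSlice.getState());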