use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
the class V2ApiIntegrationTest method testCollectionsApi.
@Test
public void testCollectionsApi() throws Exception {
  CloudSolrClient client = cluster.getSolrClient();
  Map result = resAsMap(client, new V2Request.Builder("/c/" + COLL_NAME + "/get/_introspect").build());
  assertEquals("/c/collection1/get", Utils.getObjectByPath(result, true, "/spec[0]/url/paths[0]"));
  result = resAsMap(client, new V2Request.Builder("/collections/" + COLL_NAME + "/get/_introspect").build());
  assertEquals("/collections/collection1/get", Utils.getObjectByPath(result, true, "/spec[0]/url/paths[0]"));
}
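For comparison, here is a minimal sketch of sending a V2 API request directly through a CloudSolrClient, without the test's resAsMap helper. The ZooKeeper address and collection name are hypothetical placeholders:

// Sketch (assumes a SolrCloud cluster with ZooKeeper at localhost:9983
// and a collection named "collection1").
try (CloudSolrClient client = new CloudSolrClient.Builder().withZkHost("localhost:9983").build()) {
  V2Request req = new V2Request.Builder("/c/collection1/get/_introspect").build();
  V2Response rsp = req.process(client);
  // the raw response carries the introspection spec under the "spec" key
  System.out.println(rsp.getResponse().get("spec"));
}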
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
the class TestCollectionStateWatchers method testStateWatcherChecksCurrentStateOnRegister.
@Test
public void testStateWatcherChecksCurrentStateOnRegister() throws Exception {
  CloudSolrClient client = cluster.getSolrClient();
  CollectionAdminRequest.createCollection("currentstate", "config", 1, 1).processAndWait(client, MAX_WAIT_TIMEOUT);
  final CountDownLatch latch = new CountDownLatch(1);
  client.registerCollectionStateWatcher("currentstate", (n, c) -> {
    latch.countDown();
    return false;
  });
  assertTrue("CollectionStateWatcher isn't called on new registration", latch.await(MAX_WAIT_TIMEOUT, TimeUnit.SECONDS));
  assertEquals("CollectionStateWatcher should be retained", 1, client.getZkStateReader().getStateWatchers("currentstate").size());
  final CountDownLatch latch2 = new CountDownLatch(1);
  client.registerCollectionStateWatcher("currentstate", (n, c) -> {
    latch2.countDown();
    return true;
  });
  assertTrue("CollectionStateWatcher isn't called when registering for an already-watched collection", latch2.await(MAX_WAIT_TIMEOUT, TimeUnit.SECONDS));
  waitFor("CollectionStateWatcher should be removed", 1, TimeUnit.SECONDS, () -> client.getZkStateReader().getStateWatchers("currentstate").size() == 1);
}
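The watcher's return value controls its lifecycle: false keeps it registered, true removes it. As a hedged sketch, this is the usual pattern for blocking until a collection reaches a desired state (the one-minute timeout is an arbitrary choice here):

// Sketch: wait until every slice of "currentstate" is active, then auto-remove the watcher.
final CountDownLatch allActive = new CountDownLatch(1);
client.registerCollectionStateWatcher("currentstate", (liveNodes, state) -> {
  if (state != null && state.getSlices().stream().allMatch(s -> s.getState() == Slice.State.ACTIVE)) {
    allActive.countDown();
    return true; // returning true removes the watcher
  }
  return false; // returning false keeps watching
});
assertTrue("Collection never became active", allActive.await(1, TimeUnit.MINUTES));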
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
the class ShardSplitTest method testSplitStaticIndexReplication.
/*
 * Creates a collection with replicationFactor=1 and splits a shard. Restarts the
 * sub-shard leader node, adds a replica, and ensures that the document counts
 * match between the leader and the replica.
 */
public void testSplitStaticIndexReplication() throws Exception {
  waitForThingsToLevelOut(15);
  DocCollection defCol = cloudClient.getZkStateReader().getClusterState().getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION);
  Replica replica = defCol.getReplicas().get(0);
  String nodeName = replica.getNodeName();
  String collectionName = "testSplitStaticIndexReplication";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
  // some high number so we can create replicas without hindrance
  create.setMaxShardsPerNode(5);
  // we want to create the leader on a fixed node so that we know which one to restart later
  create.setCreateNodeSet(nodeName);
  create.process(cloudClient);
  try (CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress(), true, cloudClient.getLbClient().getHttpClient())) {
    client.setDefaultCollection(collectionName);
    StoppableIndexingThread thread = new StoppableIndexingThread(controlClient, client, "i1", true);
    try {
      thread.start();
      // give the indexer some time to do its work
      Thread.sleep(1000);
      thread.safeStop();
      thread.join();
      client.commit();
      controlClient.commit();
      CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
      splitShard.setShardName(SHARD1);
      String asyncId = splitShard.processAsync(client);
      RequestStatusState state = CollectionAdminRequest.requestStatus(asyncId).waitFor(client, 120);
      if (state == RequestStatusState.COMPLETED) {
        waitForRecoveriesToFinish(collectionName, true);
        // wait for the parent shard to become inactive
        CountDownLatch latch = new CountDownLatch(1);
        client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
          @Override
          public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
            Slice parent = collectionState.getSlice(SHARD1);
            Slice slice10 = collectionState.getSlice(SHARD1_0);
            Slice slice11 = collectionState.getSlice(SHARD1_1);
            if (slice10 != null && slice11 != null
                && parent.getState() == Slice.State.INACTIVE
                && slice10.getState() == Slice.State.ACTIVE
                && slice11.getState() == Slice.State.ACTIVE) {
              latch.countDown();
              // returning true removes the watcher
              return true;
            }
            return false;
          }
        });
        latch.await(1, TimeUnit.MINUTES);
        if (latch.getCount() != 0) {
          // sanity check
          fail("Sub-shards did not become active even after waiting for 1 minute");
        }
        int liveNodeCount = client.getZkStateReader().getClusterState().getLiveNodes().size();
        // restart the sub-shard leader node
        boolean restarted = false;
        for (JettySolrRunner jetty : jettys) {
          int port = jetty.getBaseUrl().getPort();
          if (replica.getStr(BASE_URL_PROP).contains(":" + port)) {
            ChaosMonkey.kill(jetty);
            ChaosMonkey.start(jetty);
            restarted = true;
            break;
          }
        }
        if (!restarted) {
          // sanity check
          fail("We could not find a jetty to kill for replica: " + replica.getCoreUrl());
        }
        // add a new replica for the sub-shard
        CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(collectionName, SHARD1_0);
        // use the control client because it is less likely to be the node that was restarted;
        // this avoids test flakiness caused by NoHttpResponseExceptions
        String control_collection = client.getZkStateReader().getClusterState().getCollection("control_collection").getReplicas().get(0).getStr(BASE_URL_PROP);
        try (HttpSolrClient control = new HttpSolrClient.Builder(control_collection).withHttpClient(client.getLbClient().getHttpClient()).build()) {
          state = addReplica.processAndWait(control, 30);
        }
        if (state == RequestStatusState.COMPLETED) {
          CountDownLatch newReplicaLatch = new CountDownLatch(1);
          client.getZkStateReader().registerCollectionStateWatcher(collectionName, new CollectionStateWatcher() {
            @Override
            public boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
              if (liveNodes.size() != liveNodeCount) {
                return false;
              }
              Slice slice = collectionState.getSlice(SHARD1_0);
              if (slice.getReplicas().size() == 2) {
                if (slice.getReplicas().stream().noneMatch(r -> r.getState() == Replica.State.RECOVERING)) {
                  // we see both replicas and none of them are recovering
                  newReplicaLatch.countDown();
                  return true;
                }
              }
              return false;
            }
          });
          newReplicaLatch.await(30, TimeUnit.SECONDS);
          // check consistency of the sub-shard replica explicitly because the checkShardConsistency
          // method doesn't handle new shards/replicas well
          ClusterState clusterState = client.getZkStateReader().getClusterState();
          DocCollection collection = clusterState.getCollection(collectionName);
          int numReplicasChecked = assertConsistentReplicas(collection.getSlice(SHARD1_0));
          assertEquals("We should have checked consistency for exactly 2 replicas of shard1_0", 2, numReplicasChecked);
        } else {
          fail("Adding a replica to the sub-shard did not complete even after waiting for 30 seconds! Saw state = " + state.getKey());
        }
      } else {
        fail("We expected the shard split to succeed on a static index but it didn't. Found state = " + state.getKey());
      }
    } finally {
      thread.safeStop();
      thread.join();
    }
  }
}
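Stripped of the test scaffolding, the async SPLITSHARD pattern at the heart of this test looks roughly like the following sketch; the collection and shard names are placeholders:

// Sketch of the async split-and-wait flow (assumes "coll" has a shard named "shard1").
CollectionAdminRequest.SplitShard split = CollectionAdminRequest.splitShard("coll");
split.setShardName("shard1");
String asyncId = split.processAsync(client);
// poll the async request status for up to 120 seconds
RequestStatusState state = CollectionAdminRequest.requestStatus(asyncId).waitFor(client, 120);
if (state != RequestStatusState.COMPLETED) {
  throw new IllegalStateException("SPLITSHARD did not complete, state = " + state.getKey());
}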
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
the class ShardSplitTest method splitByRouteFieldTest.
public void splitByRouteFieldTest() throws Exception {
  log.info("Starting testSplitWithRouteField");
  String collectionName = "routeFieldColl";
  int numShards = 4;
  int replicationFactor = 2;
  int maxShardsPerNode = ((numShards * replicationFactor) / getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size()) + 1;
  HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
  String shard_fld = "shard_s";
  try (CloudSolrClient client = createCloudClient(null)) {
    Map<String, Object> props = Utils.makeMap(REPLICATION_FACTOR, replicationFactor, MAX_SHARDS_PER_NODE, maxShardsPerNode, NUM_SLICES, numShards, "router.field", shard_fld);
    createCollection(collectionInfos, collectionName, props, client);
  }
  List<Integer> list = collectionInfos.get(collectionName);
  checkForCollection(collectionName, list, null);
  waitForRecoveriesToFinish(false);
  String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
  try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    final DocRouter router = clusterState.getCollection(collectionName).getRouter();
    Slice shard1 = clusterState.getSlice(collectionName, SHARD1);
    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
    final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
    final int[] docCounts = new int[ranges.size()];
    for (int i = 100; i <= 200; i++) {
      // see the comment in ShardRoutingTest for the hash distribution
      String shardKey = "" + (char) ('a' + (i % 26));
      collectionClient.add(getDoc(id, i, "n_ti", i, shard_fld, shardKey));
      int idx = getHashRangeIdx(router, ranges, shardKey);
      if (idx != -1) {
        docCounts[idx]++;
      }
    }
    for (int i = 0; i < docCounts.length; i++) {
      int docCount = docCounts[i];
      log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
    }
    collectionClient.commit();
    for (int i = 0; i < 3; i++) {
      try {
        splitShard(collectionName, SHARD1, null, null);
        break;
      } catch (HttpSolrClient.RemoteSolrException e) {
        if (e.code() != 500) {
          throw e;
        }
        log.error("SPLITSHARD failed. " + (i < 2 ? " Retrying split" : ""), e);
        if (i == 2) {
          fail("SPLITSHARD was not successful even after three tries");
        }
      }
    }
    waitForRecoveriesToFinish(collectionName, false);
    assertEquals(docCounts[0], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_0")).getResults().getNumFound());
    assertEquals(docCounts[1], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_1")).getResults().getNumFound());
  }
}
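The point of router.field is that the routing key is read from a document field (shard_s here) rather than from the id, so documents sharing that field value are co-located on one shard. A minimal indexing sketch under this scheme, with placeholder ids and route keys:

// Sketch (assumes "routeFieldColl" was created with router.field=shard_s).
SolrInputDocument doc1 = new SolrInputDocument();
doc1.addField("id", "100");
doc1.addField("shard_s", "a");
SolrInputDocument doc2 = new SolrInputDocument();
doc2.addField("id", "200");
doc2.addField("shard_s", "a"); // same route key, so it lands on the same shard as doc1
client.add("routeFieldColl", Arrays.asList(doc1, doc2));
client.commit("routeFieldColl");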
use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
the class TestTlogReplica method testOnlyLeaderIndexes.
public void testOnlyLeaderIndexes() throws Exception {
  createAndWaitForCollection(1, 0, 2, 0);
  CloudSolrClient cloudClient = cluster.getSolrClient();
  new UpdateRequest().add(sdoc("id", "1")).add(sdoc("id", "2")).add(sdoc("id", "3")).add(sdoc("id", "4")).process(cloudClient, collectionName);
  {
    UpdateHandler updateHandler = getSolrCore(true).get(0).getUpdateHandler();
    RefCounted<IndexWriter> iwRef = updateHandler.getSolrCoreState().getIndexWriter(null);
    assertTrue("IndexWriter at leader must see updates", iwRef.get().hasUncommittedChanges());
    iwRef.decref();
  }
  for (SolrCore solrCore : getSolrCore(false)) {
    RefCounted<IndexWriter> iwRef = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
    assertFalse("IndexWriter at replicas must not see updates", iwRef.get().hasUncommittedChanges());
    iwRef.decref();
  }
  checkRTG(1, 4, cluster.getJettySolrRunners());
  new UpdateRequest().deleteById("1").deleteByQuery("id:2").process(cloudClient, collectionName);
  // the DBQ is not processed at the replicas, so we can still get doc2 and other docs by RTG
  checkRTG(2, 4, getSolrRunner(false));
  new UpdateRequest().commit(cloudClient, collectionName);
  waitForNumDocsInAllActiveReplicas(2);
  // the update log rolls over
  for (SolrCore solrCore : getSolrCore(false)) {
    UpdateLog updateLog = solrCore.getUpdateHandler().getUpdateLog();
    assertFalse(updateLog.hasUncommittedChanges());
  }
  // the UpdateLog copies over old updates
  for (int i = 15; i <= 150; i++) {
    cloudClient.add(collectionName, sdoc("id", String.valueOf(i)));
    if (random().nextInt(100) < 15 && i != 150) {
      cloudClient.commit(collectionName);
    }
  }
  checkRTG(120, 150, cluster.getJettySolrRunners());
  waitForReplicasCatchUp(20);
}
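The IndexWriter access above is reference-counted on purpose: getIndexWriter(null) pins the writer, and decref() must always follow or the core cannot swap writers. A safer variant of the same pattern, sketched with try/finally (core stands for any SolrCore):

// Sketch: always pair getIndexWriter(null) with decref(), even if the check throws.
RefCounted<IndexWriter> iwRef = core.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
try {
  assertFalse(iwRef.get().hasUncommittedChanges());
} finally {
  iwRef.decref();
}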