Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
The class TestRandomRequestDistribution, method testRequestTracking.
/**
 * Asserts that requests aren't always sent to the same poor node. See SOLR-7493
 */
private void testRequestTracking() throws Exception {
  CollectionAdminRequest.createCollection("a1x2", 1, 2)
      .setCreateNodeSet(nodeNames.get(0) + ',' + nodeNames.get(1))
      .process(cloudClient);
  CollectionAdminRequest.createCollection("b1x1", 1, 1)
      .setCreateNodeSet(nodeNames.get(2))
      .process(cloudClient);
  waitForRecoveriesToFinish("a1x2", true);
  waitForRecoveriesToFinish("b1x1", true);
  cloudClient.getZkStateReader().forceUpdateCollection("b1x1");
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  DocCollection b1x1 = clusterState.getCollection("b1x1");
  Collection<Replica> replicas = b1x1.getSlice("shard1").getReplicas();
  assertEquals(1, replicas.size());
  String baseUrl = replicas.iterator().next().getStr(ZkStateReader.BASE_URL_PROP);
  if (!baseUrl.endsWith("/")) {
    baseUrl += "/";
  }
  try (HttpSolrClient client = getHttpSolrClient(baseUrl + "a1x2")) {
    client.setSoTimeout(5000);
    client.setConnectionTimeout(2000);
    log.info("Making requests to " + baseUrl + "a1x2");
    for (int i = 0; i < 10; i++) {
      client.query(new SolrQuery("*:*"));
    }
  }
  Map<String, Integer> shardVsCount = new HashMap<>();
  for (JettySolrRunner runner : jettys) {
    CoreContainer container = runner.getCoreContainer();
    SolrMetricManager metricManager = container.getMetricManager();
    for (SolrCore core : container.getCores()) {
      String registry = core.getCoreMetricManager().getRegistryName();
      Counter cnt = metricManager.counter(null, registry, "requests", "QUERY.standard");
      SolrRequestHandler select = core.getRequestHandler("");
      // long c = (long) select.getStatistics().get("requests");
      shardVsCount.put(core.getName(), (int) cnt.getCount());
    }
  }
  log.info("Shard count map = " + shardVsCount);
  for (Map.Entry<String, Integer> entry : shardVsCount.entrySet()) {
    assertTrue("Shard " + entry.getKey() + " received all 10 requests", entry.getValue() != 10);
  }
}
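The DocCollection lookup at the core of this test can be distilled into a small standalone sketch. This is only an illustration, not part of the test: the ZooKeeper address "localhost:9983" and the collection and shard names are assumed placeholders, and the builder-style CloudSolrClient construction reflects the SolrJ API of this branch; other versions may differ.

// Minimal sketch (assumptions noted above): resolve the base URL of a collection's
// single replica from the cluster state in ZooKeeper.
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkStateReader;

public class ReplicaBaseUrlLookup {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder().withZkHost("localhost:9983").build()) {
      client.connect();                                      // make sure the ZK state reader is initialized
      ZkStateReader reader = client.getZkStateReader();
      reader.forceUpdateCollection("b1x1");                  // refresh the cached state for this collection
      ClusterState clusterState = reader.getClusterState();
      DocCollection coll = clusterState.getCollection("b1x1");
      Replica replica = coll.getSlice("shard1").getReplicas().iterator().next();
      System.out.println(replica.getStr(ZkStateReader.BASE_URL_PROP));
    }
  }
}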
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
The class TestTlogReplica, method testCreateDelete.
// 2 times to make sure cleanup is complete and we can create the same collection
@Repeat(iterations = 2)
public void testCreateDelete() throws Exception {
  try {
    switch (random().nextInt(3)) {
      case 0:
        CollectionAdminRequest.createCollection(collectionName, "conf", 2, 0, 4, 0)
            .setMaxShardsPerNode(100)
            .process(cluster.getSolrClient());
        break;
      case 1:
        // Sometimes don't use SolrJ
        String url = String.format(Locale.ROOT,
            "%s/admin/collections?action=CREATE&name=%s&numShards=%s&tlogReplicas=%s&maxShardsPerNode=%s",
            cluster.getRandomJetty(random()).getBaseUrl(),
            collectionName,
            2,    // numShards
            4,    // tlogReplicas
            100); // maxShardsPerNode
        HttpGet createCollectionGet = new HttpGet(url);
        HttpResponse httpResponse = cluster.getSolrClient().getHttpClient().execute(createCollectionGet);
        assertEquals(200, httpResponse.getStatusLine().getStatusCode());
        break;
      case 2:
        // Sometimes use V2 API
        url = cluster.getRandomJetty(random()).getBaseUrl().toString() + "/____v2/c";
        String requestBody = String.format(Locale.ROOT,
            "{create:{name:%s, numShards:%s, tlogReplicas:%s, maxShardsPerNode:%s}}",
            collectionName,
            2,    // numShards
            4,    // tlogReplicas
            100); // maxShardsPerNode
        HttpPost createCollectionPost = new HttpPost(url);
        createCollectionPost.setHeader("Content-type", "application/json");
        createCollectionPost.setEntity(new StringEntity(requestBody));
        httpResponse = cluster.getSolrClient().getHttpClient().execute(createCollectionPost);
        assertEquals(200, httpResponse.getStatusLine().getStatusCode());
        break;
    }
    boolean reloaded = false;
    while (true) {
      DocCollection docCollection = getCollectionState(collectionName);
      assertNotNull(docCollection);
      assertEquals("Expecting 2 shards", 2, docCollection.getSlices().size());
      assertEquals("Expecting 4 replicas per shard", 8, docCollection.getReplicas().size());
      assertEquals("Expecting 8 tlog replicas, 4 per shard", 8, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
      assertEquals("Expecting no nrt replicas", 0, docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
      assertEquals("Expecting no pull replicas", 0, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
      for (Slice s : docCollection.getSlices()) {
        assertTrue(s.getLeader().getType() == Replica.Type.TLOG);
        List<String> shardElectionNodes = cluster.getZkClient().getChildren(
            ZkStateReader.getShardLeadersElectPath(collectionName, s.getName()), null, true);
        assertEquals("Unexpected election nodes for Shard: " + s.getName() + ": " + Arrays.toString(shardElectionNodes.toArray()),
            4, shardElectionNodes.size());
      }
      assertUlogPresence(docCollection);
      if (reloaded) {
        break;
      } else {
        // reload
        CollectionAdminResponse response = CollectionAdminRequest.reloadCollection(collectionName).process(cluster.getSolrClient());
        assertEquals(0, response.getStatus());
        reloaded = true;
      }
    }
  } finally {
    zkClient().printLayoutToStdOut();
  }
}
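The verification loop above is essentially one pass over the DocCollection. Here is a hedged sketch of that layout check on its own; the class name, the collection parameter, and the JUnit assertions are illustrative additions, while the DocCollection, Slice, and Replica calls mirror the ones in the test.

// Minimal sketch (assumes a connected CloudSolrClient and a collection created with
// 2 shards and 4 tlog replicas per shard, as in the test above).
import java.util.EnumSet;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import static org.junit.Assert.assertEquals;

class TlogLayoutCheck {
  // Verify the expected shard/replica layout directly from the DocCollection.
  static void verifyTlogLayout(CloudSolrClient client, String collection) {
    DocCollection coll = client.getZkStateReader().getClusterState().getCollection(collection);
    assertEquals(2, coll.getSlices().size());                                  // 2 shards
    assertEquals(8, coll.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());   // 4 tlog replicas per shard
    assertEquals(0, coll.getReplicas(EnumSet.of(Replica.Type.NRT)).size());    // no nrt replicas
    assertEquals(0, coll.getReplicas(EnumSet.of(Replica.Type.PULL)).size());   // no pull replicas
    for (Slice s : coll.getSlices()) {
      assertEquals(Replica.Type.TLOG, s.getLeader().getType());                // every leader must be a tlog replica
    }
  }
}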
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
The class TestTlogReplica, method testAddDocs.
@SuppressWarnings("unchecked")
public void testAddDocs() throws Exception {
  int numTlogReplicas = 1 + random().nextInt(3);
  DocCollection docCollection = createAndWaitForCollection(1, 0, numTlogReplicas, 0);
  assertEquals(1, docCollection.getSlices().size());
  cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
  cluster.getSolrClient().commit(collectionName);
  Slice s = docCollection.getSlices().iterator().next();
  try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
    assertEquals(1, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
  }
  TimeOut t = new TimeOut(REPLICATION_TIMEOUT_SECS, TimeUnit.SECONDS);
  for (Replica r : s.getReplicas(EnumSet.of(Replica.Type.TLOG))) {
    //TODO: assert replication < REPLICATION_TIMEOUT_SECS
    try (HttpSolrClient tlogReplicaClient = getHttpSolrClient(r.getCoreUrl())) {
      while (true) {
        try {
          assertEquals("Replica " + r.getName() + " not up to date after 10 seconds",
              1, tlogReplicaClient.query(new SolrQuery("*:*")).getResults().getNumFound());
          // Append replicas process all updates
          SolrQuery req = new SolrQuery("qt", "/admin/plugins", "stats", "true");
          QueryResponse statsResponse = tlogReplicaClient.query(req);
          assertEquals("Append replicas should receive all updates. Replica: " + r + ", response: " + statsResponse,
              1L, ((Map<String, Object>) ((NamedList<Object>) statsResponse.getResponse())
                  .findRecursive("plugins", "UPDATE", "updateHandler", "stats")).get("UPDATE.updateHandler.cumulativeAdds.count"));
          break;
        } catch (AssertionError e) {
          if (t.hasTimedOut()) {
            throw e;
          } else {
            Thread.sleep(100);
          }
        }
      }
    }
  }
  assertUlogPresence(docCollection);
}
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
The class TestPullReplicaErrorHandling, method testCantConnectToLeader.
public void testCantConnectToLeader() throws Exception {
  int numShards = 1;
  CollectionAdminRequest.createCollection(collectionName, "conf", numShards, 1, 0, 1)
      .setMaxShardsPerNode(1)
      .process(cluster.getSolrClient());
  addDocs(10);
  DocCollection docCollection = assertNumberOfReplicas(numShards, 0, numShards, false, true);
  Slice s = docCollection.getSlices().iterator().next();
  SocketProxy proxy = getProxyForReplica(s.getLeader());
  try {
    // wait for replication
    try (HttpSolrClient pullReplicaClient = getHttpSolrClient(s.getReplicas(EnumSet.of(Replica.Type.PULL)).get(0).getCoreUrl())) {
      assertNumDocs(10, pullReplicaClient);
    }
    proxy.close();
    expectThrows(SolrException.class, () -> addDocs(1));
    try (HttpSolrClient pullReplicaClient = getHttpSolrClient(s.getReplicas(EnumSet.of(Replica.Type.PULL)).get(0).getCoreUrl())) {
      assertNumDocs(10, pullReplicaClient);
    }
    assertNumDocs(10, cluster.getSolrClient());
  } finally {
    LOG.info("Opening leader node");
    proxy.reopen();
  }
  // Back to normal
  // Even after the leader is back to normal, the replica can get a broken pipe for some time when trying
  // to connect to it. The commit can fail if it's sent to the replica and the replica forwards it to the
  // leader; since that path uses CUSC, the error is hidden! That breaks the last part of this test.
  // addDocs(20);
  // assertNumDocs(20, cluster.getSolrClient(), 300);
  // try (HttpSolrClient pullReplicaClient = getHttpSolrClient(s.getReplicas(EnumSet.of(Replica.Type.PULL)).get(0).getCoreUrl())) {
  //   assertNumDocs(20, pullReplicaClient);
  // }
}
Use of org.apache.solr.common.cloud.DocCollection in project lucene-solr by apache.
The class TestPullReplicaErrorHandling, method assertNumberOfReplicas.
private DocCollection assertNumberOfReplicas(int numWriter, int numActive, int numPassive,
    boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
  if (updateCollection) {
    cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
  }
  DocCollection docCollection = getCollectionState(collectionName);
  assertNotNull(docCollection);
  assertEquals("Unexpected number of writer (NRT) replicas: " + docCollection, numWriter,
      docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).stream()
          .filter(r -> !activeOnly || r.getState() == Replica.State.ACTIVE).count());
  assertEquals("Unexpected number of pull replicas: " + docCollection, numPassive,
      docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).stream()
          .filter(r -> !activeOnly || r.getState() == Replica.State.ACTIVE).count());
  assertEquals("Unexpected number of active (TLOG) replicas: " + docCollection, numActive,
      docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).stream()
          .filter(r -> !activeOnly || r.getState() == Replica.State.ACTIVE).count());
  return docCollection;
}
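The three stream pipelines differ only in the replica type they count, so the pattern can be captured in one hedged helper. The class and method names below are illustrative additions; the DocCollection and Replica calls are the same ones the method above uses.

// Minimal sketch: count replicas of a given type in a DocCollection, optionally
// restricted to those in the ACTIVE state.
import java.util.EnumSet;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;

class ReplicaCounter {
  static long countReplicas(DocCollection coll, Replica.Type type, boolean activeOnly) {
    return coll.getReplicas(EnumSet.of(type)).stream()
        .filter(r -> !activeOnly || r.getState() == Replica.State.ACTIVE)
        .count();
  }
}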