Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
From class ShardSplitTest, method splitByRouteFieldTest:
public void splitByRouteFieldTest() throws Exception {
  log.info("Starting testSplitWithRouteField");
  String collectionName = "routeFieldColl";
  int numShards = 4;
  int replicationFactor = 2;
  int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
  HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
  String shard_fld = "shard_s";
  try (CloudSolrClient client = createCloudClient(null)) {
    Map<String, Object> props = Utils.makeMap(REPLICATION_FACTOR, replicationFactor, MAX_SHARDS_PER_NODE, maxShardsPerNode, NUM_SLICES, numShards, "router.field", shard_fld);
    createCollection(collectionInfos, collectionName, props, client);
  }
  List<Integer> list = collectionInfos.get(collectionName);
  checkForCollection(collectionName, list, null);
  waitForRecoveriesToFinish(false);
  String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
  try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    final DocRouter router = clusterState.getCollection(collectionName).getRouter();
    Slice shard1 = clusterState.getSlice(collectionName, SHARD1);
    DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
    final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
    final int[] docCounts = new int[ranges.size()];
    for (int i = 100; i <= 200; i++) {
      // See comment in ShardRoutingTest for hash distribution
      String shardKey = "" + (char) ('a' + (i % 26));
      collectionClient.add(getDoc(id, i, "n_ti", i, shard_fld, shardKey));
      int idx = getHashRangeIdx(router, ranges, shardKey);
      if (idx != -1) {
        docCounts[idx]++;
      }
    }
    for (int i = 0; i < docCounts.length; i++) {
      int docCount = docCounts[i];
      log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
    }
    collectionClient.commit();
    for (int i = 0; i < 3; i++) {
      try {
        splitShard(collectionName, SHARD1, null, null);
        break;
      } catch (HttpSolrClient.RemoteSolrException e) {
        if (e.code() != 500) {
          throw e;
        }
        log.error("SPLITSHARD failed. " + (i < 2 ? " Retrying split" : ""), e);
        if (i == 2) {
          fail("SPLITSHARD was not successful even after three tries");
        }
      }
    }
    waitForRecoveriesToFinish(collectionName, false);
    assertEquals(docCounts[0], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_0")).getResults().getNumFound());
    assertEquals(docCounts[1], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_1")).getResults().getNumFound());
  }
}
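The splitShard call above is a test-harness helper. Outside the harness, the same SPLITSHARD operation can be issued through SolrJ's Collections API. A minimal sketch, assuming a ZooKeeper ensemble at localhost:9983 (illustrative) and the routeFieldColl collection created above:

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class SplitShardSketch {
  public static void main(String[] args) throws Exception {
    // ZooKeeper address is illustrative; point it at your own ensemble.
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // Issue SPLITSHARD for shard1 of routeFieldColl via the Collections API.
      CollectionAdminRequest.SplitShard split =
          CollectionAdminRequest.splitShard("routeFieldColl").setShardName("shard1");
      split.process(client);
    }
  }
}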
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
From class TestTlogReplica, method testOnlyLeaderIndexes:
public void testOnlyLeaderIndexes() throws Exception {
  createAndWaitForCollection(1, 0, 2, 0);
  CloudSolrClient cloudClient = cluster.getSolrClient();
  new UpdateRequest().add(sdoc("id", "1")).add(sdoc("id", "2")).add(sdoc("id", "3")).add(sdoc("id", "4")).process(cloudClient, collectionName);
  {
    UpdateHandler updateHandler = getSolrCore(true).get(0).getUpdateHandler();
    RefCounted<IndexWriter> iwRef = updateHandler.getSolrCoreState().getIndexWriter(null);
    assertTrue("IndexWriter at leader must see updates", iwRef.get().hasUncommittedChanges());
    iwRef.decref();
  }
  for (SolrCore solrCore : getSolrCore(false)) {
    RefCounted<IndexWriter> iwRef = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
    assertFalse("IndexWriter at replicas must not see updates", iwRef.get().hasUncommittedChanges());
    iwRef.decref();
  }
  checkRTG(1, 4, cluster.getJettySolrRunners());
  new UpdateRequest().deleteById("1").deleteByQuery("id:2").process(cloudClient, collectionName);
  // The DBQ is not processed at replicas, so we can still get doc2 and other docs by RTG
  checkRTG(2, 4, getSolrRunner(false));
  new UpdateRequest().commit(cloudClient, collectionName);
  waitForNumDocsInAllActiveReplicas(2);
  // Update log rollover
  for (SolrCore solrCore : getSolrCore(false)) {
    UpdateLog updateLog = solrCore.getUpdateHandler().getUpdateLog();
    assertFalse(updateLog.hasUncommittedChanges());
  }
  // UpdateLog copies over old updates
  for (int i = 15; i <= 150; i++) {
    cloudClient.add(collectionName, sdoc("id", String.valueOf(i)));
    if (random().nextInt(100) < 15 && i != 150) {
      cloudClient.commit(collectionName);
    }
  }
  checkRTG(120, 150, cluster.getJettySolrRunners());
  waitForReplicasCatchUp(20);
}
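checkRTG is a test helper built around Solr's real-time get handler, which answers from the update log and therefore sees uncommitted documents. A minimal sketch of the underlying request; the class and method names here are hypothetical:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;

public class RealTimeGetSketch {
  // Fetch one document by id through the /get handler. Uncommitted updates
  // are served from the update log, which is what checkRTG relies on.
  static SolrDocument fetchById(CloudSolrClient client, String collection, String id) throws Exception {
    SolrQuery rtg = new SolrQuery();
    rtg.setRequestHandler("/get");
    rtg.set("id", id);
    QueryResponse rsp = client.query(collection, rtg);
    return (SolrDocument) rsp.getResponse().get("doc"); // null if the id is absent
  }
}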
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
From class TestTlogReplica, method testBasicLeaderElection:
public void testBasicLeaderElection() throws Exception {
  createAndWaitForCollection(1, 0, 2, 0);
  CloudSolrClient cloudClient = cluster.getSolrClient();
  new UpdateRequest().deleteByQuery("*:*").commit(cluster.getSolrClient(), collectionName);
  new UpdateRequest().add(sdoc("id", "1")).add(sdoc("id", "2")).process(cloudClient, collectionName);
  JettySolrRunner oldLeaderJetty = getSolrRunner(true).get(0);
  ChaosMonkey.kill(oldLeaderJetty);
  waitForState("Replica not removed", collectionName, activeReplicaCount(0, 1, 0));
  new UpdateRequest().add(sdoc("id", "3")).add(sdoc("id", "4")).process(cloudClient, collectionName);
  ChaosMonkey.start(oldLeaderJetty);
  waitForState("Replica not added", collectionName, activeReplicaCount(0, 2, 0));
  checkRTG(1, 4, cluster.getJettySolrRunners());
  new UpdateRequest().commit(cloudClient, collectionName);
  waitForNumDocsInAllActiveReplicas(4, 0);
}
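Once the election settles, the new leader can be read from the cluster state cached by the client. A minimal sketch, assuming the one-shard layout used above; the class and method names are hypothetical:

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.Replica;

public class LeaderLookupSketch {
  // Read the current shard1 leader from the client's cached cluster state;
  // after ChaosMonkey kills the old leader, a surviving replica shows up here.
  static String leaderCoreUrl(CloudSolrClient client, String collection) {
    Replica leader = client.getZkStateReader().getClusterState()
        .getCollection(collection).getLeader("shard1");
    return leader == null ? null : leader.getCoreUrl();
  }
}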
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
From class TestTlogReplica, method testDeleteById:
public void testDeleteById() throws Exception {
  createAndWaitForCollection(1, 0, 2, 0);
  CloudSolrClient cloudClient = cluster.getSolrClient();
  new UpdateRequest().deleteByQuery("*:*").commit(cluster.getSolrClient(), collectionName);
  new UpdateRequest().add(sdoc("id", "1")).commit(cloudClient, collectionName);
  waitForNumDocsInAllActiveReplicas(1);
  new UpdateRequest().deleteById("1").process(cloudClient, collectionName);
  boolean success = false;
  try {
    checkRTG(1, 1, cluster.getJettySolrRunners());
    success = true;
  } catch (AssertionError e) {
    // expected
  }
  assertFalse("Doc1 is deleted but it still exists", success);
}
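The add/delete-by-id round trip in this test uses UpdateRequest directly. A standalone sketch of the same pattern, with a hypothetical collection name passed in by the caller:

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

public class DeleteByIdSketch {
  // Index one document, then remove it by id, committing each step so the
  // change is visible to ordinary searches (RTG would see it immediately).
  static void addThenDelete(CloudSolrClient client, String collection) throws Exception {
    SolrInputDocument doc = new SolrInputDocument();
    doc.setField("id", "1");
    new UpdateRequest().add(doc).commit(client, collection);
    new UpdateRequest().deleteById("1").commit(client, collection);
  }
}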
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
From class TestSolrCloudWithKerberosAlt, method testCollectionCreateSearchDelete:
protected void testCollectionCreateSearchDelete() throws Exception {
  String collectionName = "testkerberoscollection";
  MiniSolrCloudCluster miniCluster = new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), JettyConfig.builder().setContext("/solr").build());
  CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
  cloudSolrClient.setDefaultCollection(collectionName);
  try {
    assertNotNull(miniCluster.getZkServer());
    List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
    assertEquals(NUM_SERVERS, jettys.size());
    for (JettySolrRunner jetty : jettys) {
      assertTrue(jetty.isRunning());
    }
    // create collection
    String configName = "solrCloudCollectionConfig";
    miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1/conf"), configName);
    CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, NUM_SHARDS, REPLICATION_FACTOR);
    Properties properties = new Properties();
    properties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
    properties.put("solr.tests.maxBufferedDocs", "100000");
    properties.put("solr.tests.ramBufferSizeMB", "100");
    // use non-test classes so RandomizedRunner isn't necessary
    properties.put(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICYFACTORY, TieredMergePolicyFactory.class.getName());
    properties.put("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
    properties.put("solr.directoryFactory", "solr.RAMDirectoryFactory");
    createRequest.setProperties(properties);
    createRequest.process(cloudSolrClient);
    try (SolrZkClient zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
         ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
      zkStateReader.createClusterStateWatchersAndUpdate();
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
      // modify/query collection
      SolrInputDocument doc = new SolrInputDocument();
      doc.setField("id", "1");
      cloudSolrClient.add(doc);
      cloudSolrClient.commit();
      SolrQuery query = new SolrQuery();
      query.setQuery("*:*");
      QueryResponse rsp = cloudSolrClient.query(query);
      assertEquals(1, rsp.getResults().getNumFound());
      // delete the collection we created earlier
      CollectionAdminRequest.deleteCollection(collectionName).process(cloudSolrClient);
      AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, zkStateReader, true, true, 330);
    }
  } finally {
    cloudSolrClient.close();
    miniCluster.shutdown();
  }
}
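The same create, index, query, delete cycle can be run against an external cluster instead of a MiniSolrCloudCluster. A condensed sketch, assuming a Solr 7.x-era SolrJ, the built-in _default configset, and a ZooKeeper address of localhost:9983 (all illustrative):

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.SolrInputDocument;

public class CreateSearchDeleteSketch {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost("localhost:9983").build()) {
      // create a 2-shard collection from the built-in _default configset
      CollectionAdminRequest.createCollection("demo", "_default", 2, 1).process(client);
      // index and commit one document
      SolrInputDocument doc = new SolrInputDocument();
      doc.setField("id", "1");
      client.add("demo", doc);
      client.commit("demo");
      // query, then drop the collection
      long hits = client.query("demo", new SolrQuery("*:*")).getResults().getNumFound();
      System.out.println("numFound = " + hits);
      CollectionAdminRequest.deleteCollection("demo").process(client);
    }
  }
}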