Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
The class TestSolrCoreSnapshots, method testIndexOptimization:
@Test
public void testIndexOptimization() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "SolrCoreSnapshots_IndexOptimization";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
  create.process(solrClient);
  int nDocs = BackupRestoreUtils.indexDocs(cluster.getSolrClient(), collectionName, docsSeed);
  DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
  assertEquals(1, collectionState.getActiveSlices().size());
  Slice shard = collectionState.getActiveSlices().iterator().next();
  assertEquals(1, shard.getReplicas().size());
  Replica replica = shard.getReplicas().iterator().next();
  String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
  String commitName = TestUtil.randomSimpleString(random(), 1, 5);
  try (SolrClient adminClient = getHttpSolrClient(cluster.getJettySolrRunners().get(0).getBaseUrl().toString());
       SolrClient masterClient = getHttpSolrClient(replica.getCoreUrl())) {
    SnapshotMetaData metaData = createSnapshot(adminClient, coreName, commitName);
    int numTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
    for (int attempt = 0; attempt < numTests; attempt++) {
      // Modify the existing index before we call optimize.
      if (nDocs > 0) {
        // Delete a few docs.
        int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
        for (int i = 0; i < numDeletes; i++) {
          masterClient.deleteByQuery("id:" + i);
        }
        // Add a few more.
        int moreAdds = TestUtil.nextInt(random(), 1, 100);
        for (int i = 0; i < moreAdds; i++) {
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", i + nDocs);
          doc.addField("name", "name = " + (i + nDocs));
          masterClient.add(doc);
        }
        masterClient.commit();
      }
    }
    // Before invoking the optimize command, verify that the index directory contains multiple commits (including the one we snapshotted earlier).
    {
      Collection<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
      // Verify that index commits are stored in this directory.
      assertTrue(commits.size() > 0);
      // Verify that the snapshot commit is present in this directory.
      assertTrue(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
    }
    // Optimize the index.
    masterClient.optimize(true, true, 1);
    // After invoking the optimize command, verify that the index directory still contains multiple commits (including the one we snapshotted earlier).
    {
      List<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
      // Verify that multiple index commits are stored in this directory.
      assertTrue(commits.size() > 1);
      // Verify that the snapshot commit is present in this directory.
      assertTrue(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
    }
    // Delete the snapshot.
    deleteSnapshot(adminClient, coreName, metaData.getName());
    // Add a few documents. Without this, the optimize command below does not take effect.
    {
      int moreAdds = TestUtil.nextInt(random(), 1, 100);
      for (int i = 0; i < moreAdds; i++) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", i + nDocs);
        doc.addField("name", "name = " + (i + nDocs));
        masterClient.add(doc);
      }
      masterClient.commit();
    }
    // Optimize the index.
    masterClient.optimize(true, true, 1);
    // Verify that the index directory contains only one index commit (which is not the same as the snapshotted commit).
    Collection<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
    assertTrue(commits.size() == 1);
    assertFalse(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
  }
}
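The snippet depends on three private helpers that this page does not show: createSnapshot, deleteSnapshot, and listCommits. As an orientation aid only, here is a minimal sketch of the latter two, assuming the CoreAdminRequest.DeleteSnapshot SolrJ request and Lucene's DirectoryReader.listCommits API (imports from org.apache.lucene.index, org.apache.lucene.store, and org.apache.solr.client.solrj.request are assumed); createSnapshot follows the same shape around CoreAdminRequest.CreateSnapshot. The real test's implementations may differ in detail.

private void deleteSnapshot(SolrClient adminClient, String coreName, String commitName) throws Exception {
  // Issue a DELETESNAPSHOT core admin command for the named snapshot.
  CoreAdminRequest.DeleteSnapshot req = new CoreAdminRequest.DeleteSnapshot(commitName);
  req.setCoreName(coreName);
  req.process(adminClient);
}

private List<IndexCommit> listCommits(String directory) throws Exception {
  // Enumerate every commit point still present in the on-disk index directory.
  try (Directory dir = FSDirectory.open(Paths.get(directory))) {
    return DirectoryReader.listCommits(dir);
  } catch (IndexNotFoundException ex) {
    // The directory may hold no commits at all, e.g. right after cleanup.
    return Collections.emptyList();
  }
}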
Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
The class TestDynamicLoading, method setupHarnesses:
private void setupHarnesses() {
  for (final SolrClient client : clients) {
    RestTestHarness harness = new RestTestHarness(() -> ((HttpSolrClient) client).getBaseURL());
    restTestHarnesses.add(harness);
  }
}
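Each harness wraps one node's base URL and can then issue raw calls against that node's REST API. As a hedged illustration (the path and the restTestHarnesses.get(0) access are invented for the example, not taken from the test, and assume RestTestHarness.query performs an HTTP GET returning the raw response body):

// Illustrative usage: fetch the managed schema's field list from the first node.
RestTestHarness harness = restTestHarnesses.get(0);
String response = harness.query("/schema/fields?wt=json");
assertNotNull(response);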
Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
The class TestCloudManagedSchemaConcurrent, method setupHarnesses:
private void setupHarnesses() {
  for (final SolrClient client : clients) {
    RestTestHarness harness = new RestTestHarness(() -> ((HttpSolrClient) client).getBaseURL());
    restTestHarnesses.add(harness);
  }
}
Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
The class TestBulkSchemaConcurrent, method setupHarnesses:
private void setupHarnesses() {
  for (final SolrClient client : clients) {
    RestTestHarness harness = new RestTestHarness(() -> ((HttpSolrClient) client).getBaseURL());
    restTestHarnesses.add(harness);
  }
}
Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
The class TestSmileRequest, method testDistribJsonRequest:
@Test
public void testDistribJsonRequest() throws Exception {
  initServers();
  SolrTestCaseHS.Client client = servers.getClient(random().nextInt());
  client.tester = new SolrTestCaseHS.Client.Tester() {
    @Override
    public void assertJQ(SolrClient client, SolrParams args, String... tests) throws Exception {
      // Swap in the Smile parser so responses come back in Smile binary format.
      ((HttpSolrClient) client).setParser(SmileResponseParser.inst);
      QueryRequest query = new QueryRequest(args);
      String path = args.get("qt");
      if (path != null) {
        query.setPath(path);
      }
      NamedList<Object> rsp = client.request(query);
      // Re-serialize the decoded response to JSON so the standard matchers apply.
      Map m = rsp.asMap(5);
      String jsonStr = Utils.toJSONString(m);
      SolrTestCaseHS.matchJSON(jsonStr, tests);
    }
  };
  client.queryDefaults().set("shards", servers.getShards());
  TestJsonRequest.doJsonRequest(client);
}
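The key move here is ((HttpSolrClient) client).setParser(SmileResponseParser.inst): the client keeps sending ordinary queries, but decodes the wire responses with a Smile parser before the test re-serializes them to JSON for matching. The same pattern works with any ResponseParser. Below is a minimal sketch using SolrJ's stock XMLResponseParser instead (SmileResponseParser is local to the test); the URL and collection name are placeholders.

// Hypothetical sketch: swap the response parser on a plain HttpSolrClient.
try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) { // placeholder URL
  client.setParser(new XMLResponseParser()); // request and decode XML instead of the default javabin
  QueryRequest query = new QueryRequest(new SolrQuery("*:*"));
  NamedList<Object> rsp = client.request(query);
  System.out.println(Utils.toJSONString(rsp.asMap(5))); // same asMap/JSON conversion as the test
}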