Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
The class TestCloudSchemaless, method test:
@Test
@ShardsFixed(num = 8)
public void test() throws Exception {
  setupHarnesses();
  // First, add a bunch of documents in a single update with the same new field.
  // This tests that the replicas properly handle schema additions.
  int slices = getCommonCloudSolrClient().getZkStateReader().getClusterState().getActiveSlices("collection1").size();
  int trials = 50;
  // Generate enough docs so that we can expect at least a doc per slice.
  int numDocsPerTrial = (int) (slices * (Math.log(slices) + 1));
  SolrClient randomClient = clients.get(random().nextInt(clients.size()));
  int docNumber = 0;
  for (int i = 0; i < trials; ++i) {
    List<SolrInputDocument> docs = new ArrayList<>();
    for (int j = 0; j < numDocsPerTrial; ++j) {
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("id", Long.toHexString(Double.doubleToLongBits(random().nextDouble())));
      doc.addField("newTestFieldInt" + docNumber++, "123");
      doc.addField("constantField", "3.14159");
      docs.add(doc);
    }
    randomClient.add(docs);
  }
  randomClient.commit();

  String[] expectedFields = getExpectedFieldResponses(docNumber);
  // Check that all the fields were added.
  for (RestTestHarness client : restTestHarnesses) {
    String request = "/schema/fields?wt=xml";
    String response = client.query(request);
    String result = BaseTestHarness.validateXPath(response, expectedFields);
    if (result != null) {
      String msg = "QUERY FAILED: xpath=" + result + " request=" + request + " response=" + response;
      log.error(msg);
      fail(msg);
    }
  }

  // Now, let's ensure that writing the same field with two different types fails.
  int failTrials = 50;
  for (int i = 0; i < failTrials; ++i) {
    List<SolrInputDocument> docs = null;
    SolrInputDocument intDoc = new SolrInputDocument();
    intDoc.addField("id", Long.toHexString(Double.doubleToLongBits(random().nextDouble())));
    intDoc.addField("longOrDateField" + i, "123");
    SolrInputDocument dateDoc = new SolrInputDocument();
    dateDoc.addField("id", Long.toHexString(Double.doubleToLongBits(random().nextDouble())));
    dateDoc.addField("longOrDateField" + i, "1995-12-31T23:59:59Z");
    // Randomize the order of the docs.
    if (random().nextBoolean()) {
      docs = Arrays.asList(intDoc, dateDoc);
    } else {
      docs = Arrays.asList(dateDoc, intDoc);
    }
    try {
      randomClient.add(docs);
      randomClient.commit();
      fail("Expected Bad Request Exception");
    } catch (SolrException se) {
      assertEquals(ErrorCode.BAD_REQUEST, ErrorCode.getErrorCode(se.code()));
    }
    try {
      CloudSolrClient cloudSolrClient = getCommonCloudSolrClient();
      cloudSolrClient.add(docs);
      cloudSolrClient.commit();
      fail("Expected Bad Request Exception");
    } catch (SolrException ex) {
      assertEquals(ErrorCode.BAD_REQUEST, ErrorCode.getErrorCode(ex.code()));
    }
  }
}
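In schemaless mode a new field's type is inferred from the first value a replica sees, which is why sending an integer value and a date value for the same new field must fail with a Bad Request. The test above verifies the resulting schema through the raw REST test harness and XPath; a minimal SolrJ-only sketch of the same kind of check, assuming any SolrClient pointed at the collection (the helper name and the field name are illustrative, not part of the test):

import java.util.Map;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.schema.SchemaRequest;
import org.apache.solr.client.solrj.response.schema.SchemaResponse;

// Hedged sketch (not part of the test above): ask the managed schema for its fields
// via SolrJ's Schema API and check that an auto-created field is present.
static boolean schemaHasField(SolrClient client, String fieldName) throws Exception {
  SchemaResponse.FieldsResponse rsp = new SchemaRequest.Fields().process(client);
  for (Map<String, Object> field : rsp.getFields()) {
    if (fieldName.equals(field.get("name"))) {
      return true;
    }
  }
  return false;
}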
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
The class TestManagedSchemaAPI, method testAddFieldAndDocument:
private void testAddFieldAndDocument(String collection) throws IOException, SolrServerException {
  CloudSolrClient cloudClient = cluster.getSolrClient();
  String fieldName = "myNewField1";
  addStringField(fieldName, collection, cloudClient);
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "2");
  doc.addField(fieldName, "val1");
  UpdateRequest ureq = new UpdateRequest().add(doc);
  cloudClient.request(ureq, collection);
}
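The addStringField helper is defined elsewhere in TestManagedSchemaAPI and is not shown in this excerpt. A minimal sketch of what such a helper could look like, using SolrJ's Schema API; the field attributes below are assumptions for illustration, not necessarily the test's actual settings:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.schema.SchemaRequest;
import org.apache.solr.client.solrj.response.schema.SchemaResponse;

// Hedged sketch: add a stored, indexed string field to the managed schema of a collection.
private void addStringField(String fieldName, String collection, SolrClient client) throws Exception {
  Map<String, Object> fieldAttributes = new LinkedHashMap<>();
  fieldAttributes.put("name", fieldName);
  fieldAttributes.put("type", "string");   // assumes the configset defines a "string" field type
  fieldAttributes.put("stored", true);
  fieldAttributes.put("indexed", true);
  SchemaRequest.AddField addFieldRequest = new SchemaRequest.AddField(fieldAttributes);
  SchemaResponse.UpdateResponse response = addFieldRequest.process(client, collection);
  assertEquals(0, response.getStatus());
}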
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
The class TupleStream, method getShards:
public static List<String> getShards(String zkHost, String collection, StreamContext streamContext) throws IOException {
  Map<String, List<String>> shardsMap = null;
  List<String> shards = new ArrayList<>();
  if (streamContext != null) {
    shardsMap = (Map<String, List<String>>) streamContext.get("shards");
  }
  if (shardsMap != null) {
    // Manual sharding: the caller supplied explicit shard URLs per collection.
    shards = shardsMap.get(collection);
  } else {
    // SolrCloud sharding: discover active replicas from the ZooKeeper cluster state.
    CloudSolrClient cloudSolrClient = streamContext.getSolrClientCache().getCloudSolrClient(zkHost);
    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    Collection<Slice> slices = getSlices(collection, zkStateReader, true);
    Set<String> liveNodes = clusterState.getLiveNodes();
    for (Slice slice : slices) {
      Collection<Replica> replicas = slice.getReplicas();
      List<Replica> shuffler = new ArrayList<>();
      for (Replica replica : replicas) {
        if (replica.getState() == Replica.State.ACTIVE && liveNodes.contains(replica.getNodeName())) {
          shuffler.add(replica);
        }
      }
      // Pick one active replica per slice at random to spread the load.
      Collections.shuffle(shuffler, new Random());
      Replica rep = shuffler.get(0);
      ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep);
      String url = zkProps.getCoreUrl();
      shards.add(url);
    }
  }
  return shards;
}
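A hedged usage sketch of the two paths above: when the StreamContext carries a "shards" map the zkHost is never consulted, otherwise the shard URLs are discovered from ZooKeeper through the context's SolrClientCache. Host names and replica URLs below are illustrative assumptions.

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.solr.client.solrj.io.SolrClientCache;
import org.apache.solr.client.solrj.io.stream.StreamContext;
import org.apache.solr.client.solrj.io.stream.TupleStream;

// Hedged sketch: resolve shard URLs for a collection, first with manual sharding,
// then via SolrCloud discovery.
static void resolveShardsExample(String zkHost) throws IOException {
  // Manual sharding: supply the per-collection shard URLs directly.
  StreamContext manualContext = new StreamContext();
  Map<String, List<String>> shardsMap = new HashMap<>();
  shardsMap.put("collection1", Arrays.asList(
      "http://host1:8983/solr/collection1_shard1_replica1",
      "http://host2:8983/solr/collection1_shard2_replica1"));
  manualContext.put("shards", shardsMap);
  List<String> manualShards = TupleStream.getShards(zkHost, "collection1", manualContext);

  // SolrCloud sharding: no "shards" entry, so the live cluster state is consulted.
  StreamContext cloudContext = new StreamContext();
  cloudContext.setSolrClientCache(new SolrClientCache());
  List<String> cloudShards = TupleStream.getShards(zkHost, "collection1", cloudContext);
  cloudContext.getSolrClientCache().close();  // release cached clients when done
}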
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
The class TestSolrCoreSnapshots, method testBackupRestore:
@Test
public void testBackupRestore() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "SolrCoreSnapshots";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
  create.process(solrClient);
  String location = createTempDir().toFile().getAbsolutePath();
  int nDocs = BackupRestoreUtils.indexDocs(cluster.getSolrClient(), collectionName, docsSeed);
  DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
  assertEquals(1, collectionState.getActiveSlices().size());
  Slice shard = collectionState.getActiveSlices().iterator().next();
  assertEquals(1, shard.getReplicas().size());
  Replica replica = shard.getReplicas().iterator().next();
  String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
  String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
  String backupName = TestUtil.randomSimpleString(random(), 1, 5);
  String commitName = TestUtil.randomSimpleString(random(), 1, 5);
  String duplicateName = commitName.concat("_duplicate");
  try (SolrClient adminClient = getHttpSolrClient(cluster.getJettySolrRunners().get(0).getBaseUrl().toString());
       SolrClient masterClient = getHttpSolrClient(replica.getCoreUrl())) {
    SnapshotMetaData metaData = createSnapshot(adminClient, coreName, commitName);
    // Create another snapshot referring to the same index commit to verify the
    // reference counting implementation during snapshot deletion.
    SnapshotMetaData duplicateCommit = createSnapshot(adminClient, coreName, duplicateName);
    assertEquals(metaData.getIndexDirPath(), duplicateCommit.getIndexDirPath());
    assertEquals(metaData.getGenerationNumber(), duplicateCommit.getGenerationNumber());
    // Delete all documents.
    masterClient.deleteByQuery("*:*");
    masterClient.commit();
    BackupRestoreUtils.verifyDocs(0, cluster.getSolrClient(), collectionName);
    // Verify that the index directory contains at least 2 index commits - one referred to by the
    // snapshots and the other containing the document deletions.
    {
      List<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
      assertTrue(commits.size() >= 2);
    }
    // Back up the snapshot created earlier.
    {
      Map<String, String> params = new HashMap<>();
      params.put("name", backupName);
      params.put("commitName", commitName);
      params.put("location", location);
      BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.BACKUPCORE.toString(), params);
    }
    // Restore the backup.
    {
      Map<String, String> params = new HashMap<>();
      params.put("name", "snapshot." + backupName);
      params.put("location", location);
      BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.RESTORECORE.toString(), params);
      BackupRestoreUtils.verifyDocs(nDocs, cluster.getSolrClient(), collectionName);
    }
    // Verify that the old index directory (before the restore) contains only the index commits referred to
    // by snapshots. The IndexWriter (used to clean up index files) creates an additional commit during
    // closing, hence we expect 2 commits (instead of 1).
    {
      List<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
      assertEquals(2, commits.size());
      assertEquals(metaData.getGenerationNumber(), commits.get(0).getGeneration());
    }
    // Delete the first snapshot.
    deleteSnapshot(adminClient, coreName, commitName);
    // Verify that the corresponding index files have NOT been deleted (due to reference counting).
    assertFalse(listCommits(metaData.getIndexDirPath()).isEmpty());
    // Delete the second snapshot.
    deleteSnapshot(adminClient, coreName, duplicateCommit.getName());
    // Verify that the corresponding index files have been deleted. Ideally this directory should
    // be removed immediately, but the current DirectoryFactory impl waits until the core (or the
    // directoryFactory) is closed before the actual removal. Since the IndexWriter (used to clean up
    // index files) creates an additional commit during closing, we expect a single commit (instead of 0).
    assertEquals(1, listCommits(duplicateCommit.getIndexDirPath()).size());
  }
}
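The createSnapshot, deleteSnapshot, and listCommits helpers are defined elsewhere in TestSolrCoreSnapshots and are not shown in this excerpt. A minimal sketch of what a listCommits-style helper could look like, under the assumption that it simply delegates to Lucene's DirectoryReader.listCommits over the on-disk index directory:

import java.nio.file.Paths;
import java.util.Collections;
import java.util.List;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexNotFoundException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

// Hedged sketch: list all index commits present in an index directory.
private List<IndexCommit> listCommits(String indexDirPath) throws Exception {
  try (Directory dir = FSDirectory.open(Paths.get(indexDirPath))) {
    return DirectoryReader.listCommits(dir);
  } catch (IndexNotFoundException e) {
    // No commits at all in this directory.
    return Collections.emptyList();
  }
}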
Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache.
The class SearchHandlerTest, method testZkConnected:
@Test
public void testZkConnected() throws Exception {
  MiniSolrCloudCluster miniCluster = new MiniSolrCloudCluster(5, createTempDir(), buildJettyConfig("/solr"));
  final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
  try {
    assertNotNull(miniCluster.getZkServer());
    List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
    assertEquals(5, jettys.size());
    for (JettySolrRunner jetty : jettys) {
      assertTrue(jetty.isRunning());
    }
    // Create the collection.
    String collectionName = "testSolrCloudCollection";
    String configName = "solrCloudCollectionConfig";
    miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1/conf"), configName);
    CollectionAdminRequest.createCollection(collectionName, configName, 2, 2).process(miniCluster.getSolrClient());
    // The response header should report that the node serving the request is connected to ZooKeeper.
    QueryRequest req = new QueryRequest();
    QueryResponse rsp = req.process(cloudSolrClient, collectionName);
    assertTrue(rsp.getResponseHeader().getBooleanArg("zkConnected"));
  } finally {
    miniCluster.shutdown();
  }
}
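The same check can also be expressed with an explicit query. A minimal sketch, assuming the collection above already exists and a CloudSolrClient is available; the method name and query string are illustrative:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

// Hedged sketch: run a match-all query against the collection and read the
// "zkConnected" flag that SearchHandler places in the response header.
static boolean isZkConnected(CloudSolrClient client, String collectionName) throws Exception {
  QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
  return Boolean.TRUE.equals(rsp.getResponseHeader().getBooleanArg("zkConnected"));
}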