Example usage of org.apache.solr.client.solrj.impl.CloudSolrClient in the Apache lucene-solr project: the testSegmentTerminateEarly method of the TestSegmentSorting class.
public void testSegmentTerminateEarly() throws Exception {
  final SegmentTerminateEarlyTestState testState = new SegmentTerminateEarlyTestState(random());
  final CloudSolrClient client = cluster.getSolrClient();

  // Index documents and optimize so the index consists of merge-sorted segments.
  testState.addDocuments(client, 10, 10, true);

  // Baseline query with the CommonParams.SEGMENT_TERMINATE_EARLY parameter intentionally absent.
  testState.queryTimestampDescending(client);

  // Index a few more documents without optimizing, leaving some segments that are
  // not merge-sorted.
  testState.addDocuments(client, 2, 10, false);

  // Query with the CommonParams.SEGMENT_TERMINATE_EARLY parameter now present.
  testState.queryTimestampDescendingSegmentTerminateEarlyYes(client);
  testState.queryTimestampDescendingSegmentTerminateEarlyNo(client);

  // Parameter present but it won't be used (grouped request).
  testState.queryTimestampDescendingSegmentTerminateEarlyYesGrouped(client);

  // Ascending sort order is _not_ compatible with the merge sort order.
  testState.queryTimestampAscendingSegmentTerminateEarlyYes(client);
}
Example usage of org.apache.solr.client.solrj.impl.CloudSolrClient in the Apache lucene-solr project: the testSnapshots method of the TestSolrCloudSnapshots class.
/**
 * End-to-end test of collection-level snapshots: creates a snapshot (optionally with a
 * stopped replica), backs up from that snapshot, restores the backup, deletes the snapshot
 * (optionally after deleting a replica holding it), and finally verifies that deleting the
 * collection cleans up any remaining snapshot metadata in ZooKeeper.
 */
@Test
public void testSnapshots() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "SolrCloudSnapshots";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", NUM_SHARDS, NUM_REPLICAS);
  create.process(solrClient);

  int nDocs = BackupRestoreUtils.indexDocs(cluster.getSolrClient(), collectionName, docsSeed);
  BackupRestoreUtils.verifyDocs(nDocs, solrClient, collectionName);
  String commitName = TestUtil.randomSimpleString(random(), 1, 5);

  // Verify if snapshot creation works with replica failures.
  boolean replicaFailures = usually();
  Optional<String> stoppedCoreName = Optional.empty();
  if (replicaFailures) {
    // Here the assumption is that Solr will spread the replicas uniformly across nodes.
    // If this is not true for some reason, then we will need to add some logic to find a
    // node with a single replica.
    this.cluster.getRandomJetty(random()).stop();

    // Sleep a bit for allowing ZK watch to fire.
    Thread.sleep(5000);

    // Figure out if at-least one replica is "down".
    DocCollection collState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
    for (Slice s : collState.getSlices()) {
      for (Replica replica : s.getReplicas()) {
        if (replica.getState() == State.DOWN) {
          stoppedCoreName = Optional.of(replica.getCoreName());
        }
      }
    }
  }

  // A stopped core cannot take a snapshot, so the expected count drops by one in that case.
  int expectedCoresWithSnapshot = stoppedCoreName.isPresent() ? (NUM_SHARDS * NUM_REPLICAS) - 1 : (NUM_SHARDS * NUM_REPLICAS);

  CollectionAdminRequest.CreateSnapshot createSnap = new CollectionAdminRequest.CreateSnapshot(collectionName, commitName);
  createSnap.process(solrClient);

  // Verify the collection-level metadata for the freshly created snapshot.
  Collection<CollectionSnapshotMetaData> collectionSnaps = listCollectionSnapshots(solrClient, collectionName);
  assertEquals(1, collectionSnaps.size());
  CollectionSnapshotMetaData meta = collectionSnaps.iterator().next();
  assertEquals(commitName, meta.getName());
  assertEquals(CollectionSnapshotMetaData.SnapshotStatus.Successful, meta.getStatus());
  assertEquals(expectedCoresWithSnapshot, meta.getReplicaSnapshots().size());
  Map<String, CoreSnapshotMetaData> snapshotByCoreName = meta.getReplicaSnapshots().stream().collect(Collectors.toMap(CoreSnapshotMetaData::getCoreName, Function.identity()));

  // Cross-check the collection-level snapshot metadata against each live core's own snapshot list.
  DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
  assertEquals(2, collectionState.getActiveSlices().size());
  for (Slice shard : collectionState.getActiveSlices()) {
    assertEquals(2, shard.getReplicas().size());
    for (Replica replica : shard.getReplicas()) {
      if (stoppedCoreName.isPresent() && stoppedCoreName.get().equals(replica.getCoreName())) {
        // We know that the snapshot is not created for this replica.
        continue;
      }
      String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
      String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
      assertTrue(snapshotByCoreName.containsKey(coreName));
      CoreSnapshotMetaData coreSnapshot = snapshotByCoreName.get(coreName);
      try (SolrClient adminClient = getHttpSolrClient(replicaBaseUrl)) {
        Collection<SnapshotMetaData> snapshots = listCoreSnapshots(adminClient, coreName);
        Optional<SnapshotMetaData> metaData = snapshots.stream().filter(x -> commitName.equals(x.getName())).findFirst();
        assertTrue("Snapshot not created for core " + coreName, metaData.isPresent());
        assertEquals(coreSnapshot.getIndexDirPath(), metaData.get().getIndexDirPath());
        assertEquals(coreSnapshot.getGenerationNumber(), metaData.get().getGenerationNumber());
      }
    }
  }

  // Delete all documents so the later restore proves docs come from the snapshot, not the live index.
  {
    solrClient.deleteByQuery(collectionName, "*:*");
    solrClient.commit(collectionName);
    BackupRestoreUtils.verifyDocs(0, solrClient, collectionName);
  }

  String backupLocation = createTempDir().toFile().getAbsolutePath();
  String backupName = "mytestbackup";
  String restoreCollectionName = collectionName + "_restored";

  // Create a backup using the earlier created snapshot.
  {
    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName).setLocation(backupLocation).setCommitName(commitName);
    if (random().nextBoolean()) {
      assertEquals(0, backup.process(solrClient).getStatus());
    } else {
      // async
      assertEquals(RequestStatusState.COMPLETED, backup.processAndWait(solrClient, 30));
    }
  }

  // Restore backup.
  {
    CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName).setLocation(backupLocation);
    if (replicaFailures) {
      // In this case one of the Solr servers would be down. Hence we need to increase
      // max_shards_per_node property for restore command to succeed.
      restore.setMaxShardsPerNode(2);
    }
    if (random().nextBoolean()) {
      assertEquals(0, restore.process(solrClient).getStatus());
    } else {
      // async
      assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(solrClient, 30));
    }
    AbstractDistribZkTestBase.waitForRecoveriesToFinish(restoreCollectionName, cluster.getSolrClient().getZkStateReader(), log.isDebugEnabled(), true, 30);
    // The restored collection must contain the document count captured by the snapshot.
    BackupRestoreUtils.verifyDocs(nDocs, solrClient, restoreCollectionName);
  }

  // Verify if the snapshot deletion works correctly when one or more replicas containing the
  // snapshot are deleted.
  boolean replicaDeletion = rarely();
  if (replicaDeletion) {
    CoreSnapshotMetaData replicaToDelete = null;
    for (String shardId : meta.getShards()) {
      List<CoreSnapshotMetaData> replicas = meta.getReplicaSnapshotsForShard(shardId);
      if (replicas.size() > 1) {
        int r_index = random().nextInt(replicas.size());
        replicaToDelete = replicas.get(r_index);
      }
    }
    if (replicaToDelete != null) {
      collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
      for (Slice s : collectionState.getSlices()) {
        for (Replica r : s.getReplicas()) {
          if (r.getCoreName().equals(replicaToDelete.getCoreName())) {
            log.info("Deleting replica {}", r);
            CollectionAdminRequest.DeleteReplica delReplica = CollectionAdminRequest.deleteReplica(collectionName, replicaToDelete.getShardId(), r.getName());
            delReplica.process(solrClient);
            // The replica deletion will cleanup the snapshot meta-data.
            snapshotByCoreName.remove(r.getCoreName());
            break;
          }
        }
      }
    }
  }

  // Delete snapshot
  CollectionAdminRequest.DeleteSnapshot deleteSnap = new CollectionAdminRequest.DeleteSnapshot(collectionName, commitName);
  deleteSnap.process(solrClient);

  // Wait for a while so that the clusterstate.json updates are propagated to the client side.
  Thread.sleep(2000);
  collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);

  // Confirm no live core still lists the deleted snapshot.
  for (Slice shard : collectionState.getActiveSlices()) {
    for (Replica replica : shard.getReplicas()) {
      if (stoppedCoreName.isPresent() && stoppedCoreName.get().equals(replica.getCoreName())) {
        // We know that the snapshot was not created for this replica.
        continue;
      }
      String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
      String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
      try (SolrClient adminClient = getHttpSolrClient(replicaBaseUrl)) {
        Collection<SnapshotMetaData> snapshots = listCoreSnapshots(adminClient, coreName);
        Optional<SnapshotMetaData> metaData = snapshots.stream().filter(x -> commitName.equals(x.getName())).findFirst();
        assertFalse("Snapshot not deleted for core " + coreName, metaData.isPresent());
        // Remove the entry for core if the snapshot is deleted successfully.
        snapshotByCoreName.remove(coreName);
      }
    }
  }

  // Verify all core-level snapshots are deleted.
  assertTrue("The cores remaining " + snapshotByCoreName, snapshotByCoreName.isEmpty());
  assertTrue(listCollectionSnapshots(solrClient, collectionName).isEmpty());

  // Verify if the collection deletion result in proper cleanup of snapshot metadata.
  {
    String commitName_2 = commitName + "_2";
    CollectionAdminRequest.CreateSnapshot createSnap_2 = new CollectionAdminRequest.CreateSnapshot(collectionName, commitName_2);
    assertEquals(0, createSnap_2.process(solrClient).getStatus());
    Collection<CollectionSnapshotMetaData> collectionSnaps_2 = listCollectionSnapshots(solrClient, collectionName);
    // BUG FIX: the original asserted against the stale `collectionSnaps` list captured for the
    // first snapshot (vacuously true); it must check the freshly fetched `collectionSnaps_2`.
    assertEquals(1, collectionSnaps_2.size());
    assertEquals(commitName_2, collectionSnaps_2.iterator().next().getName());

    // Delete collection
    CollectionAdminRequest.Delete deleteCol = CollectionAdminRequest.deleteCollection(collectionName);
    assertEquals(0, deleteCol.process(solrClient).getStatus());
    assertTrue(SolrSnapshotManager.listSnapshots(solrClient.getZkStateReader().getZkClient(), collectionName).isEmpty());
  }
}
Example usage of org.apache.solr.client.solrj.impl.CloudSolrClient in the Apache lucene-solr project: the testIndexOptimization method of the TestSolrCoreSnapshots class.
/**
 * Verifies that a core-level snapshot pins its index commit across optimize calls:
 * while the snapshot exists, the snapshotted commit survives optimize; once the snapshot
 * is deleted, a subsequent optimize purges it, leaving a single (different) commit.
 */
@Test
public void testIndexOptimization() throws Exception {
CloudSolrClient solrClient = cluster.getSolrClient();
String collectionName = "SolrCoreSnapshots_IndexOptimization";
// Single-shard, single-replica collection so there is exactly one core to snapshot.
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
create.process(solrClient);
int nDocs = BackupRestoreUtils.indexDocs(cluster.getSolrClient(), collectionName, docsSeed);
// Resolve the single replica (and its core name) backing the collection.
DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
assertEquals(1, collectionState.getActiveSlices().size());
Slice shard = collectionState.getActiveSlices().iterator().next();
assertEquals(1, shard.getReplicas().size());
Replica replica = shard.getReplicas().iterator().next();
String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
String commitName = TestUtil.randomSimpleString(random(), 1, 5);
// adminClient targets the node (for snapshot admin commands); masterClient targets the core
// directly (for indexing/optimize).
try (SolrClient adminClient = getHttpSolrClient(cluster.getJettySolrRunners().get(0).getBaseUrl().toString());
SolrClient masterClient = getHttpSolrClient(replica.getCoreUrl())) {
// Snapshot the current commit before mutating the index further.
SnapshotMetaData metaData = createSnapshot(adminClient, coreName, commitName);
// Run a random number of modification rounds (a single no-op round when the index is empty).
int numTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
for (int attempt = 0; attempt < numTests; attempt++) {
//Modify existing index before we call optimize.
if (nDocs > 0) {
//Delete a few docs
int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
for (int i = 0; i < numDeletes; i++) {
masterClient.deleteByQuery("id:" + i);
}
//Add a few more
int moreAdds = TestUtil.nextInt(random(), 1, 100);
for (int i = 0; i < moreAdds; i++) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", i + nDocs);
doc.addField("name", "name = " + (i + nDocs));
masterClient.add(doc);
}
masterClient.commit();
}
}
// Before invoking optimize command, verify that the index directory contains the commit we
// snapshotted earlier. (Only size() > 0 is asserted here: when nDocs == 0, the modification
// loop above is a no-op and the snapshotted commit may be the only one present.)
{
Collection<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
// Verify that at least one index commit is stored in this directory.
assertTrue(commits.size() > 0);
// Verify that the snapshot commit is present in this directory.
assertTrue(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
}
// Optimize the index.
masterClient.optimize(true, true, 1);
// After invoking optimize command, verify that the index directory contains multiple commits
// (including the one we snapshotted earlier): the snapshot must protect its commit from the
// post-optimize cleanup.
{
List<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
// Verify that multiple index commits are stored in this directory.
assertTrue(commits.size() > 1);
// Verify that the snapshot commit is present in this directory.
assertTrue(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
}
// Delete the snapshot, releasing the pinned commit.
deleteSnapshot(adminClient, coreName, metaData.getName());
// Add few documents. Without this the optimize command below does not take effect.
{
int moreAdds = TestUtil.nextInt(random(), 1, 100);
for (int i = 0; i < moreAdds; i++) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", i + nDocs);
doc.addField("name", "name = " + (i + nDocs));
masterClient.add(doc);
}
masterClient.commit();
}
// Optimize the index.
masterClient.optimize(true, true, 1);
// Verify that the index directory contains only 1 index commit (which is not the same as the
// snapshotted commit) — with the snapshot gone, nothing prevents the old commit's removal.
Collection<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
assertTrue(commits.size() == 1);
assertFalse(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
}
}
Example usage of org.apache.solr.client.solrj.impl.CloudSolrClient in the Apache lucene-solr project: the addField method of the PreAnalyzedFieldManagedSchemaCloudTest class.
private void addField(Map<String, Object> field) throws Exception {
  final CloudSolrClient solrClient = cluster.getSolrClient();

  // Add the field through the Schema API and confirm the request succeeded cleanly.
  final UpdateResponse addResponse = new SchemaRequest.AddField(field).process(solrClient, COLLECTION);
  assertNotNull(addResponse);
  assertEquals(0, addResponse.getStatus());
  assertNull(addResponse.getResponse().get("errors"));

  // Look the field up again to verify it is now part of the managed schema.
  final String name = field.get("name").toString();
  final FieldResponse lookupResponse = new SchemaRequest.Field(name).process(solrClient, COLLECTION);
  assertNotNull(lookupResponse);
  assertEquals(0, lookupResponse.getStatus());
}
Example usage of org.apache.solr.client.solrj.impl.CloudSolrClient in the Apache lucene-solr project: the testReloadAndAddSimple method of the TestManagedSchemaAPI class.
private void testReloadAndAddSimple(String collection) throws IOException, SolrServerException {
  final CloudSolrClient client = cluster.getSolrClient();

  // Register a new string field in the managed schema.
  final String newFieldName = "myNewField";
  addStringField(newFieldName, collection, client);

  // Reload the collection so every core picks up the schema change.
  final CollectionAdminResponse reloadResponse =
      CollectionAdminRequest.reloadCollection(collection).process(client);
  assertEquals(0, reloadResponse.getStatus());
  assertTrue(reloadResponse.isSuccess());

  // Indexing a document that uses the new field must now succeed.
  final SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "1");
  doc.addField(newFieldName, "val");
  final UpdateRequest updateRequest = new UpdateRequest().add(doc);
  client.request(updateRequest, collection);
}
Aggregations