Use of org.apache.solr.SolrTestCaseJ4.getHttpSolrClient in project lucene-solr by apache.
The class DistributedVersionInfoTest, method testReplicaVersionHandling.
@Test
public void testReplicaVersionHandling() throws Exception {
final String shardId = "shard1";
CollectionAdminRequest.createCollection(COLLECTION, "conf", 1, 3).processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
final ZkStateReader stateReader = cluster.getSolrClient().getZkStateReader();
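// block until the collection reports 1 shard with 3 active replicas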
stateReader.waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS, (n, c) -> DocCollection.isFullyActive(n, c, 1, 3));
final Replica leader = stateReader.getLeaderRetry(COLLECTION, shardId);
// start by reloading the empty collection so we try to calculate the max from an empty index
reloadCollection(leader, COLLECTION);
sendDoc(1);
cluster.getSolrClient().commit(COLLECTION);
// verify doc is on the leader and replica
final List<Replica> notLeaders = stateReader.getClusterState().getCollection(COLLECTION).getReplicas().stream().filter(r -> r.getCoreName().equals(leader.getCoreName()) == false).collect(Collectors.toList());
assertDocsExistInAllReplicas(leader, notLeaders, COLLECTION, 1, 1, null);
// get max version from the leader and replica
Replica replica = notLeaders.get(0);
Long maxOnLeader = getMaxVersionFromIndex(leader);
Long maxOnReplica = getMaxVersionFromIndex(replica);
assertEquals("leader and replica should have same max version: " + maxOnLeader, maxOnLeader, maxOnReplica);
// send the same doc but with a lower version than the max in the index
try (SolrClient client = getHttpSolrClient(replica.getCoreUrl())) {
String docId = String.valueOf(1);
SolrInputDocument doc = new SolrInputDocument();
doc.setField("id", docId);
// bad version!!!
doc.setField("_version_", maxOnReplica - 1);
// simulate what the leader does when sending a doc to a replica
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(DISTRIB_UPDATE_PARAM, DistributedUpdateProcessor.DistribPhase.FROMLEADER.toString());
params.set(DISTRIB_FROM, leader.getCoreUrl());
UpdateRequest req = new UpdateRequest();
req.setParams(params);
req.add(doc);
log.info("Sending doc with out-of-date version (" + (maxOnReplica - 1) + ") document directly to replica");
client.request(req);
client.commit();
Long docVersion = getVersionFromIndex(replica, docId);
assertEquals("older version should have been thrown away", maxOnReplica, docVersion);
}
reloadCollection(leader, COLLECTION);
maxOnLeader = getMaxVersionFromIndex(leader);
maxOnReplica = getMaxVersionFromIndex(replica);
assertEquals("leader and replica should have same max version after reload", maxOnLeader, maxOnReplica);
// now start sending docs while collection is reloading
delQ("*:*");
commit();
final Set<Integer> deletedDocs = new HashSet<>();
final AtomicInteger docsSent = new AtomicInteger(0);
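// fixed seed so the randomized delays below are repeatable across runs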
final Random rand = new Random(5150);
Thread docSenderThread = new Thread() {
public void run() {
// brief delay before sending docs
try {
Thread.sleep(rand.nextInt(30) + 1);
} catch (InterruptedException e) {
// ignore: interruption only shortens the randomized pause
}
for (int i = 0; i < 1000; i++) {
if (i % (rand.nextInt(20) + 1) == 0) {
try {
Thread.sleep(rand.nextInt(50) + 1);
} catch (InterruptedException e) {
// ignore: interruption only shortens the randomized pause
}
}
int docId = i + 1;
try {
sendDoc(docId);
docsSent.incrementAndGet();
} catch (Exception e) {
// ignore: transient update failures are expected while the collection reloads
}
}
}
};
Thread reloaderThread = new Thread() {
public void run() {
try {
Thread.sleep(rand.nextInt(300) + 1);
} catch (InterruptedException e) {
// ignore: interruption only shortens the randomized pause
}
for (int i = 0; i < 3; i++) {
try {
reloadCollection(leader, COLLECTION);
} catch (Exception e) {
// ignore: reload failures are tolerated while updates are in flight
}
try {
Thread.sleep(rand.nextInt(300) + 300);
} catch (InterruptedException e) {
// ignore: interruption only shortens the randomized pause
}
}
}
};
Thread deleteThread = new Thread() {
public void run() {
// brief delay before deleting docs
try {
Thread.sleep(500);
} catch (InterruptedException e) {
// ignore: interruption only shortens the pause
}
for (int i = 0; i < 200; i++) {
try {
Thread.sleep(rand.nextInt(50) + 1);
} catch (InterruptedException e) {
// ignore: interruption only shortens the randomized pause
}
int ds = docsSent.get();
if (ds > 0) {
int docToDelete = rand.nextInt(ds) + 1;
if (!deletedDocs.contains(docToDelete)) {
delI(String.valueOf(docToDelete));
deletedDocs.add(docToDelete);
}
}
}
}
};
Thread committerThread = new Thread() {
public void run() {
try {
Thread.sleep(rand.nextInt(200) + 1);
} catch (InterruptedException e) {
// ignore: interruption only shortens the randomized pause
}
for (int i = 0; i < 20; i++) {
try {
cluster.getSolrClient().commit(COLLECTION);
} catch (Exception e) {
// ignore: commit failures are tolerated while the collection reloads
}
try {
Thread.sleep(rand.nextInt(100) + 100);
} catch (InterruptedException e) {
// ignore: interruption only shortens the randomized pause
}
}
}
};
docSenderThread.start();
reloaderThread.start();
committerThread.start();
deleteThread.start();
docSenderThread.join();
reloaderThread.join();
committerThread.join();
deleteThread.join();
cluster.getSolrClient().commit(COLLECTION);
log.info("Total of " + deletedDocs.size() + " docs deleted");
maxOnLeader = getMaxVersionFromIndex(leader);
maxOnReplica = getMaxVersionFromIndex(replica);
assertEquals("leader and replica should have same max version before reload", maxOnLeader, maxOnReplica);
reloadCollection(leader, COLLECTION);
maxOnLeader = getMaxVersionFromIndex(leader);
maxOnReplica = getMaxVersionFromIndex(replica);
assertEquals("leader and replica should have same max version after reload", maxOnLeader, maxOnReplica);
assertDocsExistInAllReplicas(leader, notLeaders, COLLECTION, 1, 1000, deletedDocs);
}
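getMaxVersionFromIndex and getVersionFromIndex are helpers defined elsewhere in DistributedVersionInfoTest and are not shown above. A minimal sketch of the idea, assuming the max is read by sorting on the _version_ field against the replica's core URL (the query shape is illustrative, not the test's actual implementation):
protected Long getMaxVersionFromIndex(Replica replica) throws Exception {
try (SolrClient client = getHttpSolrClient(replica.getCoreUrl())) {
// sketch: ask the core for its single highest _version_ value
SolrQuery query = new SolrQuery("*:*");
query.setRows(1);
query.setFields("_version_");
query.addSort("_version_", SolrQuery.ORDER.desc);
SolrDocumentList results = client.query(query).getResults();
return results.isEmpty() ? null : (Long) results.get(0).getFirstValue("_version_");
}
}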
Use of org.apache.solr.SolrTestCaseJ4.getHttpSolrClient in project lucene-solr by apache.
The class SolrExceptionTest, method testSolrException.
public void testSolrException() throws Throwable {
// test a connection to a Solr server that probably doesn't exist
// this is a very simple test and most of it is considered verified
// if the compiler won't let you through without the try/catch
boolean gotExpectedError = false;
CloseableHttpClient httpClient = null;
try {
// switched to a local address to avoid going out on the net, ns lookup issues, etc.
// set a 1ms timeout to let the connection fail faster.
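// null params: HttpClientUtil builds a client with its default configuration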
httpClient = HttpClientUtil.createClient(null);
try (HttpSolrClient client = getHttpSolrClient("http://[ff01::114]:11235/solr/", httpClient)) {
client.setConnectionTimeout(1);
SolrQuery query = new SolrQuery("test123");
client.query(query);
}
httpClient.close();
} catch (SolrServerException sse) {
gotExpectedError = true;
/***
assertTrue(UnknownHostException.class == sse.getRootCause().getClass()
//If one is using OpenDNS, then you don't get UnknownHostException, instead you get back that the query couldn't execute
|| (sse.getRootCause().getClass() == SolrException.class && ((SolrException) sse.getRootCause()).code() == 302 && sse.getMessage().equals("Error executing query")));
***/
} finally {
if (httpClient != null)
HttpClientUtil.close(httpClient);
}
assertTrue(gotExpectedError);
}
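The flag-based pattern above predates wider use of the test framework's expectThrows helper; an equivalent, more compact check could be sketched as follows (same unreachable endpoint and httpClient setup assumed):
SolrServerException sse = expectThrows(SolrServerException.class, () -> {
try (HttpSolrClient client = getHttpSolrClient("http://[ff01::114]:11235/solr/", httpClient)) {
// 1ms timeout so the unreachable address fails fast
client.setConnectionTimeout(1);
client.query(new SolrQuery("test123"));
}
});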
Use of org.apache.solr.SolrTestCaseJ4.getHttpSolrClient in project lucene-solr by apache.
The class TestSolrCloudSnapshots, method testSnapshots.
@Test
public void testSnapshots() throws Exception {
CloudSolrClient solrClient = cluster.getSolrClient();
String collectionName = "SolrCloudSnapshots";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", NUM_SHARDS, NUM_REPLICAS);
create.process(solrClient);
int nDocs = BackupRestoreUtils.indexDocs(cluster.getSolrClient(), collectionName, docsSeed);
BackupRestoreUtils.verifyDocs(nDocs, solrClient, collectionName);
String commitName = TestUtil.randomSimpleString(random(), 1, 5);
// Verify that snapshot creation works with replica failures.
boolean replicaFailures = usually();
Optional<String> stoppedCoreName = Optional.empty();
if (replicaFailures) {
// Here the assumption is that Solr will spread the replicas uniformly across nodes.
// If this is not true for some reason, then we will need to add some logic to find a
// node with a single replica.
this.cluster.getRandomJetty(random()).stop();
// Sleep a bit to allow the ZK watch to fire.
Thread.sleep(5000);
// Figure out if at least one replica is "down".
DocCollection collState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
for (Slice s : collState.getSlices()) {
for (Replica replica : s.getReplicas()) {
if (replica.getState() == State.DOWN) {
stoppedCoreName = Optional.of(replica.getCoreName());
}
}
}
}
int expectedCoresWithSnapshot = stoppedCoreName.isPresent() ? (NUM_SHARDS * NUM_REPLICAS) - 1 : (NUM_SHARDS * NUM_REPLICAS);
CollectionAdminRequest.CreateSnapshot createSnap = new CollectionAdminRequest.CreateSnapshot(collectionName, commitName);
createSnap.process(solrClient);
Collection<CollectionSnapshotMetaData> collectionSnaps = listCollectionSnapshots(solrClient, collectionName);
assertEquals(1, collectionSnaps.size());
CollectionSnapshotMetaData meta = collectionSnaps.iterator().next();
assertEquals(commitName, meta.getName());
assertEquals(CollectionSnapshotMetaData.SnapshotStatus.Successful, meta.getStatus());
assertEquals(expectedCoresWithSnapshot, meta.getReplicaSnapshots().size());
Map<String, CoreSnapshotMetaData> snapshotByCoreName = meta.getReplicaSnapshots().stream().collect(Collectors.toMap(CoreSnapshotMetaData::getCoreName, Function.identity()));
DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
assertEquals(2, collectionState.getActiveSlices().size());
for (Slice shard : collectionState.getActiveSlices()) {
assertEquals(2, shard.getReplicas().size());
for (Replica replica : shard.getReplicas()) {
if (stoppedCoreName.isPresent() && stoppedCoreName.get().equals(replica.getCoreName())) {
// We know that the snapshot is not created for this replica.
continue;
}
String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
assertTrue(snapshotByCoreName.containsKey(coreName));
CoreSnapshotMetaData coreSnapshot = snapshotByCoreName.get(coreName);
try (SolrClient adminClient = getHttpSolrClient(replicaBaseUrl)) {
Collection<SnapshotMetaData> snapshots = listCoreSnapshots(adminClient, coreName);
Optional<SnapshotMetaData> metaData = snapshots.stream().filter(x -> commitName.equals(x.getName())).findFirst();
assertTrue("Snapshot not created for core " + coreName, metaData.isPresent());
assertEquals(coreSnapshot.getIndexDirPath(), metaData.get().getIndexDirPath());
assertEquals(coreSnapshot.getGenerationNumber(), metaData.get().getGenerationNumber());
}
}
}
// Delete all documents.
{
solrClient.deleteByQuery(collectionName, "*:*");
solrClient.commit(collectionName);
BackupRestoreUtils.verifyDocs(0, solrClient, collectionName);
}
String backupLocation = createTempDir().toFile().getAbsolutePath();
String backupName = "mytestbackup";
String restoreCollectionName = collectionName + "_restored";
//Create a backup using the earlier created snapshot.
{
CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName).setLocation(backupLocation).setCommitName(commitName);
if (random().nextBoolean()) {
assertEquals(0, backup.process(solrClient).getStatus());
} else {
//async
assertEquals(RequestStatusState.COMPLETED, backup.processAndWait(solrClient, 30));
}
}
// Restore backup.
{
CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName).setLocation(backupLocation);
if (replicaFailures) {
// In this case one of the Solr servers would be down. Hence we need to increase
// max_shards_per_node property for restore command to succeed.
restore.setMaxShardsPerNode(2);
}
if (random().nextBoolean()) {
assertEquals(0, restore.process(solrClient).getStatus());
} else {
//async
assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(solrClient, 30));
}
AbstractDistribZkTestBase.waitForRecoveriesToFinish(restoreCollectionName, cluster.getSolrClient().getZkStateReader(), log.isDebugEnabled(), true, 30);
BackupRestoreUtils.verifyDocs(nDocs, solrClient, restoreCollectionName);
}
// Verify that snapshot deletion works correctly when one or more replicas containing the snapshot
// are deleted
boolean replicaDeletion = rarely();
if (replicaDeletion) {
CoreSnapshotMetaData replicaToDelete = null;
for (String shardId : meta.getShards()) {
List<CoreSnapshotMetaData> replicas = meta.getReplicaSnapshotsForShard(shardId);
if (replicas.size() > 1) {
int r_index = random().nextInt(replicas.size());
replicaToDelete = replicas.get(r_index);
}
}
if (replicaToDelete != null) {
collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
for (Slice s : collectionState.getSlices()) {
for (Replica r : s.getReplicas()) {
if (r.getCoreName().equals(replicaToDelete.getCoreName())) {
log.info("Deleting replica {}", r);
CollectionAdminRequest.DeleteReplica delReplica = CollectionAdminRequest.deleteReplica(collectionName, replicaToDelete.getShardId(), r.getName());
delReplica.process(solrClient);
// The replica deletion will cleanup the snapshot meta-data.
snapshotByCoreName.remove(r.getCoreName());
break;
}
}
}
}
}
// Delete snapshot
CollectionAdminRequest.DeleteSnapshot deleteSnap = new CollectionAdminRequest.DeleteSnapshot(collectionName, commitName);
deleteSnap.process(solrClient);
// Wait for a while so that the clusterstate.json updates are propagated to the client side.
Thread.sleep(2000);
collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
for (Slice shard : collectionState.getActiveSlices()) {
for (Replica replica : shard.getReplicas()) {
if (stoppedCoreName.isPresent() && stoppedCoreName.get().equals(replica.getCoreName())) {
// We know that the snapshot was not created for this replica.
continue;
}
String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
try (SolrClient adminClient = getHttpSolrClient(replicaBaseUrl)) {
Collection<SnapshotMetaData> snapshots = listCoreSnapshots(adminClient, coreName);
Optional<SnapshotMetaData> metaData = snapshots.stream().filter(x -> commitName.equals(x.getName())).findFirst();
assertFalse("Snapshot not deleted for core " + coreName, metaData.isPresent());
// Remove the entry for core if the snapshot is deleted successfully.
snapshotByCoreName.remove(coreName);
}
}
}
// Verify all core-level snapshots are deleted.
assertTrue("The cores remaining " + snapshotByCoreName, snapshotByCoreName.isEmpty());
assertTrue(listCollectionSnapshots(solrClient, collectionName).isEmpty());
// Verify that collection deletion results in proper cleanup of the snapshot metadata.
{
String commitName_2 = commitName + "_2";
CollectionAdminRequest.CreateSnapshot createSnap_2 = new CollectionAdminRequest.CreateSnapshot(collectionName, commitName_2);
assertEquals(0, createSnap_2.process(solrClient).getStatus());
Collection<CollectionSnapshotMetaData> collectionSnaps_2 = listCollectionSnapshots(solrClient, collectionName);
assertEquals(1, collectionSnaps_2.size());
assertEquals(commitName_2, collectionSnaps_2.iterator().next().getName());
// Delete collection
CollectionAdminRequest.Delete deleteCol = CollectionAdminRequest.deleteCollection(collectionName);
assertEquals(0, deleteCol.process(solrClient).getStatus());
assertTrue(SolrSnapshotManager.listSnapshots(solrClient.getZkStateReader().getZkClient(), collectionName).isEmpty());
}
}
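listCollectionSnapshots and listCoreSnapshots are helpers defined elsewhere in TestSolrCloudSnapshots. A rough sketch of the collection-level one, assuming the ListSnapshots collection API returns one NamedList per snapshot (the "snapshots" response key and the NamedList-based CollectionSnapshotMetaData constructor are assumptions):
@SuppressWarnings("unchecked")
private static Collection<CollectionSnapshotMetaData> listCollectionSnapshots(SolrClient client, String collectionName) throws Exception {
CollectionAdminRequest.ListSnapshots req = new CollectionAdminRequest.ListSnapshots(collectionName);
NamedList<?> resp = req.process(client).getResponse();
// the key name and per-entry shape below are assumptions, not the verified API
NamedList<?> snapshots = (NamedList<?>) resp.get("snapshots");
Collection<CollectionSnapshotMetaData> result = new ArrayList<>();
for (int i = 0; snapshots != null && i < snapshots.size(); i++) {
result.add(new CollectionSnapshotMetaData((NamedList<Object>) snapshots.getVal(i)));
}
return result;
}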
Use of org.apache.solr.SolrTestCaseJ4.getHttpSolrClient in project lucene-solr by apache.
The class TestSolrCoreSnapshots, method testIndexOptimization.
@Test
public void testIndexOptimization() throws Exception {
CloudSolrClient solrClient = cluster.getSolrClient();
String collectionName = "SolrCoreSnapshots_IndexOptimization";
CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
create.process(solrClient);
int nDocs = BackupRestoreUtils.indexDocs(cluster.getSolrClient(), collectionName, docsSeed);
DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
assertEquals(1, collectionState.getActiveSlices().size());
Slice shard = collectionState.getActiveSlices().iterator().next();
assertEquals(1, shard.getReplicas().size());
Replica replica = shard.getReplicas().iterator().next();
String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
String commitName = TestUtil.randomSimpleString(random(), 1, 5);
try (SolrClient adminClient = getHttpSolrClient(cluster.getJettySolrRunners().get(0).getBaseUrl().toString());
SolrClient masterClient = getHttpSolrClient(replica.getCoreUrl())) {
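// createSnapshot / deleteSnapshot are helpers elsewhere in this class, presumably wrapping the core admin CREATESNAPSHOT / DELETESNAPSHOT actions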
SnapshotMetaData metaData = createSnapshot(adminClient, coreName, commitName);
int numTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
for (int attempt = 0; attempt < numTests; attempt++) {
//Modify existing index before we call optimize.
if (nDocs > 0) {
//Delete a few docs
int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
for (int i = 0; i < numDeletes; i++) {
masterClient.deleteByQuery("id:" + i);
}
//Add a few more
int moreAdds = TestUtil.nextInt(random(), 1, 100);
for (int i = 0; i < moreAdds; i++) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", i + nDocs);
doc.addField("name", "name = " + (i + nDocs));
masterClient.add(doc);
}
masterClient.commit();
}
}
// Before invoking optimize command, verify that the index directory contains multiple commits (including the one we snapshotted earlier).
{
Collection<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
// Verify that at least one index commit is stored in this directory.
assertTrue(commits.size() > 0);
// Verify that the snapshot commit is present in this directory.
assertTrue(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
}
// Optimize the index.
masterClient.optimize(true, true, 1);
// After invoking optimize command, verify that the index directory contains multiple commits (including the one we snapshotted earlier).
{
List<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
// Verify that multiple index commits are stored in this directory.
assertTrue(commits.size() > 1);
// Verify that the snapshot commit is present in this directory.
assertTrue(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
}
// Delete the snapshot
deleteSnapshot(adminClient, coreName, metaData.getName());
// Add a few documents. Without this, the optimize command below does not take effect.
{
int moreAdds = TestUtil.nextInt(random(), 1, 100);
for (int i = 0; i < moreAdds; i++) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", i + nDocs);
doc.addField("name", "name = " + (i + nDocs));
masterClient.add(doc);
}
masterClient.commit();
}
// Optimize the index.
masterClient.optimize(true, true, 1);
// Verify that the index directory contains only 1 index commit (which is not the same as the snapshotted commit).
Collection<IndexCommit> commits = listCommits(metaData.getIndexDirPath());
assertEquals(1, commits.size());
assertFalse(commits.stream().filter(x -> x.getGeneration() == metaData.getGenerationNumber()).findFirst().isPresent());
}
}
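listCommits is another helper not shown here. A sketch of the idea using Lucene's DirectoryReader.listCommits (the directory type and the empty-index handling are assumptions):
private static List<IndexCommit> listCommits(String indexDirPath) throws Exception {
try (Directory dir = FSDirectory.open(Paths.get(indexDirPath))) {
return DirectoryReader.listCommits(dir);
} catch (IndexNotFoundException e) {
// a directory with no commits yet
return Collections.emptyList();
}
}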
Use of org.apache.solr.SolrTestCaseJ4.getHttpSolrClient in project lucene-solr by apache.
The class MockSolrEntityProcessor, method getDocs.
private SolrDocumentList getDocs(int start, int rows) {
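// build one page of mock results: numFound always reflects the full mock data set, while start/rows select the slice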
SolrDocumentList docs = new SolrDocumentList();
docs.setNumFound(docsData.size());
docs.setStart(start);
int endIndex = start + rows;
int end = docsData.size() < endIndex ? docsData.size() : endIndex;
for (int i = start; i < end; i++) {
SolrDocument doc = new SolrDocument();
SolrTestCaseJ4.Doc testDoc = docsData.get(i);
doc.addField("id", testDoc.id);
doc.addField("description", testDoc.getValues("description"));
docs.add(doc);
}
return docs;
}
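A caller pages through these mock results much as SolrEntityProcessor pages through a real Solr response; roughly (sketch only):
// advance start by the page size until the mock data is exhausted
int rows = 10;
for (int start = 0; start < docsData.size(); start += rows) {
SolrDocumentList page = getDocs(start, rows);
// page.size() <= rows, while page.getNumFound() stays docsData.size()
}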