Example usage of org.apache.solr.client.solrj.impl.CloudSolrClient in the Apache lucene-solr project, from TestCollectionStateWatchers#testPredicateFailureTimesOut:
@Test
public void testPredicateFailureTimesOut() throws Exception {
  // A predicate that never matches must cause waitForState() to time out.
  final CloudSolrClient solrClient = cluster.getSolrClient();
  expectThrows(TimeoutException.class, () -> {
    solrClient.waitForState("nosuchcollection", 1, TimeUnit.SECONDS,
        (liveNodes, collectionState) -> false);
  });
  // Once the timeout fires, the watcher registration must be cleaned up.
  waitFor("Watchers for collection should be removed after timeout", 1, TimeUnit.SECONDS,
      () -> solrClient.getZkStateReader().getStateWatchers("nosuchcollection").isEmpty());
}
Example usage of org.apache.solr.client.solrj.impl.CloudSolrClient in the Apache lucene-solr project, from TestHdfsCloudBackupRestore#testConfigBackupOnly:
protected void testConfigBackupOnly(String configName, String collectionName) throws Exception {
  // Take a config-only backup (no index files) and verify the resulting layout.
  final String backupName = "configonlybackup";
  final CloudSolrClient client = cluster.getSolrClient();

  CollectionAdminRequest.Backup backup = CollectionAdminRequest
      .backupCollection(collectionName, backupName)
      .setRepositoryName(getBackupRepoName())
      .setIndexBackupStrategy(CollectionAdminParams.NO_INDEX_BACKUP_STRATEGY);
  backup.process(client);

  // Point an HDFS repository at the same backup location used above.
  Map<String, String> repoConfig = new HashMap<>();
  repoConfig.put("location", "/backup");
  repoConfig.put("solr.hdfs.home", hdfsUri + "/solr");
  HdfsBackupRepository repository = new HdfsBackupRepository();
  repository.init(new NamedList<>(repoConfig));

  BackupManager backupMgr = new BackupManager(repository, client.getZkStateReader());
  URI backupBase = repository.createURI("/backup");

  // The backup properties must record the collection, backup name and config set.
  Properties backupProps = backupMgr.readBackupProperties(backupBase, backupName);
  assertNotNull(backupProps);
  assertEquals(collectionName, backupProps.getProperty(COLLECTION_NAME_PROP));
  assertEquals(backupName, backupProps.getProperty(BACKUP_NAME_PROP));
  assertEquals(configName, backupProps.getProperty(COLL_CONF));

  // The persisted collection state must round-trip through the repository.
  DocCollection restoredState = backupMgr.readCollectionState(backupBase, backupName, collectionName);
  assertNotNull(restoredState);
  assertEquals(collectionName, restoredState.getName());

  // The config set directory must exist under the backup's ZK state dir.
  URI configDir = repository.resolve(backupBase, backupName, ZK_STATE_DIR, CONFIG_STATE_DIR, configName);
  assertTrue(repository.exists(configDir));

  // Only the properties file and the ZK state dir should be present — no index data.
  Collection<String> expectedEntries = Arrays.asList(BACKUP_PROPS_FILE, ZK_STATE_DIR);
  URI backupDir = repository.resolve(backupBase, backupName);
  for (String entry : repository.listAll(backupDir)) {
    assertTrue(expectedEntries.contains(entry));
  }
}
Example usage of org.apache.solr.client.solrj.impl.CloudSolrClient in the Apache lucene-solr project, from TestLeaderElectionWithEmptyReplica#test:
@Test
public void test() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  solrClient.setDefaultCollection(COLLECTION_NAME);

  // Index a handful of documents so we can verify nothing is lost later.
  for (int i = 0; i < 10; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", String.valueOf(i));
    solrClient.add(doc);
  }
  solrClient.commit();

  // Find the Jetty instance hosting the shard1 leader by matching its port.
  Replica replica = solrClient.getZkStateReader().getLeaderRetry(COLLECTION_NAME, "shard1");
  JettySolrRunner replicaJetty = null;
  List<JettySolrRunner> jettySolrRunners = cluster.getJettySolrRunners();
  for (JettySolrRunner jettySolrRunner : jettySolrRunners) {
    int port = jettySolrRunner.getBaseUrl().getPort();
    // NOTE(review): a plain substring match on ":" + port can false-match a port
    // that is a prefix of another (e.g. ":898" inside ":8983"); the cluster's
    // port allocation makes this unlikely, but an exact comparison would be safer.
    if (replica.getStr(BASE_URL_PROP).contains(":" + port)) {
      replicaJetty = jettySolrRunner;
      break;
    }
  }
  // Fail fast with a clear message instead of an NPE inside ChaosMonkey.kill()
  // if the leader's node could not be located among the running Jettys.
  assertNotNull("Could not find the Jetty instance hosting the shard1 leader: "
      + replica.getStr(BASE_URL_PROP), replicaJetty);

  // Kill the leader, then add a replica asynchronously while it is down.
  ChaosMonkey.kill(replicaJetty);
  CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(COLLECTION_NAME, "shard1");
  addReplica.processAsync(solrClient);

  // Give the async ADDREPLICA a moment to start before reviving the old leader.
  Thread.sleep(1000);
  ChaosMonkey.start(replicaJetty);

  // Wait until the collection reports one shard with two active replicas.
  solrClient.waitForState(COLLECTION_NAME, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
      (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));

  // Every replica of shard1 must agree on the document set.
  assertConsistentReplicas(solrClient,
      solrClient.getZkStateReader().getClusterState().getSlice(COLLECTION_NAME, "shard1"));

  // Sanity check: all 10 documents survived the leader bounce.
  QueryResponse response = solrClient.query(new SolrQuery("*:*"));
  assertEquals("Indexed documents not found", 10, response.getResults().getNumFound());
}
Example usage of org.apache.solr.client.solrj.impl.CloudSolrClient in the Apache lucene-solr project, from TestSegmentSorting#createCollection:
@Before
public void createCollection() throws Exception {
  // Create a fresh collection (named after the running test) that uses the
  // sorting-merge-policy solrconfig, then wait for it to become healthy.
  final String collectionName = testName.getMethodName();
  final CloudSolrClient client = cluster.getSolrClient();

  final Map<String, String> collectionProperties = new HashMap<>();
  collectionProperties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-sortingmergepolicyfactory.xml");

  CollectionAdminRequest.Create createRequest = CollectionAdminRequest
      .createCollection(collectionName, configName, NUM_SHARDS, REPLICATION_FACTOR)
      .setProperties(collectionProperties);

  // Randomly exercise both the synchronous and the asynchronous creation paths.
  if (random().nextBoolean()) {
    assertTrue(createRequest.process(client).isSuccess());
  } else {
    // async
    assertEquals(RequestStatusState.COMPLETED, createRequest.processAndWait(client, 30));
  }

  ZkStateReader stateReader = client.getZkStateReader();
  AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, stateReader, true, true, 330);
  client.setDefaultCollection(collectionName);
}
Example usage of org.apache.solr.client.solrj.impl.CloudSolrClient in the Apache lucene-solr project, from TestMiniSolrCloudCluster#testCollectionCreateWithoutCoresThenDelete:
@Test
public void testCollectionCreateWithoutCoresThenDelete() throws Exception {
  // Create a collection with an empty createNodeSet and verify that it ends up
  // with zero cores across all of its slices.
  final String collectionName = "testSolrCloudCollectionWithoutCores";
  final MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
  final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
  try {
    assertNotNull(miniCluster.getZkServer());
    assertFalse(miniCluster.getJettySolrRunners().isEmpty());

    // Randomly exercise the async creation path as well as the sync one.
    final String asyncId = (random().nextBoolean() ? null
        : "asyncId(" + collectionName + ".create)=" + random().nextInt());
    createCollection(miniCluster, collectionName,
        OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY, asyncId, null, null);

    try (SolrZkClient zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(),
            AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
        ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
      zkStateReader.createClusterStateWatchersAndUpdate();
      // Wait for the collection to appear in the cluster state.
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);

      // With an empty node set, no replica/core may exist in any slice.
      int coreCount = 0;
      for (Slice slice : zkStateReader.getClusterState().getSlicesMap(collectionName).values()) {
        coreCount += slice.getReplicasMap().size();
      }
      assertEquals(0, coreCount);
    }
  } finally {
    miniCluster.shutdown();
  }
}
Aggregations