Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
In the class TestMiniSolrCloudCluster, method testStopAllStartAll:
@Test
public void testStopAllStartAll() throws Exception {
  final String collectionName = "testStopAllStartAllCollection";
  final MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
  try {
    assertNotNull(miniCluster.getZkServer());
    List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
    assertEquals(NUM_SERVERS, jettys.size());
    for (JettySolrRunner jetty : jettys) {
      assertTrue(jetty.isRunning());
    }
    createCollection(miniCluster, collectionName, null, null, Boolean.TRUE, null);
    final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
    cloudSolrClient.setDefaultCollection(collectionName);
    final SolrQuery query = new SolrQuery("*:*");
    final SolrInputDocument doc = new SolrInputDocument();
    try (SolrZkClient zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
        ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
      zkStateReader.createClusterStateWatchersAndUpdate();
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
      // modify collection
      final int numDocs = 1 + random().nextInt(10);
      for (int ii = 1; ii <= numDocs; ++ii) {
        doc.setField("id", "" + ii);
        cloudSolrClient.add(doc);
        if (ii * 2 == numDocs)
          cloudSolrClient.commit();
      }
      cloudSolrClient.commit();
      // query collection
      {
        final QueryResponse rsp = cloudSolrClient.query(query);
        assertEquals(numDocs, rsp.getResults().getNumFound());
      }
      // the test itself
      zkStateReader.forceUpdateCollection(collectionName);
      final ClusterState clusterState = zkStateReader.getClusterState();
      final HashSet<Integer> leaderIndices = new HashSet<Integer>();
      final HashSet<Integer> followerIndices = new HashSet<Integer>();
      {
        final HashMap<String, Boolean> shardLeaderMap = new HashMap<String, Boolean>();
        for (final Slice slice : clusterState.getSlices(collectionName)) {
          for (final Replica replica : slice.getReplicas()) {
            shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
          }
          shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
        }
        for (int ii = 0; ii < jettys.size(); ++ii) {
          final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
          final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
          final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
          if (Boolean.TRUE.equals(isLeader)) {
            leaderIndices.add(new Integer(ii));
          } else if (Boolean.FALSE.equals(isLeader)) {
            followerIndices.add(new Integer(ii));
          }
          // else neither leader nor follower i.e. node without a replica (for our collection)
        }
      }
      final List<Integer> leaderIndicesList = new ArrayList<Integer>(leaderIndices);
      final List<Integer> followerIndicesList = new ArrayList<Integer>(followerIndices);
      // first stop the followers (in no particular order)
      Collections.shuffle(followerIndicesList, random());
      for (Integer ii : followerIndicesList) {
        if (!leaderIndices.contains(ii)) {
          miniCluster.stopJettySolrRunner(jettys.get(ii.intValue()));
        }
      }
      // then stop the leaders (again in no particular order)
      Collections.shuffle(leaderIndicesList, random());
      for (Integer ii : leaderIndicesList) {
        miniCluster.stopJettySolrRunner(jettys.get(ii.intValue()));
      }
      // calculate restart order
      final List<Integer> restartIndicesList = new ArrayList<Integer>();
      Collections.shuffle(leaderIndicesList, random());
      restartIndicesList.addAll(leaderIndicesList);
      Collections.shuffle(followerIndicesList, random());
      restartIndicesList.addAll(followerIndicesList);
      if (random().nextBoolean())
        Collections.shuffle(restartIndicesList, random());
      // and then restart jettys in that order
      for (Integer ii : restartIndicesList) {
        final JettySolrRunner jetty = jettys.get(ii.intValue());
        if (!jetty.isRunning()) {
          miniCluster.startJettySolrRunner(jetty);
          assertTrue(jetty.isRunning());
        }
      }
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
      zkStateReader.forceUpdateCollection(collectionName);
      // re-query collection
      {
        final QueryResponse rsp = cloudSolrClient.query(query);
        assertEquals(numDocs, rsp.getResults().getNumFound());
      }
    }
  } finally {
    miniCluster.shutdown();
  }
}
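The node names read from the ZooKeeper cluster state end in "_solr", while JettySolrRunner.getBaseUrl() (with the protocol prefix stripped) ends in "/solr"; the replace("_solr", "/solr") calls above bridge the two forms so that cluster-state replicas can be matched to local jetty instances. A small illustration of the mapping, with a made-up host and port:

// Illustration only: the host and port are invented for the example.
String nodeName = "127.0.0.1:51234_solr";        // node name as stored in ZooKeeper cluster state
String fromBaseUrl = "127.0.0.1:51234/solr";     // jetty.getBaseUrl() with the "http://" prefix removed
assertEquals(fromBaseUrl, nodeName.replace("_solr", "/solr"));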
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
In the class TestMiniSolrCloudCluster, method testCollectionCreateSearchDelete:
@Test
public void testCollectionCreateSearchDelete() throws Exception {
  final String collectionName = "testcollection";
  MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
  final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
  try {
    assertNotNull(miniCluster.getZkServer());
    List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
    assertEquals(NUM_SERVERS, jettys.size());
    for (JettySolrRunner jetty : jettys) {
      assertTrue(jetty.isRunning());
    }
    // shut down a server
    log.info("#### Stopping a server");
    JettySolrRunner stoppedServer = miniCluster.stopJettySolrRunner(0);
    assertTrue(stoppedServer.isStopped());
    assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
    // create a server
    log.info("#### Starting a server");
    JettySolrRunner startedServer = miniCluster.startJettySolrRunner();
    assertTrue(startedServer.isRunning());
    assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
    // create collection
    log.info("#### Creating a collection");
    final String asyncId = (random().nextBoolean() ? null : "asyncId(" + collectionName + ".create)=" + random().nextInt());
    createCollection(miniCluster, collectionName, null, asyncId, null, null);
    ZkStateReader zkStateReader = miniCluster.getSolrClient().getZkStateReader();
    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
    // modify/query collection
    log.info("#### Updating and querying collection");
    cloudSolrClient.setDefaultCollection(collectionName);
    SolrInputDocument doc = new SolrInputDocument();
    doc.setField("id", "1");
    cloudSolrClient.add(doc);
    cloudSolrClient.commit();
    SolrQuery query = new SolrQuery();
    query.setQuery("*:*");
    QueryResponse rsp = cloudSolrClient.query(query);
    assertEquals(1, rsp.getResults().getNumFound());
    // remove a server not hosting any replicas
    zkStateReader.forceUpdateCollection(collectionName);
    ClusterState clusterState = zkStateReader.getClusterState();
    HashMap<String, JettySolrRunner> jettyMap = new HashMap<String, JettySolrRunner>();
    for (JettySolrRunner jetty : miniCluster.getJettySolrRunners()) {
      String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
      jettyMap.put(key, jetty);
    }
    Collection<Slice> slices = clusterState.getSlices(collectionName);
    // track the servers not hosting replicas
    for (Slice slice : slices) {
      jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
      for (Replica replica : slice.getReplicas()) {
        jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
      }
    }
    assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
    log.info("#### Stopping a server");
    JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
    jettys = miniCluster.getJettySolrRunners();
    for (int i = 0; i < jettys.size(); ++i) {
      if (jettys.get(i).equals(jettyToStop)) {
        miniCluster.stopJettySolrRunner(i);
        assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
      }
    }
    // re-create a server (to restore original NUM_SERVERS count)
    log.info("#### Starting a server");
    startedServer = miniCluster.startJettySolrRunner(jettyToStop);
    assertTrue(startedServer.isRunning());
    assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
    CollectionAdminRequest.deleteCollection(collectionName).process(miniCluster.getSolrClient());
    // create it again
    String asyncId2 = (random().nextBoolean() ? null : "asyncId(" + collectionName + ".create)=" + random().nextInt());
    createCollection(miniCluster, collectionName, null, asyncId2, null, null);
    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
    // check that there's no left-over state
    assertEquals(0, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    cloudSolrClient.add(doc);
    cloudSolrClient.commit();
    assertEquals(1, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
  } finally {
    miniCluster.shutdown();
  }
}
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
In the class TestCollectionAPI, method clusterStatusWithRouteKey:
private void clusterStatusWithRouteKey() throws IOException, SolrServerException {
  try (CloudSolrClient client = createCloudClient(DEFAULT_COLLECTION)) {
    SolrInputDocument doc = new SolrInputDocument();
    // goes to shard2. see ShardRoutingTest for details
    doc.addField("id", "a!123");
    client.add(doc);
    client.commit();
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
    params.set("collection", DEFAULT_COLLECTION);
    params.set(ShardParams._ROUTE_, "a!");
    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    NamedList<Object> rsp = client.request(request);
    NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
    assertNotNull("Cluster state should not be null", cluster);
    NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
    assertNotNull("Collections should not be null in cluster state", collections);
    assertNotNull(collections.get(DEFAULT_COLLECTION));
    assertEquals(1, collections.size());
    Map<String, Object> collection = (Map<String, Object>) collections.get(DEFAULT_COLLECTION);
    assertEquals("conf1", collection.get("configName"));
    Map<String, Object> shardStatus = (Map<String, Object>) collection.get("shards");
    assertEquals(1, shardStatus.size());
    Map<String, Object> selectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD2);
    assertNotNull(selectedShardStatus);
  }
}
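The "a!" prefix on the document id is a composite-ID routing key, which is why the CLUSTERSTATUS response above is filtered down to the single shard (SHARD2) that owns it. The same _route_ parameter can also restrict an ordinary query to the owning shard. A minimal sketch under that assumption (the routedQuery helper below is illustrative, not part of the test):

// Illustrative helper, not part of TestCollectionAPI: query only the shard(s) owning the "a!" route key.
static QueryResponse routedQuery(CloudSolrClient client, String collection) throws SolrServerException, IOException {
  SolrQuery query = new SolrQuery("*:*");
  query.set(ShardParams._ROUTE_, "a!"); // limit the request to the shard that owns this composite-ID prefix
  return client.query(collection, query);
}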
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
In the class TestConfigSetsAPI, method scriptRequest:
public void scriptRequest(String collection) throws SolrServerException, IOException {
  SolrClient client = solrCluster.getSolrClient();
  SolrInputDocument doc = sdoc("id", "4055", "subject", "Solr");
  client.add(collection, doc);
  client.commit(collection);
  assertEquals("42", client.query(collection, params("q", "*:*")).getResults().get(0).get("script_added_i"));
}
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
In the class TestPullReplica, method addDocs:
private void addDocs(int numDocs) throws SolrServerException, IOException {
  List<SolrInputDocument> docs = new ArrayList<>(numDocs);
  for (int i = 0; i < numDocs; i++) {
    docs.add(new SolrInputDocument("id", String.valueOf(i), "fieldName_s", String.valueOf(i)));
  }
  cluster.getSolrClient().add(collectionName, docs);
  cluster.getSolrClient().commit(collectionName);
}