Use of org.apache.solr.client.solrj.SolrRequest in project lucene-solr by apache.
From the class AbstractFullDistribZkTestBase, method createCollection:
// TODO: Use CollectionAdminRequest#createCollection() instead of a raw request
protected CollectionAdminResponse createCollection(Map<String, List<Integer>> collectionInfos, String collectionName, Map<String, Object> collectionProps, SolrClient client, String confSetName) throws SolrServerException, IOException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("action", CollectionAction.CREATE.toString());
  for (Map.Entry<String, Object> entry : collectionProps.entrySet()) {
    if (entry.getValue() != null)
      params.set(entry.getKey(), String.valueOf(entry.getValue()));
  }
  Integer numShards = (Integer) collectionProps.get(NUM_SLICES);
  if (numShards == null) {
    String shardNames = (String) collectionProps.get(SHARDS_PROP);
    numShards = StrUtils.splitSmart(shardNames, ',').size();
  }
  Integer numNrtReplicas = (Integer) collectionProps.get(ZkStateReader.NRT_REPLICAS);
  if (numNrtReplicas == null) {
    numNrtReplicas = (Integer) collectionProps.get(ZkStateReader.REPLICATION_FACTOR);
  }
  if (numNrtReplicas == null) {
    numNrtReplicas = (Integer) OverseerCollectionMessageHandler.COLL_PROPS.get(ZkStateReader.REPLICATION_FACTOR);
  }
  if (numNrtReplicas == null) {
    numNrtReplicas = Integer.valueOf(0);
  }
  Integer numTlogReplicas = (Integer) collectionProps.get(ZkStateReader.TLOG_REPLICAS);
  if (numTlogReplicas == null) {
    numTlogReplicas = Integer.valueOf(0);
  }
  Integer numPullReplicas = (Integer) collectionProps.get(ZkStateReader.PULL_REPLICAS);
  if (numPullReplicas == null) {
    numPullReplicas = Integer.valueOf(0);
  }
  if (confSetName != null) {
    params.set("collection.configName", confSetName);
  }
  int clientIndex = random().nextInt(2);
  List<Integer> list = new ArrayList<>();
  list.add(numShards);
  list.add(numNrtReplicas + numTlogReplicas + numPullReplicas);
  if (collectionInfos != null) {
    collectionInfos.put(collectionName, list);
  }
  params.set("name", collectionName);
  if ("1".equals(getStateFormat())) {
    log.info("Creating collection with stateFormat=1: " + collectionName);
    params.set(DocCollection.STATE_FORMAT, "1");
  }
  SolrRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");
  CollectionAdminResponse res = new CollectionAdminResponse();
  if (client == null) {
    final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(clientIndex));
    try (SolrClient adminClient = createNewSolrClient("", baseUrl)) {
      res.setResponse(adminClient.request(request));
    }
  } else {
    res.setResponse(client.request(request));
  }
  return res;
}
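As the TODO above notes, the same CREATE call can also be issued through the typed SolrJ API instead of a hand-built QueryRequest. A minimal sketch, assuming an already-configured SolrClient named solrClient and placeholder collection and config-set names (the createCollection/process/getStatus calls mirror the usage shown later in this listing):

// Hedged sketch: typed equivalent of the raw CREATE request above.
// "my_collection" and "conf1" are placeholder values, not taken from the test.
CollectionAdminRequest.Create create =
    CollectionAdminRequest.createCollection("my_collection", "conf1", 2, 1);
CollectionAdminResponse createRsp = create.process(solrClient);
// a zero status indicates the request succeeded
assertEquals(0, createRsp.getStatus());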
Use of org.apache.solr.client.solrj.SolrRequest in project lucene-solr by apache.
From the class TestDelegationWithHadoopAuth, method getStatusCode:
private int getStatusCode(String token, final String user, final String op, HttpSolrClient client) throws Exception {
  SolrClient delegationTokenClient;
  if (random().nextBoolean())
    delegationTokenClient = new HttpSolrClient.Builder(client.getBaseURL().toString())
        .withKerberosDelegationToken(token)
        .withResponseParser(client.getParser())
        .build();
  else
    delegationTokenClient = new CloudSolrClient.Builder()
        .withZkHost(cluster.getZkServer().getZkAddress())
        .withLBHttpSolrClientBuilder(new LBHttpSolrClient.Builder()
            .withResponseParser(client.getParser())
            .withHttpSolrClientBuilder(new HttpSolrClient.Builder()
                .withKerberosDelegationToken(token)))
        .build();
  try {
    ModifiableSolrParams p = new ModifiableSolrParams();
    if (user != null)
      p.set(PseudoAuthenticator.USER_NAME, user);
    if (op != null)
      p.set("op", op);
    SolrRequest req = getAdminRequest(p);
    if (user != null || op != null) {
      Set<String> queryParams = new HashSet<>();
      if (user != null)
        queryParams.add(PseudoAuthenticator.USER_NAME);
      if (op != null)
        queryParams.add("op");
      req.setQueryParams(queryParams);
    }
    try {
      delegationTokenClient.request(req, null);
      return HttpStatus.SC_OK;
    } catch (HttpSolrClient.RemoteSolrException re) {
      return re.code();
    }
  } finally {
    delegationTokenClient.close();
  }
}
Use of org.apache.solr.client.solrj.SolrRequest in project lucene-solr by apache.
From the class SSLMigrationTest, method setUrlScheme:
private void setUrlScheme(String value) throws Exception {
  @SuppressWarnings("rawtypes")
  Map m = makeMap("action", CollectionAction.CLUSTERPROP.toString().toLowerCase(Locale.ROOT),
      "name", "urlScheme", "val", value);
  @SuppressWarnings("unchecked")
  SolrParams params = new MapSolrParams(m);
  SolrRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");
  List<String> urls = new ArrayList<String>();
  for (Replica replica : getReplicas()) {
    urls.add(replica.getStr(ZkStateReader.BASE_URL_PROP));
  }
  // Create a new SolrClient to configure a new HttpClient with the SSL config
  getLBHttpSolrClient(urls.toArray(new String[] {})).request(request);
}
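The same CLUSTERPROP update can usually be expressed with the typed SolrJ helper instead of a raw QueryRequest. A minimal sketch, assuming the CollectionAdminRequest.setClusterProperty helper is available in the SolrJ version in use and that cloudClient is an already-configured SolrClient:

// Hedged sketch: set the urlScheme cluster property via the typed SolrJ API.
CollectionAdminRequest.ClusterProp clusterProp =
    CollectionAdminRequest.setClusterProperty("urlScheme", "https");
clusterProp.process(cloudClient);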
Use of org.apache.solr.client.solrj.SolrRequest in project lucene-solr by apache.
From the class SharedFSAutoReplicaFailoverTest, method testBasics:
// very slow tests, especially since jetty is started and stopped
// serially
private void testBasics() throws Exception {
  String collection1 = "solrj_collection";
  Create createCollectionRequest = CollectionAdminRequest.createCollection(collection1, "conf1", 2, 2)
      .setMaxShardsPerNode(2).setRouterField("myOwnField").setAutoAddReplicas(true);
  CollectionAdminResponse response = createCollectionRequest.process(cloudClient);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  waitForRecoveriesToFinish(collection1, false);
  String collection2 = "solrj_collection2";
  createCollectionRequest = CollectionAdminRequest.createCollection(collection2, "conf1", 2, 2)
      .setMaxShardsPerNode(2).setRouterField("myOwnField").setAutoAddReplicas(false);
  CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrClient());
  assertEquals(0, response2.getStatus());
  assertTrue(response2.isSuccess());
  waitForRecoveriesToFinish(collection2, false);
  String collection3 = "solrj_collection3";
  createCollectionRequest = CollectionAdminRequest.createCollection(collection3, "conf1", 5, 1)
      .setMaxShardsPerNode(1).setRouterField("myOwnField").setAutoAddReplicas(true);
  CollectionAdminResponse response3 = createCollectionRequest.process(getCommonCloudSolrClient());
  assertEquals(0, response3.getStatus());
  assertTrue(response3.isSuccess());
  waitForRecoveriesToFinish(collection3, false);
  // a collection with only 1 replica per shard
  String collection4 = "solrj_collection4";
  createCollectionRequest = CollectionAdminRequest.createCollection(collection4, "conf1", 5, 1)
      .setMaxShardsPerNode(5).setRouterField("text").setAutoAddReplicas(true);
  CollectionAdminResponse response4 = createCollectionRequest.process(getCommonCloudSolrClient());
  assertEquals(0, response4.getStatus());
  assertTrue(response4.isSuccess());
  waitForRecoveriesToFinish(collection4, false);
  // all collections
  String[] collections = { collection1, collection2, collection3, collection4 };
  // add some documents to collection4
  final int numDocs = 100;
  // indexed but not committed
  addDocs(collection4, numDocs, false);
  // no result because not committed yet
  queryAndAssertResultSize(collection4, 0, 10000);
  assertUlogDir(collections);
  ChaosMonkey.stop(jettys.get(1));
  ChaosMonkey.stop(jettys.get(2));
  Thread.sleep(5000);
  assertTrue("Timeout waiting for all live and active",
      ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 120000));
  assertSliceAndReplicaCount(collection1);
  assertEquals(4, ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));
  assertTrue(ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection2) < 4);
  // collection3 has maxShardsPerNode=1; there are 4 standard jetties, one control jetty, and 2 nodes stopped
  ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection3, 3, 30000);
  // collection4 has 5 shards with maxShardsPerNode=5
  ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection4, 5, 30000);
  // commit so that all docs can be queried after failover
  cloudClient.commit();
  assertSingleReplicationAndShardSize(collection4, 5);
  queryAndAssertResultSize(collection4, numDocs, 10000);
  // collection1 should still be at 4
  assertEquals(4, ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));
  // and collection2 less than 4
  assertTrue(ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection2) < 4);
  assertUlogDir(collections);
  ChaosMonkey.stop(jettys);
  ChaosMonkey.stop(controlJetty);
  assertTrue("Timeout waiting for all not live",
      ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
  ChaosMonkey.start(jettys);
  ChaosMonkey.start(controlJetty);
  assertTrue("Timeout waiting for all live and active",
      ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 120000));
  assertSliceAndReplicaCount(collection1);
  assertSingleReplicationAndShardSize(collection3, 5);
  // all docs should be queried
  assertSingleReplicationAndShardSize(collection4, 5);
  queryAndAssertResultSize(collection4, numDocs, 10000);
  assertUlogDir(collections);
  int jettyIndex = random().nextInt(jettys.size());
  ChaosMonkey.stop(jettys.get(jettyIndex));
  ChaosMonkey.start(jettys.get(jettyIndex));
  assertTrue("Timeout waiting for all live and active",
      ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 60000));
  assertSliceAndReplicaCount(collection1);
  assertUlogDir(collections);
  assertSingleReplicationAndShardSize(collection3, 5);
  ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection3, 5, 30000);
  assertSingleReplicationAndShardSize(collection4, 5);
  ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection4, 5, 30000);
  // disable autoAddReplicas
  Map m = makeMap("action", CollectionParams.CollectionAction.CLUSTERPROP.toLower(),
      "name", ZkStateReader.AUTO_ADD_REPLICAS, "val", "false");
  SolrRequest request = new QueryRequest(new MapSolrParams(m));
  request.setPath("/admin/collections");
  cloudClient.request(request);
  int currentCount = ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1);
  ChaosMonkey.stop(jettys.get(3));
  // solr.xml has defined workLoopDelay=10s and waitAfterExpiration=10s,
  // hence waiting for 30 seconds to be on the safe side
  Thread.sleep(30000);
  // ensure that autoAddReplicas has not kicked in
  assertTrue(currentCount > ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));
  // enable autoAddReplicas
  m = makeMap("action", CollectionParams.CollectionAction.CLUSTERPROP.toLower(),
      "name", ZkStateReader.AUTO_ADD_REPLICAS);
  request = new QueryRequest(new MapSolrParams(m));
  request.setPath("/admin/collections");
  cloudClient.request(request);
  assertTrue("Timeout waiting for all live and active",
      ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 90000));
  assertSliceAndReplicaCount(collection1);
  assertUlogDir(collections);
  // restart all to test core saved state
  ChaosMonkey.stop(jettys);
  ChaosMonkey.stop(controlJetty);
  assertTrue("Timeout waiting for all not live",
      ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
  ChaosMonkey.start(jettys);
  ChaosMonkey.start(controlJetty);
  assertTrue("Timeout waiting for all live and active",
      ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 120000));
  assertSliceAndReplicaCount(collection1);
  assertUlogDir(collections);
  assertSliceAndReplicaCount(collection1);
  assertSingleReplicationAndShardSize(collection3, 5);
  // all docs should be queried
  assertSingleReplicationAndShardSize(collection4, 5);
  queryAndAssertResultSize(collection4, numDocs, 10000);
}
Use of org.apache.solr.client.solrj.SolrRequest in project lucene-solr by apache.
From the class TestCollectionAPI, method clusterStatusZNodeVersion:
private void clusterStatusZNodeVersion() throws Exception {
  String cname = "clusterStatusZNodeVersion";
  try (CloudSolrClient client = createCloudClient(null)) {
    CollectionAdminRequest.createCollection(cname, "conf1", 1, 1).setMaxShardsPerNode(1).process(client);
    waitForRecoveriesToFinish(cname, true);
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
    params.set("collection", cname);
    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    NamedList<Object> rsp = client.request(request);
    NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
    assertNotNull("Cluster state should not be null", cluster);
    NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
    assertNotNull("Collections should not be null in cluster state", collections);
    assertEquals(1, collections.size());
    Map<String, Object> collection = (Map<String, Object>) collections.get(cname);
    assertNotNull(collection);
    assertEquals("conf1", collection.get("configName"));
    Integer znodeVersion = (Integer) collection.get("znodeVersion");
    assertNotNull(znodeVersion);
    CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(cname, "shard1");
    addReplica.process(client);
    waitForRecoveriesToFinish(cname, true);
    rsp = client.request(request);
    cluster = (NamedList<Object>) rsp.get("cluster");
    collections = (NamedList<Object>) cluster.get("collections");
    collection = (Map<String, Object>) collections.get(cname);
    Integer newVersion = (Integer) collection.get("znodeVersion");
    assertNotNull(newVersion);
    assertTrue(newVersion > znodeVersion);
  }
}