Example usage of org.apache.solr.client.solrj.SolrClient in the Apache lucene-solr project: class AbstractFullDistribZkTestBase, method createCollection.
// TODO: Use CollectionAdminRequest#createCollection() instead of a raw request
/**
 * Creates a collection through the Collections API by issuing a raw
 * {@code /admin/collections?action=CREATE} request built from {@code collectionProps}.
 *
 * @param collectionInfos if non-null, receives a {@code [numShards, totalReplicaCount]}
 *        entry keyed by {@code collectionName}
 * @param collectionName  name of the collection to create
 * @param collectionProps properties copied verbatim into the request parameters;
 *        entries with a null value are skipped
 * @param client          client to send the request through; when null, a one-shot admin
 *        client is created against a randomly chosen jetty's base URL
 * @param confSetName     config set name, or null to omit {@code collection.configName}
 * @return the Collections API response wrapped in a {@link CollectionAdminResponse}
 * @throws SolrServerException on request failure
 * @throws IOException on communication failure
 */
protected CollectionAdminResponse createCollection(Map<String, List<Integer>> collectionInfos, String collectionName, Map<String, Object> collectionProps, SolrClient client, String confSetName) throws SolrServerException, IOException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("action", CollectionAction.CREATE.toString());
  for (Map.Entry<String, Object> entry : collectionProps.entrySet()) {
    if (entry.getValue() != null) {
      params.set(entry.getKey(), String.valueOf(entry.getValue()));
    }
  }
  // Shard count: an explicit numShards property wins; otherwise count the named shards.
  Integer numShards = (Integer) collectionProps.get(NUM_SLICES);
  if (numShards == null) {
    String shardNames = (String) collectionProps.get(SHARDS_PROP);
    numShards = StrUtils.splitSmart(shardNames, ',').size();
  }
  // NRT replica count fallback chain: nrtReplicas -> replicationFactor ->
  // overseer default -> 0.
  Integer numNrtReplicas = (Integer) collectionProps.get(ZkStateReader.NRT_REPLICAS);
  if (numNrtReplicas == null) {
    numNrtReplicas = (Integer) collectionProps.get(ZkStateReader.REPLICATION_FACTOR);
  }
  if (numNrtReplicas == null) {
    numNrtReplicas = (Integer) OverseerCollectionMessageHandler.COLL_PROPS.get(ZkStateReader.REPLICATION_FACTOR);
  }
  if (numNrtReplicas == null) {
    numNrtReplicas = Integer.valueOf(0);
  }
  Integer numTlogReplicas = (Integer) collectionProps.get(ZkStateReader.TLOG_REPLICAS);
  if (numTlogReplicas == null) {
    numTlogReplicas = Integer.valueOf(0);
  }
  Integer numPullReplicas = (Integer) collectionProps.get(ZkStateReader.PULL_REPLICAS);
  if (numPullReplicas == null) {
    numPullReplicas = Integer.valueOf(0);
  }
  if (confSetName != null) {
    params.set("collection.configName", confSetName);
  }
  // Always consume one random value here, even when an explicit client is supplied,
  // so the randomized-test seed stream stays reproducible regardless of the branch
  // taken below.
  int clientIndex = random().nextInt(2);
  List<Integer> list = new ArrayList<>();
  list.add(numShards);
  list.add(numNrtReplicas + numTlogReplicas + numPullReplicas);
  if (collectionInfos != null) {
    collectionInfos.put(collectionName, list);
  }
  params.set("name", collectionName);
  if ("1".equals(getStateFormat())) {
    // Parameterized logging avoids eager string concatenation.
    log.info("Creating collection with stateFormat=1: {}", collectionName);
    params.set(DocCollection.STATE_FORMAT, "1");
  }
  // Use the concrete type rather than the raw SolrRequest supertype.
  QueryRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");
  CollectionAdminResponse res = new CollectionAdminResponse();
  if (client == null) {
    final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(clientIndex));
    try (SolrClient adminClient = createNewSolrClient("", baseUrl)) {
      res.setResponse(adminClient.request(request));
    }
  } else {
    res.setResponse(client.request(request));
  }
  return res;
}
Example usage of org.apache.solr.client.solrj.SolrClient in the Apache lucene-solr project: class AbstractFullDistribZkTestBase, method queryAndCompareReplicas.
/**
 * Runs the given query against every live and active replica of the specified shard
 * and asserts that all replicas return identical results.
 *
 * @see #queryAndCompare
 */
public QueryResponse queryAndCompareReplicas(SolrParams params, String shard) throws Exception {
  // Refresh the shard -> jetty mappings before looking up the shard's replicas.
  updateMappingsFromZk(jettys, clients);
  List<CloudJettyRunner> jettiesForShard = shardToJetty.get(shard);
  assertNotNull("no jetties found for shard: " + shard, jettiesForShard);
  ZkStateReader stateReader = cloudClient.getZkStateReader();
  ArrayList<SolrClient> replicaClients = new ArrayList<>(7);
  for (CloudJettyRunner jetty : jettiesForShard) {
    ZkNodeProps replicaInfo = jetty.info;
    String nodeName = replicaInfo.getStr(ZkStateReader.NODE_NAME_PROP);
    boolean isLive = stateReader.getClusterState().liveNodesContain(nodeName);
    boolean isActive =
        Replica.State.getState(replicaInfo.getStr(ZkStateReader.STATE_PROP)) == Replica.State.ACTIVE;
    // Only query replicas that are both registered as live and marked active.
    if (isLive && isActive) {
      replicaClients.add(jetty.client.solrClient);
    }
  }
  return queryAndCompare(params, replicaClients);
}
Example usage of org.apache.solr.client.solrj.SolrClient in the Apache lucene-solr project: class StressHdfsTest, method createAndDeleteCollection.
/**
 * Creates a collection backed by HDFS, indexes random docs into it, deletes the
 * collection, then verifies every HDFS data directory was removed (SOLR-8913).
 *
 * <p>Randomly chooses between an "over-sharded" layout (2x shards, rep=1) and a
 * replicated layout (shards/2, rep=2).
 *
 * @throws Exception on any setup, indexing, or HDFS failure
 */
private void createAndDeleteCollection() throws Exception {
  boolean overshard = random().nextBoolean();
  int rep;
  int nShards;
  int maxReplicasPerNode;
  if (overshard) {
    nShards = getShardCount() * 2;
    maxReplicasPerNode = 8;
    rep = 1;
  } else {
    nShards = getShardCount() / 2;
    maxReplicasPerNode = 1;
    rep = 2;
    if (nShards == 0) {
      nShards = 1;
    }
  }
  createCollection(DELETE_DATA_DIR_COLLECTION, nShards, rep, maxReplicasPerNode);
  waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
  // data dirs should be in zk, SOLR-8913
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  Slice slice = clusterState.getSlice(DELETE_DATA_DIR_COLLECTION, "shard1");
  assertNotNull(clusterState.getSlices(DELETE_DATA_DIR_COLLECTION).toString(), slice);
  Collection<Replica> replicas = slice.getReplicas();
  for (Replica replica : replicas) {
    assertNotNull(replica.getProperties().toString(), replica.get("dataDir"));
    assertNotNull(replica.getProperties().toString(), replica.get("ulogDir"));
  }
  cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
  cloudClient.getZkStateReader().forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);
  // Wait until every shard has elected a leader before indexing.
  for (int i = 1; i < nShards + 1; i++) {
    cloudClient.getZkStateReader().getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 30000);
  }
  // Index a random number of docs through each client and collect each core's
  // HDFS data dir (reported by /admin/system) for the post-delete check.
  List<String> dataDirs = new ArrayList<>();
  int i = 0;
  for (SolrClient client : clients) {
    try (HttpSolrClient c = getHttpSolrClient(getBaseUrl(client) + "/" + DELETE_DATA_DIR_COLLECTION)) {
      int docCnt = random().nextInt(1000) + 1;
      for (int j = 0; j < docCnt; j++) {
        c.add(getDoc("id", i++, "txt_t", "just some random text for a doc"));
      }
      // Randomly exercise both commit flavors.
      if (random().nextBoolean()) {
        c.commit();
      } else {
        c.commit(true, true, true);
      }
      c.setConnectionTimeout(30000);
      NamedList<Object> response = c.query(new SolrQuery().setRequestHandler("/admin/system")).getResponse();
      NamedList<Object> coreInfo = (NamedList<Object>) response.get("core");
      String dataDir = (String) ((NamedList<Object>) coreInfo.get("directory")).get("data");
      dataDirs.add(dataDir);
    }
  }
  if (random().nextBoolean()) {
    cloudClient.deleteByQuery("*:*");
    cloudClient.commit();
    assertEquals(0, cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound());
  }
  cloudClient.commit();
  cloudClient.query(new SolrQuery("*:*"));
  // delete collection
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("action", CollectionAction.DELETE.toString());
  params.set("name", DELETE_DATA_DIR_COLLECTION);
  QueryRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");
  cloudClient.request(request);
  // Poll until the collection disappears from cluster state (or time out).
  final TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS);
  while (cloudClient.getZkStateReader().getClusterState().hasCollection(DELETE_DATA_DIR_COLLECTION)) {
    if (timeout.hasTimedOut()) {
      throw new AssertionError("Timeout waiting to see removed collection leave clusterstate");
    }
    Thread.sleep(200);
  }
  // check that all dirs are gone
  for (String dataDir : dataDirs) {
    Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
    // Disable the FS cache so each iteration gets a fresh, independently closable FS.
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    // try-with-resources: the original leaked the FileSystem when the assertion failed.
    try (FileSystem fs = FileSystem.get(new URI(HdfsTestUtil.getURI(dfsCluster)), conf)) {
      assertFalse("Data directory exists after collection removal : " + dataDir, fs.exists(new Path(dataDir)));
    }
  }
}
Example usage of org.apache.solr.client.solrj.SolrClient in the Apache lucene-solr project: class TestDelegationWithHadoopAuth, method getStatusCode.
/**
 * Sends an admin request using a delegation-token-authenticated client (randomly an
 * HTTP or a Cloud client) and returns the resulting HTTP status code.
 *
 * @param token  the delegation token to authenticate with
 * @param user   optional user name to send as a query parameter; null to omit
 * @param op     optional operation to send as a query parameter; null to omit
 * @param client existing client providing the base URL and response parser
 * @return {@code HttpStatus.SC_OK} on success, or the remote error's status code
 */
private int getStatusCode(String token, final String user, final String op, HttpSolrClient client) throws Exception {
  SolrClient delegationTokenClient;
  if (random().nextBoolean()) {
    delegationTokenClient = new HttpSolrClient.Builder(client.getBaseURL().toString())
        .withKerberosDelegationToken(token)
        .withResponseParser(client.getParser())
        .build();
  } else {
    delegationTokenClient = new CloudSolrClient.Builder()
        .withZkHost((cluster.getZkServer().getZkAddress()))
        .withLBHttpSolrClientBuilder(new LBHttpSolrClient.Builder()
            .withResponseParser(client.getParser())
            .withHttpSolrClientBuilder(new HttpSolrClient.Builder()
                .withKerberosDelegationToken(token)))
        .build();
  }
  // try-with-resources (resource alias) replaces the original try/finally close.
  try (SolrClient tokenClient = delegationTokenClient) {
    ModifiableSolrParams solrParams = new ModifiableSolrParams();
    if (user != null) {
      solrParams.set(PseudoAuthenticator.USER_NAME, user);
    }
    if (op != null) {
      solrParams.set("op", op);
    }
    SolrRequest req = getAdminRequest(solrParams);
    // Mirror the params onto the request's query-string parameter set, but only
    // register the set when at least one param is present (as the original did).
    Set<String> queryParams = new HashSet<>();
    if (user != null) {
      queryParams.add(PseudoAuthenticator.USER_NAME);
    }
    if (op != null) {
      queryParams.add("op");
    }
    if (!queryParams.isEmpty()) {
      req.setQueryParams(queryParams);
    }
    try {
      tokenClient.request(req, null);
      return HttpStatus.SC_OK;
    } catch (HttpSolrClient.RemoteSolrException re) {
      return re.code();
    }
  }
}
Example usage of org.apache.solr.client.solrj.SolrClient in the Apache lucene-solr project: class TestImpersonationWithHadoopAuth, method testProxyInvalidProxyUser.
@Test
public void testProxyInvalidProxyUser() throws Exception {
  try (SolrClient solrClient = newSolrClient()) {
    // Proxying in the wrong direction must be rejected by the server.
    HttpSolrClient.RemoteSolrException thrown = null;
    try {
      solrClient.request(getProxyRequest("bar", "anyHostAnyUser"));
    } catch (HttpSolrClient.RemoteSolrException ex) {
      thrown = ex;
    }
    assertNotNull("Expected RemoteSolrException", thrown);
    assertTrue(thrown.getMessage().contains(getExpectedGroupExMsg("bar", "anyHostAnyUser")));
  }
}
Aggregations