Use of org.apache.solr.client.solrj.request.QueryRequest in project lucene-solr by apache.
From the class BasicHttpSolrClientTest, the method testGetRawStream:
@Test
public void testGetRawStream() throws SolrServerException, IOException {
  CloseableHttpClient client = HttpClientUtil.createClient(null);
  try {
    // Passing a null ResponseParser makes the client return the raw
    // HTTP stream under the "stream" key instead of a parsed response.
    HttpSolrClient solrClient = getHttpSolrClient(jetty.getBaseUrl().toString() + "/collection1", client, null);
    QueryRequest req = new QueryRequest();
    NamedList response = solrClient.request(req);
    InputStream stream = (InputStream) response.get("stream");
    assertNotNull(stream);
    stream.close();
  } finally {
    HttpClientUtil.close(client);
  }
}
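For contrast, a minimal sketch of the same raw-stream pattern with explicit query parameters and try-with-resources on the stream. The method name rawStreamSketch is hypothetical; it assumes the same test-class context (the jetty field and the getHttpSolrClient helper) as the example above.

@Test
public void rawStreamSketch() throws Exception {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("q", "*:*");
  QueryRequest req = new QueryRequest(params);
  CloseableHttpClient httpClient = HttpClientUtil.createClient(null);
  try {
    // As above, the null ResponseParser is what exposes the raw stream.
    HttpSolrClient solrClient = getHttpSolrClient(jetty.getBaseUrl().toString() + "/collection1", httpClient, null);
    NamedList<Object> response = solrClient.request(req);
    try (InputStream stream = (InputStream) response.get("stream")) {
      assertNotNull(stream);
    }
  } finally {
    HttpClientUtil.close(httpClient);
  }
}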
Use of org.apache.solr.client.solrj.request.QueryRequest in project lucene-solr by apache.
From the class SolrExampleTests, the method testRealtimeGet:
@Test
public void testRealtimeGet() throws Exception {
  SolrClient client = getSolrClient();
  // Empty the database...
  client.deleteByQuery("*:*"); // delete everything!
  // Now add something...
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "DOCID");
  doc.addField("name", "hello");
  client.add(doc);
  // Since the transaction log is disabled in the example, we need to commit
  client.commit();
  SolrQuery q = new SolrQuery();
  q.setRequestHandler("/get");
  q.set("id", "DOCID");
  q.set("fl", "id,name,aaa:[value v=aaa]");
  // First try with the BinaryResponseParser
  QueryRequest req = new QueryRequest(q);
  req.setResponseParser(new BinaryResponseParser());
  QueryResponse rsp = req.process(client);
  SolrDocument out = (SolrDocument) rsp.getResponse().get("doc");
  assertEquals("DOCID", out.get("id"));
  assertEquals("hello", out.get("name"));
  assertEquals("aaa", out.get("aaa"));
  // Then with the XMLResponseParser
  req.setResponseParser(new XMLResponseParser());
  rsp = req.process(client);
  out = (SolrDocument) rsp.getResponse().get("doc");
  assertEquals("DOCID", out.get("id"));
  assertEquals("hello", out.get("name"));
  assertEquals("aaa", out.get("aaa"));
}
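The /get handler also has a typed convenience method on SolrClient itself; a minimal sketch of the same lookup, without the [value] field transformer, assuming the document added above is still present:

// Hypothetical continuation of testRealtimeGet: SolrClient.getById issues
// the same realtime-get request without hand-building a QueryRequest.
SolrDocument fetched = client.getById("DOCID");
assertEquals("hello", fetched.get("name"));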
Use of org.apache.solr.client.solrj.request.QueryRequest in project lucene-solr by apache.
From the class SolrTestCaseHS, the method getQueryResponse:
public static String getQueryResponse(SolrClient client, String wt, SolrParams params) throws Exception {
  if (client == null) {
    return getQueryResponse(wt, params);
  }
  ModifiableSolrParams p = new ModifiableSolrParams(params);
  p.set("wt", wt);
  String path = p.get("qt");
  p.remove("qt");
  p.set("indent", "true");
  QueryRequest query = new QueryRequest(p);
  if (path != null) {
    query.setPath(path);
  }
  // NoOpResponseParser skips parsing and returns the whole response
  // body as a single String under the "response" key.
  query.setResponseParser(new NoOpResponseParser(wt));
  NamedList<Object> rsp = client.request(query);
  return (String) rsp.get("response");
}
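A hypothetical call site for this helper; the query parameters here are placeholders chosen for illustration:

// Fetch the raw JSON body for a match-all query via the helper above.
ModifiableSolrParams p = new ModifiableSolrParams();
p.set("q", "*:*");
p.set("rows", "1");
String rawJson = SolrTestCaseHS.getQueryResponse(client, "json", p);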
Use of org.apache.solr.client.solrj.request.QueryRequest in project lucene-solr by apache.
From the class SSLMigrationTest, the method setUrlScheme:
private void setUrlScheme(String value) throws Exception {
  @SuppressWarnings("rawtypes")
  Map m = makeMap("action", CollectionAction.CLUSTERPROP.toString().toLowerCase(Locale.ROOT),
      "name", "urlScheme", "val", value);
  @SuppressWarnings("unchecked")
  SolrParams params = new MapSolrParams(m);
  SolrRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");
  List<String> urls = new ArrayList<>();
  for (Replica replica : getReplicas()) {
    urls.add(replica.getStr(ZkStateReader.BASE_URL_PROP));
  }
  // Create a new SolrClient so a fresh HttpClient picks up the SSL config
  getLBHttpSolrClient(urls.toArray(new String[] {})).request(request);
}
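Recent SolrJ versions also wrap this CLUSTERPROP call in a typed request; a minimal sketch, assuming a SolrClient named client is in scope:

// Sketch: typed equivalent of the hand-built CLUSTERPROP request above.
CollectionAdminRequest.ClusterProp prop =
    CollectionAdminRequest.setClusterProperty("urlScheme", "https");
prop.process(client);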
Use of org.apache.solr.client.solrj.request.QueryRequest in project lucene-solr by apache.
From the class SharedFSAutoReplicaFailoverTest, the method testBasics:
// Very slow test, especially since jetty is started and stopped serially.
private void testBasics() throws Exception {
  String collection1 = "solrj_collection";
  Create createCollectionRequest = CollectionAdminRequest.createCollection(collection1, "conf1", 2, 2)
      .setMaxShardsPerNode(2).setRouterField("myOwnField").setAutoAddReplicas(true);
  CollectionAdminResponse response = createCollectionRequest.process(cloudClient);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
  waitForRecoveriesToFinish(collection1, false);

  String collection2 = "solrj_collection2";
  createCollectionRequest = CollectionAdminRequest.createCollection(collection2, "conf1", 2, 2)
      .setMaxShardsPerNode(2).setRouterField("myOwnField").setAutoAddReplicas(false);
  CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrClient());
  assertEquals(0, response2.getStatus());
  assertTrue(response2.isSuccess());
  waitForRecoveriesToFinish(collection2, false);

  String collection3 = "solrj_collection3";
  createCollectionRequest = CollectionAdminRequest.createCollection(collection3, "conf1", 5, 1)
      .setMaxShardsPerNode(1).setRouterField("myOwnField").setAutoAddReplicas(true);
  CollectionAdminResponse response3 = createCollectionRequest.process(getCommonCloudSolrClient());
  assertEquals(0, response3.getStatus());
  assertTrue(response3.isSuccess());
  waitForRecoveriesToFinish(collection3, false);

  // collection4: only 1 replica per shard
  String collection4 = "solrj_collection4";
  createCollectionRequest = CollectionAdminRequest.createCollection(collection4, "conf1", 5, 1)
      .setMaxShardsPerNode(5).setRouterField("text").setAutoAddReplicas(true);
  CollectionAdminResponse response4 = createCollectionRequest.process(getCommonCloudSolrClient());
  assertEquals(0, response4.getStatus());
  assertTrue(response4.isSuccess());
  waitForRecoveriesToFinish(collection4, false);

  // all collections
  String[] collections = { collection1, collection2, collection3, collection4 };

  // add some documents to collection4
  final int numDocs = 100;
  addDocs(collection4, numDocs, false); // indexed, but not committed
  // no results yet because nothing is committed
  queryAndAssertResultSize(collection4, 0, 10000);
  assertUlogDir(collections);

  ChaosMonkey.stop(jettys.get(1));
  ChaosMonkey.stop(jettys.get(2));
  Thread.sleep(5000);
  assertTrue("Timeout waiting for all live and active",
      ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 120000));
  assertSliceAndReplicaCount(collection1);
  assertEquals(4, ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));
  assertTrue(ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection2) < 4);
  // collection3 has maxShardsPerNode=1; with 4 standard jetties plus one
  // control jetty and 2 nodes stopped, only 3 replicas can be live
  ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection3, 3, 30000);
  // collection4 has 5 shards and maxShardsPerNode=5, so all 5 replicas can come back
  ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection4, 5, 30000);

  // all docs should be queryable after failover
  cloudClient.commit(); // to query all docs
  assertSingleReplicationAndShardSize(collection4, 5);
  queryAndAssertResultSize(collection4, numDocs, 10000);
  // collection1 should still be at 4
  assertEquals(4, ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));
  // and collection2 at less than 4
  assertTrue(ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection2) < 4);
  assertUlogDir(collections);

  ChaosMonkey.stop(jettys);
  ChaosMonkey.stop(controlJetty);
  assertTrue("Timeout waiting for all not live",
      ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
  ChaosMonkey.start(jettys);
  ChaosMonkey.start(controlJetty);
  assertTrue("Timeout waiting for all live and active",
      ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 120000));
  assertSliceAndReplicaCount(collection1);
  assertSingleReplicationAndShardSize(collection3, 5);
  // all docs should be queryable
  assertSingleReplicationAndShardSize(collection4, 5);
  queryAndAssertResultSize(collection4, numDocs, 10000);
  assertUlogDir(collections);

  int jettyIndex = random().nextInt(jettys.size());
  ChaosMonkey.stop(jettys.get(jettyIndex));
  ChaosMonkey.start(jettys.get(jettyIndex));
  assertTrue("Timeout waiting for all live and active",
      ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 60000));
  assertSliceAndReplicaCount(collection1);
  assertUlogDir(collections);
  assertSingleReplicationAndShardSize(collection3, 5);
  ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection3, 5, 30000);
  assertSingleReplicationAndShardSize(collection4, 5);
  ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection4, 5, 30000);

  // disable autoAddReplicas cluster-wide
  Map m = makeMap("action", CollectionParams.CollectionAction.CLUSTERPROP.toLower(),
      "name", ZkStateReader.AUTO_ADD_REPLICAS, "val", "false");
  SolrRequest request = new QueryRequest(new MapSolrParams(m));
  request.setPath("/admin/collections");
  cloudClient.request(request);

  int currentCount = ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1);
  ChaosMonkey.stop(jettys.get(3));
  // solr.xml defines workLoopDelay=10s and waitAfterExpiration=10s,
  // hence wait 30 seconds to be on the safe side
  Thread.sleep(30000);
  // ensure that autoAddReplicas has not kicked in
  assertTrue(currentCount > ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));

  // re-enable autoAddReplicas: omitting "val" removes the cluster property
  m = makeMap("action", CollectionParams.CollectionAction.CLUSTERPROP.toLower(),
      "name", ZkStateReader.AUTO_ADD_REPLICAS);
  request = new QueryRequest(new MapSolrParams(m));
  request.setPath("/admin/collections");
  cloudClient.request(request);
  assertTrue("Timeout waiting for all live and active",
      ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 90000));
  assertSliceAndReplicaCount(collection1);
  assertUlogDir(collections);

  // restart everything to test core saved state
  ChaosMonkey.stop(jettys);
  ChaosMonkey.stop(controlJetty);
  assertTrue("Timeout waiting for all not live",
      ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
  ChaosMonkey.start(jettys);
  ChaosMonkey.start(controlJetty);
  assertTrue("Timeout waiting for all live and active",
      ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 120000));
  assertSliceAndReplicaCount(collection1);
  assertUlogDir(collections);
  assertSliceAndReplicaCount(collection1);
  assertSingleReplicationAndShardSize(collection3, 5);
  // all docs should be queryable
  assertSingleReplicationAndShardSize(collection4, 5);
  queryAndAssertResultSize(collection4, numDocs, 10000);
}
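The CLUSTERPROP toggling embedded in the test above is a reusable pattern on its own; a minimal standalone sketch, assuming a CloudSolrClient named client:

// Disable autoAddReplicas cluster-wide via the Collections API.
ModifiableSolrParams disable = new ModifiableSolrParams();
disable.set("action", CollectionParams.CollectionAction.CLUSTERPROP.toLower());
disable.set("name", ZkStateReader.AUTO_ADD_REPLICAS);
disable.set("val", "false");
QueryRequest disableReq = new QueryRequest(disable);
disableReq.setPath("/admin/collections");
client.request(disableReq);

// Re-enable by deleting the property: omit "val" so the override is removed
// and the per-collection autoAddReplicas settings apply again.
ModifiableSolrParams enable = new ModifiableSolrParams();
enable.set("action", CollectionParams.CollectionAction.CLUSTERPROP.toLower());
enable.set("name", ZkStateReader.AUTO_ADD_REPLICAS);
QueryRequest enableReq = new QueryRequest(enable);
enableReq.setPath("/admin/collections");
client.request(enableReq);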