Use of org.apache.solr.client.solrj.response.QueryResponse in project lucene-solr by apache.
The class TestSolrCloudWithHadoopAuthPlugin, method testCollectionCreateSearchDelete.
protected void testCollectionCreateSearchDelete() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "testkerberoscollection";
  // create collection
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", NUM_SHARDS, REPLICATION_FACTOR);
  create.process(solrClient);
  // index and commit a single document
  SolrInputDocument doc = new SolrInputDocument();
  doc.setField("id", "1");
  solrClient.add(collectionName, doc);
  solrClient.commit(collectionName);
  // query and verify the document is found
  SolrQuery query = new SolrQuery();
  query.setQuery("*:*");
  QueryResponse rsp = solrClient.query(collectionName, query);
  assertEquals(1, rsp.getResults().getNumFound());
  // delete the collection and wait for it to disappear from ZooKeeper
  CollectionAdminRequest.Delete deleteReq = CollectionAdminRequest.deleteCollection(collectionName);
  deleteReq.process(solrClient);
  AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, solrClient.getZkStateReader(), true, true, 330);
}
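For reference, the QueryResponse used above exposes more than the hit count. A minimal sketch (assuming the same solrClient and collectionName, run before the delete step) that reads the indexed document back out field by field:

  SolrQuery byId = new SolrQuery("id:1");
  QueryResponse r = solrClient.query(collectionName, byId);
  // getResults() returns a SolrDocumentList; each SolrDocument is essentially a field-to-value map
  for (SolrDocument d : r.getResults()) {
    System.out.println(d.getFieldValue("id"));
  }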
Use of org.apache.solr.client.solrj.response.QueryResponse in project lucene-solr by apache.
The class KnnStream, method open.
public void open() throws IOException {
  cloudSolrClient = cache.getCloudSolrClient(zkHost);
  ModifiableSolrParams params = getParams(this.props);
  // fold any recognized MoreLikeThis parameters into the {!mlt} local params
  StringBuilder builder = new StringBuilder();
  for (String key : mltParams) {
    if (params.get(key) != null) {
      builder.append(" ").append(key).append("=").append(params.get(key));
      params.remove(key);
    }
  }
  // translate the stream's "k" parameter into the standard "rows" parameter
  String k = params.get("k");
  if (k != null) {
    params.add(ROWS, k);
    params.remove("k"); // remove the parameter by name, not by its value
  }
  params.add(Q, "{!mlt" + builder.toString() + "}" + id);
  QueryRequest request = new QueryRequest(params);
  try {
    QueryResponse response = request.process(cloudSolrClient, collection);
    SolrDocumentList docs = response.getResults();
    documentIterator = docs.iterator();
  } catch (Exception e) {
    throw new IOException(e);
  }
}
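The loop above folds any recognized MoreLikeThis parameters into a {!mlt} local-params query and maps the stream's k parameter onto rows. As a rough illustration (the parameter values here are hypothetical), props of qf=title and mindf=3 with k=10 for document id 42 would yield a request equivalent to q={!mlt qf=title mindf=3}42&rows=10. A sketch of issuing that query directly with SolrJ, assuming an existing CloudSolrClient client and a collection name:

  ModifiableSolrParams p = new ModifiableSolrParams();
  p.set("q", "{!mlt qf=title mindf=3}42"); // hypothetical MLT parameters and document id
  p.set("rows", "10");
  QueryResponse rsp = new QueryRequest(p).process(client, collection);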
Use of org.apache.solr.client.solrj.response.QueryResponse in project lucene-solr by apache.
The class AbstractFullDistribZkTestBase, method doQuery.
void doQuery(String expectedDocs, String... queryParams) throws Exception {
  Set<String> expectedIds = new HashSet<>(StrUtils.splitSmart(expectedDocs, ",", true));
  QueryResponse rsp = cloudClient.query(params(queryParams));
  Set<String> obtainedIds = new HashSet<>();
  for (SolrDocument doc : rsp.getResults()) {
    obtainedIds.add((String) doc.get("id"));
  }
  assertEquals(expectedIds, obtainedIds);
}
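A call site for this helper passes the expected ids as a comma-separated string, followed by raw query parameters as alternating key/value strings, e.g. (hypothetical values):

  doQuery("1,2,3", "q", "*:*", "fl", "id");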
Use of org.apache.solr.client.solrj.response.QueryResponse in project lucene-solr by apache.
The class TestCloudRecovery, method leaderRecoverFromLogOnStartupTest.
@Test
public void leaderRecoverFromLogOnStartupTest() throws Exception {
  AtomicInteger countReplayLog = new AtomicInteger(0);
  DirectUpdateHandler2.commitOnClose = false;
  UpdateLog.testing_logReplayFinishHook = countReplayLog::incrementAndGet;
  CloudSolrClient cloudClient = cluster.getSolrClient();
  cloudClient.add(COLLECTION, sdoc("id", "1"));
  cloudClient.add(COLLECTION, sdoc("id", "2"));
  cloudClient.add(COLLECTION, sdoc("id", "3"));
  cloudClient.add(COLLECTION, sdoc("id", "4"));
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("q", "*:*");
  QueryResponse resp = cloudClient.query(COLLECTION, params);
  // nothing was committed, so the documents exist only in the transaction log
  assertEquals(0, resp.getResults().getNumFound());
  ChaosMonkey.stop(cluster.getJettySolrRunners());
  assertTrue("Timeout waiting for all not live", ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
  ChaosMonkey.start(cluster.getJettySolrRunners());
  assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), COLLECTION, 120000));
  resp = cloudClient.query(COLLECTION, params);
  assertEquals(4, resp.getResults().getNumFound());
  // make sure all nodes recovered from the tlog
  if (onlyLeaderIndexes) {
    // leader election may be kicked off, so the two tlog replicas may each replay their tlog before becoming the new leader
    assertTrue(countReplayLog.get() >= 2);
  } else {
    assertEquals(4, countReplayLog.get());
  }
  // check the peerSync replication metrics on every core
  int replicationCount = 0;
  int errorsCount = 0;
  int skippedCount = 0;
  for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
    SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
    List<String> registryNames = manager.registryNames().stream().filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
    for (String registry : registryNames) {
      Map<String, Metric> metrics = manager.registry(registry).getMetrics();
      Timer timer = (Timer) metrics.get("REPLICATION.peerSync.time");
      Counter counter = (Counter) metrics.get("REPLICATION.peerSync.errors");
      Counter skipped = (Counter) metrics.get("REPLICATION.peerSync.skipped");
      replicationCount += timer.getCount();
      errorsCount += counter.getCount();
      skippedCount += skipped.getCount();
    }
  }
  if (onlyLeaderIndexes) {
    assertTrue(replicationCount >= 2);
  } else {
    assertEquals(2, replicationCount);
  }
}
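The sdoc(...) calls above use the SolrTestCaseJ4 helper that builds a SolrInputDocument from alternating field/value varargs; without the helper, each add is equivalent to:

  SolrInputDocument d = new SolrInputDocument();
  d.setField("id", "1");
  cloudClient.add(COLLECTION, d);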
Use of org.apache.solr.client.solrj.response.QueryResponse in project lucene-solr by apache.
The class TestCloudRecovery, method corruptedLogTest.
@Test
public void corruptedLogTest() throws Exception {
  AtomicInteger countReplayLog = new AtomicInteger(0);
  DirectUpdateHandler2.commitOnClose = false;
  UpdateLog.testing_logReplayFinishHook = countReplayLog::incrementAndGet;
  CloudSolrClient cloudClient = cluster.getSolrClient();
  cloudClient.add(COLLECTION, sdoc("id", "1000"));
  cloudClient.add(COLLECTION, sdoc("id", "1001"));
  for (int i = 0; i < 10; i++) {
    cloudClient.add(COLLECTION, sdoc("id", String.valueOf(i)));
  }
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("q", "*:*");
  QueryResponse resp = cloudClient.query(COLLECTION, params);
  // nothing was committed, so the documents exist only in the transaction log
  assertEquals(0, resp.getResults().getNumFound());
  // snapshot the newest tlog file of every core; the smallest file size serves as a header-size bound
  int logHeaderSize = Integer.MAX_VALUE;
  Map<File, byte[]> contentFiles = new HashMap<>();
  for (JettySolrRunner solrRunner : cluster.getJettySolrRunners()) {
    for (SolrCore solrCore : solrRunner.getCoreContainer().getCores()) {
      File tlogFolder = new File(solrCore.getUlogDir(), UpdateLog.TLOG_NAME);
      String[] tLogFiles = tlogFolder.list();
      Arrays.sort(tLogFiles);
      File lastTLogFile = new File(tlogFolder.getAbsolutePath() + "/" + tLogFiles[tLogFiles.length - 1]);
      byte[] tlogBytes = IOUtils.toByteArray(new FileInputStream(lastTLogFile));
      contentFiles.put(lastTLogFile, tlogBytes);
      logHeaderSize = Math.min(tlogBytes.length, logHeaderSize);
    }
  }
  ChaosMonkey.stop(cluster.getJettySolrRunners());
  assertTrue("Timeout waiting for all not live", ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
  // corrupt each tlog by rewriting it with a random number of trailing bytes dropped
  for (Map.Entry<File, byte[]> entry : contentFiles.entrySet()) {
    byte[] tlogBytes = entry.getValue();
    if (tlogBytes.length <= logHeaderSize)
      continue;
    FileOutputStream stream = new FileOutputStream(entry.getKey());
    int skipLastBytes = Math.max(random().nextInt(tlogBytes.length - logHeaderSize), 2);
    for (int i = 0; i < entry.getValue().length - skipLastBytes; i++) {
      stream.write(tlogBytes[i]);
    }
    stream.close();
  }
  ChaosMonkey.start(cluster.getJettySolrRunners());
  assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), COLLECTION, 120000));
  resp = cloudClient.query(COLLECTION, params);
  // make sure the cluster is still healthy
  assertTrue(resp.getResults().getNumFound() >= 2);
}
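The byte-by-byte rewrite above drops a random tail from each tlog to simulate a partially written log. A more direct equivalent (a sketch, not the test's actual code) would truncate each file in place with java.nio, given the same entry, tlogBytes, and skipLastBytes from the loop:

  try (FileChannel ch = FileChannel.open(entry.getKey().toPath(), StandardOpenOption.WRITE)) {
    ch.truncate(tlogBytes.length - skipLastBytes); // keep the header, cut off the tail
  }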