Use of org.apache.solr.client.solrj.response.QueryResponse in project lucene-solr by apache.
Class TestCloudRecovery, method leaderRecoverFromLogOnStartupTest.
@Test
public void leaderRecoverFromLogOnStartupTest() throws Exception {
  AtomicInteger countReplayLog = new AtomicInteger(0);
  // disable commit-on-close so the added documents stay only in the tlog when nodes shut down
  DirectUpdateHandler2.commitOnClose = false;
  UpdateLog.testing_logReplayFinishHook = countReplayLog::incrementAndGet;
  CloudSolrClient cloudClient = cluster.getSolrClient();
  cloudClient.add(COLLECTION, sdoc("id", "1"));
  cloudClient.add(COLLECTION, sdoc("id", "2"));
  cloudClient.add(COLLECTION, sdoc("id", "3"));
  cloudClient.add(COLLECTION, sdoc("id", "4"));
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("q", "*:*");
  QueryResponse resp = cloudClient.query(COLLECTION, params);
  // nothing has been committed, so the query must find no documents yet
  assertEquals(0, resp.getResults().getNumFound());
  ChaosMonkey.stop(cluster.getJettySolrRunners());
  assertTrue("Timeout waiting for all not live", ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
  ChaosMonkey.start(cluster.getJettySolrRunners());
  assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), COLLECTION, 120000));
  resp = cloudClient.query(COLLECTION, params);
  assertEquals(4, resp.getResults().getNumFound());
  // Make sure all nodes recovered from the tlog
  if (onlyLeaderIndexes) {
    // Leader election can be kicked off, so 2 tlog replicas will replay their tlogs before becoming the new leader
    assertTrue(countReplayLog.get() >= 2);
  } else {
    assertEquals(4, countReplayLog.get());
  }
  // check metrics
  int replicationCount = 0;
  int errorsCount = 0;
  int skippedCount = 0;
  for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
    SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
    List<String> registryNames = manager.registryNames().stream().filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
    for (String registry : registryNames) {
      Map<String, Metric> metrics = manager.registry(registry).getMetrics();
      Timer timer = (Timer) metrics.get("REPLICATION.peerSync.time");
      Counter counter = (Counter) metrics.get("REPLICATION.peerSync.errors");
      Counter skipped = (Counter) metrics.get("REPLICATION.peerSync.skipped");
      replicationCount += timer.getCount();
      errorsCount += counter.getCount();
      skippedCount += skipped.getCount();
    }
  }
  if (onlyLeaderIndexes) {
    assertTrue(replicationCount >= 2);
  } else {
    assertEquals(2, replicationCount);
  }
}
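The metrics assertions above walk every solr.core.* registry on each JettySolrRunner and cast the named entries to Timer/Counter. Below is a distilled sketch of that lookup for a single node; the helper class and method names are illustrative, only the registry prefix and metric key come from the test.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import com.codahale.metrics.Metric;
import com.codahale.metrics.Timer;

import org.apache.solr.metrics.SolrMetricManager;

class PeerSyncMetrics {

  /** Sums the peerSync timer count across all solr.core.* registries of one node. */
  static long peerSyncCount(SolrMetricManager manager) {
    long total = 0;
    List<String> coreRegistries = manager.registryNames().stream()
        .filter(s -> s.startsWith("solr.core."))
        .collect(Collectors.toList());
    for (String registry : coreRegistries) {
      Map<String, Metric> metrics = manager.registry(registry).getMetrics();
      // guard against registries that do not expose this metric
      Timer timer = (Timer) metrics.get("REPLICATION.peerSync.time");
      if (timer != null) {
        total += timer.getCount();
      }
    }
    return total;
  }
}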
Use of org.apache.solr.client.solrj.response.QueryResponse in project lucene-solr by apache.
Class TestCloudRecovery, method corruptedLogTest.
@Test
public void corruptedLogTest() throws Exception {
  AtomicInteger countReplayLog = new AtomicInteger(0);
  DirectUpdateHandler2.commitOnClose = false;
  UpdateLog.testing_logReplayFinishHook = countReplayLog::incrementAndGet;
  CloudSolrClient cloudClient = cluster.getSolrClient();
  cloudClient.add(COLLECTION, sdoc("id", "1000"));
  cloudClient.add(COLLECTION, sdoc("id", "1001"));
  for (int i = 0; i < 10; i++) {
    cloudClient.add(COLLECTION, sdoc("id", String.valueOf(i)));
  }
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("q", "*:*");
  QueryResponse resp = cloudClient.query(COLLECTION, params);
  assertEquals(0, resp.getResults().getNumFound());
  // snapshot the newest tlog of every core; the smallest file size is used as an
  // approximation of the tlog header size so the truncation below never removes the header
  int logHeaderSize = Integer.MAX_VALUE;
  Map<File, byte[]> contentFiles = new HashMap<>();
  for (JettySolrRunner solrRunner : cluster.getJettySolrRunners()) {
    for (SolrCore solrCore : solrRunner.getCoreContainer().getCores()) {
      File tlogFolder = new File(solrCore.getUlogDir(), UpdateLog.TLOG_NAME);
      String[] tLogFiles = tlogFolder.list();
      Arrays.sort(tLogFiles);
      File lastTLogFile = new File(tlogFolder.getAbsolutePath() + "/" + tLogFiles[tLogFiles.length - 1]);
      byte[] tlogBytes;
      try (FileInputStream fis = new FileInputStream(lastTLogFile)) {
        tlogBytes = IOUtils.toByteArray(fis);
      }
      contentFiles.put(lastTLogFile, tlogBytes);
      logHeaderSize = Math.min(tlogBytes.length, logHeaderSize);
    }
  }
  ChaosMonkey.stop(cluster.getJettySolrRunners());
  assertTrue("Timeout waiting for all not live", ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
  // corrupt each tlog by dropping a random number of trailing bytes, keeping at least the header
  for (Map.Entry<File, byte[]> entry : contentFiles.entrySet()) {
    byte[] tlogBytes = entry.getValue();
    if (tlogBytes.length <= logHeaderSize)
      continue;
    int skipLastBytes = Math.max(random().nextInt(tlogBytes.length - logHeaderSize), 2);
    try (FileOutputStream stream = new FileOutputStream(entry.getKey())) {
      for (int i = 0; i < entry.getValue().length - skipLastBytes; i++) {
        stream.write(tlogBytes[i]);
      }
    }
  }
  ChaosMonkey.start(cluster.getJettySolrRunners());
  assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), COLLECTION, 120000));
  resp = cloudClient.query(COLLECTION, params);
  // Make sure the cluster is still healthy
  assertTrue(resp.getResults().getNumFound() >= 2);
}
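The corruption step in this test rewrites each captured tlog minus a random number of trailing bytes while never cutting into the (approximate) header. A self-contained sketch of the same truncation using java.nio follows; the path and size arguments are placeholders, not values from the test.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;

class TlogTailTruncator {

  /** Drops the last bytesToDrop bytes of the file but always keeps at least headerSize bytes. */
  static void truncateTail(Path tlog, int headerSize, int bytesToDrop) throws IOException {
    byte[] original = Files.readAllBytes(tlog);
    int newLength = Math.max(headerSize, original.length - bytesToDrop);
    Files.write(tlog, Arrays.copyOf(original, newLength));
  }
}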
Use of org.apache.solr.client.solrj.response.QueryResponse in project lucene-solr by apache.
Class TestMiniSolrCloudCluster, method testStopAllStartAll.
@Test
public void testStopAllStartAll() throws Exception {
  final String collectionName = "testStopAllStartAllCollection";
  final MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
  try {
    assertNotNull(miniCluster.getZkServer());
    List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
    assertEquals(NUM_SERVERS, jettys.size());
    for (JettySolrRunner jetty : jettys) {
      assertTrue(jetty.isRunning());
    }
    createCollection(miniCluster, collectionName, null, null, Boolean.TRUE, null);
    final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
    cloudSolrClient.setDefaultCollection(collectionName);
    final SolrQuery query = new SolrQuery("*:*");
    final SolrInputDocument doc = new SolrInputDocument();
    try (SolrZkClient zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
         ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
      zkStateReader.createClusterStateWatchersAndUpdate();
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
      // modify collection
      final int numDocs = 1 + random().nextInt(10);
      for (int ii = 1; ii <= numDocs; ++ii) {
        doc.setField("id", "" + ii);
        cloudSolrClient.add(doc);
        if (ii * 2 == numDocs)
          cloudSolrClient.commit();
      }
      cloudSolrClient.commit();
      // query collection
      {
        final QueryResponse rsp = cloudSolrClient.query(query);
        assertEquals(numDocs, rsp.getResults().getNumFound());
      }
      // the test itself
      zkStateReader.forceUpdateCollection(collectionName);
      final ClusterState clusterState = zkStateReader.getClusterState();
      final HashSet<Integer> leaderIndices = new HashSet<Integer>();
      final HashSet<Integer> followerIndices = new HashSet<Integer>();
      {
        final HashMap<String, Boolean> shardLeaderMap = new HashMap<String, Boolean>();
        for (final Slice slice : clusterState.getSlices(collectionName)) {
          for (final Replica replica : slice.getReplicas()) {
            shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
          }
          shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
        }
        for (int ii = 0; ii < jettys.size(); ++ii) {
          final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
          final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
          final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
          if (Boolean.TRUE.equals(isLeader)) {
            leaderIndices.add(ii);
          } else if (Boolean.FALSE.equals(isLeader)) {
            followerIndices.add(ii);
          }
          // else neither leader nor follower, i.e. a node without a replica (for our collection)
        }
      }
      final List<Integer> leaderIndicesList = new ArrayList<Integer>(leaderIndices);
      final List<Integer> followerIndicesList = new ArrayList<Integer>(followerIndices);
      // first stop the followers (in no particular order)
      Collections.shuffle(followerIndicesList, random());
      for (Integer ii : followerIndicesList) {
        if (!leaderIndices.contains(ii)) {
          miniCluster.stopJettySolrRunner(jettys.get(ii.intValue()));
        }
      }
      // then stop the leaders (again in no particular order)
      Collections.shuffle(leaderIndicesList, random());
      for (Integer ii : leaderIndicesList) {
        miniCluster.stopJettySolrRunner(jettys.get(ii.intValue()));
      }
      // calculate restart order
      final List<Integer> restartIndicesList = new ArrayList<Integer>();
      Collections.shuffle(leaderIndicesList, random());
      restartIndicesList.addAll(leaderIndicesList);
      Collections.shuffle(followerIndicesList, random());
      restartIndicesList.addAll(followerIndicesList);
      if (random().nextBoolean())
        Collections.shuffle(restartIndicesList, random());
      // and then restart jettys in that order
      for (Integer ii : restartIndicesList) {
        final JettySolrRunner jetty = jettys.get(ii.intValue());
        if (!jetty.isRunning()) {
          miniCluster.startJettySolrRunner(jetty);
          assertTrue(jetty.isRunning());
        }
      }
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
      zkStateReader.forceUpdateCollection(collectionName);
      // re-query collection
      {
        final QueryResponse rsp = cloudSolrClient.query(query);
        assertEquals(numDocs, rsp.getResults().getNumFound());
      }
    }
  } finally {
    miniCluster.shutdown();
  }
}
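The leader/follower bookkeeping above boils down to reading the shard leaders out of the ClusterState. A small helper distilled from it is sketched below; the class and method names are illustrative, not part of the test.

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkStateReader;

class ShardLeaders {

  /** Returns the node names that currently host a shard leader for the collection. */
  static Set<String> leaderNodeNames(ZkStateReader zkStateReader, String collection) {
    Set<String> leaders = new HashSet<>();
    ClusterState clusterState = zkStateReader.getClusterState();
    Collection<Slice> slices = clusterState.getSlices(collection);
    if (slices == null) {
      return leaders;
    }
    for (Slice slice : slices) {
      if (slice.getLeader() != null) {
        leaders.add(slice.getLeader().getNodeName());
      }
    }
    return leaders;
  }
}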
Use of org.apache.solr.client.solrj.response.QueryResponse in project lucene-solr by apache.
Class TestMiniSolrCloudCluster, method testCollectionCreateSearchDelete.
@Test
public void testCollectionCreateSearchDelete() throws Exception {
  final String collectionName = "testcollection";
  MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
  final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
  try {
    assertNotNull(miniCluster.getZkServer());
    List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
    assertEquals(NUM_SERVERS, jettys.size());
    for (JettySolrRunner jetty : jettys) {
      assertTrue(jetty.isRunning());
    }
    // shut down a server
    log.info("#### Stopping a server");
    JettySolrRunner stoppedServer = miniCluster.stopJettySolrRunner(0);
    assertTrue(stoppedServer.isStopped());
    assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
    // create a server
    log.info("#### Starting a server");
    JettySolrRunner startedServer = miniCluster.startJettySolrRunner();
    assertTrue(startedServer.isRunning());
    assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
    // create collection
    log.info("#### Creating a collection");
    final String asyncId = (random().nextBoolean() ? null : "asyncId(" + collectionName + ".create)=" + random().nextInt());
    createCollection(miniCluster, collectionName, null, asyncId, null, null);
    ZkStateReader zkStateReader = miniCluster.getSolrClient().getZkStateReader();
    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
    // modify/query collection
    log.info("#### Updating and querying collection");
    cloudSolrClient.setDefaultCollection(collectionName);
    SolrInputDocument doc = new SolrInputDocument();
    doc.setField("id", "1");
    cloudSolrClient.add(doc);
    cloudSolrClient.commit();
    SolrQuery query = new SolrQuery();
    query.setQuery("*:*");
    QueryResponse rsp = cloudSolrClient.query(query);
    assertEquals(1, rsp.getResults().getNumFound());
    // remove a server not hosting any replicas
    zkStateReader.forceUpdateCollection(collectionName);
    ClusterState clusterState = zkStateReader.getClusterState();
    HashMap<String, JettySolrRunner> jettyMap = new HashMap<String, JettySolrRunner>();
    for (JettySolrRunner jetty : miniCluster.getJettySolrRunners()) {
      String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
      jettyMap.put(key, jetty);
    }
    Collection<Slice> slices = clusterState.getSlices(collectionName);
    // drop every server that hosts a replica, leaving only the servers without one
    for (Slice slice : slices) {
      jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
      for (Replica replica : slice.getReplicas()) {
        jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
      }
    }
    assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
    log.info("#### Stopping a server");
    JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
    jettys = miniCluster.getJettySolrRunners();
    for (int i = 0; i < jettys.size(); ++i) {
      if (jettys.get(i).equals(jettyToStop)) {
        miniCluster.stopJettySolrRunner(i);
        assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
      }
    }
    // re-create a server (to restore original NUM_SERVERS count)
    log.info("#### Starting a server");
    startedServer = miniCluster.startJettySolrRunner(jettyToStop);
    assertTrue(startedServer.isRunning());
    assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
    // delete the collection
    CollectionAdminRequest.deleteCollection(collectionName).process(miniCluster.getSolrClient());
    // create it again
    String asyncId2 = (random().nextBoolean() ? null : "asyncId(" + collectionName + ".create)=" + random().nextInt());
    createCollection(miniCluster, collectionName, null, asyncId2, null, null);
    AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
    // check that there's no left-over state
    assertEquals(0, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    cloudSolrClient.add(doc);
    cloudSolrClient.commit();
    assertEquals(1, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
  } finally {
    miniCluster.shutdown();
  }
}
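The delete-and-recreate step near the end goes through CollectionAdminRequest, which SolrJ also exposes for creation. A compact sketch of that lifecycle against a plain SolrClient follows; the configSet name and the single-shard, single-replica sizing are placeholders, not values from the test.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

class CollectionLifecycle {

  /** Drops the collection and creates it again from the given configSet; assumes the collection already exists. */
  static void recreate(SolrClient client, String collection, String configSet) throws Exception {
    CollectionAdminRequest.deleteCollection(collection).process(client);
    CollectionAdminRequest.createCollection(collection, configSet, 1, 1).process(client);
  }
}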
Use of org.apache.solr.client.solrj.response.QueryResponse in project lucene-solr by apache.
Class OneQuery, method getCount.
public long getCount(HttpSolrClient client, String core) {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("qt", "/select");
  params.set("q", "*:*");
  long numFound = 0;
  // point the client at the requested core before querying
  client.setBaseURL(baseUrl + core);
  try {
    QueryResponse response = client.query(params);
    numFound = response.getResults().getNumFound();
  } catch (Exception e) {
    e.printStackTrace();
  }
  return numFound;
}
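getCount only needs numFound, yet the query above also returns the default number of documents. Below is a variant of the same match-all count that sets rows=0 so no documents are transferred; this is a sketch, not code from the project.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.ModifiableSolrParams;

class CountOnlyQuery {

  /** Match-all count against the named core or collection, returning no documents. */
  static long countAll(SolrClient client, String core) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("qt", "/select");
    params.set("q", "*:*");
    // rows=0: only numFound is read from the response
    params.set("rows", 0);
    QueryResponse response = client.query(core, params);
    return response.getResults().getNumFound();
  }
}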